diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml
index acf91ea12a..cbfeb2ba5b 100644
--- a/.github/workflows/buf-lint.yml
+++ b/.github/workflows/buf-lint.yml
@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
+ - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml
index f52d20785f..8b964ef24c 100644
--- a/.github/workflows/buf.yml
+++ b/.github/workflows/buf.yml
@@ -13,7 +13,7 @@ jobs:
if: github.repository_owner == 'prometheus'
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
+ - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 1ea1f5efae..12ffc659c2 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -27,12 +27,12 @@ jobs:
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Initialize CodeQL
- uses: github/codeql-action/init@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+ uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
with:
languages: ${{ matrix.language }}
- name: Autobuild
- uses: github/codeql-action/autobuild@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+ uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+ uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 078084888a..c82fa87a1e 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -45,6 +45,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # tag=v3.25.8
+ uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # tag=v3.25.11
with:
sarif_file: results.sarif
diff --git a/.golangci.yml b/.golangci.yml
index 026d68a313..e924fe3d5b 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,12 +1,5 @@
run:
timeout: 15m
- skip-files:
- # Skip autogenerated files.
- - ^.*\.(pb|y)\.go$
- skip-dirs:
- # Copied it from a different source
- - storage/remote/otlptranslator/prometheusremotewrite
- - storage/remote/otlptranslator/prometheus
output:
sort-results: true
@@ -33,6 +26,13 @@ linters:
issues:
max-same-issues: 0
+ exclude-files:
+ # Skip autogenerated files.
+ - ^.*\.(pb|y)\.go$
+ exclude-dirs:
+ # Copied it from a different source
+ - storage/remote/otlptranslator/prometheusremotewrite
+ - storage/remote/otlptranslator/prometheus
exclude-rules:
- linters:
- gocritic
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2ad5bb50da..1c5c751442 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,14 @@ _Please add changes here that are only in the release-3.0 branch. These will be
## unreleased
+## 2.53.1 / 2024-07-10
+
+Fix a bug that would drop samples in remote-write if the sending flow stalled
+for longer than it takes to write one "WAL segment". How long this takes depends on the size
+of your Prometheus instance; as a rough guide, with 10 million series it is about 2-3 minutes.
+
+* [BUGFIX] Remote-write: stop dropping samples in catch-up #14446
+
## 2.53.0 / 2024-06-16
This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 75.
diff --git a/RELEASE.md b/RELEASE.md
index f9a42be6b8..0d3f7456cd 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -57,7 +57,8 @@ Release cadence of first pre-releases being cut is 6 weeks.
| v2.50 | 2024-01-16 | Augustin Husson (GitHub: @nexucis) |
| v2.51 | 2024-03-07 | Bryan Boreham (GitHub: @bboreham) |
| v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) |
-| v2.53 | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
+| v2.53 LTS | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
+| v2.54 | 2024-07-17 | Bryan Boreham (GitHub: @bboreham) |
If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md
index 2232602430..2faf65105e 100644
--- a/docs/command-line/prometheus.md
+++ b/docs/command-line/prometheus.md
@@ -30,7 +30,7 @@ The Prometheus monitoring server
| --web.console.templates | Path to the console template directory, available at /consoles. | `consoles` |
| --web.console.libraries | Path to the console library directory. | `console_libraries` |
| --web.page-title | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
-| --web.cors.origin | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com' | `.*` |
+| --web.cors.origin | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com' | `.*` |
| --storage.tsdb.path | Base path for metrics storage. Use with server mode only. | `data/` |
| --storage.tsdb.retention | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | |
| --storage.tsdb.retention.time | How long to retain samples in storage. When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | |
diff --git a/docs/querying/api.md b/docs/querying/api.md
index 28ee1b2b4b..efa244fbc8 100644
--- a/docs/querying/api.md
+++ b/docs/querying/api.md
@@ -260,7 +260,7 @@ URL query parameters:
series to return. At least one `match[]` argument must be provided.
- `start=`: Start timestamp.
- `end=`: End timestamp.
-- `limit=`: Maximum number of returned series. Optional.
+- `limit=`: Maximum number of returned series. Optional. 0 means disabled.
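+
+For example, `limit=100` caps the response at 100 series, while `limit=0` disables the limit.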
You can URL-encode these parameters directly in the request body by using the `POST` method and
`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large
@@ -311,7 +311,7 @@ URL query parameters:
- `end=`: End timestamp. Optional.
- `match[]=`: Repeated series selector argument that selects the
series from which to read the label names. Optional.
-- `limit=`: Maximum number of returned series. Optional.
+- `limit=`: Maximum number of returned label names. Optional. 0 means disabled.
The `data` section of the JSON response is a list of string label names.
@@ -362,7 +362,7 @@ URL query parameters:
- `end=`: End timestamp. Optional.
- `match[]=`: Repeated series selector argument that selects the
series from which to read the label values. Optional.
-- `limit=`: Maximum number of returned series. Optional.
+- `limit=`: Maximum number of returned label values. Optional. 0 means disabled.
The `data` section of the JSON response is a list of string label values.
@@ -693,7 +693,8 @@ URL query parameters:
- `rule_name[]=`: only return rules with the given rule name. If the parameter is repeated, rules with any of the provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done.
- `rule_group[]=`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done.
- `file[]=`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done.
-- `exclude_alerts=`: only return rules, do not return active alerts.
+- `exclude_alerts=`: only return rules, do not return active alerts.
+- `match[]=`: only return rules whose configured labels satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional.
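+
+For example, `match[]={severity="critical"}` (with `severity` as a hypothetical label on the rule definition) returns only rules whose configured labels include `severity="critical"`.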
```json
$ curl http://localhost:9090/api/v1/rules
diff --git a/docs/querying/basics.md b/docs/querying/basics.md
index 4d5012b7d8..bf43865c0e 100644
--- a/docs/querying/basics.md
+++ b/docs/querying/basics.md
@@ -81,6 +81,16 @@ Examples:
0x8f
-Inf
NaN
+
+
+As of version 2.54, float literals can also be written using the syntax of time durations, in which case the duration is converted to a float equal to the number of seconds it represents. This is an experimental feature and might still change.
+
+Examples:
+
+ 1s # Equivalent to 1.0
+ 2m # Equivalent to 120.0
+ 1ms # Equivalent to 0.001
+
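+For illustration (the metric name is a placeholder), the two comparisons below are equivalent under this experimental syntax:
+
+    rate(http_requests_total[5m]) > 0.2
+    rate(http_requests_total[5m]) > 200ms   # 200ms is read as the float 0.2
+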
## Time series selectors
@@ -224,6 +234,15 @@ Here are some examples of valid time durations:
5m
10s
+
+As of version 2.54, time durations can also be written as float literals, which are interpreted as the number of seconds of the duration. This is an experimental feature and might still change.
+
+Examples:
+
+ 1.0 # Equivalent to 1s
+ 0.001 # Equivalent to 1ms
+ 120 # Equivalent to 2m
+
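+For illustration (the metric name is a placeholder), the following selectors cover the same window under this experimental syntax:
+
+    rate(http_requests_total[300])   # same as rate(http_requests_total[5m])
+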
### Offset modifier
The `offset` modifier allows changing the time offset for individual
diff --git a/docs/querying/functions.md b/docs/querying/functions.md
index 9a552f697a..de65e693d0 100644
--- a/docs/querying/functions.md
+++ b/docs/querying/functions.md
@@ -98,8 +98,9 @@ vector.
clamps the sample values of all elements in `v` to have a lower limit of `min` and an upper limit of `max`.
Special cases:
-- Return an empty vector if `min > max`
-- Return `NaN` if `min` or `max` is `NaN`
+
+* Return an empty vector if `min > max`
+* Return `NaN` if `min` or `max` is `NaN`
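+
+For example, `clamp(v, 5, 2)` returns an empty vector because `min > max`, and `clamp(v, NaN, 10)` returns `NaN` for every element of `v`.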
## `clamp_max()`
@@ -349,8 +350,8 @@ a histogram.
Buckets of classic histograms are cumulative. Therefore, the following should always be the case:
-- The counts in the buckets are monotonically increasing (strictly non-decreasing).
-- A lack of observations between the upper limits of two consecutive buckets results in equal counts
+* The counts in the buckets are monotonically increasing (strictly non-decreasing).
+* A lack of observations between the upper limits of two consecutive buckets results in equal counts
in those two buckets.
However, floating point precision issues (e.g. small discrepancies introduced by computing of buckets
@@ -692,21 +693,21 @@ ignore histogram samples.
The trigonometric functions work in radians:
-- `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
-- `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
-- `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)).
-- `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
-- `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)).
-- `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
-- `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)).
-- `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
-- `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
-- `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
-- `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
-- `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).
+* `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
+* `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
+* `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)).
+* `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
+* `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)).
+* `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
+* `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)).
+* `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
+* `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
+* `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
+* `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
+* `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).
The following are useful for converting between degrees and radians:
-- `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
-- `pi()`: returns pi.
-- `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.
+* `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
+* `pi()`: returns pi.
+* `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.
diff --git a/docs/storage.md b/docs/storage.md
index 947960fe12..55d4309d37 100644
--- a/docs/storage.md
+++ b/docs/storage.md
@@ -137,6 +137,18 @@ will be used.
Expired block cleanup happens in the background. It may take up to two hours
to remove expired blocks. Blocks must be fully expired before they are removed.
+## Right-sizing retention size
+
+If you are using `storage.tsdb.retention.size` to set a size limit, you
+will want to consider the right size for this value relative to the storage you
+have allocated for Prometheus. It is wise to reduce the retention size to provide
+a buffer, ensuring that older entries will be removed before the allocated storage
+for Prometheus becomes full.
+
+At present, we recommend setting the retention size to, at most, 80-85% of your
+allocated Prometheus disk space. This increases the likelihood that older entries
+will be removed prior to hitting any disk limitations.
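+
+As a rough illustration, on a volume with 100GB allocated to Prometheus, a
+setting such as `--storage.tsdb.retention.size=80GB` keeps about 20% of the
+volume free as a buffer; treat the exact figure as an example rather than a
+recommendation for every workload.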
+
## Remote storage integrations
Prometheus's local storage is limited to a single node's scalability and durability.
diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod
index 2491bbe2db..348b27dfc7 100644
--- a/documentation/examples/remote_storage/go.mod
+++ b/documentation/examples/remote_storage/go.mod
@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/documentation/examples/remote_storage
-go 1.21
+go 1.21.0
require (
github.com/alecthomas/kingpin/v2 v2.4.0
diff --git a/go.mod b/go.mod
index ce2f0714a0..4107f3a099 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,8 @@
module github.com/prometheus/prometheus
-go 1.21
+go 1.21.0
+
+toolchain go1.22.5
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
@@ -60,8 +62,8 @@ require (
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.9.0
github.com/vultr/govultr/v2 v2.17.2
- go.opentelemetry.io/collector/pdata v1.8.0
- go.opentelemetry.io/collector/semconv v0.101.0
+ go.opentelemetry.io/collector/pdata v1.11.0
+ go.opentelemetry.io/collector/semconv v0.104.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0
go.opentelemetry.io/otel v1.27.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
@@ -83,7 +85,7 @@ require (
google.golang.org/api v0.183.0
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157
google.golang.org/grpc v1.64.0
- google.golang.org/protobuf v1.34.1
+ google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.29.3
diff --git a/go.sum b/go.sum
index 956b9d8949..dc4a7ecfd7 100644
--- a/go.sum
+++ b/go.sum
@@ -396,8 +396,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -723,10 +723,10 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764=
-go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY=
-go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q=
-go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
+go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE=
+go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE=
+go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k=
+go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
@@ -1119,8 +1119,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
-google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/promql/engine.go b/promql/engine.go
index 102614befd..3cfdd52d6c 100644
--- a/promql/engine.go
+++ b/promql/engine.go
@@ -3456,6 +3456,12 @@ func setOffsetForAtModifier(evalTime int64, expr parser.Expr) {
// required for correctness.
func detectHistogramStatsDecoding(expr parser.Expr) {
parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
+ if n, ok := node.(*parser.BinaryExpr); ok {
+ detectHistogramStatsDecoding(n.LHS)
+ detectHistogramStatsDecoding(n.RHS)
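+			// Returning a non-nil error stops parser.Inspect from descending
+			// further into this subtree; both operands have already been
+			// handled by the recursive calls above.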
+ return fmt.Errorf("stop")
+ }
+
n, ok := (node).(*parser.VectorSelector)
if !ok {
return nil
diff --git a/promql/engine_test.go b/promql/engine_test.go
index 283af1a5f6..7d4aed80b7 100644
--- a/promql/engine_test.go
+++ b/promql/engine_test.go
@@ -238,11 +238,11 @@ func (q *errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*lab
return errSeriesSet{err: q.err}
}
-func (*errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
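+// LabelValues satisfies the updated storage.Querier interface, which now
+// receives *storage.LabelHints; this test stub ignores the hints.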
+func (*errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
-func (*errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (*errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
func (*errQuerier) Close() error { return nil }
diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y
index d84acc37c5..b99e67424f 100644
--- a/promql/parser/generated_parser.y
+++ b/promql/parser/generated_parser.y
@@ -43,7 +43,6 @@ import (
int int64
uint uint64
float float64
- duration time.Duration
}
@@ -176,8 +175,7 @@ START_METRIC_SELECTOR
%type int
%type uint
%type number series_value signed_number signed_or_unsigned_number
-%type step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector
-%type duration maybe_duration
+%type step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector
%start start
@@ -218,7 +216,7 @@ expr :
| binary_expr
| function_call
| matrix_selector
- | number_literal
+ | number_duration_literal
| offset_expr
| paren_expr
| string_literal
@@ -415,18 +413,22 @@ paren_expr : LEFT_PAREN expr RIGHT_PAREN
* Offset modifiers.
*/
-offset_expr: expr OFFSET duration
+offset_expr: expr OFFSET number_duration_literal
{
- yylex.(*parser).addOffset($1, $3)
- $$ = $1
+ numLit, _ := $3.(*NumberLiteral)
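+                        // The literal's value is seconds as a float; convert it
+                        // to a time.Duration with millisecond precision.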
+ dur := time.Duration(numLit.Val * 1000) * time.Millisecond
+ yylex.(*parser).addOffset($1, dur)
+ $$ = $1
}
- | expr OFFSET SUB duration
+ | expr OFFSET SUB number_duration_literal
{
- yylex.(*parser).addOffset($1, -$4)
- $$ = $1
+ numLit, _ := $4.(*NumberLiteral)
+ dur := time.Duration(numLit.Val * 1000) * time.Millisecond
+ yylex.(*parser).addOffset($1, -dur)
+ $$ = $1
}
| expr OFFSET error
- { yylex.(*parser).unexpected("offset", "duration"); $$ = $1 }
+ { yylex.(*parser).unexpected("offset", "number or duration"); $$ = $1 }
;
/*
* @ modifiers.
@@ -452,7 +454,7 @@ at_modifier_preprocessors: START | END;
* Subquery and range selectors.
*/
-matrix_selector : expr LEFT_BRACKET duration RIGHT_BRACKET
+matrix_selector : expr LEFT_BRACKET number_duration_literal RIGHT_BRACKET
{
var errMsg string
vs, ok := $1.(*VectorSelector)
@@ -469,32 +471,44 @@ matrix_selector : expr LEFT_BRACKET duration RIGHT_BRACKET
yylex.(*parser).addParseErrf(errRange, errMsg)
}
+ numLit, _ := $3.(*NumberLiteral)
$$ = &MatrixSelector{
VectorSelector: $1.(Expr),
- Range: $3,
+ Range: time.Duration(numLit.Val * 1000) * time.Millisecond,
EndPos: yylex.(*parser).lastClosing,
}
}
;
-subquery_expr : expr LEFT_BRACKET duration COLON maybe_duration RIGHT_BRACKET
+subquery_expr : expr LEFT_BRACKET number_duration_literal COLON number_duration_literal RIGHT_BRACKET
{
+ numLitRange, _ := $3.(*NumberLiteral)
+ numLitStep, _ := $5.(*NumberLiteral)
$$ = &SubqueryExpr{
Expr: $1.(Expr),
- Range: $3,
- Step: $5,
-
+ Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond,
+ Step: time.Duration(numLitStep.Val * 1000) * time.Millisecond,
EndPos: $6.Pos + 1,
}
}
- | expr LEFT_BRACKET duration COLON duration error
+ | expr LEFT_BRACKET number_duration_literal COLON RIGHT_BRACKET
+ {
+ numLitRange, _ := $3.(*NumberLiteral)
+ $$ = &SubqueryExpr{
+ Expr: $1.(Expr),
+ Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond,
+ Step: 0,
+ EndPos: $5.Pos + 1,
+ }
+ }
+ | expr LEFT_BRACKET number_duration_literal COLON number_duration_literal error
{ yylex.(*parser).unexpected("subquery selector", "\"]\""); $$ = $1 }
- | expr LEFT_BRACKET duration COLON error
- { yylex.(*parser).unexpected("subquery selector", "duration or \"]\""); $$ = $1 }
- | expr LEFT_BRACKET duration error
+ | expr LEFT_BRACKET number_duration_literal COLON error
+ { yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\""); $$ = $1 }
+ | expr LEFT_BRACKET number_duration_literal error
{ yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\""); $$ = $1 }
| expr LEFT_BRACKET error
- { yylex.(*parser).unexpected("subquery selector", "duration"); $$ = $1 }
+ { yylex.(*parser).unexpected("subquery selector", "number or duration"); $$ = $1 }
;
/*
@@ -866,16 +880,43 @@ match_op : EQL | NEQ | EQL_REGEX | NEQ_REGEX ;
* Literals.
*/
-number_literal : NUMBER
+number_duration_literal : NUMBER
{
- $$ = &NumberLiteral{
+ $$ = &NumberLiteral{
Val: yylex.(*parser).number($1.Val),
PosRange: $1.PositionRange(),
+ }
}
+ | DURATION
+ {
+ var err error
+ var dur time.Duration
+ dur, err = parseDuration($1.Val)
+ if err != nil {
+ yylex.(*parser).addParseErr($1.PositionRange(), err)
+ }
+ $$ = &NumberLiteral{
+ Val: dur.Seconds(),
+ PosRange: $1.PositionRange(),
+ }
}
;
-number : NUMBER { $$ = yylex.(*parser).number($1.Val) } ;
+number : NUMBER
+ {
+ $$ = yylex.(*parser).number($1.Val)
+ }
+ | DURATION
+ {
+ var err error
+ var dur time.Duration
+ dur, err = parseDuration($1.Val)
+ if err != nil {
+ yylex.(*parser).addParseErr($1.PositionRange(), err)
+ }
+ $$ = dur.Seconds()
+ }
+ ;
signed_number : ADD number { $$ = $2 }
| SUB number { $$ = -$2 }
@@ -897,17 +938,6 @@ int : SUB uint { $$ = -int64($2) }
| uint { $$ = int64($1) }
;
-duration : DURATION
- {
- var err error
- $$, err = parseDuration($1.Val)
- if err != nil {
- yylex.(*parser).addParseErr($1.PositionRange(), err)
- }
- }
- ;
-
-
string_literal : STRING
{
$$ = &StringLiteral{
@@ -931,11 +961,6 @@ string_identifier : STRING
* Wrappers for optional arguments.
*/
-maybe_duration : /* empty */
- {$$ = 0}
- | duration
- ;
-
maybe_grouping_labels: /* empty */ { $$ = nil }
| grouping_labels
;
diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go
index 07899c0a00..423082dafe 100644
--- a/promql/parser/generated_parser.y.go
+++ b/promql/parser/generated_parser.y.go
@@ -31,7 +31,6 @@ type yySymType struct {
int int64
uint uint64
float float64
- duration time.Duration
}
const EQL = 57346
@@ -236,16 +235,10 @@ var yyExca = [...]int16{
1, -1,
-2, 0,
-1, 37,
- 1, 136,
- 10, 136,
- 24, 136,
+ 1, 137,
+ 10, 137,
+ 24, 137,
-2, 0,
- -1, 60,
- 2, 174,
- 15, 174,
- 78, 174,
- 84, 174,
- -2, 100,
-1, 61,
2, 175,
15, 175,
@@ -257,7 +250,7 @@ var yyExca = [...]int16{
15, 176,
78, 176,
84, 176,
- -2, 103,
+ -2, 102,
-1, 63,
2, 177,
15, 177,
@@ -275,19 +268,19 @@ var yyExca = [...]int16{
15, 179,
78, 179,
84, 179,
- -2, 110,
+ -2, 106,
-1, 66,
2, 180,
15, 180,
78, 180,
84, 180,
- -2, 112,
+ -2, 111,
-1, 67,
2, 181,
15, 181,
78, 181,
84, 181,
- -2, 114,
+ -2, 113,
-1, 68,
2, 182,
15, 182,
@@ -317,45 +310,21 @@ var yyExca = [...]int16{
15, 186,
78, 186,
84, 186,
- -2, 122,
+ -2, 119,
-1, 73,
2, 187,
15, 187,
78, 187,
84, 187,
-2, 123,
- -1, 199,
- 12, 237,
- 13, 237,
- 18, 237,
- 19, 237,
- 25, 237,
- 40, 237,
- 46, 237,
- 47, 237,
- 50, 237,
- 56, 237,
- 61, 237,
- 62, 237,
- 63, 237,
- 64, 237,
- 65, 237,
- 66, 237,
- 67, 237,
- 68, 237,
- 69, 237,
- 70, 237,
- 71, 237,
- 72, 237,
- 73, 237,
- 74, 237,
- 78, 237,
- 82, 237,
- 84, 237,
- 87, 237,
- 88, 237,
- -2, 0,
+ -1, 74,
+ 2, 188,
+ 15, 188,
+ 78, 188,
+ 84, 188,
+ -2, 124,
-1, 200,
+ 9, 237,
12, 237,
13, 237,
18, 237,
@@ -386,151 +355,170 @@ var yyExca = [...]int16{
87, 237,
88, 237,
-2, 0,
- -1, 221,
- 21, 235,
- -2, 0,
- -1, 292,
- 21, 236,
+ -1, 201,
+ 9, 237,
+ 12, 237,
+ 13, 237,
+ 18, 237,
+ 19, 237,
+ 25, 237,
+ 40, 237,
+ 46, 237,
+ 47, 237,
+ 50, 237,
+ 56, 237,
+ 61, 237,
+ 62, 237,
+ 63, 237,
+ 64, 237,
+ 65, 237,
+ 66, 237,
+ 67, 237,
+ 68, 237,
+ 69, 237,
+ 70, 237,
+ 71, 237,
+ 72, 237,
+ 73, 237,
+ 74, 237,
+ 78, 237,
+ 82, 237,
+ 84, 237,
+ 87, 237,
+ 88, 237,
-2, 0,
}
const yyPrivate = 57344
-const yyLast = 793
+const yyLast = 728
var yyAct = [...]int16{
- 155, 330, 328, 274, 335, 152, 225, 39, 191, 148,
- 288, 287, 156, 117, 81, 177, 227, 106, 105, 6,
- 154, 108, 107, 197, 132, 198, 237, 109, 199, 200,
- 159, 59, 243, 325, 324, 110, 321, 159, 189, 268,
- 348, 301, 265, 127, 159, 192, 349, 264, 290, 195,
- 176, 160, 159, 269, 308, 175, 319, 195, 160, 347,
- 239, 240, 346, 112, 241, 113, 299, 161, 174, 270,
- 263, 111, 254, 160, 161, 228, 230, 232, 233, 234,
- 242, 244, 247, 248, 249, 250, 251, 255, 256, 161,
- 114, 229, 231, 235, 236, 238, 245, 246, 108, 266,
- 258, 252, 253, 329, 109, 157, 158, 159, 2, 3,
- 4, 5, 307, 160, 162, 257, 262, 299, 172, 166,
- 169, 217, 104, 164, 110, 165, 150, 306, 193, 161,
- 178, 104, 179, 151, 305, 183, 196, 179, 185, 261,
- 194, 201, 202, 203, 204, 205, 206, 207, 208, 209,
- 210, 211, 212, 213, 214, 215, 128, 227, 88, 216,
- 120, 218, 219, 100, 336, 103, 168, 237, 97, 98,
- 118, 181, 100, 243, 103, 87, 181, 224, 259, 167,
- 149, 180, 182, 121, 187, 76, 180, 182, 120, 260,
- 102, 35, 124, 7, 10, 296, 151, 123, 118, 102,
- 295, 239, 240, 267, 78, 241, 116, 186, 285, 286,
- 122, 121, 289, 254, 318, 294, 228, 230, 232, 233,
- 234, 242, 244, 247, 248, 249, 250, 251, 255, 256,
- 317, 292, 229, 231, 235, 236, 238, 245, 246, 316,
- 315, 314, 252, 253, 133, 134, 135, 136, 137, 138,
- 139, 140, 141, 142, 143, 144, 145, 146, 147, 313,
- 312, 311, 310, 309, 320, 293, 297, 298, 300, 273,
- 302, 222, 151, 8, 85, 221, 272, 37, 303, 304,
- 276, 277, 275, 282, 284, 281, 283, 278, 279, 280,
- 220, 163, 126, 50, 125, 36, 1, 291, 151, 77,
- 83, 49, 322, 323, 48, 83, 47, 104, 46, 327,
- 82, 131, 332, 333, 334, 82, 331, 45, 184, 338,
- 337, 340, 339, 80, 44, 43, 341, 342, 129, 53,
- 76, 343, 55, 86, 88, 22, 54, 345, 170, 171,
- 42, 130, 56, 41, 97, 98, 40, 350, 100, 101,
- 103, 87, 58, 51, 190, 9, 9, 74, 344, 271,
- 84, 188, 223, 18, 19, 79, 119, 20, 153, 57,
- 226, 52, 115, 75, 0, 102, 0, 0, 60, 61,
- 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
- 72, 73, 0, 0, 0, 13, 0, 0, 0, 24,
- 0, 30, 0, 0, 31, 32, 38, 0, 53, 76,
- 0, 55, 326, 0, 22, 54, 0, 0, 0, 0,
- 0, 56, 0, 276, 277, 275, 282, 284, 281, 283,
- 278, 279, 280, 0, 0, 0, 74, 0, 0, 0,
- 0, 0, 18, 19, 0, 0, 20, 0, 0, 0,
- 0, 0, 75, 0, 0, 0, 0, 60, 61, 62,
+ 155, 331, 329, 275, 336, 152, 226, 39, 192, 44,
+ 289, 288, 156, 118, 82, 178, 55, 106, 6, 53,
+ 77, 109, 56, 133, 108, 22, 54, 110, 107, 172,
+ 300, 198, 57, 199, 200, 201, 60, 111, 326, 151,
+ 325, 302, 321, 308, 266, 154, 55, 75, 128, 105,
+ 291, 300, 160, 18, 19, 309, 54, 20, 307, 218,
+ 105, 320, 159, 76, 113, 306, 114, 330, 61, 62,
63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
- 73, 0, 0, 0, 13, 0, 0, 0, 24, 0,
- 30, 0, 0, 31, 32, 53, 76, 0, 55, 0,
- 0, 22, 54, 0, 0, 0, 0, 0, 56, 0,
+ 73, 74, 112, 161, 180, 13, 87, 89, 265, 24,
+ 101, 30, 104, 150, 31, 32, 115, 98, 99, 162,
+ 109, 101, 102, 104, 88, 349, 110, 2, 3, 4,
+ 5, 264, 196, 149, 111, 163, 160, 103, 337, 173,
+ 167, 170, 84, 182, 348, 166, 159, 347, 103, 194,
+ 157, 158, 83, 181, 183, 165, 184, 197, 77, 186,
+ 185, 195, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 129, 269, 263,
+ 217, 160, 219, 220, 55, 38, 35, 53, 77, 267,
+ 56, 159, 270, 22, 54, 121, 297, 188, 7, 259,
+ 57, 296, 262, 161, 319, 119, 318, 317, 271, 179,
+ 261, 180, 161, 260, 258, 75, 295, 84, 122, 162,
+ 187, 18, 19, 316, 268, 20, 315, 83, 162, 286,
+ 287, 76, 314, 290, 313, 81, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 182, 86, 292, 13, 55, 10, 312, 24, 311, 30,
+ 181, 183, 31, 32, 54, 79, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 310, 127, 36, 126, 1, 121, 298, 299, 301,
+ 164, 303, 49, 48, 190, 294, 119, 55, 160, 304,
+ 305, 193, 55, 160, 117, 196, 223, 54, 159, 122,
+ 222, 228, 54, 159, 293, 350, 50, 47, 46, 169,
+ 132, 238, 78, 323, 324, 221, 45, 244, 43, 161,
+ 328, 322, 168, 333, 334, 335, 130, 332, 171, 177,
+ 339, 338, 341, 340, 176, 162, 125, 342, 343, 42,
+ 59, 124, 344, 9, 9, 240, 241, 175, 346, 242,
+ 131, 8, 41, 40, 123, 37, 51, 255, 351, 191,
+ 229, 231, 233, 234, 235, 243, 245, 248, 249, 250,
+ 251, 252, 256, 257, 345, 272, 230, 232, 236, 237,
+ 239, 246, 247, 85, 189, 55, 253, 254, 53, 77,
+ 224, 56, 80, 120, 22, 54, 153, 58, 227, 52,
+ 116, 57, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 228, 0, 0, 0, 0, 75, 0, 0, 0,
+ 0, 238, 18, 19, 0, 0, 20, 244, 0, 0,
+ 0, 225, 76, 0, 0, 0, 0, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 0, 0, 0, 13, 240, 241, 0, 24, 242,
+ 30, 0, 0, 31, 32, 0, 0, 255, 105, 0,
+ 229, 231, 233, 234, 235, 243, 245, 248, 249, 250,
+ 251, 252, 256, 257, 0, 0, 230, 232, 236, 237,
+ 239, 246, 247, 17, 77, 89, 253, 254, 0, 22,
+ 0, 0, 327, 0, 0, 98, 99, 0, 0, 101,
+ 0, 104, 88, 277, 278, 276, 283, 285, 282, 284,
+ 279, 280, 281, 17, 35, 0, 0, 18, 19, 22,
+ 0, 20, 0, 0, 0, 0, 103, 0, 0, 0,
+ 0, 0, 11, 12, 14, 15, 16, 21, 23, 25,
+ 26, 27, 28, 29, 33, 34, 0, 18, 19, 13,
+ 0, 20, 0, 24, 0, 30, 0, 0, 31, 32,
+ 0, 0, 11, 12, 14, 15, 16, 21, 23, 25,
+ 26, 27, 28, 29, 33, 34, 105, 0, 0, 13,
+ 0, 0, 0, 24, 174, 30, 0, 0, 31, 32,
+ 0, 0, 0, 0, 0, 105, 0, 0, 0, 0,
+ 0, 0, 87, 89, 90, 0, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 0, 101, 102, 104,
+ 88, 87, 89, 90, 0, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 274, 101, 102, 104, 88,
+ 105, 0, 273, 0, 103, 0, 277, 278, 276, 283,
+ 285, 282, 284, 279, 280, 281, 0, 0, 0, 105,
+ 0, 0, 0, 103, 0, 0, 87, 89, 90, 0,
+ 91, 92, 93, 0, 95, 96, 97, 98, 99, 100,
+ 0, 101, 102, 104, 88, 87, 89, 90, 0, 91,
+ 92, 0, 0, 95, 96, 0, 98, 99, 100, 0,
+ 101, 102, 104, 88, 0, 0, 0, 0, 103, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 74, 0, 17, 76, 0, 0, 18,
- 19, 22, 0, 20, 0, 0, 0, 0, 0, 75,
- 0, 0, 0, 0, 60, 61, 62, 63, 64, 65,
- 66, 67, 68, 69, 70, 71, 72, 73, 0, 18,
- 19, 13, 0, 20, 0, 24, 0, 30, 0, 0,
- 31, 32, 0, 0, 11, 12, 14, 15, 16, 21,
- 23, 25, 26, 27, 28, 29, 33, 34, 17, 35,
- 0, 13, 0, 0, 22, 24, 0, 30, 0, 0,
- 31, 32, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 18, 19, 0, 0, 20, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 11, 12, 14,
- 15, 16, 21, 23, 25, 26, 27, 28, 29, 33,
- 34, 104, 0, 0, 13, 0, 0, 0, 24, 173,
- 30, 0, 0, 31, 32, 0, 0, 0, 0, 0,
- 104, 0, 0, 0, 0, 0, 0, 86, 88, 89,
- 0, 90, 91, 92, 93, 94, 95, 96, 97, 98,
- 99, 0, 100, 101, 103, 87, 86, 88, 89, 0,
- 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
- 0, 100, 101, 103, 87, 104, 0, 0, 0, 102,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 104, 0, 0, 0, 102, 0,
- 0, 86, 88, 89, 0, 90, 91, 92, 0, 94,
- 95, 96, 97, 98, 99, 0, 100, 101, 103, 87,
- 86, 88, 89, 0, 90, 91, 0, 0, 94, 95,
- 0, 97, 98, 99, 0, 100, 101, 103, 87, 0,
- 0, 0, 0, 102, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 102,
+ 0, 0, 0, 0, 0, 0, 0, 103,
}
var yyPact = [...]int16{
- 17, 183, 566, 566, 396, 503, -1000, -1000, -1000, 178,
+ 16, 168, 501, 501, 155, 471, -1000, -1000, -1000, 153,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, 303, -1000, 272, -1000, 646,
+ -1000, -1000, -1000, -1000, -1000, 195, -1000, 229, -1000, 581,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, 20, 109, -1000, 473, -1000, 473, 172, -1000,
+ -1000, -1000, 22, 99, -1000, -1000, 366, -1000, 366, 125,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, 186, -1000, -1000, 190,
- -1000, -1000, 290, -1000, 19, -1000, -53, -53, -53, -53,
- -53, -53, -53, -53, -53, -53, -53, -53, -53, -53,
- -53, -53, 124, 18, 289, 109, -57, -1000, 164, 164,
- 317, -1000, 627, 108, -1000, 48, -1000, -1000, 128, 133,
- -1000, -1000, -1000, 298, -1000, 182, -1000, 33, 473, -1000,
- -58, -51, -1000, 473, 473, 473, 473, 473, 473, 473,
- 473, 473, 473, 473, 473, 473, 473, 473, -1000, 187,
- -1000, -1000, -1000, 106, -1000, -1000, -1000, -1000, -1000, -1000,
- 88, 88, 269, -1000, -1000, -1000, -1000, 155, -1000, -1000,
- 93, -1000, 646, -1000, -1000, 158, -1000, 114, -1000, -1000,
- -1000, -1000, -1000, 45, -1000, -1000, -1000, -1000, -1000, 16,
- 73, 13, -1000, -1000, -1000, 252, 117, 164, 164, 164,
- 164, 108, 108, 293, 293, 293, 710, 691, 293, 293,
- 710, 108, 108, 293, 108, 117, -1000, 26, -1000, -1000,
- -1000, 263, -1000, 193, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, 264, -1000, -1000,
+ 324, -1000, -1000, 260, -1000, 24, -1000, -54, -54, -54,
+ -54, -54, -54, -54, -54, -54, -54, -54, -54, -54,
+ -54, -54, -54, 37, 43, 268, 99, -57, -1000, 297,
+ 297, 7, -1000, 562, 35, -1000, 317, -1000, -1000, 187,
+ 80, -1000, -1000, -1000, 120, -1000, 175, -1000, 269, 366,
+ -1000, -50, -45, -1000, 366, 366, 366, 366, 366, 366,
+ 366, 366, 366, 366, 366, 366, 366, 366, 366, -1000,
+ 225, -1000, -1000, 44, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 107, 107, 284, -1000, -1000, -1000, -1000, 399, -1000,
+ -1000, 172, -1000, 581, -1000, -1000, 173, -1000, 157, -1000,
+ -1000, -1000, -1000, -1000, 86, -1000, -1000, -1000, -1000, -1000,
+ 18, 143, 132, -1000, -1000, -1000, 618, 444, 297, 297,
+ 297, 297, 35, 35, 46, 46, 46, 645, 626, 46,
+ 46, 645, 35, 35, 46, 35, 444, -1000, 28, -1000,
+ -1000, -1000, 273, -1000, 174, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 473, -1000,
- -1000, -1000, -1000, -1000, -1000, 98, 98, 15, 98, 41,
- 41, 110, 37, -1000, -1000, 257, 256, 255, 254, 253,
- 235, 234, 233, 224, 208, -1000, -1000, -1000, -1000, -1000,
- -1000, 35, 262, -1000, -1000, 14, -1000, 646, -1000, -1000,
- -1000, 98, -1000, 8, 7, 395, -1000, -1000, -1000, 47,
- 11, 88, 88, 88, 150, 150, 47, 150, 47, -1000,
- -1000, -1000, -1000, -1000, 98, 98, -1000, -1000, -1000, 98,
- -1000, -1000, -1000, -1000, -1000, -1000, 88, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, 38, -1000, 25, -1000, -1000, -1000,
- -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 366,
+ -1000, -1000, -1000, -1000, -1000, -1000, 32, 32, 15, 32,
+ 96, 96, 41, 38, -1000, -1000, 255, 232, 230, 208,
+ 206, 200, 197, 181, 180, 178, -1000, -1000, -1000, -1000,
+ -1000, -1000, 40, -1000, -1000, -1000, 289, -1000, 581, -1000,
+ -1000, -1000, 32, -1000, 14, 12, 475, -1000, -1000, -1000,
+ 11, 152, 107, 107, 107, 104, 104, 11, 104, 11,
+ -1000, -1000, -1000, -1000, -1000, 32, 32, -1000, -1000, -1000,
+ 32, -1000, -1000, -1000, -1000, -1000, -1000, 107, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 103, -1000, 274, -1000, -1000,
+ -1000, -1000,
}
var yyPgo = [...]int16{
- 0, 372, 13, 371, 6, 15, 370, 352, 369, 368,
- 366, 194, 273, 365, 14, 362, 10, 11, 361, 360,
- 8, 359, 3, 4, 358, 2, 1, 0, 354, 12,
- 5, 353, 346, 18, 156, 343, 341, 7, 340, 338,
- 17, 328, 31, 325, 324, 317, 311, 308, 306, 304,
- 301, 293, 9, 297, 296, 295,
+ 0, 390, 13, 389, 6, 15, 388, 330, 387, 386,
+ 383, 235, 341, 382, 14, 380, 10, 11, 374, 373,
+ 8, 365, 3, 4, 364, 2, 1, 0, 349, 12,
+ 5, 346, 343, 17, 157, 342, 340, 7, 329, 318,
+ 28, 316, 36, 308, 9, 306, 300, 298, 297, 273,
+ 272, 296, 265, 263,
}
var yyR1 = [...]int8{
- 0, 54, 54, 54, 54, 54, 54, 54, 37, 37,
+ 0, 52, 52, 52, 52, 52, 52, 52, 37, 37,
37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
32, 32, 32, 32, 33, 33, 35, 35, 35, 35,
35, 35, 35, 35, 35, 35, 35, 35, 35, 35,
@@ -538,22 +526,22 @@ var yyR1 = [...]int8{
41, 16, 16, 16, 16, 15, 15, 15, 4, 4,
38, 40, 40, 39, 39, 39, 47, 45, 45, 45,
31, 31, 31, 9, 9, 43, 49, 49, 49, 49,
- 49, 50, 51, 51, 51, 42, 42, 42, 1, 1,
- 1, 2, 2, 2, 2, 2, 2, 2, 12, 12,
+ 49, 49, 50, 51, 51, 51, 42, 42, 42, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 12,
+ 12, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 11, 11, 11, 11, 13, 13,
- 13, 14, 14, 14, 14, 55, 19, 19, 19, 19,
- 18, 18, 18, 18, 18, 18, 18, 18, 18, 28,
- 28, 28, 20, 20, 20, 20, 21, 21, 21, 22,
- 22, 22, 22, 22, 22, 22, 22, 22, 22, 23,
- 23, 24, 24, 24, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 6, 6,
+ 7, 7, 7, 7, 7, 11, 11, 11, 11, 13,
+ 13, 13, 14, 14, 14, 14, 53, 19, 19, 19,
+ 19, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 28, 28, 28, 20, 20, 20, 20, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 24, 24, 24, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 8, 8, 5,
- 5, 5, 5, 44, 27, 29, 29, 30, 30, 26,
- 25, 25, 52, 48, 10, 53, 53, 17, 17,
+ 6, 6, 6, 6, 6, 6, 6, 6, 8, 8,
+ 5, 5, 5, 5, 44, 44, 27, 27, 29, 29,
+ 30, 30, 26, 25, 25, 48, 10, 17, 17,
}
var yyR2 = [...]int8{
@@ -564,101 +552,101 @@ var yyR2 = [...]int8{
4, 4, 1, 0, 1, 3, 3, 1, 1, 3,
3, 3, 4, 2, 1, 3, 1, 2, 1, 1,
2, 3, 2, 3, 1, 2, 3, 3, 4, 3,
- 3, 5, 3, 1, 1, 4, 6, 6, 5, 4,
- 3, 2, 2, 1, 1, 3, 4, 2, 3, 1,
- 2, 3, 3, 1, 3, 3, 2, 1, 2, 1,
+ 3, 5, 3, 1, 1, 4, 6, 5, 6, 5,
+ 4, 3, 2, 2, 1, 1, 3, 4, 2, 3,
+ 1, 2, 3, 3, 1, 3, 3, 2, 1, 2,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 3, 4, 2, 0, 3, 1,
- 2, 3, 3, 2, 1, 2, 0, 3, 2, 1,
- 1, 3, 1, 3, 4, 1, 3, 5, 5, 1,
- 1, 1, 4, 3, 3, 2, 3, 1, 2, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 4,
- 3, 3, 1, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 3, 4, 2, 0, 3,
+ 1, 2, 3, 3, 2, 1, 2, 0, 3, 2,
+ 1, 1, 3, 1, 3, 4, 1, 3, 5, 5,
+ 1, 1, 1, 4, 3, 3, 2, 3, 1, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 3, 3, 1, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 2, 2, 1, 1, 1,
- 2, 1, 1, 1, 1, 0, 1, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+ 1, 1, 1, 2, 1, 1, 1, 0, 1,
}
var yyChk = [...]int16{
- -1000, -54, 91, 92, 93, 94, 2, 10, -12, -7,
+ -1000, -52, 91, 92, 93, 94, 2, 10, -12, -7,
-11, 61, 62, 78, 63, 64, 65, 12, 46, 47,
50, 66, 18, 67, 82, 68, 69, 70, 71, 72,
- 84, 87, 88, 73, 74, 13, -55, -12, 10, -37,
+ 84, 87, 88, 73, 74, 13, -53, -12, 10, -37,
-32, -35, -38, -43, -44, -45, -47, -48, -49, -50,
- -51, -31, -3, 12, 19, 15, 25, -8, -7, -42,
- 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
- 71, 72, 73, 74, 40, 56, 13, -51, -11, -13,
- 20, -14, 12, 2, -19, 2, 40, 58, 41, 42,
- 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
- 55, 56, 82, 57, 14, -33, -40, 2, 78, 84,
- 15, -40, -37, -37, -42, -1, 20, -2, 12, -10,
- 2, 25, 20, 7, 2, 4, 2, 24, -34, -41,
- -36, -46, 77, -34, -34, -34, -34, -34, -34, -34,
- -34, -34, -34, -34, -34, -34, -34, -34, -52, 56,
- 2, 9, -30, -9, 2, -27, -29, 87, 88, 19,
- 40, 56, -52, 2, -40, -33, -16, 15, 2, -16,
- -39, 22, -37, 22, 20, 7, 2, -5, 2, 4,
- 53, 43, 54, -5, 20, -14, 25, 2, -18, 5,
- -28, -20, 12, -27, -29, 16, -37, 81, 83, 79,
- 80, -37, -37, -37, -37, -37, -37, -37, -37, -37,
- -37, -37, -37, -37, -37, -37, -52, 15, -27, -27,
- 21, 6, 2, -15, 22, -4, -6, 2, 61, 77,
- 62, 78, 63, 64, 65, 79, 80, 12, 81, 46,
- 47, 50, 66, 18, 67, 82, 83, 68, 69, 70,
- 71, 72, 87, 88, 58, 73, 74, 22, 7, 20,
- -2, 25, 2, 25, 2, 26, 26, -29, 26, 40,
- 56, -21, 24, 17, -22, 30, 28, 29, 35, 36,
- 37, 33, 31, 34, 32, -16, -16, -17, -16, -17,
- 22, -53, -52, 2, 22, 7, 2, -37, -26, 19,
- -26, 26, -26, -20, -20, 24, 17, 2, 17, 6,
- 6, 6, 6, 6, 6, 6, 6, 6, 6, 21,
- 2, 22, -4, -26, 26, 26, 17, -22, -25, 56,
- -26, -30, -27, -27, -27, -23, 14, -23, -25, -23,
- -25, -26, -26, -26, -24, -27, 24, 21, 2, 21,
- -27,
+ -51, -31, -3, 12, 19, 9, 15, 25, -8, -7,
+ -42, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 40, 56, 13, -51, -11,
+ -13, 20, -14, 12, 2, -19, 2, 40, 58, 41,
+ 42, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 55, 56, 82, 57, 14, -33, -40, 2, 78,
+ 84, 15, -40, -37, -37, -42, -1, 20, -2, 12,
+ -10, 2, 25, 20, 7, 2, 4, 2, 24, -34,
+ -41, -36, -46, 77, -34, -34, -34, -34, -34, -34,
+ -34, -34, -34, -34, -34, -34, -34, -34, -34, -44,
+ 56, 2, -30, -9, 2, -27, -29, 87, 88, 19,
+ 9, 40, 56, -44, 2, -40, -33, -16, 15, 2,
+ -16, -39, 22, -37, 22, 20, 7, 2, -5, 2,
+ 4, 53, 43, 54, -5, 20, -14, 25, 2, -18,
+ 5, -28, -20, 12, -27, -29, 16, -37, 81, 83,
+ 79, 80, -37, -37, -37, -37, -37, -37, -37, -37,
+ -37, -37, -37, -37, -37, -37, -37, -44, 15, -27,
+ -27, 21, 6, 2, -15, 22, -4, -6, 2, 61,
+ 77, 62, 78, 63, 64, 65, 79, 80, 12, 81,
+ 46, 47, 50, 66, 18, 67, 82, 83, 68, 69,
+ 70, 71, 72, 87, 88, 58, 73, 74, 22, 7,
+ 20, -2, 25, 2, 25, 2, 26, 26, -29, 26,
+ 40, 56, -21, 24, 17, -22, 30, 28, 29, 35,
+ 36, 37, 33, 31, 34, 32, -16, -16, -17, -16,
+ -17, 22, -44, 21, 2, 22, 7, 2, -37, -26,
+ 19, -26, 26, -26, -20, -20, 24, 17, 2, 17,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 21, 2, 22, -4, -26, 26, 26, 17, -22, -25,
+ 56, -26, -30, -27, -27, -27, -23, 14, -23, -25,
+ -23, -25, -26, -26, -26, -24, -27, 24, 21, 2,
+ 21, -27,
}
var yyDef = [...]int16{
- 0, -2, 127, 127, 0, 0, 7, 6, 1, 127,
- 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
- 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
- 119, 120, 121, 122, 123, 0, 2, -2, 3, 4,
+ 0, -2, 128, 128, 0, 0, 7, 6, 1, 128,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 0, 2, -2, 3, 4,
8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
- 18, 19, 0, 106, 223, 0, 233, 0, 83, 84,
- -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
- -2, -2, -2, -2, 217, 218, 0, 5, 98, 0,
- 126, 129, 0, 134, 135, 139, 43, 43, 43, 43,
+ 18, 19, 0, 107, 224, 225, 0, 235, 0, 84,
+ 85, -2, -2, -2, -2, -2, -2, -2, -2, -2,
+ -2, -2, -2, -2, -2, 218, 219, 0, 5, 99,
+ 0, 127, 130, 0, 135, 136, 140, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
- 43, 43, 0, 0, 0, 0, 22, 23, 0, 0,
- 0, 60, 0, 81, 82, 0, 87, 89, 0, 93,
- 97, 234, 124, 0, 130, 0, 133, 138, 0, 42,
- 47, 48, 44, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 67, 0,
- 69, 232, 70, 0, 72, 227, 228, 73, 74, 224,
- 0, 0, 0, 80, 20, 21, 24, 0, 54, 25,
- 0, 62, 64, 66, 85, 0, 90, 0, 96, 219,
- 220, 221, 222, 0, 125, 128, 131, 132, 137, 140,
- 142, 145, 149, 150, 151, 0, 26, 0, 0, -2,
- -2, 27, 28, 29, 30, 31, 32, 33, 34, 35,
- 36, 37, 38, 39, 40, 41, 68, 0, 225, 226,
- 75, -2, 79, 0, 53, 56, 58, 59, 188, 189,
+ 43, 43, 43, 0, 0, 0, 0, 22, 23, 0,
+ 0, 0, 60, 0, 82, 83, 0, 88, 90, 0,
+ 94, 98, 236, 125, 0, 131, 0, 134, 139, 0,
+ 42, 47, 48, 44, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 67,
+ 0, 69, 70, 0, 72, 230, 231, 73, 74, 226,
+ 227, 0, 0, 0, 81, 20, 21, 24, 0, 54,
+ 25, 0, 62, 64, 66, 86, 0, 91, 0, 97,
+ 220, 221, 222, 223, 0, 126, 129, 132, 133, 138,
+ 141, 143, 146, 150, 151, 152, 0, 26, 0, 0,
+ -2, -2, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 68, 0, 228,
+ 229, 75, 0, 80, 0, 53, 56, 58, 59, 189,
190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
- 210, 211, 212, 213, 214, 215, 216, 61, 65, 86,
- 88, 91, 95, 92, 94, 0, 0, 0, 0, 0,
- 0, 0, 0, 155, 157, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 45, 46, 49, 238, 50,
- 71, 0, -2, 78, 51, 0, 57, 63, 141, 229,
- 143, 0, 146, 0, 0, 0, 153, 158, 154, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 76,
- 77, 52, 55, 144, 0, 0, 152, 156, 159, 0,
- 231, 160, 161, 162, 163, 164, 0, 165, 166, 167,
- 168, 147, 148, 230, 0, 172, 0, 170, 173, 169,
- 171,
+ 210, 211, 212, 213, 214, 215, 216, 217, 61, 65,
+ 87, 89, 92, 96, 93, 95, 0, 0, 0, 0,
+ 0, 0, 0, 0, 156, 158, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 45, 46, 49, 238,
+ 50, 71, 0, 77, 79, 51, 0, 57, 63, 142,
+ 232, 144, 0, 147, 0, 0, 0, 154, 159, 155,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 76, 78, 52, 55, 145, 0, 0, 153, 157, 160,
+ 0, 234, 161, 162, 163, 164, 165, 0, 166, 167,
+ 168, 169, 148, 149, 233, 0, 173, 0, 171, 174,
+ 170, 172,
}
var yyTok1 = [...]int8{
@@ -1304,19 +1292,23 @@ yydefault:
case 67:
yyDollar = yyS[yypt-3 : yypt+1]
{
- yylex.(*parser).addOffset(yyDollar[1].node, yyDollar[3].duration)
+ numLit, _ := yyDollar[3].node.(*NumberLiteral)
+ dur := time.Duration(numLit.Val*1000) * time.Millisecond
+ yylex.(*parser).addOffset(yyDollar[1].node, dur)
yyVAL.node = yyDollar[1].node
}
case 68:
yyDollar = yyS[yypt-4 : yypt+1]
{
- yylex.(*parser).addOffset(yyDollar[1].node, -yyDollar[4].duration)
+ numLit, _ := yyDollar[4].node.(*NumberLiteral)
+ dur := time.Duration(numLit.Val*1000) * time.Millisecond
+ yylex.(*parser).addOffset(yyDollar[1].node, -dur)
yyVAL.node = yyDollar[1].node
}
case 69:
yyDollar = yyS[yypt-3 : yypt+1]
{
- yylex.(*parser).unexpected("offset", "duration")
+ yylex.(*parser).unexpected("offset", "number or duration")
yyVAL.node = yyDollar[1].node
}
case 70:
@@ -1355,48 +1347,61 @@ yydefault:
yylex.(*parser).addParseErrf(errRange, errMsg)
}
+ numLit, _ := yyDollar[3].node.(*NumberLiteral)
yyVAL.node = &MatrixSelector{
VectorSelector: yyDollar[1].node.(Expr),
- Range: yyDollar[3].duration,
+ Range: time.Duration(numLit.Val*1000) * time.Millisecond,
EndPos: yylex.(*parser).lastClosing,
}
}
case 76:
yyDollar = yyS[yypt-6 : yypt+1]
{
+ numLitRange, _ := yyDollar[3].node.(*NumberLiteral)
+ numLitStep, _ := yyDollar[5].node.(*NumberLiteral)
yyVAL.node = &SubqueryExpr{
- Expr: yyDollar[1].node.(Expr),
- Range: yyDollar[3].duration,
- Step: yyDollar[5].duration,
-
+ Expr: yyDollar[1].node.(Expr),
+ Range: time.Duration(numLitRange.Val*1000) * time.Millisecond,
+ Step: time.Duration(numLitStep.Val*1000) * time.Millisecond,
EndPos: yyDollar[6].item.Pos + 1,
}
}
case 77:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ {
+ numLitRange, _ := yyDollar[3].node.(*NumberLiteral)
+ yyVAL.node = &SubqueryExpr{
+ Expr: yyDollar[1].node.(Expr),
+ Range: time.Duration(numLitRange.Val*1000) * time.Millisecond,
+ Step: 0,
+ EndPos: yyDollar[5].item.Pos + 1,
+ }
+ }
+ case 78:
yyDollar = yyS[yypt-6 : yypt+1]
{
yylex.(*parser).unexpected("subquery selector", "\"]\"")
yyVAL.node = yyDollar[1].node
}
- case 78:
+ case 79:
yyDollar = yyS[yypt-5 : yypt+1]
{
- yylex.(*parser).unexpected("subquery selector", "duration or \"]\"")
+ yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\"")
yyVAL.node = yyDollar[1].node
}
- case 79:
+ case 80:
yyDollar = yyS[yypt-4 : yypt+1]
{
yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"")
yyVAL.node = yyDollar[1].node
}
- case 80:
+ case 81:
yyDollar = yyS[yypt-3 : yypt+1]
{
- yylex.(*parser).unexpected("subquery selector", "duration")
+ yylex.(*parser).unexpected("subquery selector", "number or duration")
yyVAL.node = yyDollar[1].node
}
- case 81:
+ case 82:
yyDollar = yyS[yypt-2 : yypt+1]
{
if nl, ok := yyDollar[2].node.(*NumberLiteral); ok {
@@ -1409,7 +1414,7 @@ yydefault:
yyVAL.node = &UnaryExpr{Op: yyDollar[1].item.Typ, Expr: yyDollar[2].node.(Expr), StartPos: yyDollar[1].item.Pos}
}
}
- case 82:
+ case 83:
yyDollar = yyS[yypt-2 : yypt+1]
{
vs := yyDollar[2].node.(*VectorSelector)
@@ -1418,7 +1423,7 @@ yydefault:
yylex.(*parser).assembleVectorSelector(vs)
yyVAL.node = vs
}
- case 83:
+ case 84:
yyDollar = yyS[yypt-1 : yypt+1]
{
vs := &VectorSelector{
@@ -1429,14 +1434,14 @@ yydefault:
yylex.(*parser).assembleVectorSelector(vs)
yyVAL.node = vs
}
- case 84:
+ case 85:
yyDollar = yyS[yypt-1 : yypt+1]
{
vs := yyDollar[1].node.(*VectorSelector)
yylex.(*parser).assembleVectorSelector(vs)
yyVAL.node = vs
}
- case 85:
+ case 86:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = &VectorSelector{
@@ -1444,7 +1449,7 @@ yydefault:
PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item),
}
}
- case 86:
+ case 87:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.node = &VectorSelector{
@@ -1452,7 +1457,7 @@ yydefault:
PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[4].item),
}
}
- case 87:
+ case 88:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.node = &VectorSelector{
@@ -1460,7 +1465,7 @@ yydefault:
PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[2].item),
}
}
- case 88:
+ case 89:
yyDollar = yyS[yypt-3 : yypt+1]
{
if yyDollar[1].matchers != nil {
@@ -1469,38 +1474,32 @@ yydefault:
yyVAL.matchers = yyDollar[1].matchers
}
}
- case 89:
+ case 90:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher}
}
- case 90:
+ case 91:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("label matching", "\",\" or \"}\"")
yyVAL.matchers = yyDollar[1].matchers
}
- case 91:
- yyDollar = yyS[yypt-3 : yypt+1]
- {
- yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item)
- }
case 92:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item)
}
case 93:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ {
+ yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item)
+ }
+ case 94:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.matcher = yylex.(*parser).newMetricNameMatcher(yyDollar[1].item)
}
- case 94:
- yyDollar = yyS[yypt-3 : yypt+1]
- {
- yylex.(*parser).unexpected("label matching", "string")
- yyVAL.matcher = nil
- }
case 95:
yyDollar = yyS[yypt-3 : yypt+1]
{
@@ -1508,89 +1507,95 @@ yydefault:
yyVAL.matcher = nil
}
case 96:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ {
+ yylex.(*parser).unexpected("label matching", "string")
+ yyVAL.matcher = nil
+ }
+ case 97:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("label matching", "label matching operator")
yyVAL.matcher = nil
}
- case 97:
+ case 98:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("label matching", "identifier or \"}\"")
yyVAL.matcher = nil
}
- case 98:
+ case 99:
yyDollar = yyS[yypt-2 : yypt+1]
{
b := labels.NewBuilder(yyDollar[2].labels)
b.Set(labels.MetricName, yyDollar[1].item.Val)
yyVAL.labels = b.Labels()
}
- case 99:
+ case 100:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.labels = yyDollar[1].labels
}
- case 124:
- yyDollar = yyS[yypt-3 : yypt+1]
- {
- yyVAL.labels = labels.New(yyDollar[2].lblList...)
- }
case 125:
- yyDollar = yyS[yypt-4 : yypt+1]
+ yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.labels = labels.New(yyDollar[2].lblList...)
}
case 126:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ {
+ yyVAL.labels = labels.New(yyDollar[2].lblList...)
+ }
+ case 127:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.labels = labels.New()
}
- case 127:
+ case 128:
yyDollar = yyS[yypt-0 : yypt+1]
{
yyVAL.labels = labels.New()
}
- case 128:
+ case 129:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label)
}
- case 129:
+ case 130:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.lblList = []labels.Label{yyDollar[1].label}
}
- case 130:
+ case 131:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("label set", "\",\" or \"}\"")
yyVAL.lblList = yyDollar[1].lblList
}
- case 131:
+ case 132:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)}
}
- case 132:
+ case 133:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("label set", "string")
yyVAL.label = labels.Label{}
}
- case 133:
+ case 134:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("label set", "\"=\"")
yyVAL.label = labels.Label{}
}
- case 134:
+ case 135:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("label set", "identifier or \"}\"")
yyVAL.label = labels.Label{}
}
- case 135:
+ case 136:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).generatedParserResult = &seriesDescription{
@@ -1598,33 +1603,33 @@ yydefault:
values: yyDollar[2].series,
}
}
- case 136:
+ case 137:
yyDollar = yyS[yypt-0 : yypt+1]
{
yyVAL.series = []SequenceValue{}
}
- case 137:
+ case 138:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...)
}
- case 138:
+ case 139:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.series = yyDollar[1].series
}
- case 139:
+ case 140:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("series values", "")
yyVAL.series = nil
}
- case 140:
+ case 141:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.series = []SequenceValue{{Omitted: true}}
}
- case 141:
+ case 142:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1632,12 +1637,12 @@ yydefault:
yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true})
}
}
- case 142:
+ case 143:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}}
}
- case 143:
+ case 144:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1646,7 +1651,7 @@ yydefault:
yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float})
}
}
- case 144:
+ case 145:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1656,12 +1661,12 @@ yydefault:
yyDollar[1].float += yyDollar[2].float
}
}
- case 145:
+ case 146:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}}
}
- case 146:
+ case 147:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1671,7 +1676,7 @@ yydefault:
//$1 += $2
}
}
- case 147:
+ case 148:
yyDollar = yyS[yypt-5 : yypt+1]
{
val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint)
@@ -1680,7 +1685,7 @@ yydefault:
}
yyVAL.series = val
}
- case 148:
+ case 149:
yyDollar = yyS[yypt-5 : yypt+1]
{
val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint)
@@ -1689,7 +1694,7 @@ yydefault:
}
yyVAL.series = val
}
- case 149:
+ case 150:
yyDollar = yyS[yypt-1 : yypt+1]
{
if yyDollar[1].item.Val != "stale" {
@@ -1697,124 +1702,124 @@ yydefault:
}
yyVAL.float = math.Float64frombits(value.StaleNaN)
}
- case 152:
- yyDollar = yyS[yypt-4 : yypt+1]
- {
- yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors)
- }
case 153:
- yyDollar = yyS[yypt-3 : yypt+1]
+ yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors)
}
case 154:
yyDollar = yyS[yypt-3 : yypt+1]
{
- m := yylex.(*parser).newMap()
- yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m)
+ yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors)
}
case 155:
- yyDollar = yyS[yypt-2 : yypt+1]
+ yyDollar = yyS[yypt-3 : yypt+1]
{
m := yylex.(*parser).newMap()
yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m)
}
case 156:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ {
+ m := yylex.(*parser).newMap()
+ yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m)
+ }
+ case 157:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors))
}
- case 157:
+ case 158:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.descriptors = yyDollar[1].descriptors
}
- case 158:
+ case 159:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. buckets:[5 10 7]")
}
- case 159:
- yyDollar = yyS[yypt-3 : yypt+1]
- {
- yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["schema"] = yyDollar[3].int
- }
case 160:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["sum"] = yyDollar[3].float
+ yyVAL.descriptors["schema"] = yyDollar[3].int
}
case 161:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["count"] = yyDollar[3].float
+ yyVAL.descriptors["sum"] = yyDollar[3].float
}
case 162:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["z_bucket"] = yyDollar[3].float
+ yyVAL.descriptors["count"] = yyDollar[3].float
}
case 163:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float
+ yyVAL.descriptors["z_bucket"] = yyDollar[3].float
}
case 164:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set
+ yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float
}
case 165:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set
+ yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set
}
case 166:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["offset"] = yyDollar[3].int
+ yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set
}
case 167:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set
+ yyVAL.descriptors["offset"] = yyDollar[3].int
}
case 168:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["n_offset"] = yyDollar[3].int
+ yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set
}
case 169:
- yyDollar = yyS[yypt-4 : yypt+1]
+ yyDollar = yyS[yypt-3 : yypt+1]
{
- yyVAL.bucket_set = yyDollar[2].bucket_set
+ yyVAL.descriptors = yylex.(*parser).newMap()
+ yyVAL.descriptors["n_offset"] = yyDollar[3].int
}
case 170:
- yyDollar = yyS[yypt-3 : yypt+1]
+ yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.bucket_set = yyDollar[2].bucket_set
}
case 171:
yyDollar = yyS[yypt-3 : yypt+1]
{
- yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float)
+ yyVAL.bucket_set = yyDollar[2].bucket_set
}
case 172:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ {
+ yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float)
+ }
+ case 173:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.bucket_set = []float64{yyDollar[1].float}
}
- case 223:
+ case 224:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.node = &NumberLiteral{
@@ -1822,22 +1827,47 @@ yydefault:
PosRange: yyDollar[1].item.PositionRange(),
}
}
- case 224:
+ case 225:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ {
+ var err error
+ var dur time.Duration
+ dur, err = parseDuration(yyDollar[1].item.Val)
+ if err != nil {
+ yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err)
+ }
+ yyVAL.node = &NumberLiteral{
+ Val: dur.Seconds(),
+ PosRange: yyDollar[1].item.PositionRange(),
+ }
+ }
+ case 226:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val)
}
- case 225:
+ case 227:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ {
+ var err error
+ var dur time.Duration
+ dur, err = parseDuration(yyDollar[1].item.Val)
+ if err != nil {
+ yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err)
+ }
+ yyVAL.float = dur.Seconds()
+ }
+ case 228:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.float = yyDollar[2].float
}
- case 226:
+ case 229:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.float = -yyDollar[2].float
}
- case 229:
+ case 232:
yyDollar = yyS[yypt-1 : yypt+1]
{
var err error
@@ -1846,26 +1876,17 @@ yydefault:
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err)
}
}
- case 230:
+ case 233:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.int = -int64(yyDollar[2].uint)
}
- case 231:
+ case 234:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.int = int64(yyDollar[1].uint)
}
- case 232:
- yyDollar = yyS[yypt-1 : yypt+1]
- {
- var err error
- yyVAL.duration, err = parseDuration(yyDollar[1].item.Val)
- if err != nil {
- yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err)
- }
- }
- case 233:
+ case 235:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.node = &StringLiteral{
@@ -1873,7 +1894,7 @@ yydefault:
PosRange: yyDollar[1].item.PositionRange(),
}
}
- case 234:
+ case 236:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.item = Item{
@@ -1882,11 +1903,6 @@ yydefault:
Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val),
}
}
- case 235:
- yyDollar = yyS[yypt-0 : yypt+1]
- {
- yyVAL.duration = 0
- }
case 237:
yyDollar = yyS[yypt-0 : yypt+1]
{
diff --git a/promql/parser/lex.go b/promql/parser/lex.go
index 8c7fbb89b9..18abd49ead 100644
--- a/promql/parser/lex.go
+++ b/promql/parser/lex.go
@@ -478,7 +478,7 @@ func lexStatements(l *Lexer) stateFn {
skipSpaces(l)
}
l.bracketOpen = true
- return lexDuration
+ return lexNumberOrDuration
case r == ']':
if !l.bracketOpen {
return l.errorf("unexpected right bracket %q", r)
@@ -846,18 +846,6 @@ func lexLineComment(l *Lexer) stateFn {
return lexStatements
}
-func lexDuration(l *Lexer) stateFn {
- if l.scanNumber() {
- return l.errorf("missing unit character in duration")
- }
- if !acceptRemainingDuration(l) {
- return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos])
- }
- l.backup()
- l.emit(DURATION)
- return lexStatements
-}
-
// lexNumber scans a number: decimal, hex, oct or float.
func lexNumber(l *Lexer) stateFn {
if !l.scanNumber() {
@@ -909,6 +897,7 @@ func acceptRemainingDuration(l *Lexer) bool {
// scanNumber scans numbers of different formats. The scanned Item is
// not necessarily a valid number. This case is caught by the parser.
func (l *Lexer) scanNumber() bool {
+ initialPos := l.pos
// Modify the digit pattern if the number is hexadecimal.
digitPattern := "0123456789"
// Disallow hexadecimal in series descriptions as the syntax is ambiguous.
@@ -980,7 +969,10 @@ func (l *Lexer) scanNumber() bool {
// Handle digits at the end since we already consumed before this loop.
l.acceptRun(digitPattern)
}
-
+ // Empty string is not a valid number.
+ if l.pos == initialPos {
+ return false
+ }
// Next thing must not be alphanumeric unless it's the times token
// for series repetitions.
if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) {
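For orientation, the net effect of the grammar and lexer changes above is that durations and bare numbers become interchangeable: a duration literal is converted to its value in seconds, and a bare number inside brackets, after offset, or after @ is read as seconds. A minimal sketch of that idea, assuming a hypothetical helper name and Go-style duration units (this is not the parser's actual code path):

```go
package sketch

import (
	"strconv"
	"time"
)

// parseNumberOrDuration treats a bare number as a duration in seconds and
// falls back to regular duration parsing when a unit is present.
func parseNumberOrDuration(s string) (time.Duration, error) {
	if f, err := strconv.ParseFloat(s, 64); err == nil {
		return time.Duration(f * float64(time.Second)), nil
	}
	return time.ParseDuration(s)
}
```

Under this reading, foo[5], foo[4.018] and foo[4s18ms] describe ranges of 5s, 4.018s and 4.018s respectively, which is what the new parse_test.go cases below assert.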
diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go
index a4fe28e5b8..35c5f54a18 100644
--- a/promql/parser/parse_test.go
+++ b/promql/parser/parse_test.go
@@ -2133,6 +2133,115 @@ var testExpr = []struct {
EndPos: 25,
},
},
+ {
+ input: `test{a="b"}[5m] OFFSET 3600`,
+ expected: &MatrixSelector{
+ VectorSelector: &VectorSelector{
+ Name: "test",
+ OriginalOffset: 1 * time.Hour,
+ LabelMatchers: []*labels.Matcher{
+ MustLabelMatcher(labels.MatchEqual, "a", "b"),
+ MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
+ },
+ PosRange: posrange.PositionRange{
+ Start: 0,
+ End: 11,
+ },
+ },
+ Range: 5 * time.Minute,
+ EndPos: 27,
+ },
+ },
+ {
+ input: `foo[3ms] @ 2.345`,
+ expected: &MatrixSelector{
+ VectorSelector: &VectorSelector{
+ Name: "foo",
+ Timestamp: makeInt64Pointer(2345),
+ LabelMatchers: []*labels.Matcher{
+ MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+ },
+ PosRange: posrange.PositionRange{
+ Start: 0,
+ End: 3,
+ },
+ },
+ Range: 3 * time.Millisecond,
+ EndPos: 16,
+ },
+ },
+ {
+ input: `foo[4s180ms] @ 2.345`,
+ expected: &MatrixSelector{
+ VectorSelector: &VectorSelector{
+ Name: "foo",
+ Timestamp: makeInt64Pointer(2345),
+ LabelMatchers: []*labels.Matcher{
+ MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+ },
+ PosRange: posrange.PositionRange{
+ Start: 0,
+ End: 3,
+ },
+ },
+ Range: 4*time.Second + 180*time.Millisecond,
+ EndPos: 20,
+ },
+ },
+ {
+ input: `foo[4.18] @ 2.345`,
+ expected: &MatrixSelector{
+ VectorSelector: &VectorSelector{
+ Name: "foo",
+ Timestamp: makeInt64Pointer(2345),
+ LabelMatchers: []*labels.Matcher{
+ MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+ },
+ PosRange: posrange.PositionRange{
+ Start: 0,
+ End: 3,
+ },
+ },
+ Range: 4*time.Second + 180*time.Millisecond,
+ EndPos: 17,
+ },
+ },
+ {
+ input: `foo[4s18ms] @ 2.345`,
+ expected: &MatrixSelector{
+ VectorSelector: &VectorSelector{
+ Name: "foo",
+ Timestamp: makeInt64Pointer(2345),
+ LabelMatchers: []*labels.Matcher{
+ MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+ },
+ PosRange: posrange.PositionRange{
+ Start: 0,
+ End: 3,
+ },
+ },
+ Range: 4*time.Second + 18*time.Millisecond,
+ EndPos: 19,
+ },
+ },
+ {
+ input: `foo[4.018] @ 2.345`,
+ expected: &MatrixSelector{
+ VectorSelector: &VectorSelector{
+ Name: "foo",
+ Timestamp: makeInt64Pointer(2345),
+ LabelMatchers: []*labels.Matcher{
+ MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+ },
+ PosRange: posrange.PositionRange{
+ Start: 0,
+ End: 3,
+ },
+ },
+ Range: 4*time.Second + 18*time.Millisecond,
+ EndPos: 18,
+ },
+ },
{
input: `test{a="b"}[5y] @ 1603774699`,
expected: &MatrixSelector{
@@ -2152,15 +2261,50 @@ var testExpr = []struct {
EndPos: 28,
},
},
+ {
+ input: "test[5]",
+ expected: &MatrixSelector{
+ VectorSelector: &VectorSelector{
+ Name: "test",
+ LabelMatchers: []*labels.Matcher{
+ MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
+ },
+ PosRange: posrange.PositionRange{
+ Start: 0,
+ End: 4,
+ },
+ },
+ Range: 5 * time.Second,
+ EndPos: 7,
+ },
+ },
+ {
+ input: `some_metric[5m] @ 1m`,
+ expected: &MatrixSelector{
+ VectorSelector: &VectorSelector{
+ Name: "some_metric",
+ Timestamp: makeInt64Pointer(60000),
+ LabelMatchers: []*labels.Matcher{
+ MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some_metric"),
+ },
+ PosRange: posrange.PositionRange{
+ Start: 0,
+ End: 11,
+ },
+ },
+ Range: 5 * time.Minute,
+ EndPos: 20,
+ },
+ },
{
input: `foo[5mm]`,
fail: true,
- errMsg: "bad duration syntax: \"5mm\"",
+ errMsg: "bad number or duration syntax: \"5mm\"",
},
{
input: `foo[5m1]`,
fail: true,
- errMsg: "bad duration syntax: \"5m1\"",
+ errMsg: "bad number or duration syntax: \"5m1\"",
},
{
input: `foo[5m:1m1]`,
@@ -2194,17 +2338,12 @@ var testExpr = []struct {
{
input: `foo[]`,
fail: true,
- errMsg: "missing unit character in duration",
+ errMsg: "bad number or duration syntax: \"\"",
},
{
- input: `foo[1]`,
+ input: `foo[-1]`,
fail: true,
- errMsg: "missing unit character in duration",
- },
- {
- input: `some_metric[5m] OFFSET 1`,
- fail: true,
- errMsg: "unexpected number \"1\" in offset, expected duration",
+ errMsg: "bad number or duration syntax: \"\"",
},
{
input: `some_metric[5m] OFFSET 1mm`,
@@ -2214,18 +2353,13 @@ var testExpr = []struct {
{
input: `some_metric[5m] OFFSET`,
fail: true,
- errMsg: "unexpected end of input in offset, expected duration",
+ errMsg: "unexpected end of input in offset, expected number or duration",
},
{
input: `some_metric OFFSET 1m[5m]`,
fail: true,
errMsg: "1:22: parse error: no offset modifiers allowed before range",
},
- {
- input: `some_metric[5m] @ 1m`,
- fail: true,
- errMsg: "1:19: parse error: unexpected duration \"1m\" in @, expected timestamp",
- },
{
input: `some_metric[5m] @`,
fail: true,
@@ -2910,6 +3044,11 @@ var testExpr = []struct {
errMsg: "illegal character U+002E '.' in escape sequence",
},
// Subquery.
+ {
+ input: `foo{bar="baz"}[`,
+ fail: true,
+ errMsg: `1:16: parse error: bad number or duration syntax: ""`,
+ },
{
input: `foo{bar="baz"}[10m:6s]`,
expected: &SubqueryExpr{
diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go
index f3a773be8d..83137e661b 100644
--- a/promql/promqltest/test.go
+++ b/promql/promqltest/test.go
@@ -55,6 +55,11 @@ const (
DefaultMaxSamplesPerQuery = 10000
)
+type TBRun interface {
+ testing.TB
+ Run(string, func(*testing.T)) bool
+}
+
var testStartTime = time.Unix(0, 0).UTC()
// LoadedStorage returns storage with generated data using the provided load statements.
@@ -89,7 +94,7 @@ func NewTestEngine(enablePerStepStats bool, lookbackDelta time.Duration, maxSamp
}
// RunBuiltinTests runs an acceptance test suite against the provided engine.
-func RunBuiltinTests(t *testing.T, engine promql.QueryEngine) {
+func RunBuiltinTests(t TBRun, engine promql.QueryEngine) {
t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
parser.EnableExperimentalFunctions = true
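Since *testing.T already provides both the testing.TB methods and Run, it satisfies the new TBRun interface, so existing callers of RunBuiltinTests compile unchanged. A compile-time check sketching this, assuming the import path shown in the diff header above:

```go
package promqltest_test

import (
	"testing"

	"github.com/prometheus/prometheus/promql/promqltest"
)

// *testing.T implements TBRun; custom harnesses only need testing.TB plus
// a Run(string, func(*testing.T)) bool method to drive the builtin suite.
var _ promqltest.TBRun = (*testing.T)(nil)
```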
diff --git a/promql/promqltest/testdata/at_modifier.test b/promql/promqltest/testdata/at_modifier.test
index 7174a84237..4091f7eabf 100644
--- a/promql/promqltest/testdata/at_modifier.test
+++ b/promql/promqltest/testdata/at_modifier.test
@@ -10,22 +10,54 @@ eval instant at 10s metric @ 100
metric{job="1"} 10
metric{job="2"} 20
+eval instant at 10s metric @ 100s
+ metric{job="1"} 10
+ metric{job="2"} 20
+
+eval instant at 10s metric @ 1m40s
+ metric{job="1"} 10
+ metric{job="2"} 20
+
eval instant at 10s metric @ 100 offset 50s
metric{job="1"} 5
metric{job="2"} 10
+eval instant at 10s metric @ 100 offset 50
+ metric{job="1"} 5
+ metric{job="2"} 10
+
eval instant at 10s metric offset 50s @ 100
metric{job="1"} 5
metric{job="2"} 10
+eval instant at 10s metric offset 50 @ 100
+ metric{job="1"} 5
+ metric{job="2"} 10
+
eval instant at 10s metric @ 0 offset -50s
metric{job="1"} 5
metric{job="2"} 10
+eval instant at 10s metric @ 0 offset -50
+ metric{job="1"} 5
+ metric{job="2"} 10
+
eval instant at 10s metric offset -50s @ 0
metric{job="1"} 5
metric{job="2"} 10
+eval instant at 10s metric offset -50 @ 0
+ metric{job="1"} 5
+ metric{job="2"} 10
+
+eval instant at 10s metric @ 0 offset -50s
+ metric{job="1"} 5
+ metric{job="2"} 10
+
+eval instant at 10s metric @ 0 offset -50
+ metric{job="1"} 5
+ metric{job="2"} 10
+
eval instant at 10s -metric @ 100
{job="1"} -10
{job="2"} -20
@@ -48,6 +80,12 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100 offset 50s)
eval instant at 25s sum_over_time(metric{job="1"}[100s] offset 50s @ 100)
{job="1"} 15
+eval instant at 25s sum_over_time(metric{job="1"}[100] @ 100 offset 50)
+ {job="1"} 15
+
+eval instant at 25s sum_over_time(metric{job="1"}[100] offset 50s @ 100)
+ {job="1"} 15
+
# Different timestamps.
eval instant at 25s metric{job="1"} @ 50 + metric{job="1"} @ 100
{job="1"} 15
@@ -58,6 +96,9 @@ eval instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metri
eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100) + label_replace(sum_over_time(metric{job="2"}[100s] @ 100), "job", "1", "", "")
{job="1"} 165
+eval instant at 25s sum_over_time(metric{job="1"}[100] @ 100) + label_replace(sum_over_time(metric{job="2"}[100] @ 100), "job", "1", "", "")
+ {job="1"} 165
+
# Subqueries.
# 10*(1+2+...+9) + 10.
@@ -72,6 +113,10 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] @ 100 offset 20s)
eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] offset 20s @ 100)
{job="1"} 288
+# 10*(1+2+...+7) + 8.
+eval instant at 25s sum_over_time(metric{job="1"}[100:1] offset 20 @ 100)
+ {job="1"} 288
+
# Subquery with different timestamps.
# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.
diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test
index dcb178d610..209881c1da 100644
--- a/promql/promqltest/testdata/functions.test
+++ b/promql/promqltest/testdata/functions.test
@@ -12,6 +12,11 @@ eval instant at 50m resets(http_requests[10m])
{path="/bar"} 0
{path="/biz"} 0
+eval instant at 50m resets(http_requests[600])
+ {path="/foo"} 0
+ {path="/bar"} 0
+ {path="/biz"} 0
+
eval instant at 50m resets(http_requests[20m])
{path="/foo"} 1
{path="/bar"} 0
@@ -250,10 +255,16 @@ eval instant at 50m deriv(testcounter_reset_middle[100m])
eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600)
{} 70
+eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h)
+ {} 70
+
# intercept at t = 3000+3600 = 6600
eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
{} 76.81818181818181
+eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 1h)
+ {} 76.81818181818181
+
# intercept at t = 600+3600 = 4200
eval instant at 10m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
{} 51.36363636363637
diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test
index 1836d7429c..653a634082 100644
--- a/promql/promqltest/testdata/histograms.test
+++ b/promql/promqltest/testdata/histograms.test
@@ -73,22 +73,32 @@ eval instant at 50m histogram_count(testhistogram3)
{start="positive"} 110
{start="negative"} 20
+# Classic way of accessing the count still works.
+eval instant at 50m testhistogram3_count
+ testhistogram3_count{start="positive"} 110
+ testhistogram3_count{start="negative"} 20
+
# Test histogram_sum.
eval instant at 50m histogram_sum(testhistogram3)
{start="positive"} 330
{start="negative"} 80
-# Test histogram_avg.
+# Classic way of accessing the sum still works.
+eval instant at 50m testhistogram3_sum
+ testhistogram3_sum{start="positive"} 330
+ testhistogram3_sum{start="negative"} 80
+
+# Test histogram_avg. This has no classic equivalent.
eval instant at 50m histogram_avg(testhistogram3)
{start="positive"} 3
{start="negative"} 4
-# Test histogram_stddev.
+# Test histogram_stddev. This has no classic equivalent.
eval instant at 50m histogram_stddev(testhistogram3)
{start="positive"} 2.8189265757336734
{start="negative"} 4.182715937754936
-# Test histogram_stdvar.
+# Test histogram_stdvar. This has no classic equivalent.
eval instant at 50m histogram_stdvar(testhistogram3)
{start="positive"} 7.946347039377573
{start="negative"} 17.495112615949154
@@ -98,142 +108,284 @@ eval instant at 50m histogram_stdvar(testhistogram3)
eval instant at 50m histogram_fraction(0, 0.2, testhistogram3)
{start="positive"} 0.6363636363636364
{start="negative"} 0
-
+
eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m]))
{start="positive"} 0.6363636363636364
{start="negative"} 0
-# Test histogram_quantile.
+# In the classic histogram, we can access the corresponding bucket (if
+# it exists) and divide by the count to get the same result.
+
+eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count
+ {start="positive"} 0.6363636363636364
+
+eval instant at 50m rate(testhistogram3_bucket{le=".2"}[10m]) / ignoring(le) rate(testhistogram3_count[10m])
+ {start="positive"} 0.6363636363636364
+
+# Test histogram_quantile, native and classic.
+
+eval instant at 50m histogram_quantile(0, testhistogram3)
+ {start="positive"} 0
+ {start="negative"} -0.25
eval instant at 50m histogram_quantile(0, testhistogram3_bucket)
{start="positive"} 0
{start="negative"} -0.25
+eval instant at 50m histogram_quantile(0.25, testhistogram3)
+ {start="positive"} 0.055
+ {start="negative"} -0.225
+
eval instant at 50m histogram_quantile(0.25, testhistogram3_bucket)
{start="positive"} 0.055
{start="negative"} -0.225
+eval instant at 50m histogram_quantile(0.5, testhistogram3)
+ {start="positive"} 0.125
+ {start="negative"} -0.2
+
eval instant at 50m histogram_quantile(0.5, testhistogram3_bucket)
{start="positive"} 0.125
{start="negative"} -0.2
+eval instant at 50m histogram_quantile(0.75, testhistogram3)
+ {start="positive"} 0.45
+ {start="negative"} -0.15
+
eval instant at 50m histogram_quantile(0.75, testhistogram3_bucket)
{start="positive"} 0.45
{start="negative"} -0.15
+eval instant at 50m histogram_quantile(1, testhistogram3)
+ {start="positive"} 1
+ {start="negative"} -0.1
+
eval instant at 50m histogram_quantile(1, testhistogram3_bucket)
{start="positive"} 1
{start="negative"} -0.1
# Quantile too low.
+
+eval_warn instant at 50m histogram_quantile(-0.1, testhistogram)
+ {start="positive"} -Inf
+ {start="negative"} -Inf
+
eval_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
{start="positive"} -Inf
{start="negative"} -Inf
# Quantile too high.
+
+eval_warn instant at 50m histogram_quantile(1.01, testhistogram)
+ {start="positive"} +Inf
+ {start="negative"} +Inf
+
eval_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket)
{start="positive"} +Inf
{start="negative"} +Inf
# Quantile invalid.
+
+eval_warn instant at 50m histogram_quantile(NaN, testhistogram)
+ {start="positive"} NaN
+ {start="negative"} NaN
+
eval_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket)
{start="positive"} NaN
{start="negative"} NaN
# Quantile value in lowest bucket.
+
+eval instant at 50m histogram_quantile(0, testhistogram)
+ {start="positive"} 0
+ {start="negative"} -0.2
+
eval instant at 50m histogram_quantile(0, testhistogram_bucket)
{start="positive"} 0
{start="negative"} -0.2
# Quantile value in highest bucket.
+
+eval instant at 50m histogram_quantile(1, testhistogram)
+ {start="positive"} 1
+ {start="negative"} 0.3
+
eval instant at 50m histogram_quantile(1, testhistogram_bucket)
{start="positive"} 1
{start="negative"} 0.3
# Finally some useful quantiles.
+
+eval instant at 50m histogram_quantile(0.2, testhistogram)
+ {start="positive"} 0.048
+ {start="negative"} -0.2
+
eval instant at 50m histogram_quantile(0.2, testhistogram_bucket)
{start="positive"} 0.048
{start="negative"} -0.2
+eval instant at 50m histogram_quantile(0.5, testhistogram)
+ {start="positive"} 0.15
+ {start="negative"} -0.15
+
eval instant at 50m histogram_quantile(0.5, testhistogram_bucket)
{start="positive"} 0.15
{start="negative"} -0.15
+eval instant at 50m histogram_quantile(0.8, testhistogram)
+ {start="positive"} 0.72
+ {start="negative"} 0.3
+
eval instant at 50m histogram_quantile(0.8, testhistogram_bucket)
{start="positive"} 0.72
{start="negative"} 0.3
# More realistic with rates.
+eval instant at 50m histogram_quantile(0.2, rate(testhistogram[10m]))
+ {start="positive"} 0.048
+ {start="negative"} -0.2
+
eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m]))
{start="positive"} 0.048
{start="negative"} -0.2
+eval instant at 50m histogram_quantile(0.5, rate(testhistogram[10m]))
+ {start="positive"} 0.15
+ {start="negative"} -0.15
+
eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m]))
{start="positive"} 0.15
{start="negative"} -0.15
+eval instant at 50m histogram_quantile(0.8, rate(testhistogram[10m]))
+ {start="positive"} 0.72
+ {start="negative"} 0.3
+
eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m]))
{start="positive"} 0.72
{start="negative"} 0.3
# Want results exactly in the middle of the bucket.
+
+eval instant at 7m histogram_quantile(1./6., testhistogram2)
+ {} 1
+
eval instant at 7m histogram_quantile(1./6., testhistogram2_bucket)
{} 1
+eval instant at 7m histogram_quantile(0.5, testhistogram2)
+ {} 3
+
eval instant at 7m histogram_quantile(0.5, testhistogram2_bucket)
{} 3
+eval instant at 7m histogram_quantile(5./6., testhistogram2)
+ {} 5
+
eval instant at 7m histogram_quantile(5./6., testhistogram2_bucket)
{} 5
+eval instant at 47m histogram_quantile(1./6., rate(testhistogram2[15m]))
+ {} 1
+
eval instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m]))
{} 1
+eval instant at 47m histogram_quantile(0.5, rate(testhistogram2[15m]))
+ {} 3
+
eval instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m]))
{} 3
+eval instant at 47m histogram_quantile(5./6., rate(testhistogram2[15m]))
+ {} 5
+
eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m]))
{} 5
-# Aggregated histogram: Everything in one.
+# Aggregated histogram: Everything in one. Note how native histograms
+# don't require aggregation by le.
+
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])))
+ {} 0.075
+
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.075
+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])))
+ {} 0.1277777777777778
+
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.1277777777777778
# Aggregated histogram: Everything in one. Now with avg, which does not change anything.
+
+eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[10m])))
+ {} 0.075
+
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.075
+eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[10m])))
+ {} 0.12777777777777778
+
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.12777777777777778
# Aggregated histogram: By instance.
+
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (instance))
+ {instance="ins1"} 0.075
+ {instance="ins2"} 0.075
+
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
{instance="ins1"} 0.075
{instance="ins2"} 0.075
+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (instance))
+ {instance="ins1"} 0.1333333333
+ {instance="ins2"} 0.125
+
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
{instance="ins1"} 0.1333333333
{instance="ins2"} 0.125
# Aggregated histogram: By job.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job))
+ {job="job1"} 0.1
+ {job="job2"} 0.0642857142857143
+
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
{job="job1"} 0.1
{job="job2"} 0.0642857142857143
+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job))
+ {job="job1"} 0.14
+ {job="job2"} 0.1125
+
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
{job="job1"} 0.14
{job="job2"} 0.1125
# Aggregated histogram: By job and instance.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job, instance))
+ {instance="ins1", job="job1"} 0.11
+ {instance="ins2", job="job1"} 0.09
+ {instance="ins1", job="job2"} 0.06
+ {instance="ins2", job="job2"} 0.0675
+
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job, instance))
+ {instance="ins1", job="job1"} 0.15
+ {instance="ins2", job="job1"} 0.1333333333333333
+ {instance="ins1", job="job2"} 0.1
+ {instance="ins2", job="job2"} 0.1166666666666667
+
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.1333333333333333
@@ -241,18 +393,31 @@ eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bu
{instance="ins2", job="job2"} 0.1166666666666667
# The unaggregated histogram for comparison. Same result as the previous one.
+eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[10m]))
+ {instance="ins1", job="job1"} 0.11
+ {instance="ins2", job="job1"} 0.09
+ {instance="ins1", job="job2"} 0.06
+ {instance="ins2", job="job2"} 0.0675
+
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m]))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
+eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[10m]))
+ {instance="ins1", job="job1"} 0.15
+ {instance="ins2", job="job1"} 0.13333333333333333
+ {instance="ins1", job="job2"} 0.1
+ {instance="ins2", job="job2"} 0.11666666666666667
+
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.13333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.11666666666666667
+# All NHCBs summed into one.
eval instant at 50m sum(request_duration_seconds)
{} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}}
@@ -303,11 +468,13 @@ load_with_nhcb 5m
eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[10m]))
{instance="ins1", job="job1"} NaN
-# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set
+# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set.
# https://github.com/prometheus/prometheus/issues/9910
load_with_nhcb 5m
- request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
- request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
- request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
+ request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
+ request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
+ request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
-eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket$"})
+eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"})
+
+eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"})
diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test
index b0d473172b..23aab4f210 100644
--- a/promql/promqltest/testdata/native_histograms.test
+++ b/promql/promqltest/testdata/native_histograms.test
@@ -747,6 +747,9 @@ eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_4)
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4)
{} 1
+eval instant at 10m histogram_sum(scalar(histogram_fraction(-Inf, +Inf, sum(histogram_fraction_4))) * histogram_fraction_4)
+ {} 100
+
clear
# Counter reset only noticeable in a single bucket.
diff --git a/promql/promqltest/testdata/staleness.test b/promql/promqltest/testdata/staleness.test
index 6bbb26692e..a48473d439 100644
--- a/promql/promqltest/testdata/staleness.test
+++ b/promql/promqltest/testdata/staleness.test
@@ -34,6 +34,11 @@ eval instant at 20s count_over_time(metric[10s])
eval instant at 20s count_over_time(metric[20s])
{} 1
+eval instant at 20s count_over_time(metric[10])
+
+eval instant at 20s count_over_time(metric[20])
+ {} 1
+
clear
diff --git a/promql/promqltest/testdata/subquery.test b/promql/promqltest/testdata/subquery.test
index 596fa049b4..3ac547a2b5 100644
--- a/promql/promqltest/testdata/subquery.test
+++ b/promql/promqltest/testdata/subquery.test
@@ -76,6 +76,21 @@ eval instant at 1010s sum_over_time(metric1[30s:10s] offset 3s)
eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s)
{} 297
+eval instant at 1010s sum_over_time(metric1[30:10] offset 3)
+ {} 297
+
+eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s)
+ {} 297
+
+eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s)
+ {} 297
+
+eval instant at 1010s sum_over_time((metric1)[30:10] offset 3s)
+ {} 297
+
+eval instant at 1010s sum_over_time((metric1)[30:10] offset 3)
+ {} 297
+
# Nested subqueries
eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s])
{} 0.30000000000000004
diff --git a/rules/group.go b/rules/group.go
index c0ad18c187..0bc219a11b 100644
--- a/rules/group.go
+++ b/rules/group.go
@@ -151,7 +151,42 @@ func (g *Group) Name() string { return g.name }
func (g *Group) File() string { return g.file }
// Rules returns the group's rules.
-func (g *Group) Rules() []Rule { return g.rules }
+func (g *Group) Rules(matcherSets ...[]*labels.Matcher) []Rule {
+ if len(matcherSets) == 0 {
+ return g.rules
+ }
+ var rules []Rule
+ for _, rule := range g.rules {
+ if matchesMatcherSets(matcherSets, rule.Labels()) {
+ rules = append(rules, rule)
+ }
+ }
+ return rules
+}
+
+func matches(lbls labels.Labels, matchers ...*labels.Matcher) bool {
+ for _, m := range matchers {
+ if v := lbls.Get(m.Name); !m.Matches(v) {
+ return false
+ }
+ }
+ return true
+}
+
+// matchesMatcherSets returns true if the labels satisfy at least one matcher set: matchers within a set are ANDed, and the sets themselves are ORed.
+func matchesMatcherSets(matcherSets [][]*labels.Matcher, lbls labels.Labels) bool {
+ if len(matcherSets) == 0 {
+ return true
+ }
+
+ var ok bool
+ for _, matchers := range matcherSets {
+ if matches(lbls, matchers...) {
+ ok = true
+ }
+ }
+ return ok
+}
// Queryable returns the group's queryable.
func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable }
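A hedged usage sketch for the matcher-set filtering introduced above: matchers inside one slice are ANDed, separate slices are ORed. The group value and the label names are assumptions for illustration only.

```go
package rulesfilter

import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/rules"
)

// criticalOrProdDBRules returns rules labelled severity="critical", or rules
// labelled both team="db" and env="prod". The group is assumed to come from
// an existing rules.Manager.
func criticalOrProdDBRules(g *rules.Group) []rules.Rule {
	return g.Rules(
		[]*labels.Matcher{
			labels.MustNewMatcher(labels.MatchEqual, "severity", "critical"),
		},
		[]*labels.Matcher{
			labels.MustNewMatcher(labels.MatchEqual, "team", "db"),
			labels.MustNewMatcher(labels.MatchEqual, "env", "prod"),
		},
	)
}
```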
diff --git a/rules/manager.go b/rules/manager.go
index acc637e718..ab33c3c7d8 100644
--- a/rules/manager.go
+++ b/rules/manager.go
@@ -380,13 +380,13 @@ func (m *Manager) RuleGroups() []*Group {
}
// Rules returns the list of the manager's rules.
-func (m *Manager) Rules() []Rule {
+func (m *Manager) Rules(matcherSets ...[]*labels.Matcher) []Rule {
m.mtx.RLock()
defer m.mtx.RUnlock()
var rules []Rule
for _, g := range m.groups {
- rules = append(rules, g.rules...)
+ rules = append(rules, g.Rules(matcherSets...)...)
}
return rules
diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml
index bb65d7f607..83ae3906cc 100644
--- a/scripts/golangci-lint.yml
+++ b/scripts/golangci-lint.yml
@@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+ uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Install Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:
diff --git a/storage/fanout_test.go b/storage/fanout_test.go
index 712f0400f7..4613fe7572 100644
--- a/storage/fanout_test.go
+++ b/storage/fanout_test.go
@@ -238,11 +238,11 @@ func (errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels
return storage.ErrSeriesSet(errSelect)
}
-func (errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, errors.New("label values error")
}
-func (errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, errors.New("label names error")
}
diff --git a/storage/interface.go b/storage/interface.go
index 493c2d6893..f85f985e9d 100644
--- a/storage/interface.go
+++ b/storage/interface.go
@@ -122,11 +122,11 @@ type MockQuerier struct {
SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
}
-func (q *MockQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *MockQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
-func (q *MockQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *MockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
@@ -161,12 +161,12 @@ type LabelQuerier interface {
// It is not safe to use the strings beyond the lifetime of the querier.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.
- LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
+ LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
// LabelNames returns all the unique label names present in the block in sorted order.
// If matchers are specified the returned result set is reduced
// to label names of metrics matching the matchers.
- LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
+ LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
// Close releases the resources of the Querier.
Close() error
@@ -190,6 +190,9 @@ type SelectHints struct {
Start int64 // Start time in milliseconds for this select.
End int64 // End time in milliseconds for this select.
+ // Maximum number of results returned. Use a value of 0 to disable.
+ Limit int
+
Step int64 // Query step size in milliseconds.
Func string // String representation of surrounding function or aggregation.
@@ -217,6 +220,13 @@ type SelectHints struct {
DisableTrimming bool
}
+// LabelHints specifies hints passed for label reads.
+// These hints are optional; implementations may use them to optimize the read or ignore them entirely.
+type LabelHints struct {
+ // Maximum number of results returned. Use a value of 0 to disable.
+ Limit int
+}
+
// TODO(bwplotka): Move to promql/engine_test.go?
// QueryableFunc is an adapter to allow the use of ordinary functions as
// Queryables. It follows the idea of http.HandlerFunc.
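Hint handling is explicitly optional, but an implementation that does honour the new limit only needs a small truncation step after collecting its results. A minimal sketch, assuming a free-standing helper that is not part of the storage package:

```go
package sketch

import "github.com/prometheus/prometheus/storage"

// truncateToLimit applies an optional LabelHints limit to a label result.
// A nil hints value or a limit of 0 means "no limit".
func truncateToLimit(names []string, hints *storage.LabelHints) []string {
	if hints == nil || hints.Limit <= 0 || len(names) <= hints.Limit {
		return names
	}
	return names[:hints.Limit]
}
```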
diff --git a/storage/memoized_iterator.go b/storage/memoized_iterator.go
index 4ab2aa5d78..273b3caa1d 100644
--- a/storage/memoized_iterator.go
+++ b/storage/memoized_iterator.go
@@ -136,6 +136,11 @@ func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHist
return b.it.AtFloatHistogram(nil)
}
+// AtT returns the timestamp of the current element of the iterator.
+func (b *MemoizedSeriesIterator) AtT() int64 {
+ return b.it.AtT()
+}
+
// Err returns the last encountered error.
func (b *MemoizedSeriesIterator) Err() error {
return b.it.Err()
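The new AtT accessor lets callers read the current timestamp without branching on whether the sample is a float or a histogram. A short sketch, assuming the iterator's Next method reports a chunkenc.ValueType as elsewhere in the storage package:

```go
package sketch

import (
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// timestampsOf collects the timestamps of all remaining samples, regardless
// of the value type of each sample.
func timestampsOf(it *storage.MemoizedSeriesIterator) []int64 {
	var ts []int64
	for it.Next() != chunkenc.ValNone {
		ts = append(ts, it.AtT())
	}
	return ts
}
```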
diff --git a/storage/memoized_iterator_test.go b/storage/memoized_iterator_test.go
index d1cd565170..81e517f96e 100644
--- a/storage/memoized_iterator_test.go
+++ b/storage/memoized_iterator_test.go
@@ -29,13 +29,15 @@ func TestMemoizedSeriesIterator(t *testing.T) {
sampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram) {
if efh == nil {
ts, v := it.At()
- require.Equal(t, ets, ts, "timestamp mismatch")
- require.Equal(t, ev, v, "value mismatch")
+ require.Equal(t, ets, ts, "At() timestamp mismatch")
+ require.Equal(t, ev, v, "At() value mismatch")
} else {
ts, fh := it.AtFloatHistogram()
- require.Equal(t, ets, ts, "timestamp mismatch")
- require.Equal(t, efh, fh, "histogram mismatch")
+ require.Equal(t, ets, ts, "AtFloatHistogram() timestamp mismatch")
+ require.Equal(t, efh, fh, "AtFloatHistogram() histogram mismatch")
}
+
+ require.Equal(t, ets, it.AtT(), "AtT() timestamp mismatch")
}
prevSampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram, eok bool) {
ts, v, fh, ok := it.PeekPrev()
diff --git a/storage/merge.go b/storage/merge.go
index 6714af5e83..194494b6a9 100644
--- a/storage/merge.go
+++ b/storage/merge.go
@@ -169,8 +169,8 @@ func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQ
// LabelValues returns all potential values for a label name.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.
-func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
- res, ws, err := q.lvals(ctx, q.queriers, name, matchers...)
+func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+ res, ws, err := q.lvals(ctx, q.queriers, name, hints, matchers...)
if err != nil {
return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err)
}
@@ -178,22 +178,22 @@ func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matc
}
// lvals performs merge sort for LabelValues from multiple queriers.
-func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
if lq.Len() == 0 {
return nil, nil, nil
}
if lq.Len() == 1 {
- return lq.Get(0).LabelValues(ctx, n, matchers...)
+ return lq.Get(0).LabelValues(ctx, n, hints, matchers...)
}
a, b := lq.SplitByHalf()
var ws annotations.Annotations
- s1, w, err := q.lvals(ctx, a, n, matchers...)
+ s1, w, err := q.lvals(ctx, a, n, hints, matchers...)
ws.Merge(w)
if err != nil {
return nil, ws, err
}
- s2, ws, err := q.lvals(ctx, b, n, matchers...)
+ s2, ws, err := q.lvals(ctx, b, n, hints, matchers...)
ws.Merge(w)
if err != nil {
return nil, ws, err
@@ -229,13 +229,13 @@ func mergeStrings(a, b []string) []string {
}
// LabelNames returns all the unique label names present in all queriers in sorted order.
-func (q *mergeGenericQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *mergeGenericQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
var (
labelNamesMap = make(map[string]struct{})
warnings annotations.Annotations
)
for _, querier := range q.queriers {
- names, wrn, err := querier.LabelNames(ctx, matchers...)
+ names, wrn, err := querier.LabelNames(ctx, hints, matchers...)
if wrn != nil {
// TODO(bwplotka): We could potentially wrap warnings.
warnings.Merge(wrn)
diff --git a/storage/merge_test.go b/storage/merge_test.go
index 1203b3e7bb..7619af3c1f 100644
--- a/storage/merge_test.go
+++ b/storage/merge_test.go
@@ -1361,7 +1361,7 @@ func (m *mockGenericQuerier) Select(_ context.Context, b bool, _ *SelectHints, _
return &mockGenericSeriesSet{resp: m.resp, warnings: m.warnings, err: m.err}
}
-func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock()
m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{
name: name,
@@ -1371,7 +1371,7 @@ func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, matcher
return m.resp, m.warnings, m.err
}
-func (m *mockGenericQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (m *mockGenericQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock()
m.labelNamesCalls++
m.mtx.Unlock()
@@ -1558,7 +1558,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
}
})
t.Run("LabelNames", func(t *testing.T) {
- res, w, err := q.LabelNames(ctx)
+ res, w, err := q.LabelNames(ctx, nil)
require.Subset(t, tcase.expectedWarnings, w)
require.ErrorIs(t, err, tcase.expectedErrs[1], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res)
@@ -1573,7 +1573,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
}
})
t.Run("LabelValues", func(t *testing.T) {
- res, w, err := q.LabelValues(ctx, "test")
+ res, w, err := q.LabelValues(ctx, "test", nil)
require.Subset(t, tcase.expectedWarnings, w)
require.ErrorIs(t, err, tcase.expectedErrs[2], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res)
@@ -1589,7 +1589,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
})
t.Run("LabelValuesWithMatchers", func(t *testing.T) {
matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue")
- res, w, err := q.LabelValues(ctx, "test2", matcher)
+ res, w, err := q.LabelValues(ctx, "test2", nil, matcher)
require.Subset(t, tcase.expectedWarnings, w)
require.ErrorIs(t, err, tcase.expectedErrs[3], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res)
diff --git a/storage/noop.go b/storage/noop.go
index be5741ddd8..f5092da7c7 100644
--- a/storage/noop.go
+++ b/storage/noop.go
@@ -31,11 +31,11 @@ func (noopQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matche
return NoopSeriesSet()
}
-func (noopQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (noopQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
-func (noopQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (noopQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
@@ -54,11 +54,11 @@ func (noopChunkQuerier) Select(context.Context, bool, *SelectHints, ...*labels.M
return NoopChunkedSeriesSet()
}
-func (noopChunkQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (noopChunkQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
-func (noopChunkQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (noopChunkQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go
index 15f8fe1320..279d10e41b 100644
--- a/storage/remote/codec_test.go
+++ b/storage/remote/codec_test.go
@@ -36,48 +36,48 @@ import (
"github.com/prometheus/prometheus/util/annotations"
)
-var testHistogram = histogram.Histogram{
- Schema: 2,
- ZeroThreshold: 1e-128,
- ZeroCount: 0,
- Count: 0,
- Sum: 20,
- PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
- PositiveBuckets: []int64{1},
- NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
- NegativeBuckets: []int64{-1},
-}
-
-var writeRequestFixture = &prompb.WriteRequest{
- Timeseries: []prompb.TimeSeries{
- {
- Labels: []prompb.Label{
- {Name: "__name__", Value: "test_metric1"},
- {Name: "b", Value: "c"},
- {Name: "baz", Value: "qux"},
- {Name: "d", Value: "e"},
- {Name: "foo", Value: "bar"},
- },
- Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
- Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}},
- Histograms: []prompb.Histogram{prompb.FromIntHistogram(0, &testHistogram), prompb.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
- },
- {
- Labels: []prompb.Label{
- {Name: "__name__", Value: "test_metric1"},
- {Name: "b", Value: "c"},
- {Name: "baz", Value: "qux"},
- {Name: "d", Value: "e"},
- {Name: "foo", Value: "bar"},
- },
- Samples: []prompb.Sample{{Value: 2, Timestamp: 1}},
- Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}},
- Histograms: []prompb.Histogram{prompb.FromIntHistogram(2, &testHistogram), prompb.FromFloatHistogram(3, testHistogram.ToFloat(nil))},
- },
- },
-}
-
var (
+ testHistogram = histogram.Histogram{
+ Schema: 2,
+ ZeroThreshold: 1e-128,
+ ZeroCount: 0,
+ Count: 0,
+ Sum: 20,
+ PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
+ PositiveBuckets: []int64{1},
+ NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
+ NegativeBuckets: []int64{-1},
+ }
+
+ writeRequestFixture = &prompb.WriteRequest{
+ Timeseries: []prompb.TimeSeries{
+ {
+ Labels: []prompb.Label{
+ {Name: "__name__", Value: "test_metric1"},
+ {Name: "b", Value: "c"},
+ {Name: "baz", Value: "qux"},
+ {Name: "d", Value: "e"},
+ {Name: "foo", Value: "bar"},
+ },
+ Samples: []prompb.Sample{{Value: 1, Timestamp: 1}},
+ Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 1}},
+ Histograms: []prompb.Histogram{prompb.FromIntHistogram(1, &testHistogram), prompb.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
+ },
+ {
+ Labels: []prompb.Label{
+ {Name: "__name__", Value: "test_metric1"},
+ {Name: "b", Value: "c"},
+ {Name: "baz", Value: "qux"},
+ {Name: "d", Value: "e"},
+ {Name: "foo", Value: "bar"},
+ },
+ Samples: []prompb.Sample{{Value: 2, Timestamp: 2}},
+ Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 2}},
+ Histograms: []prompb.Histogram{prompb.FromIntHistogram(3, &testHistogram), prompb.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
+ },
+ },
+ }
+
writeV2RequestSeries1Metadata = metadata.Metadata{
Type: model.MetricTypeGauge,
Help: "Test gauge for test purposes",
@@ -88,43 +88,78 @@ var (
Help: "Test counter for test purposes",
}
- // writeV2RequestFixture represents the same request as writeRequestFixture, but using the v2 representation.
- writeV2RequestFixture = func() *writev2.Request {
- st := writev2.NewSymbolTable()
- b := labels.NewScratchBuilder(0)
- labelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].ToLabels(&b, nil), nil)
- exemplar1LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
- exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
- return &writev2.Request{
- Timeseries: []writev2.TimeSeries{
- {
- LabelsRefs: labelRefs,
- Metadata: writev2.Metadata{
- Type: writev2.Metadata_METRIC_TYPE_GAUGE, // Same as writeV2RequestSeries1Metadata.Type, but in writev2.
- HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help),
- UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit),
- },
- Samples: []writev2.Sample{{Value: 1, Timestamp: 0}},
- Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 0}},
- Histograms: []writev2.Histogram{writev2.FromIntHistogram(0, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
- },
- {
- LabelsRefs: labelRefs,
- Metadata: writev2.Metadata{
- Type: writev2.Metadata_METRIC_TYPE_COUNTER, // Same as writeV2RequestSeries2Metadata.Type, but in writev2.
- HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help),
- // No unit.
- },
- Samples: []writev2.Sample{{Value: 2, Timestamp: 1}},
- Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 1}},
- Histograms: []writev2.Histogram{writev2.FromIntHistogram(2, &testHistogram), writev2.FromFloatHistogram(3, testHistogram.ToFloat(nil))},
+ // writeV2RequestFixture represents the same request as writeRequestFixture,
+ // but using the v2 representation, plus includes writeV2RequestSeries1Metadata and writeV2RequestSeries2Metadata.
+ // NOTE: Use TestWriteV2RequestFixture and copy the diff to regenerate if needed.
+ writeV2RequestFixture = &writev2.Request{
+ Symbols: []string{"", "__name__", "test_metric1", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"},
+ Timeseries: []writev2.TimeSeries{
+ {
+ LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Symbolized writeRequestFixture.Timeseries[0].Labels
+ Metadata: writev2.Metadata{
+ Type: writev2.Metadata_METRIC_TYPE_GAUGE, // writeV2RequestSeries1Metadata.Type.
+
+ HelpRef: 15, // Symbolized writeV2RequestSeries1Metadata.Help.
+ UnitRef: 16, // Symbolized writeV2RequestSeries1Metadata.Unit.
},
+ Samples: []writev2.Sample{{Value: 1, Timestamp: 1}},
+ Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 1}},
+ Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
},
- Symbols: st.Symbols(),
- }
- }()
+ {
+ LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Same series as first.
+ Metadata: writev2.Metadata{
+ Type: writev2.Metadata_METRIC_TYPE_COUNTER, // writeV2RequestSeries2Metadata.Type.
+
+ HelpRef: 17, // Symbolized writeV2RequestSeries2Metadata.Help.
+ // No unit.
+ },
+ Samples: []writev2.Sample{{Value: 2, Timestamp: 2}},
+ Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{13, 14}, Value: 2, Timestamp: 2}},
+ Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
+ },
+ },
+ }
)
+func TestWriteV2RequestFixture(t *testing.T) {
+ // Dynamically generate writeV2RequestFixture, reusing the v1 fixture elements.
+ st := writev2.NewSymbolTable()
+ b := labels.NewScratchBuilder(0)
+ labelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].ToLabels(&b, nil), nil)
+ exemplar1LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
+ exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[1].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
+ expected := &writev2.Request{
+ Timeseries: []writev2.TimeSeries{
+ {
+ LabelsRefs: labelRefs,
+ Metadata: writev2.Metadata{
+ Type: writev2.Metadata_METRIC_TYPE_GAUGE,
+ HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help),
+ UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit),
+ },
+ Samples: []writev2.Sample{{Value: 1, Timestamp: 1}},
+ Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 1}},
+ Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
+ },
+ {
+ LabelsRefs: labelRefs,
+ Metadata: writev2.Metadata{
+ Type: writev2.Metadata_METRIC_TYPE_COUNTER,
+ HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help),
+ // No unit.
+ },
+ Samples: []writev2.Sample{{Value: 2, Timestamp: 2}},
+ Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 2}},
+ Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
+ },
+ },
+ Symbols: st.Symbols(),
+ }
+ // Check that it matches the static writeV2RequestFixture.
+ require.Equal(t, expected, writeV2RequestFixture)
+}
+
func TestValidateLabelsAndMetricName(t *testing.T) {
tests := []struct {
input []prompb.Label
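
As a reading aid for the symbolized fixture above: in the v2 request, LabelsRefs holds alternating name/value indices into the request-level Symbols table, with Symbols[0] reserved for the empty string. The following is a minimal, illustrative sketch of resolving such references; the decodeLabels helper is hypothetical and not part of this change.

package main

import "fmt"

// decodeLabels resolves v2 LabelsRefs (alternating name/value indices)
// against the request-level Symbols table.
func decodeLabels(symbols []string, refs []uint32) [][2]string {
	pairs := make([][2]string, 0, len(refs)/2)
	for i := 0; i+1 < len(refs); i += 2 {
		pairs = append(pairs, [2]string{symbols[refs[i]], symbols[refs[i+1]]})
	}
	return pairs
}

func main() {
	// Mirrors the layout of writeV2RequestFixture above: Symbols[0] is the empty string.
	symbols := []string{"", "__name__", "test_metric1", "b", "c"}
	refs := []uint32{1, 2, 3, 4}
	fmt.Println(decodeLabels(symbols, refs)) // [[__name__ test_metric1] [b c]]
}
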
diff --git a/storage/remote/max_timestamp.go b/storage/remote/max_timestamp.go
index 3a0a6d6fd4..bb67d9bb98 100644
--- a/storage/remote/max_timestamp.go
+++ b/storage/remote/max_timestamp.go
@@ -39,9 +39,3 @@ func (m *maxTimestamp) Get() float64 {
defer m.mtx.Unlock()
return m.value
}
-
-func (m *maxTimestamp) Collect(c chan<- prometheus.Metric) {
- if m.Get() > 0 {
- m.Gauge.Collect(c)
- }
-}
diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go
index fb13da70da..5bafb9da20 100644
--- a/storage/remote/queue_manager.go
+++ b/storage/remote/queue_manager.go
@@ -232,7 +232,7 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
Namespace: namespace,
Subsystem: subsystem,
Name: "queue_highest_sent_timestamp_seconds",
- Help: "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch.",
+ Help: "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch. Initialized to 0 when no data has been sent yet.",
ConstLabels: constLabels,
}),
}
@@ -1468,6 +1468,8 @@ func (q *queue) FlushAndShutdown(done <-chan struct{}) {
for q.tryEnqueueingBatch(done) {
time.Sleep(time.Second)
}
+ q.batchMtx.Lock()
+ defer q.batchMtx.Unlock()
q.batch = nil
close(q.batchQueue)
}
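
The locking added above matters because q.batch is also read and swapped by other goroutines under batchMtx; clearing it and closing batchQueue without the lock could race with those readers. A minimal, self-contained sketch of the same shutdown pattern (illustrative only, not the actual queue implementation):

package main

import (
	"fmt"
	"sync"
)

type queue struct {
	batchMtx   sync.Mutex
	batch      []int
	batchQueue chan []int
}

// FlushAndShutdown enqueues any pending batch, then clears it and closes the
// channel while holding the mutex, so concurrent readers never observe a
// half-torn-down queue.
func (q *queue) FlushAndShutdown() {
	q.batchMtx.Lock()
	defer q.batchMtx.Unlock()
	if len(q.batch) > 0 {
		q.batchQueue <- q.batch
	}
	q.batch = nil
	close(q.batchQueue)
}

func main() {
	q := &queue{batch: []int{1, 2, 3}, batchQueue: make(chan []int, 1)}
	q.FlushAndShutdown()
	fmt.Println(<-q.batchQueue) // [1 2 3]
}
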
diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go
index 9ab563edab..5227c2d6a7 100644
--- a/storage/remote/queue_manager_test.go
+++ b/storage/remote/queue_manager_test.go
@@ -60,7 +60,7 @@ func newHighestTimestampMetric() *maxTimestamp {
Namespace: namespace,
Subsystem: subsystem,
Name: "highest_timestamp_in_seconds",
- Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.",
+ Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet",
}),
}
}
diff --git a/storage/remote/read.go b/storage/remote/read.go
index 723030091a..e54b14f1e3 100644
--- a/storage/remote/read.go
+++ b/storage/remote/read.go
@@ -210,13 +210,13 @@ func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, []s
}
// LabelValues implements storage.Querier and is a noop.
-func (q *querier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *querier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
return nil, nil, errors.New("not implemented")
}
// LabelNames implements storage.Querier and is a noop.
-func (q *querier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *querier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
return nil, nil, errors.New("not implemented")
}
diff --git a/storage/remote/write.go b/storage/remote/write.go
index cd8cd588ca..81902a8f1a 100644
--- a/storage/remote/write.go
+++ b/storage/remote/write.go
@@ -100,7 +100,7 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, f
Namespace: namespace,
Subsystem: subsystem,
Name: "highest_timestamp_in_seconds",
- Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.",
+ Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet.",
}),
},
}
diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go
index 9997811ab0..d822373717 100644
--- a/storage/remote/write_handler.go
+++ b/storage/remote/write_handler.go
@@ -19,6 +19,7 @@ import (
"fmt"
"io"
"net/http"
+ "strconv"
"strings"
"time"
@@ -27,6 +28,7 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
@@ -43,7 +45,8 @@ type writeHandler struct {
logger log.Logger
appendable storage.Appendable
- samplesWithInvalidLabelsTotal prometheus.Counter
+ samplesWithInvalidLabelsTotal prometheus.Counter
+ samplesAppendedWithoutMetadata prometheus.Counter
acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{}
}
@@ -52,6 +55,9 @@ const maxAheadTime = 10 * time.Minute
// NewWriteHandler creates a http.Handler that accepts remote write requests with
// the given message in acceptedProtoMsgs and writes them to the provided appendable.
+//
+// NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible
+// as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write.
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler {
protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{}
for _, acc := range acceptedProtoMsgs {
@@ -61,15 +67,18 @@ func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable st
logger: logger,
appendable: appendable,
acceptedProtoMsgs: protoMsgs,
- samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{
+ samplesWithInvalidLabelsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "api",
Name: "remote_write_invalid_labels_samples_total",
- Help: "The total number of remote write samples which contains invalid labels.",
+ Help: "The total number of received remote write samples and histogram samples which were rejected due to invalid labels.",
+ }),
+ samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Namespace: "prometheus",
+ Subsystem: "api",
+ Name: "remote_write_without_metadata_appended_samples_total",
+ Help: "The total number of received remote write samples (and histogram samples) which were ingested without corresponding metadata.",
}),
- }
- if reg != nil {
- reg.MustRegister(h.samplesWithInvalidLabelsTotal)
}
return h
}
@@ -108,15 +117,15 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
contentType = appProtoContentType
}
- msg, err := h.parseProtoMsg(contentType)
+ msgType, err := h.parseProtoMsg(contentType)
if err != nil {
level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
return
}
- if _, ok := h.acceptedProtoMsgs[msg]; !ok {
- err := fmt.Errorf("%v protobuf message is not accepted by this server; accepted %v", msg, func() (ret []string) {
+ if _, ok := h.acceptedProtoMsgs[msgType]; !ok {
+ err := fmt.Errorf("%v protobuf message is not accepted by this server; accepted %v", msgType, func() (ret []string) {
for k := range h.acceptedProtoMsgs {
ret = append(ret, string(k))
}
@@ -154,100 +163,111 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
// Now we have a decompressed buffer we can unmarshal it.
- switch msg {
- case config.RemoteWriteProtoMsgV1:
+
+ if msgType == config.RemoteWriteProtoMsgV1 {
+ // The PRW 1.0 flow uses a different proto message and has no partial write handling.
var req prompb.WriteRequest
if err := proto.Unmarshal(decompressed, &req); err != nil {
// TODO(bwplotka): Add more context to responded error?
- level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msg, "err", err.Error())
+ level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
- err = h.write(r.Context(), &req)
- case config.RemoteWriteProtoMsgV2:
- var req writev2.Request
- if err := proto.Unmarshal(decompressed, &req); err != nil {
- // TODO(bwplotka): Add more context to responded error?
- level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msg, "err", err.Error())
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
+ if err = h.write(r.Context(), &req); err != nil {
+ switch {
+ case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample):
+ // Indicate that an out-of-order sample is a bad request to prevent retries.
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ default:
+ level.Error(h.logger).Log("msg", "Error while remote writing the v1 request", "err", err.Error())
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
}
- err = h.writeV2(r.Context(), &req)
+ w.WriteHeader(http.StatusNoContent)
+ return
}
- switch {
- case err == nil:
- case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample):
- // Indicated an out of order sample is a bad request to prevent retries.
+ // Remote Write 2.x proto message handling.
+ var req writev2.Request
+ if err := proto.Unmarshal(decompressed, &req); err != nil {
+ // TODO(bwplotka): Add more context to responded error?
+ level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
- default:
- level.Error(h.logger).Log("msg", "Error appending remote write", "err", err.Error())
- http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+
+ respStats, errHTTPCode, err := h.writeV2(r.Context(), &req)
+
+ // Set required X-Prometheus-Remote-Write-Written-* response headers, in all cases.
+ respStats.SetResponseHeaders(w.Header())
+
+ if err != nil {
+ if errHTTPCode/5 == 100 { // 5xx
+ level.Error(h.logger).Log("msg", "Error while remote writing the v2 request", "err", err.Error())
+ }
+ http.Error(w, err.Error(), errHTTPCode)
return
}
-
w.WriteHeader(http.StatusNoContent)
}
-// checkAppendExemplarError modifies the AppendExemplar's returned error based on the error cause.
-func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, outOfOrderErrs *int) error {
- unwrappedErr := errors.Unwrap(err)
- if unwrappedErr == nil {
- unwrappedErr = err
- }
- switch {
- case errors.Is(unwrappedErr, storage.ErrNotFound):
- return storage.ErrNotFound
- case errors.Is(unwrappedErr, storage.ErrOutOfOrderExemplar):
- *outOfOrderErrs++
- level.Debug(h.logger).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e))
- return nil
- default:
- return err
- }
-}
-
func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) {
outOfOrderExemplarErrs := 0
samplesWithInvalidLabels := 0
+ samplesAppended := 0
- timeLimitApp := &timeLimitAppender{
+ app := &timeLimitAppender{
Appender: h.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
}
defer func() {
if err != nil {
- _ = timeLimitApp.Rollback()
+ _ = app.Rollback()
return
}
- err = timeLimitApp.Commit()
+ err = app.Commit()
+ if err != nil {
+ h.samplesAppendedWithoutMetadata.Add(float64(samplesAppended))
+ }
}()
b := labels.NewScratchBuilder(0)
for _, ts := range req.Timeseries {
ls := ts.ToLabels(&b, nil)
- if !ls.IsValid() {
+ if !ls.Has(labels.MetricName) || !ls.IsValid() {
level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String())
samplesWithInvalidLabels++
+ // TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are
+ // potentially written. Perhaps unify with fixed writeV2 implementation a bit.
continue
}
- err := h.appendSamples(timeLimitApp, ts.Samples, ls)
- if err != nil {
+ if err := h.appendV1Samples(app, ts.Samples, ls); err != nil {
return err
}
+ samplesAppended += len(ts.Samples)
for _, ep := range ts.Exemplars {
e := ep.ToExemplar(&b, nil)
- h.appendExemplar(timeLimitApp, e, ls, &outOfOrderExemplarErrs)
+ if _, err := app.AppendExemplar(0, ls, e); err != nil {
+ switch {
+ case errors.Is(err, storage.ErrOutOfOrderExemplar):
+ outOfOrderExemplarErrs++
+ level.Debug(h.logger).Log("msg", "Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
+ default:
+ // Since exemplar storage is still experimental, we don't fail the request on ingestion errors
+ level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err)
+ }
+ }
}
- err = h.appendHistograms(timeLimitApp, ts.Histograms, ls)
- if err != nil {
+ if err = h.appendV1Histograms(app, ts.Histograms, ls); err != nil {
return err
}
+ samplesAppended += len(ts.Histograms)
}
if outOfOrderExemplarErrs > 0 {
@@ -256,151 +276,216 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
if samplesWithInvalidLabels > 0 {
h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels))
}
-
return nil
}
-func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (err error) {
- outOfOrderExemplarErrs := 0
+func (h *writeHandler) appendV1Samples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error {
+ var ref storage.SeriesRef
+ var err error
+ for _, s := range ss {
+ ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
+ if err != nil {
+ if errors.Is(err, storage.ErrOutOfOrderSample) ||
+ errors.Is(err, storage.ErrOutOfBounds) ||
+ errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
+ level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
+ }
+ return err
+ }
+ }
+ return nil
+}
- timeLimitApp := &timeLimitAppender{
+func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error {
+ var err error
+ for _, hp := range hh {
+ if hp.IsFloatHistogram() {
+ _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
+ } else {
+ _, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
+ }
+ if err != nil {
+ // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
+ // a note indicating its inclusion in the future.
+ if errors.Is(err, storage.ErrOutOfOrderSample) ||
+ errors.Is(err, storage.ErrOutOfBounds) ||
+ errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
+ level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+const (
+ prw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Written-Samples"
+ rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Written-Histograms"
+ rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Written-Exemplars"
+)
+
+type responseStats struct {
+ samples int
+ histograms int
+ exemplars int
+}
+
+func (s responseStats) SetResponseHeaders(h http.Header) {
+ h.Set(prw20WrittenSamplesHeader, strconv.Itoa(s.samples))
+ h.Set(rw20WrittenHistogramsHeader, strconv.Itoa(s.histograms))
+ h.Set(rw20WrittenExemplarsHeader, strconv.Itoa(s.exemplars))
+}
+
+// writeV2 is similar to write, but it works with v2 proto message,
+// allows partial 4xx writes and gathers statistics.
+//
+// writeV2 returns the statistics.
+// In error cases, writeV2 still returns the statistics, together with the error that
+// should be propagated to the remote write sender and the HTTP code to use for the response status.
+//
+// NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors.
+// Once we hit a 5xx-type error, we immediately stop and roll back all appends.
+func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ responseStats, errHTTPCode int, _ error) {
+ app := &timeLimitAppender{
Appender: h.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
}
- defer func() {
- if err != nil {
- _ = timeLimitApp.Rollback()
- return
+ rs := responseStats{}
+ samplesWithoutMetadata, errHTTPCode, err := h.appendV2(app, req, &rs)
+ if err != nil {
+ if errHTTPCode/5 == 100 {
+ // On 5xx, we always roll back, because we expect
+ // the sender to retry and TSDB is not idempotent.
+ if rerr := app.Rollback(); rerr != nil {
+ level.Error(h.logger).Log("msg", "writev2 rollback failed on retry-able error", "err", rerr)
+ }
+ return responseStats{}, errHTTPCode, err
}
- err = timeLimitApp.Commit()
- }()
- b := labels.NewScratchBuilder(0)
+ // Non-retriable (e.g. bad request error case). Can be partially written.
+ commitErr := app.Commit()
+ if commitErr != nil {
+ // The bad request errors no longer matter, since we now have an internal (retryable) error.
+ return responseStats{}, http.StatusInternalServerError, commitErr
+ }
+ // A bad request error happened, but the rest of the data (if any) was written.
+ h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata))
+ return rs, errHTTPCode, err
+ }
+
+ // All good, just commit.
+ if err := app.Commit(); err != nil {
+ return responseStats{}, http.StatusInternalServerError, err
+ }
+ h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata))
+ return rs, 0, nil
+}
+
+func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *responseStats) (samplesWithoutMetadata, errHTTPCode int, err error) {
+ var (
+ badRequestErrs []error
+ outOfOrderExemplarErrs, samplesWithInvalidLabels int
+
+ b = labels.NewScratchBuilder(0)
+ )
for _, ts := range req.Timeseries {
ls := ts.ToLabels(&b, req.Symbols)
-
- err := h.appendSamplesV2(timeLimitApp, ts.Samples, ls)
- if err != nil {
- return err
+ // Validate series labels early.
+ // NOTE(bwplotka): While spec allows UTF-8, Prometheus Receiver may impose
+ // specific limits and follow https://prometheus.io/docs/specs/remote_write_spec_2_0/#invalid-samples case.
+ if !ls.Has(labels.MetricName) || !ls.IsValid() {
+ badRequestErrs = append(badRequestErrs, fmt.Errorf("invalid metric name or labels, got %v", ls.String()))
+ samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms)
+ continue
}
+ allSamplesSoFar := rs.samples + rs.histograms
+ var ref storage.SeriesRef
+
+ // Samples.
+ for _, s := range ts.Samples {
+ ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue())
+ if err == nil {
+ rs.samples++
+ continue
+ }
+ // Handle append error.
+ if errors.Is(err, storage.ErrOutOfOrderSample) ||
+ errors.Is(err, storage.ErrOutOfBounds) ||
+ errors.Is(err, storage.ErrDuplicateSampleForTimestamp) ||
+ errors.Is(err, storage.ErrTooOldSample) {
+ // TODO(bwplotka): Not too spammy log?
+ level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp)
+ badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
+ continue
+ }
+ return 0, http.StatusInternalServerError, err
+ }
+
+ // Native Histograms.
+ for _, hp := range ts.Histograms {
+ if hp.IsFloatHistogram() {
+ ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, nil, hp.ToFloatHistogram())
+ } else {
+ ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, hp.ToIntHistogram(), nil)
+ }
+ if err == nil {
+ rs.histograms++
+ continue
+ }
+ // Handle append error.
+ // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
+ // a note indicating its inclusion in the future.
+ if errors.Is(err, storage.ErrOutOfOrderSample) ||
+ errors.Is(err, storage.ErrOutOfBounds) ||
+ errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
+ // TODO(bwplotka): Not too spammy log?
+ level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp)
+ badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
+ continue
+ }
+ return 0, http.StatusInternalServerError, err
+ }
+
+ // Exemplars.
for _, ep := range ts.Exemplars {
e := ep.ToExemplar(&b, req.Symbols)
- h.appendExemplar(timeLimitApp, e, ls, &outOfOrderExemplarErrs)
- }
-
- err = h.appendHistogramsV2(timeLimitApp, ts.Histograms, ls)
- if err != nil {
- return err
+ ref, err = app.AppendExemplar(ref, ls, e)
+ if err == nil {
+ rs.exemplars++
+ continue
+ }
+ // Handle append error.
+ // TODO(bwplotka): I left the logic as in v1, but we might want to make it consistent with samples and histograms.
+ // Since exemplar storage is still experimental, we don't fail the request in any way on ingestion errors.
+ if errors.Is(err, storage.ErrOutOfOrderExemplar) {
+ outOfOrderExemplarErrs++
+ level.Debug(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
+ continue
+ }
+ level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err)
}
m := ts.ToMetadata(req.Symbols)
- if _, err = timeLimitApp.UpdateMetadata(0, ls, m); err != nil {
+ if _, err = app.UpdateMetadata(ref, ls, m); err != nil {
level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err)
+ // Metadata is attached to each series; since Prometheus does not reject samples without metadata information,
+ // we don't report a remote write error either. We increment a metric instead.
+ samplesWithoutMetadata += (rs.samples + rs.histograms) - allSamplesSoFar
}
}
if outOfOrderExemplarErrs > 0 {
- _ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
+ level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
}
+ h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels))
- return nil
-}
-
-func (h *writeHandler) appendExemplar(app storage.Appender, e exemplar.Exemplar, labels labels.Labels, outOfOrderExemplarErrs *int) {
- _, err := app.AppendExemplar(0, labels, e)
- err = h.checkAppendExemplarError(err, e, outOfOrderExemplarErrs)
- if err != nil {
- // Since exemplar storage is still experimental, we don't fail the request on ingestion errors
- level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", err)
+ if len(badRequestErrs) == 0 {
+ return samplesWithoutMetadata, 0, nil
}
-}
-
-func (h *writeHandler) appendSamples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error {
- var ref storage.SeriesRef
- var err error
- for _, s := range ss {
- ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
- if err != nil {
- unwrappedErr := errors.Unwrap(err)
- if unwrappedErr == nil {
- unwrappedErr = err
- }
- if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
- level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
- }
- return err
- }
- }
- return nil
-}
-
-func (h *writeHandler) appendSamplesV2(app storage.Appender, ss []writev2.Sample, labels labels.Labels) error {
- var ref storage.SeriesRef
- var err error
- for _, s := range ss {
- ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
- if err != nil {
- unwrappedErr := errors.Unwrap(err)
- if unwrappedErr == nil {
- unwrappedErr = err
- }
- if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
- level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
- }
- return err
- }
- }
- return nil
-}
-
-func (h *writeHandler) appendHistograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error {
- var err error
- for _, hp := range hh {
- if hp.IsFloatHistogram() {
- _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
- } else {
- _, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
- }
- if err != nil {
- unwrappedErr := errors.Unwrap(err)
- if unwrappedErr == nil {
- unwrappedErr = err
- }
- // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
- // a note indicating its inclusion in the future.
- if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
- level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
- }
- return err
- }
- }
- return nil
-}
-
-func (h *writeHandler) appendHistogramsV2(app storage.Appender, hh []writev2.Histogram, labels labels.Labels) error {
- var err error
- for _, hp := range hh {
- if hp.IsFloatHistogram() {
- _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
- } else {
- _, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
- }
- if err != nil {
- unwrappedErr := errors.Unwrap(err)
- if unwrappedErr == nil {
- unwrappedErr = err
- }
- // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
- // a note indicating its inclusion in the future.
- if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
- level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
- }
- return err
- }
- }
- return nil
+ // TODO(bwplotka): Better concat formatting? Perhaps add size limit?
+ return samplesWithoutMetadata, http.StatusBadRequest, errors.Join(badRequestErrs...)
}
// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
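
For context on how a sender can consume the partial-write semantics introduced above: on 5xx the handler rolls everything back, on 400 the valid part of the request may still have been written, and the X-Prometheus-Remote-Write-Written-* headers report what was accepted. A minimal, self-contained sender-side sketch follows; it is not part of this change, and the receiver URL and content type are assumptions based on the usual Prometheus remote write endpoint.

package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
	"strconv"
)

func main() {
	payload := []byte{} // A snappy-compressed writev2.Request in a real sender.
	resp, err := http.Post(
		"http://localhost:9090/api/v1/write", // Placeholder receiver URL.
		"application/x-protobuf;proto=io.prometheus.write.v2.Request",
		bytes.NewReader(payload),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The handler sets these headers in all cases via responseStats.SetResponseHeaders.
	samples, _ := strconv.Atoi(resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples"))
	histograms, _ := strconv.Atoi(resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms"))
	exemplars, _ := strconv.Atoi(resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))

	switch {
	case resp.StatusCode/100 == 5:
		// Retryable: the handler rolled back, so nothing was written.
		fmt.Println("retry later; written:", samples, histograms, exemplars)
	case resp.StatusCode == http.StatusBadRequest:
		// Non-retryable: valid parts of the request may still have been written.
		fmt.Println("dropped invalid data; written:", samples, histograms, exemplars)
	default:
		fmt.Println("ok; written:", samples, histograms, exemplars)
	}
}
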
diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go
index 24bd7059ae..9b5fb1a6ef 100644
--- a/storage/remote/write_handler_test.go
+++ b/storage/remote/write_handler_test.go
@@ -16,6 +16,7 @@ package remote
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"math"
@@ -27,6 +28,7 @@ import (
"time"
"github.com/go-kit/log"
+ "github.com/gogo/protobuf/proto"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require"
@@ -290,64 +292,224 @@ func TestRemoteWriteHandler_V1Message(t *testing.T) {
}
}
+func expectHeaderValue(t testing.TB, expected int, got string) {
+ t.Helper()
+
+ require.NotEmpty(t, got)
+ i, err := strconv.Atoi(got)
+ require.NoError(t, err)
+ require.Equal(t, expected, i)
+}
+
func TestRemoteWriteHandler_V2Message(t *testing.T) {
- payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
- require.NoError(t, err)
+ // V2 supports partial writes for non-retriable errors, so test them.
+ for _, tc := range []struct {
+ desc string
+ input []writev2.TimeSeries
+ expectedCode int
+ expectedRespBody string
- req, err := http.NewRequest("", "", bytes.NewReader(payload))
- require.NoError(t, err)
+ commitErr error
+ appendSampleErr error
+ appendHistogramErr error
+ appendExemplarErr error
+ updateMetadataErr error
+ }{
+ {
+ desc: "All timeseries accepted",
+ input: writeV2RequestFixture.Timeseries,
+ expectedCode: http.StatusNoContent,
+ },
+ {
+ desc: "Partial write; first series with invalid labels (no metric name)",
+ input: append(
+ // Series with test_metric1="test_metric1" labels.
+ []writev2.TimeSeries{{LabelsRefs: []uint32{2, 2}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}},
+ writeV2RequestFixture.Timeseries...),
+ expectedCode: http.StatusBadRequest,
+ expectedRespBody: "invalid metric name or labels, got {test_metric1=\"test_metric1\"}\n",
+ },
+ {
+ desc: "Partial write; first series with invalid labels (empty metric name)",
+ input: append(
+ // Series with __name__="" labels.
+ []writev2.TimeSeries{{LabelsRefs: []uint32{1, 0}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}},
+ writeV2RequestFixture.Timeseries...),
+ expectedCode: http.StatusBadRequest,
+ expectedRespBody: "invalid metric name or labels, got {__name__=\"\"}\n",
+ },
+ {
+ desc: "Partial write; first series with one OOO sample",
+ input: func() []writev2.TimeSeries {
+ f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
+ f.Timeseries[0].Samples = append(f.Timeseries[0].Samples, writev2.Sample{Value: 2, Timestamp: 0})
+ return f.Timeseries
+ }(),
+ expectedCode: http.StatusBadRequest,
+ expectedRespBody: "out of order sample for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
+ },
+ {
+ desc: "Partial write; first series with one dup sample",
+ input: func() []writev2.TimeSeries {
+ f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
+ f.Timeseries[0].Samples = append(f.Timeseries[0].Samples, f.Timeseries[0].Samples[0])
+ return f.Timeseries
+ }(),
+ expectedCode: http.StatusBadRequest,
+ expectedRespBody: "duplicate sample for timestamp for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
+ },
+ {
+ desc: "Partial write; first series with one OOO histogram sample",
+ input: func() []writev2.TimeSeries {
+ f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
+ f.Timeseries[0].Histograms = append(f.Timeseries[0].Histograms, writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil)))
+ return f.Timeseries
+ }(),
+ expectedCode: http.StatusBadRequest,
+ expectedRespBody: "out of order sample for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
+ },
+ {
+ desc: "Partial write; first series with one dup histogram sample",
+ input: func() []writev2.TimeSeries {
+ f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
+ f.Timeseries[0].Histograms = append(f.Timeseries[0].Histograms, f.Timeseries[0].Histograms[1])
+ return f.Timeseries
+ }(),
+ expectedCode: http.StatusBadRequest,
+ expectedRespBody: "duplicate sample for timestamp for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
+ },
+ // Non retriable errors from various parts.
+ {
+ desc: "Internal sample append error; rollback triggered",
+ input: writeV2RequestFixture.Timeseries,
+ appendSampleErr: errors.New("some sample internal append error"),
- req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
- req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
- req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
+ expectedCode: http.StatusInternalServerError,
+ expectedRespBody: "some sample internal append error\n",
+ },
+ {
+ desc: "Internal histogram sample append error; rollback triggered",
+ input: writeV2RequestFixture.Timeseries,
+ appendHistogramErr: errors.New("some histogram sample internal append error"),
- appendable := &mockAppendable{}
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
+ expectedCode: http.StatusInternalServerError,
+ expectedRespBody: "some histogram sample internal append error\n",
+ },
+ {
+ desc: "Partial write; skipped exemplar; exemplar storage errs are noop",
+ input: writeV2RequestFixture.Timeseries,
+ appendExemplarErr: errors.New("some exemplar append error"),
- recorder := httptest.NewRecorder()
- handler.ServeHTTP(recorder, req)
+ expectedCode: http.StatusNoContent,
+ },
+ {
+ desc: "Partial write; skipped metadata; metadata storage errs are noop",
+ input: writeV2RequestFixture.Timeseries,
+ updateMetadataErr: errors.New("some metadata update error"),
- resp := recorder.Result()
- require.Equal(t, http.StatusNoContent, resp.StatusCode)
+ expectedCode: http.StatusNoContent,
+ },
+ {
+ desc: "Internal commit error; rollback triggered",
+ input: writeV2RequestFixture.Timeseries,
+ commitErr: errors.New("storage error"),
- b := labels.NewScratchBuilder(0)
- i := 0
- j := 0
- k := 0
- for _, ts := range writeV2RequestFixture.Timeseries {
- ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols)
+ expectedCode: http.StatusInternalServerError,
+ expectedRespBody: "storage error\n",
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), tc.input, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
+ require.NoError(t, err)
- for _, s := range ts.Samples {
- requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
+ req, err := http.NewRequest("", "", bytes.NewReader(payload))
+ require.NoError(t, err)
- switch i {
- case 0:
- requireEqual(t, mockMetadata{ls, writeV2RequestSeries1Metadata}, appendable.metadata[i])
- case 1:
- requireEqual(t, mockMetadata{ls, writeV2RequestSeries2Metadata}, appendable.metadata[i])
- default:
- t.Fatal("more series/samples then expected")
+ req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
+ req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
+ req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
+
+ appendable := &mockAppendable{
+ commitErr: tc.commitErr,
+ appendSampleErr: tc.appendSampleErr,
+ appendHistogramErr: tc.appendHistogramErr,
+ appendExemplarErr: tc.appendExemplarErr,
+ updateMetadataErr: tc.updateMetadataErr,
}
- i++
- }
- for _, e := range ts.Exemplars {
- exemplarLabels := e.ToExemplar(&b, writeV2RequestFixture.Symbols).Labels
- requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
- j++
- }
- for _, hp := range ts.Histograms {
- if hp.IsFloatHistogram() {
- fh := hp.ToFloatHistogram()
- requireEqual(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k])
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
+
+ recorder := httptest.NewRecorder()
+ handler.ServeHTTP(recorder, req)
+
+ resp := recorder.Result()
+ require.Equal(t, tc.expectedCode, resp.StatusCode)
+ respBody, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+ require.Equal(t, tc.expectedRespBody, string(respBody))
+
+ if tc.expectedCode == http.StatusInternalServerError {
+ // No writes are expected when the error code is retry-able; everything is rolled back.
+ expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples"))
+ expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms"))
+ expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
+
+ require.Empty(t, len(appendable.samples))
+ require.Empty(t, len(appendable.histograms))
+ require.Empty(t, len(appendable.exemplars))
+ require.Empty(t, len(appendable.metadata))
+ return
+ }
+
+ // Double check mandatory 2.0 stats.
+ // writeV2RequestFixture has 2 series with 1 sample, 2 histograms, 1 exemplar each.
+ expectHeaderValue(t, 2, resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples"))
+ expectHeaderValue(t, 4, resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms"))
+ if tc.appendExemplarErr != nil {
+ expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
} else {
- h := hp.ToIntHistogram()
- requireEqual(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k])
+ expectHeaderValue(t, 2, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
}
- k++
- }
+
+ // Double check what was actually appended.
+ var (
+ b = labels.NewScratchBuilder(0)
+ i, j, k, m int
+ )
+ for _, ts := range writeV2RequestFixture.Timeseries {
+ ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols)
+
+ for _, s := range ts.Samples {
+ requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
+ i++
+ }
+ for _, hp := range ts.Histograms {
+ if hp.IsFloatHistogram() {
+ fh := hp.ToFloatHistogram()
+ requireEqual(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k])
+ } else {
+ h := hp.ToIntHistogram()
+ requireEqual(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k])
+ }
+ k++
+ }
+ if tc.appendExemplarErr == nil {
+ for _, e := range ts.Exemplars {
+ exemplarLabels := e.ToExemplar(&b, writeV2RequestFixture.Symbols).Labels
+ requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
+ j++
+ }
+ }
+ if tc.updateMetadataErr == nil {
+ expectedMeta := ts.ToMetadata(writeV2RequestFixture.Symbols)
+ requireEqual(t, mockMetadata{ls, expectedMeta}, appendable.metadata[m])
+ m++
+ }
+ }
+ })
}
}
+// NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message.
func TestOutOfOrderSample_V1Message(t *testing.T) {
for _, tc := range []struct {
Name string
@@ -372,7 +534,7 @@ func TestOutOfOrderSample_V1Message(t *testing.T) {
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
- appendable := &mockAppendable{latestSample: 100}
+ appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder()
@@ -384,49 +546,10 @@ func TestOutOfOrderSample_V1Message(t *testing.T) {
}
}
-func TestOutOfOrderSample_V2Message(t *testing.T) {
- for _, tc := range []struct {
- Name string
- Timestamp int64
- }{
- {
- Name: "historic",
- Timestamp: 0,
- },
- {
- Name: "future",
- Timestamp: math.MaxInt64,
- },
- } {
- t.Run(tc.Name, func(t *testing.T) {
- payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
- LabelsRefs: []uint32{1, 2},
- Samples: []writev2.Sample{{Value: 1, Timestamp: tc.Timestamp}},
- }}, []string{"", "__name__", "metric1"}, nil, nil, nil, "snappy")
- require.NoError(t, err)
-
- req, err := http.NewRequest("", "", bytes.NewReader(payload))
- require.NoError(t, err)
-
- req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
- req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
- req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
-
- appendable := &mockAppendable{latestSample: 100}
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
-
- recorder := httptest.NewRecorder()
- handler.ServeHTTP(recorder, req)
-
- resp := recorder.Result()
- require.Equal(t, http.StatusBadRequest, resp.StatusCode)
- })
- }
-}
-
// This test case currently aims to verify that the WriteHandler endpoint
// don't fail on exemplar ingestion errors since the exemplar storage is
// still experimental.
+// NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message.
func TestOutOfOrderExemplar_V1Message(t *testing.T) {
tests := []struct {
Name string
@@ -453,7 +576,7 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) {
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
- appendable := &mockAppendable{latestExemplar: 100}
+ appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder()
@@ -466,49 +589,7 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) {
}
}
-func TestOutOfOrderExemplar_V2Message(t *testing.T) {
- tests := []struct {
- Name string
- Timestamp int64
- }{
- {
- Name: "historic",
- Timestamp: 0,
- },
- {
- Name: "future",
- Timestamp: math.MaxInt64,
- },
- }
-
- for _, tc := range tests {
- t.Run(tc.Name, func(t *testing.T) {
- payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
- LabelsRefs: []uint32{1, 2},
- Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{3, 4}, Value: 1, Timestamp: tc.Timestamp}},
- }}, []string{"", "__name__", "metric1", "foo", "bar"}, nil, nil, nil, "snappy")
- require.NoError(t, err)
-
- req, err := http.NewRequest("", "", bytes.NewReader(payload))
- require.NoError(t, err)
-
- req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
- req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
- req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
-
- appendable := &mockAppendable{latestExemplar: 100}
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
-
- recorder := httptest.NewRecorder()
- handler.ServeHTTP(recorder, req)
-
- resp := recorder.Result()
- // TODO: update to require.Equal(t, http.StatusConflict, resp.StatusCode) once exemplar storage is not experimental.
- require.Equal(t, http.StatusNoContent, resp.StatusCode)
- })
- }
-}
-
+// NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message.
func TestOutOfOrderHistogram_V1Message(t *testing.T) {
for _, tc := range []struct {
Name string
@@ -533,7 +614,7 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) {
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
- appendable := &mockAppendable{latestHistogram: 100}
+ appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder()
@@ -545,46 +626,6 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) {
}
}
-func TestOutOfOrderHistogram_V2Message(t *testing.T) {
- for _, tc := range []struct {
- Name string
- Timestamp int64
- }{
- {
- Name: "historic",
- Timestamp: 0,
- },
- {
- Name: "future",
- Timestamp: math.MaxInt64,
- },
- } {
- t.Run(tc.Name, func(t *testing.T) {
- payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
- LabelsRefs: []uint32{0, 1},
- Histograms: []writev2.Histogram{writev2.FromIntHistogram(0, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
- }}, []string{"__name__", "metric1"}, nil, nil, nil, "snappy")
- require.NoError(t, err)
-
- req, err := http.NewRequest("", "", bytes.NewReader(payload))
- require.NoError(t, err)
-
- req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
- req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
- req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
-
- appendable := &mockAppendable{latestHistogram: 100}
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
-
- recorder := httptest.NewRecorder()
- handler.ServeHTTP(recorder, req)
-
- resp := recorder.Result()
- require.Equal(t, http.StatusBadRequest, resp.StatusCode)
- })
- }
-}
-
func BenchmarkRemoteWriteHandler(b *testing.B) {
const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
var reqs []*http.Request
@@ -719,15 +760,20 @@ func genSeriesWithSample(numSeries int, ts int64) []prompb.TimeSeries {
}
type mockAppendable struct {
- latestSample int64
+ latestSample map[uint64]int64
samples []mockSample
- latestExemplar int64
+ latestExemplar map[uint64]int64
exemplars []mockExemplar
- latestHistogram int64
+ latestHistogram map[uint64]int64
histograms []mockHistogram
metadata []mockMetadata
- commitErr error
+ // optional errors to inject.
+ commitErr error
+ appendSampleErr error
+ appendHistogramErr error
+ appendExemplarErr error
+ updateMetadataErr error
}
type mockSample struct {
@@ -765,48 +811,92 @@ func requireEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...inte
}
func (m *mockAppendable) Appender(_ context.Context) storage.Appender {
+ if m.latestSample == nil {
+ m.latestSample = map[uint64]int64{}
+ }
+ if m.latestHistogram == nil {
+ m.latestHistogram = map[uint64]int64{}
+ }
+ if m.latestExemplar == nil {
+ m.latestExemplar = map[uint64]int64{}
+ }
return m
}
func (m *mockAppendable) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
- if t < m.latestSample {
- return 0, storage.ErrOutOfOrderSample
+ if m.appendSampleErr != nil {
+ return 0, m.appendSampleErr
}
- m.latestSample = t
+ latestTs := m.latestSample[l.Hash()]
+ if t < latestTs {
+ return 0, storage.ErrOutOfOrderSample
+ }
+ if t == latestTs {
+ return 0, storage.ErrDuplicateSampleForTimestamp
+ }
+
+ m.latestSample[l.Hash()] = t
m.samples = append(m.samples, mockSample{l, t, v})
return 0, nil
}
func (m *mockAppendable) Commit() error {
+ if m.commitErr != nil {
+ _ = m.Rollback() // As per Commit method contract.
+ }
return m.commitErr
}
-func (*mockAppendable) Rollback() error {
- return fmt.Errorf("not implemented")
+func (m *mockAppendable) Rollback() error {
+ m.samples = m.samples[:0]
+ m.exemplars = m.exemplars[:0]
+ m.histograms = m.histograms[:0]
+ m.metadata = m.metadata[:0]
+ return nil
}
func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
- if e.Ts < m.latestExemplar {
- return 0, storage.ErrOutOfOrderExemplar
+ if m.appendExemplarErr != nil {
+ return 0, m.appendExemplarErr
}
- m.latestExemplar = e.Ts
+ latestTs := m.latestExemplar[l.Hash()]
+ if e.Ts < latestTs {
+ return 0, storage.ErrOutOfOrderExemplar
+ }
+ if e.Ts == latestTs {
+ return 0, storage.ErrDuplicateExemplar
+ }
+
+ m.latestExemplar[l.Hash()] = e.Ts
m.exemplars = append(m.exemplars, mockExemplar{l, e.Labels, e.Ts, e.Value})
return 0, nil
}
func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
- if t < m.latestHistogram {
- return 0, storage.ErrOutOfOrderSample
+ if m.appendHistogramErr != nil {
+ return 0, m.appendHistogramErr
}
- m.latestHistogram = t
+ latestTs := m.latestHistogram[l.Hash()]
+ if t < latestTs {
+ return 0, storage.ErrOutOfOrderSample
+ }
+ if t == latestTs {
+ return 0, storage.ErrDuplicateSampleForTimestamp
+ }
+
+ m.latestHistogram[l.Hash()] = t
m.histograms = append(m.histograms, mockHistogram{l, t, h, fh})
return 0, nil
}
func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) {
+ if m.updateMetadataErr != nil {
+ return 0, m.updateMetadataErr
+ }
+
m.metadata = append(m.metadata, mockMetadata{l: l, m: mp})
return 0, nil
}
diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go
index 648ec4b17f..6e7422a585 100644
--- a/storage/remote/write_test.go
+++ b/storage/remote/write_test.go
@@ -369,7 +369,7 @@ func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) {
}
func TestOTLPWriteHandler(t *testing.T) {
- exportRequest := generateOTLPWriteRequest(t)
+ exportRequest := generateOTLPWriteRequest()
buf, err := exportRequest.MarshalProto()
require.NoError(t, err)
@@ -392,7 +392,7 @@ func TestOTLPWriteHandler(t *testing.T) {
require.Len(t, appendable.exemplars, 1) // 1 (exemplar)
}
-func generateOTLPWriteRequest(t *testing.T) pmetricotlp.ExportRequest {
+func generateOTLPWriteRequest() pmetricotlp.ExportRequest {
d := pmetric.NewMetrics()
// Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram
@@ -422,6 +422,7 @@ func generateOTLPWriteRequest(t *testing.T) pmetricotlp.ExportRequest {
counterDataPoint.Attributes().PutStr("foo.bar", "baz")
counterExemplar := counterDataPoint.Exemplars().AppendEmpty()
+
counterExemplar.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
counterExemplar.SetDoubleValue(10.0)
counterExemplar.SetSpanID(pcommon.SpanID{0, 1, 2, 3, 4, 5, 6, 7})
diff --git a/storage/secondary.go b/storage/secondary.go
index 44d9781835..1cf8024b65 100644
--- a/storage/secondary.go
+++ b/storage/secondary.go
@@ -49,16 +49,16 @@ func newSecondaryQuerierFromChunk(cq ChunkQuerier) genericQuerier {
return &secondaryQuerier{genericQuerier: newGenericQuerierFromChunk(cq)}
}
-func (s *secondaryQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
- vals, w, err := s.genericQuerier.LabelValues(ctx, name, matchers...)
+func (s *secondaryQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+ vals, w, err := s.genericQuerier.LabelValues(ctx, name, hints, matchers...)
if err != nil {
return nil, w.Add(err), nil
}
return vals, w, nil
}
-func (s *secondaryQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
- names, w, err := s.genericQuerier.LabelNames(ctx, matchers...)
+func (s *secondaryQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+ names, w, err := s.genericQuerier.LabelNames(ctx, hints, matchers...)
if err != nil {
return nil, w.Add(err), nil
}
diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go
index 9430de3964..3177762f81 100644
--- a/tsdb/chunkenc/xor.go
+++ b/tsdb/chunkenc/xor.go
@@ -60,7 +60,7 @@ type XORChunk struct {
b bstream
}
-// NewXORChunk returns a new chunk with XOR encoding of the given size.
+// NewXORChunk returns a new chunk with XOR encoding.
func NewXORChunk() *XORChunk {
b := make([]byte, 2, 128)
return &XORChunk{b: bstream{stream: b, count: 0}}
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index 770b1a1b48..c0edafe087 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -1001,7 +1001,7 @@ func TestWALFlushedOnDBClose(t *testing.T) {
q, err := db.Querier(0, 1)
require.NoError(t, err)
- values, ws, err := q.LabelValues(ctx, "labelname")
+ values, ws, err := q.LabelValues(ctx, "labelname", nil)
require.NoError(t, err)
require.Empty(t, ws)
require.Equal(t, []string{"labelvalue"}, values)
@@ -1976,7 +1976,7 @@ func TestQuerierWithBoundaryChunks(t *testing.T) {
defer q.Close()
// The requested interval covers 2 blocks, so the querier's label values for blockID should give us 2 values, one from each block.
- b, ws, err := q.LabelValues(ctx, "blockID")
+ b, ws, err := q.LabelValues(ctx, "blockID", nil)
require.NoError(t, err)
var nilAnnotations annotations.Annotations
require.Equal(t, nilAnnotations, ws)
@@ -2288,7 +2288,7 @@ func TestDB_LabelNames(t *testing.T) {
q, err := db.Querier(math.MinInt64, math.MaxInt64)
require.NoError(t, err)
var ws annotations.Annotations
- labelNames, ws, err = q.LabelNames(ctx)
+ labelNames, ws, err = q.LabelNames(ctx, nil)
require.NoError(t, err)
require.Empty(t, ws)
require.NoError(t, q.Close())
diff --git a/tsdb/head_append.go b/tsdb/head_append.go
index f45ab606ba..8d66d1e818 100644
--- a/tsdb/head_append.go
+++ b/tsdb/head_append.go
@@ -846,16 +846,17 @@ func (a *headAppender) Commit() (err error) {
// number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled)
floatOOBRejected int
- inOrderMint int64 = math.MaxInt64
- inOrderMaxt int64 = math.MinInt64
- ooomint int64 = math.MaxInt64
- ooomaxt int64 = math.MinInt64
- wblSamples []record.RefSample
- oooMmapMarkers map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef
- oooRecords [][]byte
- oooCapMax = a.head.opts.OutOfOrderCapMax.Load()
- series *memSeries
- appendChunkOpts = chunkOpts{
+ inOrderMint int64 = math.MaxInt64
+ inOrderMaxt int64 = math.MinInt64
+ oooMinT int64 = math.MaxInt64
+ oooMaxT int64 = math.MinInt64
+ wblSamples []record.RefSample
+ oooMmapMarkers map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef
+ oooMmapMarkersCount int
+ oooRecords [][]byte
+ oooCapMax = a.head.opts.OutOfOrderCapMax.Load()
+ series *memSeries
+ appendChunkOpts = chunkOpts{
chunkDiskMapper: a.head.chunkDiskMapper,
chunkRange: a.head.chunkRange.Load(),
samplesPerChunk: a.head.opts.SamplesPerChunk,
@@ -872,6 +873,7 @@ func (a *headAppender) Commit() (err error) {
// WBL is not enabled. So no need to collect.
wblSamples = nil
oooMmapMarkers = nil
+ oooMmapMarkersCount = 0
return
}
// The m-map happens before adding a new sample. So we collect
@@ -880,12 +882,14 @@ func (a *headAppender) Commit() (err error) {
// WBL Before this Commit(): [old samples before this commit for chunk 1]
// WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3]
if oooMmapMarkers != nil {
- markers := make([]record.RefMmapMarker, 0, len(oooMmapMarkers))
- for ref, mmapRef := range oooMmapMarkers {
- markers = append(markers, record.RefMmapMarker{
- Ref: ref,
- MmapRef: mmapRef,
- })
+ markers := make([]record.RefMmapMarker, 0, oooMmapMarkersCount)
+ for ref, mmapRefs := range oooMmapMarkers {
+ for _, mmapRef := range mmapRefs {
+ markers = append(markers, record.RefMmapMarker{
+ Ref: ref,
+ MmapRef: mmapRef,
+ })
+ }
}
r := enc.MmapMarkers(markers, a.head.getBytesBuffer())
oooRecords = append(oooRecords, r)
@@ -928,32 +932,39 @@ func (a *headAppender) Commit() (err error) {
case oooSample:
// Sample is OOO and OOO handling is enabled
// and the delta is within the OOO tolerance.
- var mmapRef chunks.ChunkDiskMapperRef
- ok, chunkCreated, mmapRef = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax)
+ var mmapRefs []chunks.ChunkDiskMapperRef
+ ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax)
if chunkCreated {
r, ok := oooMmapMarkers[series.ref]
- if !ok || r != 0 {
+ if !ok || r != nil {
// !ok means there are no markers collected for these samples yet. So we first flush the samples
// before setting this m-map marker.
- // r != 0 means we have already m-mapped a chunk for this series in the same Commit().
+ // r != nil means we have already m-mapped a chunk for this series in the same Commit().
// Hence, before we m-map again, we should add the samples and m-map markers
// seen till now to the WBL records.
collectOOORecords()
}
if oooMmapMarkers == nil {
- oooMmapMarkers = make(map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef)
+ oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef)
+ }
+ if len(mmapRefs) > 0 {
+ oooMmapMarkers[series.ref] = mmapRefs
+ oooMmapMarkersCount += len(mmapRefs)
+ } else {
+ // No chunk was written to disk, so we need to set an initial marker for this series.
+ oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0}
+ oooMmapMarkersCount++
}
- oooMmapMarkers[series.ref] = mmapRef
}
if ok {
wblSamples = append(wblSamples, s)
- if s.T < ooomint {
- ooomint = s.T
+ if s.T < oooMinT {
+ oooMinT = s.T
}
- if s.T > ooomaxt {
- ooomaxt = s.T
+ if s.T > oooMaxT {
+ oooMaxT = s.T
}
floatOOOAccepted++
} else {
@@ -1053,7 +1064,7 @@ func (a *headAppender) Commit() (err error) {
a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended))
a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOOAccepted))
a.head.updateMinMaxTime(inOrderMint, inOrderMaxt)
- a.head.updateMinOOOMaxOOOTime(ooomint, ooomaxt)
+ a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT)
collectOOORecords()
if a.head.wbl != nil {
@@ -1069,14 +1080,14 @@ func (a *headAppender) Commit() (err error) {
}
// insert is like append, except it inserts. Used for OOO samples.
-func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRef chunks.ChunkDiskMapperRef) {
+func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) {
if s.ooo == nil {
s.ooo = &memSeriesOOOFields{}
}
c := s.ooo.oooHeadChunk
if c == nil || c.chunk.NumSamples() == int(oooCapMax) {
// Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks.
- c, mmapRef = s.cutNewOOOHeadChunk(t, chunkDiskMapper)
+ c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper)
chunkCreated = true
}
@@ -1089,7 +1100,7 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk
c.maxTime = t
}
}
- return ok, chunkCreated, mmapRef
+ return ok, chunkCreated, mmapRefs
}
// chunkOpts are chunk-level options that are passed when appending to a memSeries.
@@ -1431,7 +1442,7 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange
// cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk.
// The caller must ensure that s.ooo is not nil.
-func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, chunks.ChunkDiskMapperRef) {
+func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) {
ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper)
s.ooo.oooHeadChunk = &oooHeadChunk{
@@ -1443,21 +1454,29 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk
return s.ooo.oooHeadChunk, ref
}
-func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) chunks.ChunkDiskMapperRef {
+func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) []chunks.ChunkDiskMapperRef {
if s.ooo == nil || s.ooo.oooHeadChunk == nil {
- // There is no head chunk, so nothing to m-map here.
- return 0
+ // OOO is not enabled or there is no head chunk, so nothing to m-map here.
+ return nil
+ }
+ chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
+ if err != nil {
+ handleChunkWriteError(err)
+ return nil
+ }
+ chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, 1)
+ for _, memchunk := range chks {
+ chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, memchunk.chunk, true, handleChunkWriteError)
+ chunkRefs = append(chunkRefs, chunkRef)
+ s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
+ ref: chunkRef,
+ numSamples: uint16(memchunk.chunk.NumSamples()),
+ minTime: memchunk.minTime,
+ maxTime: memchunk.maxTime,
+ })
}
- xor, _ := s.ooo.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality.
- chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, xor, true, handleChunkWriteError)
- s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
- ref: chunkRef,
- numSamples: uint16(xor.NumSamples()),
- minTime: s.ooo.oooHeadChunk.minTime,
- maxTime: s.ooo.oooHeadChunk.maxTime,
- })
s.ooo.oooHeadChunk = nil
- return chunkRef
+ return chunkRefs
}
// mmapChunks will m-map all but first chunk on s.headChunks list.
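For context on the marker changes above: a series can now m-map more than one chunk per Commit(), so the per-series marker value becomes a slice of refs and the WBL record is built by flattening that map, with oooMmapMarkersCount pre-sizing the slice. A minimal sketch of that flattening, using the real record and chunks types but hypothetical ref values:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/record"
)

func main() {
	// Hypothetical markers collected during one Commit(): series 1 m-mapped two
	// chunks, series 2 got the placeholder marker 0 because nothing hit disk yet.
	oooMmapMarkers := map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef{
		1: {100, 101},
		2: {0},
	}
	count := 0
	for _, refs := range oooMmapMarkers {
		count += len(refs) // same role as oooMmapMarkersCount in the patch
	}
	markers := make([]record.RefMmapMarker, 0, count)
	for ref, refs := range oooMmapMarkers {
		for _, mmapRef := range refs {
			markers = append(markers, record.RefMmapMarker{Ref: ref, MmapRef: mmapRef})
		}
	}
	fmt.Println(len(markers)) // 3
}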
diff --git a/tsdb/head_test.go b/tsdb/head_test.go
index fa48345165..c192c8a078 100644
--- a/tsdb/head_test.go
+++ b/tsdb/head_test.go
@@ -4730,6 +4730,14 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) {
// TestWBLReplay checks the replay at a low level.
func TestWBLReplay(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testWBLReplay(t, scenario)
+ })
+ }
+}
+
+func testWBLReplay(t *testing.T, scenario sampleTypeScenario) {
dir := t.TempDir()
wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
require.NoError(t, err)
@@ -4745,11 +4753,11 @@ func TestWBLReplay(t *testing.T) {
require.NoError(t, err)
require.NoError(t, h.Init(0))
- var expOOOSamples []sample
+ var expOOOSamples []chunks.Sample
l := labels.FromStrings("foo", "bar")
- appendSample := func(mins int64, isOOO bool) {
+ appendSample := func(mins int64, val float64, isOOO bool) {
app := h.Appender(context.Background())
- ts, v := mins*time.Minute.Milliseconds(), float64(mins)
+ ts, v := mins*time.Minute.Milliseconds(), val
_, err := app.Append(0, l, ts, v)
require.NoError(t, err)
require.NoError(t, app.Commit())
@@ -4760,15 +4768,15 @@ func TestWBLReplay(t *testing.T) {
}
// In-order sample.
- appendSample(60, false)
+ appendSample(60, 60, false)
// Out of order samples.
- appendSample(40, true)
- appendSample(35, true)
- appendSample(50, true)
- appendSample(55, true)
- appendSample(59, true)
- appendSample(31, true)
+ appendSample(40, 40, true)
+ appendSample(35, 35, true)
+ appendSample(50, 50, true)
+ appendSample(55, 55, true)
+ appendSample(59, 59, true)
+ appendSample(31, 31, true)
// Check that Head's time ranges are set properly.
require.Equal(t, 60*time.Minute.Milliseconds(), h.MinTime())
@@ -4792,22 +4800,23 @@ func TestWBLReplay(t *testing.T) {
require.False(t, ok)
require.NotNil(t, ms)
- xor, err := ms.ooo.oooHeadChunk.chunk.ToXOR()
+ chks, err := ms.ooo.oooHeadChunk.chunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
require.NoError(t, err)
+ require.Len(t, chks, 1)
- it := xor.Iterator(nil)
- actOOOSamples := make([]sample, 0, len(expOOOSamples))
- for it.Next() == chunkenc.ValFloat {
- ts, v := it.At()
- actOOOSamples = append(actOOOSamples, sample{t: ts, f: v})
- }
+ it := chks[0].chunk.Iterator(nil)
+ actOOOSamples, err := storage.ExpandSamples(it, nil)
+ require.NoError(t, err)
// OOO chunk will be sorted. Hence sort the expected samples.
sort.Slice(expOOOSamples, func(i, j int) bool {
- return expOOOSamples[i].t < expOOOSamples[j].t
+ return expOOOSamples[i].T() < expOOOSamples[j].T()
})
- require.Equal(t, expOOOSamples, actOOOSamples)
+ // Passing in true for the 'ignoreCounterResets' parameter prevents differences in counter reset headers
+ // from being factored into the sample comparison.
+ // TODO(fionaliao): understand counter reset behaviour, might want to modify this later
+ requireEqualSamples(t, l.String(), expOOOSamples, actOOOSamples, true)
require.NoError(t, h.Close())
}
diff --git a/tsdb/ooo_head.go b/tsdb/ooo_head.go
index 7f2110fa65..b2556d62e9 100644
--- a/tsdb/ooo_head.go
+++ b/tsdb/ooo_head.go
@@ -17,9 +17,10 @@ import (
"fmt"
"sort"
+ "github.com/prometheus/prometheus/tsdb/chunkenc"
+
"github.com/oklog/ulid"
- "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/tombstones"
)
@@ -74,24 +75,22 @@ func (o *OOOChunk) NumSamples() int {
return len(o.samples)
}
-func (o *OOOChunk) ToXOR() (*chunkenc.XORChunk, error) {
- x := chunkenc.NewXORChunk()
- app, err := x.Appender()
- if err != nil {
- return nil, err
- }
- for _, s := range o.samples {
- app.Append(s.t, s.f)
- }
- return x, nil
-}
-
-func (o *OOOChunk) ToXORBetweenTimestamps(mint, maxt int64) (*chunkenc.XORChunk, error) {
- x := chunkenc.NewXORChunk()
- app, err := x.Appender()
- if err != nil {
- return nil, err
+// ToEncodedChunks returns chunks with the samples in the OOOChunk.
+//
+//nolint:revive // unexported-return.
+func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error) {
+ if len(o.samples) == 0 {
+ return nil, nil
}
+ // The most common case is that there will be a single chunk, with the same type of samples in it - this is always true for float samples.
+ chks = make([]memChunk, 0, 1)
+ var (
+ cmint int64
+ cmaxt int64
+ chunk chunkenc.Chunk
+ app chunkenc.Appender
+ )
+ prevEncoding := chunkenc.EncNone // We could ask the chunk for its encoding instead, but tracking it here is more efficient.
for _, s := range o.samples {
if s.t < mint {
continue
@@ -99,9 +98,77 @@ func (o *OOOChunk) ToXORBetweenTimestamps(mint, maxt int64) (*chunkenc.XORChunk,
if s.t > maxt {
break
}
- app.Append(s.t, s.f)
+ encoding := chunkenc.EncXOR
+ if s.h != nil {
+ encoding = chunkenc.EncHistogram
+ } else if s.fh != nil {
+ encoding = chunkenc.EncFloatHistogram
+ }
+
+ // prevApp is the appender for the previous sample.
+ prevApp := app
+
+ if encoding != prevEncoding { // For the first sample, this will always be true as EncNone != EncXOR | EncHistogram | EncFloatHistogram
+ if prevEncoding != chunkenc.EncNone {
+ chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
+ }
+ cmint = s.t
+ switch encoding {
+ case chunkenc.EncXOR:
+ chunk = chunkenc.NewXORChunk()
+ case chunkenc.EncHistogram:
+ chunk = chunkenc.NewHistogramChunk()
+ case chunkenc.EncFloatHistogram:
+ chunk = chunkenc.NewFloatHistogramChunk()
+ default:
+ chunk = chunkenc.NewXORChunk()
+ }
+ app, err = chunk.Appender()
+ if err != nil {
+ return
+ }
+ }
+ switch encoding {
+ case chunkenc.EncXOR:
+ app.Append(s.t, s.f)
+ case chunkenc.EncHistogram:
+ // Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
+ prevHApp, _ := prevApp.(*chunkenc.HistogramAppender)
+ var (
+ newChunk chunkenc.Chunk
+ recoded bool
+ )
+ newChunk, recoded, app, _ = app.AppendHistogram(prevHApp, s.t, s.h, false)
+ if newChunk != nil { // A new chunk was allocated.
+ if !recoded {
+ chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
+ }
+ chunk = newChunk
+ cmint = s.t
+ }
+ case chunkenc.EncFloatHistogram:
+ // Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
+ prevHApp, _ := prevApp.(*chunkenc.FloatHistogramAppender)
+ var (
+ newChunk chunkenc.Chunk
+ recoded bool
+ )
+ newChunk, recoded, app, _ = app.AppendFloatHistogram(prevHApp, s.t, s.fh, false)
+ if newChunk != nil { // A new chunk was allocated.
+ if !recoded {
+ chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
+ }
+ chunk = newChunk
+ cmint = s.t
+ }
+ }
+ cmaxt = s.t
+ prevEncoding = encoding
}
- return x, nil
+ if prevEncoding != chunkenc.EncNone {
+ chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
+ }
+ return chks, nil
}
var _ BlockReader = &OOORangeHead{}
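The new ToEncodedChunks walks the sorted OOO samples once and cuts a fresh chunk whenever the sample encoding changes (XOR float, histogram, float histogram). A minimal, dependency-free sketch of that run-splitting idea, with plain strings standing in for the chunkenc encodings:

package main

import "fmt"

// Illustrative only: a stripped-down version of the run splitting that
// ToEncodedChunks performs, keyed on a string instead of a chunkenc encoding.
type sample struct {
	t    int64
	kind string // "xor", "histogram" or "floathistogram"
}

func splitByEncoding(samples []sample) [][]sample {
	var runs [][]sample
	prev := ""
	for _, s := range samples {
		if s.kind != prev {
			runs = append(runs, nil) // start a new "chunk" when the encoding changes
			prev = s.kind
		}
		runs[len(runs)-1] = append(runs[len(runs)-1], s)
	}
	return runs
}

func main() {
	samples := []sample{{1, "xor"}, {2, "xor"}, {3, "histogram"}, {4, "xor"}}
	fmt.Println(len(splitByEncoding(samples))) // 3 runs: xor, histogram, xor
}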
diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go
index 4e8329c99b..a35276af50 100644
--- a/tsdb/ooo_head_read.go
+++ b/tsdb/ooo_head_read.go
@@ -108,11 +108,19 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
c := s.ooo.oooHeadChunk
if c.OverlapsClosedInterval(oh.mint, oh.maxt) && maxMmapRef == 0 {
ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
- var xor chunkenc.Chunk
if len(c.chunk.samples) > 0 { // Empty samples happens in tests, at least.
- xor, _ = c.chunk.ToXOR() // Ignoring error because it can't fail.
+ chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(c.minTime, c.maxTime)
+ if err != nil {
+ handleChunkWriteError(err)
+ return nil
+ }
+ for _, chk := range chks {
+ addChunk(c.minTime, c.maxTime, ref, chk.chunk)
+ }
+ } else {
+ var emptyChunk chunkenc.Chunk
+ addChunk(c.minTime, c.maxTime, ref, emptyChunk)
}
- addChunk(c.minTime, c.maxTime, ref, xor)
}
}
for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- {
@@ -341,14 +349,20 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead,
continue
}
- mmapRef := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
- if mmapRef == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
+ var lastMmapRef chunks.ChunkDiskMapperRef
+ mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
+ if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
// Nothing was m-mapped. So take the mmapRef from the existing slice if it exists.
- mmapRef = ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref
+ mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref}
}
- seq, off := mmapRef.Unpack()
+ if len(mmapRefs) == 0 {
+ lastMmapRef = 0
+ } else {
+ lastMmapRef = mmapRefs[len(mmapRefs)-1]
+ }
+ seq, off := lastMmapRef.Unpack()
if seq > lastSeq || (seq == lastSeq && off > lastOff) {
- ch.lastMmapRef, lastSeq, lastOff = mmapRef, seq, off
+ ch.lastMmapRef, lastSeq, lastOff = lastMmapRef, seq, off
}
if len(ms.ooo.oooMmappedChunks) > 0 {
ch.postings = append(ch.postings, seriesRef)
diff --git a/tsdb/querier.go b/tsdb/querier.go
index fb4a87cc8c..910c2d7fc1 100644
--- a/tsdb/querier.go
+++ b/tsdb/querier.go
@@ -77,12 +77,12 @@ func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, er
}, nil
}
-func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, err := q.index.SortedLabelValues(ctx, name, matchers...)
return res, nil, err
}
-func (q *blockBaseQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *blockBaseQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, err := q.index.LabelNames(ctx, matchers...)
return res, nil, err
}
diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go
index a1af49465d..ffdf8dc028 100644
--- a/tsdb/querier_test.go
+++ b/tsdb/querier_test.go
@@ -3022,7 +3022,7 @@ func TestQuerierIndexQueriesRace(t *testing.T) {
q, err := db.Querier(math.MinInt64, math.MaxInt64)
require.NoError(t, err)
- values, _, err := q.LabelValues(ctx, "seq", c.matchers...)
+ values, _, err := q.LabelValues(ctx, "seq", nil, c.matchers...)
require.NoError(t, err)
require.Emptyf(t, values, `label values for label "seq" should be empty`)
diff --git a/tsdb/record/record.go b/tsdb/record/record.go
index c95b25f06e..784d0b23d7 100644
--- a/tsdb/record/record.go
+++ b/tsdb/record/record.go
@@ -543,7 +543,7 @@ func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogr
return histograms, nil
}
-// Decode decodes a Histogram from a byte slice.
+// DecodeFloatHistogram decodes a FloatHistogram from a byte slice.
func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) {
fh.CounterResetHint = histogram.CounterResetHint(buf.Byte())
diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go
index 3d74a551db..bc7a144e66 100644
--- a/tsdb/wlog/watcher.go
+++ b/tsdb/wlog/watcher.go
@@ -265,6 +265,11 @@ func (w *Watcher) loop() {
// Run the watcher, which will tail the WAL until the quit channel is closed
// or an error case is hit.
func (w *Watcher) Run() error {
+ _, lastSegment, err := w.firstAndLast()
+ if err != nil {
+ return fmt.Errorf("wal.Segments: %w", err)
+ }
+
// We want to ensure this is false across iterations since
// Run will be called again if there was a failure to read the WAL.
w.sendSamples = false
@@ -289,20 +294,14 @@ func (w *Watcher) Run() error {
return err
}
- level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment)
+ level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment)
for !isClosed(w.quit) {
w.currentSegmentMetric.Set(float64(currentSegment))
- // Re-check on each iteration in case a new segment was added,
- // because watch() will wait for notifications on the last segment.
- _, lastSegment, err := w.firstAndLast()
- if err != nil {
- return fmt.Errorf("wal.Segments: %w", err)
- }
- tail := currentSegment >= lastSegment
-
- level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment, "lastSegment", lastSegment)
- if err := w.watch(currentSegment, tail); err != nil && !errors.Is(err, ErrIgnorable) {
+ // On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment.
+ // On subsequent calls to this function, currentSegment will have been incremented and we should open that segment.
+ level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment)
+ if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil && !errors.Is(err, ErrIgnorable) {
return err
}
diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go
index 824010f30e..70c84b4ff8 100644
--- a/tsdb/wlog/watcher_test.go
+++ b/tsdb/wlog/watcher_test.go
@@ -17,6 +17,7 @@ import (
"math/rand"
"os"
"path"
+ "runtime"
"sync"
"testing"
"time"
@@ -700,11 +701,46 @@ func TestRun_StartupTime(t *testing.T) {
}
}
+func generateWALRecords(w *WL, segment, seriesCount, samplesCount int) error {
+ enc := record.Encoder{}
+ for j := 0; j < seriesCount; j++ {
+ ref := j + (segment * 100)
+ series := enc.Series([]record.RefSeries{
+ {
+ Ref: chunks.HeadSeriesRef(ref),
+ Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", segment)),
+ },
+ }, nil)
+ if err := w.Log(series); err != nil {
+ return err
+ }
+
+ for k := 0; k < samplesCount; k++ {
+ inner := rand.Intn(ref + 1)
+ sample := enc.Samples([]record.RefSample{
+ {
+ Ref: chunks.HeadSeriesRef(inner),
+ T: int64(segment),
+ V: float64(segment),
+ },
+ }, nil)
+ if err := w.Log(sample); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
func TestRun_AvoidNotifyWhenBehind(t *testing.T) {
- const pageSize = 32 * 1024
- const segments = 10
- const seriesCount = 20
- const samplesCount = 300
+ if runtime.GOOS == "windows" { // Takes a really long time, perhaps because min sleep time is 15ms.
+ t.SkipNow()
+ }
+ const segmentSize = pageSize // Smallest allowed segment size.
+ const segmentsToWrite = 5
+ const segmentsToRead = segmentsToWrite - 1
+ const seriesCount = 10
+ const samplesCount = 50
// This test can take longer than intended to finish in cloud CI.
readTimeout := 10 * time.Second
@@ -717,73 +753,37 @@ func TestRun_AvoidNotifyWhenBehind(t *testing.T) {
err := os.Mkdir(wdir, 0o777)
require.NoError(t, err)
- enc := record.Encoder{}
- w, err := NewSize(nil, nil, wdir, pageSize, compress)
+ w, err := NewSize(nil, nil, wdir, segmentSize, compress)
require.NoError(t, err)
var wg sync.WaitGroup
- // add one segment initially to ensure there's a value > 0 for the last segment id
- for i := 0; i < 1; i++ {
- for j := 0; j < seriesCount; j++ {
- ref := j + (i * 100)
- series := enc.Series([]record.RefSeries{
- {
- Ref: chunks.HeadSeriesRef(ref),
- Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
- },
- }, nil)
- require.NoError(t, w.Log(series))
-
- for k := 0; k < samplesCount; k++ {
- inner := rand.Intn(ref + 1)
- sample := enc.Samples([]record.RefSample{
- {
- Ref: chunks.HeadSeriesRef(inner),
- T: int64(i),
- V: float64(i),
- },
- }, nil)
- require.NoError(t, w.Log(sample))
- }
- }
- }
+ // Generate one segment initially to ensure that watcher.Run() finds at least one segment on disk.
+ require.NoError(t, generateWALRecords(w, 0, seriesCount, samplesCount))
+ w.NextSegment() // Force creation of the next segment
wg.Add(1)
go func() {
defer wg.Done()
- for i := 1; i < segments; i++ {
- for j := 0; j < seriesCount; j++ {
- ref := j + (i * 100)
- series := enc.Series([]record.RefSeries{
- {
- Ref: chunks.HeadSeriesRef(ref),
- Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
- },
- }, nil)
- require.NoError(t, w.Log(series))
-
- for k := 0; k < samplesCount; k++ {
- inner := rand.Intn(ref + 1)
- sample := enc.Samples([]record.RefSample{
- {
- Ref: chunks.HeadSeriesRef(inner),
- T: int64(i),
- V: float64(i),
- },
- }, nil)
- require.NoError(t, w.Log(sample))
- }
- }
+ for i := 1; i < segmentsToWrite; i++ {
+ require.NoError(t, generateWALRecords(w, i, seriesCount, samplesCount))
+ w.NextSegment()
}
}()
wt := newWriteToMock(time.Millisecond)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
- watcher.MaxSegment = segments
+ watcher.MaxSegment = segmentsToRead
watcher.setMetrics()
startTime := time.Now()
err = watcher.Run()
wg.Wait()
require.Less(t, time.Since(startTime), readTimeout)
+
+ // But sample records shouldn't get dropped.
+ retry(t, defaultRetryInterval, defaultRetries, func() bool {
+ return wt.checkNumSeries() > 0
+ })
+ require.Equal(t, segmentsToRead*seriesCount*samplesCount, wt.samplesAppended)
+
require.NoError(t, err)
require.NoError(t, w.Close())
})
diff --git a/util/documentcli/documentcli.go b/util/documentcli/documentcli.go
index 720a7c9c7f..9de2bb8d4c 100644
--- a/util/documentcli/documentcli.go
+++ b/util/documentcli/documentcli.go
@@ -75,7 +75,7 @@ func createFlagRow(flag *kingpin.FlagModel) []string {
name = fmt.Sprintf(`-%c, --%s`, flag.Short, flag.Name)
}
- return []string{name, flag.Help, defaultVal}
+ return []string{name, strings.ReplaceAll(flag.Help, "|", `\|`), defaultVal}
}
func writeFlagTable(writer io.Writer, level int, fgm *kingpin.FlagGroupModel) error {
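The documentcli change above escapes '|' in flag help text because the generated CLI documentation places help strings inside a Markdown table, where a literal pipe would be read as a column separator. A small illustration of the same call:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical help string containing a literal pipe.
	help := "Output format: text|json"
	fmt.Println(strings.ReplaceAll(help, "|", `\|`)) // Output format: text\|json
}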
diff --git a/web/api/v1/api.go b/web/api/v1/api.go
index 7e98dac454..03854787f8 100644
--- a/web/api/v1/api.go
+++ b/web/api/v1/api.go
@@ -660,6 +660,10 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
+ hints := &storage.LabelHints{
+ Limit: toHintLimit(limit),
+ }
+
q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end))
if err != nil {
return apiFuncResult{nil, returnAPIError(err), nil, nil}
@@ -674,7 +678,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
labelNamesSet := make(map[string]struct{})
for _, matchers := range matcherSets {
- vals, callWarnings, err := q.LabelNames(r.Context(), matchers...)
+ vals, callWarnings, err := q.LabelNames(r.Context(), hints, matchers...)
if err != nil {
return apiFuncResult{nil, returnAPIError(err), warnings, nil}
}
@@ -696,7 +700,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
if len(matcherSets) == 1 {
matchers = matcherSets[0]
}
- names, warnings, err = q.LabelNames(r.Context(), matchers...)
+ names, warnings, err = q.LabelNames(r.Context(), hints, matchers...)
if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil}
}
@@ -706,7 +710,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
names = []string{}
}
- if len(names) > limit {
+ if limit > 0 && len(names) > limit {
names = names[:limit]
warnings = warnings.Add(errors.New("results truncated due to limit"))
}
@@ -740,6 +744,10 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
+ hints := &storage.LabelHints{
+ Limit: toHintLimit(limit),
+ }
+
q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end))
if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
@@ -764,7 +772,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
var callWarnings annotations.Annotations
labelValuesSet := make(map[string]struct{})
for _, matchers := range matcherSets {
- vals, callWarnings, err = q.LabelValues(ctx, name, matchers...)
+ vals, callWarnings, err = q.LabelValues(ctx, name, hints, matchers...)
if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer}
}
@@ -783,7 +791,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
if len(matcherSets) == 1 {
matchers = matcherSets[0]
}
- vals, warnings, err = q.LabelValues(ctx, name, matchers...)
+ vals, warnings, err = q.LabelValues(ctx, name, hints, matchers...)
if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer}
}
@@ -795,7 +803,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
slices.Sort(vals)
- if len(vals) > limit {
+ if limit > 0 && len(vals) > limit {
vals = vals[:limit]
warnings = warnings.Add(errors.New("results truncated due to limit"))
}
@@ -865,6 +873,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) {
Start: timestamp.FromTime(start),
End: timestamp.FromTime(end),
Func: "series", // There is no series function, this token is used for lookups that don't need samples.
+ Limit: toHintLimit(limit),
}
var set storage.SeriesSet
@@ -891,7 +900,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) {
}
metrics = append(metrics, set.At().Labels())
- if len(metrics) > limit {
+ if limit > 0 && len(metrics) > limit {
metrics = metrics[:limit]
warnings.Add(errors.New("results truncated due to limit"))
return apiFuncResult{metrics, nil, warnings, closer}
@@ -1397,6 +1406,11 @@ func (api *API) rules(r *http.Request) apiFuncResult {
rgSet := queryFormToSet(r.Form["rule_group[]"])
fSet := queryFormToSet(r.Form["file[]"])
+ matcherSets, err := parseMatchersParam(r.Form["match[]"])
+ if err != nil {
+ return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
+ }
+
ruleGroups := api.rulesRetriever(r.Context()).RuleGroups()
res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, 0, len(ruleGroups))}
typ := strings.ToLower(r.URL.Query().Get("type"))
@@ -1436,7 +1450,8 @@ func (api *API) rules(r *http.Request) apiFuncResult {
EvaluationTime: grp.GetEvaluationTime().Seconds(),
LastEvaluation: grp.GetLastEvaluation(),
}
- for _, rr := range grp.Rules() {
+
+ for _, rr := range grp.Rules(matcherSets...) {
var enrichedRule Rule
if len(rnSet) > 0 {
@@ -1902,8 +1917,8 @@ OUTER:
return matcherSets, nil
}
+// parseLimitParam returns 0 to mean that no limit is to be applied.
func parseLimitParam(limitStr string) (limit int, err error) {
- limit = math.MaxInt
if limitStr == "" {
return limit, nil
}
@@ -1912,9 +1927,19 @@ func parseLimitParam(limitStr string) (limit int, err error) {
if err != nil {
return limit, err
}
- if limit <= 0 {
- return limit, errors.New("limit must be positive")
+ if limit < 0 {
+ return limit, errors.New("limit must be non-negative")
}
return limit, nil
}
+
+// toHintLimit increases the API limit, as returned by parseLimitParam, by 1.
+// This allows for emitting warnings when the results are truncated.
+func toHintLimit(limit int) int {
+ // A limit of 0 means no limit, and the math.MaxInt check avoids int overflow.
+ if limit > 0 && limit < math.MaxInt {
+ return limit + 1
+ }
+ return limit
+}
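Taken together, the API limit changes work like this: parseLimitParam now treats 0 (or an absent parameter) as "no limit", toHintLimit bumps a positive limit by one so the storage layer may return one extra item, and the handlers truncate and warn only when more than limit results come back. A small sketch of that interplay, with a standalone copy of toHintLimit and a hypothetical result slice:

package main

import (
	"fmt"
	"math"
)

// Same logic as the new toHintLimit: bump a positive limit by one so that the
// extra item, if present, signals that the results were truncated.
func toHintLimit(limit int) int {
	if limit > 0 && limit < math.MaxInt {
		return limit + 1
	}
	return limit // 0 means no limit.
}

func main() {
	limit := 2
	names := []string{"__name__", "abc", "foo", "xyz"} // hypothetical label names
	fmt.Println(toHintLimit(0), toHintLimit(limit))    // 0 3

	if limit > 0 && len(names) > limit {
		names = names[:limit]
		fmt.Println("results truncated due to limit")
	}
	fmt.Println(names) // [__name__ abc]
}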
diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go
index 9eb7d08c35..86a57ca088 100644
--- a/web/api/v1/api_test.go
+++ b/web/api/v1/api_test.go
@@ -261,11 +261,36 @@ func (m *rulesRetrieverMock) CreateAlertingRules() {
false,
log.NewNopLogger(),
)
-
+ rule4 := rules.NewAlertingRule(
+ "test_metric6",
+ expr2,
+ time.Second,
+ 0,
+ labels.FromStrings("testlabel", "rule"),
+ labels.Labels{},
+ labels.Labels{},
+ "",
+ true,
+ log.NewNopLogger(),
+ )
+ rule5 := rules.NewAlertingRule(
+ "test_metric7",
+ expr2,
+ time.Second,
+ 0,
+ labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
+ labels.Labels{},
+ labels.Labels{},
+ "",
+ true,
+ log.NewNopLogger(),
+ )
var r []*rules.AlertingRule
r = append(r, rule1)
r = append(r, rule2)
r = append(r, rule3)
+ r = append(r, rule4)
+ r = append(r, rule5)
m.alertingRules = r
}
@@ -300,7 +325,9 @@ func (m *rulesRetrieverMock) CreateRuleGroups() {
recordingExpr, err := parser.ParseExpr(`vector(1)`)
require.NoError(m.testing, err, "unable to parse alert expression")
recordingRule := rules.NewRecordingRule("recording-rule-1", recordingExpr, labels.Labels{})
+ recordingRule2 := rules.NewRecordingRule("recording-rule-2", recordingExpr, labels.FromStrings("testlabel", "rule"))
r = append(r, recordingRule)
+ r = append(r, recordingRule2)
group := rules.NewGroup(rules.GroupOptions{
Name: "grp",
@@ -739,13 +766,16 @@ func TestLabelNames(t *testing.T) {
api := &API{
Queryable: storage,
}
- request := func(method string, matchers ...string) (*http.Request, error) {
+ request := func(method, limit string, matchers ...string) (*http.Request, error) {
u, err := url.Parse("http://example.com")
require.NoError(t, err)
q := u.Query()
for _, matcher := range matchers {
q.Add("match[]", matcher)
}
+ if limit != "" {
+ q.Add("limit", limit)
+ }
u.RawQuery = q.Encode()
r, err := http.NewRequest(method, u.String(), nil)
@@ -759,6 +789,7 @@ func TestLabelNames(t *testing.T) {
name string
api *API
matchers []string
+ limit string
expected []string
expectedErrorType errorType
}{
@@ -773,6 +804,13 @@ func TestLabelNames(t *testing.T) {
expected: []string{"__name__", "abc", "foo", "xyz"},
api: api,
},
+ {
+ name: "non empty label matcher with limit",
+ matchers: []string{`{foo=~".+"}`},
+ expected: []string{"__name__", "abc"},
+ limit: "2",
+ api: api,
+ },
{
name: "exact label matcher",
matchers: []string{`{foo="boo"}`},
@@ -805,7 +843,7 @@ func TestLabelNames(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
for _, method := range []string{http.MethodGet, http.MethodPost} {
ctx := context.Background()
- req, err := request(method, tc.matchers...)
+ req, err := request(method, tc.limit, tc.matchers...)
require.NoError(t, err)
res := tc.api.labelNames(req.WithContext(ctx))
assertAPIError(t, res.err, tc.expectedErrorType)
@@ -1430,6 +1468,15 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
responseLen: 2, // API does not specify which particular value will come back.
warningsCount: 0, // No warnings if limit isn't exceeded.
},
+ {
+ endpoint: api.series,
+ query: url.Values{
+ "match[]": []string{"test_metric1"},
+ "limit": []string{"0"},
+ },
+ responseLen: 2, // API does not specify which particular value will come back.
+ warningsCount: 0, // No warnings if limit isn't exceeded.
+ },
// Missing match[] query params in series requests.
{
endpoint: api.series,
@@ -2151,6 +2198,28 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok",
Type: "alerting",
},
+ AlertingRule{
+ State: "inactive",
+ Name: "test_metric6",
+ Query: "up == 1",
+ Duration: 1,
+ Labels: labels.FromStrings("testlabel", "rule"),
+ Annotations: labels.Labels{},
+ Alerts: []*Alert{},
+ Health: "ok",
+ Type: "alerting",
+ },
+ AlertingRule{
+ State: "inactive",
+ Name: "test_metric7",
+ Query: "up == 1",
+ Duration: 1,
+ Labels: labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
+ Annotations: labels.Labels{},
+ Alerts: []*Alert{},
+ Health: "ok",
+ Type: "alerting",
+ },
RecordingRule{
Name: "recording-rule-1",
Query: "vector(1)",
@@ -2158,6 +2227,13 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok",
Type: "recording",
},
+ RecordingRule{
+ Name: "recording-rule-2",
+ Query: "vector(1)",
+ Labels: labels.FromStrings("testlabel", "rule"),
+ Health: "ok",
+ Type: "recording",
+ },
},
},
},
@@ -2210,6 +2286,28 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok",
Type: "alerting",
},
+ AlertingRule{
+ State: "inactive",
+ Name: "test_metric6",
+ Query: "up == 1",
+ Duration: 1,
+ Labels: labels.FromStrings("testlabel", "rule"),
+ Annotations: labels.Labels{},
+ Alerts: nil,
+ Health: "ok",
+ Type: "alerting",
+ },
+ AlertingRule{
+ State: "inactive",
+ Name: "test_metric7",
+ Query: "up == 1",
+ Duration: 1,
+ Labels: labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
+ Annotations: labels.Labels{},
+ Alerts: nil,
+ Health: "ok",
+ Type: "alerting",
+ },
RecordingRule{
Name: "recording-rule-1",
Query: "vector(1)",
@@ -2217,6 +2315,13 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok",
Type: "recording",
},
+ RecordingRule{
+ Name: "recording-rule-2",
+ Query: "vector(1)",
+ Labels: labels.FromStrings("testlabel", "rule"),
+ Health: "ok",
+ Type: "recording",
+ },
},
},
},
@@ -2276,6 +2381,28 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok",
Type: "alerting",
},
+ AlertingRule{
+ State: "inactive",
+ Name: "test_metric6",
+ Query: "up == 1",
+ Duration: 1,
+ Labels: labels.FromStrings("testlabel", "rule"),
+ Annotations: labels.Labels{},
+ Alerts: []*Alert{},
+ Health: "ok",
+ Type: "alerting",
+ },
+ AlertingRule{
+ State: "inactive",
+ Name: "test_metric7",
+ Query: "up == 1",
+ Duration: 1,
+ Labels: labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
+ Annotations: labels.Labels{},
+ Alerts: []*Alert{},
+ Health: "ok",
+ Type: "alerting",
+ },
},
},
},
@@ -2302,6 +2429,13 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok",
Type: "recording",
},
+ RecordingRule{
+ Name: "recording-rule-2",
+ Query: "vector(1)",
+ Labels: labels.FromStrings("testlabel", "rule"),
+ Health: "ok",
+ Type: "recording",
+ },
},
},
},
@@ -2369,6 +2503,179 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
},
zeroFunc: rulesZeroFunc,
},
+ {
+ endpoint: api.rules,
+ query: url.Values{
+ "match[]": []string{`{testlabel="rule"}`},
+ },
+ response: &RuleDiscovery{
+ RuleGroups: []*RuleGroup{
+ {
+ Name: "grp",
+ File: "/path/to/file",
+ Interval: 1,
+ Limit: 0,
+ Rules: []Rule{
+ AlertingRule{
+ State: "inactive",
+ Name: "test_metric6",
+ Query: "up == 1",
+ Duration: 1,
+ Labels: labels.FromStrings("testlabel", "rule"),
+ Annotations: labels.Labels{},
+ Alerts: []*Alert{},
+ Health: "ok",
+ Type: "alerting",
+ },
+ RecordingRule{
+ Name: "recording-rule-2",
+ Query: "vector(1)",
+ Labels: labels.FromStrings("testlabel", "rule"),
+ Health: "ok",
+ Type: "recording",
+ },
+ },
+ },
+ },
+ },
+ zeroFunc: rulesZeroFunc,
+ },
+ {
+ endpoint: api.rules,
+ query: url.Values{
+ "type": []string{"alert"},
+ "match[]": []string{`{templatedlabel="{{ $externalURL }}"}`},
+ },
+ response: &RuleDiscovery{
+ RuleGroups: []*RuleGroup{
+ {
+ Name: "grp",
+ File: "/path/to/file",
+ Interval: 1,
+ Limit: 0,
+ Rules: []Rule{
+ AlertingRule{
+ State: "inactive",
+ Name: "test_metric7",
+ Query: "up == 1",
+ Duration: 1,
+ Labels: labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
+ Annotations: labels.Labels{},
+ Alerts: []*Alert{},
+ Health: "ok",
+ Type: "alerting",
+ },
+ },
+ },
+ },
+ },
+ zeroFunc: rulesZeroFunc,
+ },
+ {
+ endpoint: api.rules,
+ query: url.Values{
+ "match[]": []string{`{testlabel="abc"}`},
+ },
+ response: &RuleDiscovery{
+ RuleGroups: []*RuleGroup{},
+ },
+ },
+ // This tests the OR condition: the API response should return a rule if it matches any one of the label selectors.
+ {
+ endpoint: api.rules,
+ query: url.Values{
+ "match[]": []string{`{testlabel="abc"}`, `{testlabel="rule"}`},
+ },
+ response: &RuleDiscovery{
+ RuleGroups: []*RuleGroup{
+ {
+ Name: "grp",
+ File: "/path/to/file",
+ Interval: 1,
+ Limit: 0,
+ Rules: []Rule{
+ AlertingRule{
+ State: "inactive",
+ Name: "test_metric6",
+ Query: "up == 1",
+ Duration: 1,
+ Labels: labels.FromStrings("testlabel", "rule"),
+ Annotations: labels.Labels{},
+ Alerts: []*Alert{},
+ Health: "ok",
+ Type: "alerting",
+ },
+ RecordingRule{
+ Name: "recording-rule-2",
+ Query: "vector(1)",
+ Labels: labels.FromStrings("testlabel", "rule"),
+ Health: "ok",
+ Type: "recording",
+ },
+ },
+ },
+ },
+ },
+ zeroFunc: rulesZeroFunc,
+ },
+ {
+ endpoint: api.rules,
+ query: url.Values{
+ "type": []string{"record"},
+ "match[]": []string{`{testlabel="rule"}`},
+ },
+ response: &RuleDiscovery{
+ RuleGroups: []*RuleGroup{
+ {
+ Name: "grp",
+ File: "/path/to/file",
+ Interval: 1,
+ Limit: 0,
+ Rules: []Rule{
+ RecordingRule{
+ Name: "recording-rule-2",
+ Query: "vector(1)",
+ Labels: labels.FromStrings("testlabel", "rule"),
+ Health: "ok",
+ Type: "recording",
+ },
+ },
+ },
+ },
+ },
+ zeroFunc: rulesZeroFunc,
+ },
+ {
+ endpoint: api.rules,
+ query: url.Values{
+ "type": []string{"alert"},
+ "match[]": []string{`{testlabel="rule"}`},
+ },
+ response: &RuleDiscovery{
+ RuleGroups: []*RuleGroup{
+ {
+ Name: "grp",
+ File: "/path/to/file",
+ Interval: 1,
+ Limit: 0,
+ Rules: []Rule{
+ AlertingRule{
+ State: "inactive",
+ Name: "test_metric6",
+ Query: "up == 1",
+ Duration: 1,
+ Labels: labels.FromStrings("testlabel", "rule"),
+ Annotations: labels.Labels{},
+ Alerts: []*Alert{},
+ Health: "ok",
+ Type: "alerting",
+ },
+ },
+ },
+ },
+ },
+ zeroFunc: rulesZeroFunc,
+ },
{
endpoint: api.queryExemplars,
query: url.Values{
diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go
index a83bfe0173..99ef810186 100644
--- a/web/api/v1/errors_test.go
+++ b/web/api/v1/errors_test.go
@@ -171,11 +171,11 @@ type errorTestQuerier struct {
err error
}
-func (t errorTestQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (t errorTestQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, t.err
}
-func (t errorTestQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (t errorTestQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, t.err
}
diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json
index a08f53741c..ba924346f3 100644
--- a/web/ui/module/codemirror-promql/package.json
+++ b/web/ui/module/codemirror-promql/package.json
@@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
- "version": "0.53.0",
+ "version": "0.53.1",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
@@ -29,15 +29,15 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
- "@prometheus-io/lezer-promql": "0.53.0",
+ "@prometheus-io/lezer-promql": "0.53.1",
"lru-cache": "^7.18.3"
},
"devDependencies": {
- "@codemirror/autocomplete": "^6.16.2",
+ "@codemirror/autocomplete": "^6.17.0",
"@codemirror/language": "^6.10.2",
- "@codemirror/lint": "^6.8.0",
+ "@codemirror/lint": "^6.8.1",
"@codemirror/state": "^6.3.3",
- "@codemirror/view": "^6.22.1",
+ "@codemirror/view": "^6.28.3",
"@lezer/common": "^1.2.1",
"@lezer/highlight": "^1.2.0",
"@lezer/lr": "^1.4.1",
diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json
index 426f06e697..cbd03ae2b1 100644
--- a/web/ui/module/lezer-promql/package.json
+++ b/web/ui/module/lezer-promql/package.json
@@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
- "version": "0.53.0",
+ "version": "0.53.1",
"description": "lezer-based PromQL grammar",
"main": "dist/index.cjs",
"type": "module",
diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json
index 0458e961d0..62ac34e436 100644
--- a/web/ui/package-lock.json
+++ b/web/ui/package-lock.json
@@ -1,19 +1,19 @@
{
"name": "prometheus-io",
- "version": "0.53.0",
+ "version": "0.53.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "prometheus-io",
- "version": "0.53.0",
+ "version": "0.53.1",
"workspaces": [
"react-app",
"module/*"
],
"devDependencies": {
"@types/jest": "^29.5.12",
- "@types/node": "^20.14.2",
+ "@types/node": "^20.14.9",
"eslint-config-prettier": "^9.1.0",
"eslint-config-react-app": "^7.0.1",
"eslint-plugin-prettier": "^4.2.1",
@@ -21,7 +21,7 @@
"jest-fetch-mock": "^3.0.3",
"prettier": "^2.8.8",
"react-scripts": "^5.0.1",
- "ts-jest": "^29.1.4",
+ "ts-jest": "^29.2.2",
"typescript": "^4.9.5"
},
"engines": {
@@ -30,18 +30,18 @@
},
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
- "version": "0.53.0",
+ "version": "0.53.1",
"license": "Apache-2.0",
"dependencies": {
- "@prometheus-io/lezer-promql": "0.53.0",
+ "@prometheus-io/lezer-promql": "0.53.1",
"lru-cache": "^7.18.3"
},
"devDependencies": {
- "@codemirror/autocomplete": "^6.16.2",
+ "@codemirror/autocomplete": "^6.17.0",
"@codemirror/language": "^6.10.2",
- "@codemirror/lint": "^6.8.0",
+ "@codemirror/lint": "^6.8.1",
"@codemirror/state": "^6.3.3",
- "@codemirror/view": "^6.22.1",
+ "@codemirror/view": "^6.28.3",
"@lezer/common": "^1.2.1",
"@lezer/highlight": "^1.2.0",
"@lezer/lr": "^1.4.1",
@@ -69,7 +69,7 @@
},
"module/lezer-promql": {
"name": "@prometheus-io/lezer-promql",
- "version": "0.53.0",
+ "version": "0.53.1",
"license": "Apache-2.0",
"devDependencies": {
"@lezer/generator": "^1.7.0",
@@ -2027,9 +2027,9 @@
"license": "MIT"
},
"node_modules/@codemirror/autocomplete": {
- "version": "6.16.2",
- "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.16.2.tgz",
- "integrity": "sha512-MjfDrHy0gHKlPWsvSsikhO1+BOh+eBHNgfH1OXs1+DAf30IonQldgMM3kxLDTG9ktE7kDLaA1j/l7KMPA4KNfw==",
+ "version": "6.17.0",
+ "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.17.0.tgz",
+ "integrity": "sha512-fdfj6e6ZxZf8yrkMHUSJJir7OJkHkZKaOZGzLWIYp2PZ3jd+d+UjG8zVPqJF6d3bKxkhvXTPan/UZ1t7Bqm0gA==",
"dependencies": {
"@codemirror/language": "^6.0.0",
"@codemirror/state": "^6.0.0",
@@ -2068,9 +2068,9 @@
}
},
"node_modules/@codemirror/lint": {
- "version": "6.8.0",
- "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.0.tgz",
- "integrity": "sha512-lsFofvaw0lnPRJlQylNsC4IRt/1lI4OD/yYslrSGVndOJfStc58v+8p9dgGiD90ktOfL7OhBWns1ZETYgz0EJA==",
+ "version": "6.8.1",
+ "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.1.tgz",
+ "integrity": "sha512-IZ0Y7S4/bpaunwggW2jYqwLuHj0QtESf5xcROewY6+lDNwZ/NzvR4t+vpYgg9m7V8UXLPYqG+lu3DF470E5Oxg==",
"dependencies": {
"@codemirror/state": "^6.0.0",
"@codemirror/view": "^6.0.0",
@@ -2093,9 +2093,9 @@
"integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A=="
},
"node_modules/@codemirror/view": {
- "version": "6.27.0",
- "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.27.0.tgz",
- "integrity": "sha512-8kqX1sHbVW1lVzWwrjAbh4dR7eKhV8eIQ952JKaBXOoXE04WncoqCy4DMU701LSrPZ3N2Q4zsTawz7GQ+2mrUw==",
+ "version": "6.28.3",
+ "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.28.3.tgz",
+ "integrity": "sha512-QVqP+ko078/h9yrW+u5grX3rQhC+BkGKADRrlDaJznfPngJOv5zObiVf0+SgAWhL/Yt0nvZ+10rO3L+gU5IbFw==",
"dependencies": {
"@codemirror/state": "^6.4.0",
"style-mod": "^4.1.0",
@@ -4199,9 +4199,9 @@
"license": "MIT"
},
"node_modules/@types/node": {
- "version": "20.14.2",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.2.tgz",
- "integrity": "sha512-xyu6WAMVwv6AKFLB+e/7ySZVr/0zLCzOa7rSpq6jNwpqOrUbcACDWC+53d4n2QHOnDou0fbIsg8wZu/sxrnI4Q==",
+ "version": "20.14.9",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.9.tgz",
+ "integrity": "sha512-06OCtnTXtWOZBJlRApleWndH4JsRVs1pDCc8dLSQp+7PpUpX3ePdHyeNSFTeSe7FtKyQkrlPvHwJOW3SLd8Oyg==",
"dependencies": {
"undici-types": "~5.26.4"
}
@@ -16807,9 +16807,9 @@
"license": "CC0-1.0"
},
"node_modules/sass": {
- "version": "1.77.4",
- "resolved": "https://registry.npmjs.org/sass/-/sass-1.77.4.tgz",
- "integrity": "sha512-vcF3Ckow6g939GMA4PeU7b2K/9FALXk2KF9J87txdHzXbUF9XRQRwSxcAs/fGaTnJeBFd7UoV22j3lzMLdM0Pw==",
+ "version": "1.77.6",
+ "resolved": "https://registry.npmjs.org/sass/-/sass-1.77.6.tgz",
+ "integrity": "sha512-ByXE1oLD79GVq9Ht1PeHWCPMPB8XHpBuz1r85oByKHjZY6qV6rWnQovQzXJXuQ/XyE1Oj3iPk3lo28uzaRA2/Q==",
"dependencies": {
"chokidar": ">=3.0.0 <4.0.0",
"immutable": "^4.0.0",
@@ -18028,12 +18028,13 @@
"license": "MIT"
},
"node_modules/ts-jest": {
- "version": "29.1.4",
- "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.4.tgz",
- "integrity": "sha512-YiHwDhSvCiItoAgsKtoLFCuakDzDsJ1DLDnSouTaTmdOcOwIkSzbLXduaQ6M5DRVhuZC/NYaaZ/mtHbWMv/S6Q==",
+ "version": "29.2.2",
+ "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.2.2.tgz",
+ "integrity": "sha512-sSW7OooaKT34AAngP6k1VS669a0HdLxkQZnlC7T76sckGCokXFnvJ3yRlQZGRTAoV5K19HfSgCiSwWOSIfcYlg==",
"dev": true,
"dependencies": {
"bs-logger": "0.x",
+ "ejs": "^3.0.0",
"fast-json-stable-stringify": "2.x",
"jest-util": "^29.0.0",
"json5": "^2.2.3",
@@ -19331,15 +19332,15 @@
},
"react-app": {
"name": "@prometheus-io/app",
- "version": "0.53.0",
+ "version": "0.53.1",
"dependencies": {
- "@codemirror/autocomplete": "^6.16.2",
+ "@codemirror/autocomplete": "^6.17.0",
"@codemirror/commands": "^6.6.0",
"@codemirror/language": "^6.10.2",
- "@codemirror/lint": "^6.8.0",
+ "@codemirror/lint": "^6.8.1",
"@codemirror/search": "^6.5.6",
"@codemirror/state": "^6.3.3",
- "@codemirror/view": "^6.22.1",
+ "@codemirror/view": "^6.28.3",
"@forevolve/bootstrap-dark": "^4.0.2",
"@fortawesome/fontawesome-svg-core": "6.5.2",
"@fortawesome/free-solid-svg-icons": "6.5.2",
@@ -19349,7 +19350,7 @@
"@lezer/lr": "^1.4.1",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
- "@prometheus-io/codemirror-promql": "0.53.0",
+ "@prometheus-io/codemirror-promql": "0.53.1",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^9.0.6",
@@ -19368,7 +19369,7 @@
"react-test-renderer": "^17.0.2",
"reactstrap": "^8.10.1",
"sanitize-html": "^2.13.0",
- "sass": "1.77.4",
+ "sass": "1.77.6",
"tempusdominus-bootstrap-4": "^5.39.2",
"tempusdominus-core": "^5.19.3"
},
diff --git a/web/ui/package.json b/web/ui/package.json
index 355b610eb2..693a73decb 100644
--- a/web/ui/package.json
+++ b/web/ui/package.json
@@ -17,7 +17,7 @@
},
"devDependencies": {
"@types/jest": "^29.5.12",
- "@types/node": "^20.14.2",
+ "@types/node": "^20.14.9",
"eslint-config-prettier": "^9.1.0",
"eslint-config-react-app": "^7.0.1",
"eslint-plugin-prettier": "^4.2.1",
@@ -25,8 +25,8 @@
"jest-fetch-mock": "^3.0.3",
"prettier": "^2.8.8",
"react-scripts": "^5.0.1",
- "ts-jest": "^29.1.4",
+ "ts-jest": "^29.2.2",
"typescript": "^4.9.5"
},
- "version": "0.53.0"
+ "version": "0.53.1"
}
diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json
index 1997a22afd..c8002433a0 100644
--- a/web/ui/react-app/package.json
+++ b/web/ui/react-app/package.json
@@ -1,15 +1,15 @@
{
"name": "@prometheus-io/app",
- "version": "0.53.0",
+ "version": "0.53.1",
"private": true,
"dependencies": {
- "@codemirror/autocomplete": "^6.16.2",
+ "@codemirror/autocomplete": "^6.17.0",
"@codemirror/commands": "^6.6.0",
"@codemirror/language": "^6.10.2",
- "@codemirror/lint": "^6.8.0",
+ "@codemirror/lint": "^6.8.1",
"@codemirror/search": "^6.5.6",
"@codemirror/state": "^6.3.3",
- "@codemirror/view": "^6.22.1",
+ "@codemirror/view": "^6.28.3",
"@forevolve/bootstrap-dark": "^4.0.2",
"@fortawesome/fontawesome-svg-core": "6.5.2",
"@fortawesome/free-solid-svg-icons": "6.5.2",
@@ -19,7 +19,7 @@
"@lezer/lr": "^1.4.1",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
- "@prometheus-io/codemirror-promql": "0.53.0",
+ "@prometheus-io/codemirror-promql": "0.53.1",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^9.0.6",
@@ -38,7 +38,7 @@
"react-test-renderer": "^17.0.2",
"reactstrap": "^8.10.1",
"sanitize-html": "^2.13.0",
- "sass": "1.77.4",
+ "sass": "1.77.6",
"tempusdominus-bootstrap-4": "^5.39.2",
"tempusdominus-core": "^5.19.3"
},