From 5e342d315c7e191427e7ec1570cefc094c99a922 Mon Sep 17 00:00:00 2001 From: Mohammad Varmazyar Date: Fri, 17 Oct 2025 00:29:36 +0200 Subject: [PATCH 01/39] discovery/consul: Fix filter parameter not applied to health endpoint The filter parameter was only being passed to catalog.Services() but not to health.ServiceMultipleTags(), causing filters on Node and Node.Meta to be ignored when discovering service instances. This adds the missing Filter field to QueryOptions in the consulService.watch() method. Fixes #16087 Signed-off-by: Mohammad Varmazyar Signed-off-by: Mohammad Varmazyar --- CHANGELOG.md | 2 ++ discovery/consul/consul.go | 1 + discovery/consul/consul_test.go | 50 +++++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 649196758b..d35bfce65d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## main / unreleased +* [BUGFIX] Discovery/Consul: Fix filter parameter not being applied to health service endpoint, causing Node and Node.Meta filters to be ignored. 
#16087 + ## 3.7.0 / 2025-10-15 * [CHANGE] Remote-write: the following metrics are deprecated: diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index 74b5d0724e..600bd274a4 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -499,6 +499,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr WaitTime: watchTimeout, AllowStale: srv.discovery.allowStale, NodeMeta: srv.discovery.watchedNodeMeta, + Filter: srv.discovery.watchedFilter, } t0 := time.Now() diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index a6ff4a625e..b813146089 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -240,6 +240,8 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) { response = ServiceTestAnswer case "/v1/health/service/test?wait=120000ms": response = ServiceTestAnswer + case "/v1/health/service/test?filter=NodeMeta.rack_name+%3D%3D+%222304%22&wait=120000ms": + response = ServiceTestAnswer case "/v1/health/service/other?wait=120000ms": response = `[]` case "/v1/catalog/services?node-meta=rack_name%3A2304&stale=&wait=120000ms": @@ -392,6 +394,54 @@ func TestFilterOption(t *testing.T) { cancel() } +// TestFilterOnHealthEndpoint verifies that filter is passed to health service endpoint. 
+func TestFilterOnHealthEndpoint(t *testing.T) { + filterReceived := false + stub := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + response := "" + switch r.URL.Path { + case "/v1/agent/self": + response = AgentAnswer + case "/v1/health/service/test": + // Verify filter parameter is present in the query + filter := r.URL.Query().Get("filter") + if filter == `Node.Meta.rack_name == "2304"` { + filterReceived = true + } + response = ServiceTestAnswer + default: + t.Errorf("Unhandled consul call: %s", r.URL) + } + w.Header().Add("X-Consul-Index", "1") + w.Write([]byte(response)) + })) + defer stub.Close() + + stuburl, err := url.Parse(stub.URL) + require.NoError(t, err) + + config := &SDConfig{ + Server: stuburl.Host, + Services: []string{"test"}, + Filter: `Node.Meta.rack_name == "2304"`, + RefreshInterval: model.Duration(1 * time.Second), + } + + d := newDiscovery(t, config) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan []*targetgroup.Group) + go func() { + d.Run(ctx, ch) + close(ch) + }() + checkOneTarget(t, <-ch) + cancel() + + // Verify the filter was actually sent to the health endpoint + require.True(t, filterReceived, "Filter parameter should be sent to health service endpoint") +} + func TestGetDatacenterShouldReturnError(t *testing.T) { for _, tc := range []struct { handler func(http.ResponseWriter, *http.Request) From e8bfcfcf1a6e0e084574851c135b46cbfd479a8b Mon Sep 17 00:00:00 2001 From: sujal shah Date: Thu, 27 Mar 2025 04:24:18 +0530 Subject: [PATCH 02/39] promql: Implement / operators for trimming native histograms. This implements the TRIM_UPPER (/) operators that allow removing observations below or above a threshold from a histogram. The implementation zeros out buckets outside the desired range. It also recalculates the sum, including only bucket counts within the specified threshold range. Fixes #14651. 
Signed-off-by: sujal shah --- promql/engine.go | 309 +++- promql/parser/generated_parser.y | 6 +- promql/parser/generated_parser.y.go | 1395 +++++++++-------- promql/parser/lex.go | 38 +- .../testdata/native_histograms.test | 55 + web/ui/mantine-ui/src/promql/ast.ts | 2 + web/ui/mantine-ui/src/promql/utils.ts | 2 + .../codemirror-promql/src/complete/hybrid.ts | 6 +- .../src/complete/promql.terms.ts | 2 + .../codemirror-promql/src/parser/parser.ts | 20 +- web/ui/module/lezer-promql/src/promql.grammar | 4 + .../module/lezer-promql/test/expression.txt | 28 + 12 files changed, 1156 insertions(+), 711 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index b609dc4f0a..749352c23d 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3140,6 +3140,301 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { panic(fmt.Errorf("operator %q not allowed for Scalar operations", op)) } +// processCustomBucket handles custom bucket processing for histogram trimming. +// It returns the count to keep and the bucket midpoint for sum calculations. +func processCustomBucket( + bucket histogram.Bucket[float64], + rhs float64, + op parser.ItemType, +) (keepCount, bucketMidpoint float64) { + // Midpoint calculation + switch { + case math.IsInf(bucket.Lower, -1): + // First bucket: no lower bound, assume midpoint is near upper bound. 
+ bucketMidpoint = bucket.Upper + case math.IsInf(bucket.Upper, 1): + bucketMidpoint = bucket.Lower + default: + bucketMidpoint = (bucket.Lower + bucket.Upper) / 2 + } + + // Fractional keepCount calculation + switch op { + case parser.TRIM_UPPER: + switch { + case math.IsInf(bucket.Lower, -1): + // Special case for -Inf lower bound + if rhs >= bucket.Upper { + // Trim point is above bucket upper bound, keep all + keepCount = bucket.Count + } else { + // Trim point is within bucket or below, keep none + keepCount = 0 + } + case math.IsInf(bucket.Upper, 1): + // Special case for +Inf upper bound + if rhs <= bucket.Lower { + // Trim point is below bucket lower bound, keep none + keepCount = 0 + } else { + // Trim point is within the bucket, keep a portion + // Since we can't interpolate with +Inf, assume keep half for simplicity + // Another approach would be to use a different interpolation scheme + keepCount = bucket.Count * 0.5 + } + default: + // Normal case - finite bounds + switch { + case bucket.Upper <= rhs: + // Bucket entirely below trim point - keep all + keepCount = bucket.Count + case bucket.Lower < rhs: + // Bucket contains trim point - interpolate + fraction := (rhs - bucket.Lower) / (bucket.Upper - bucket.Lower) + keepCount = bucket.Count * fraction + default: + // Bucket entirely above trim point - discard + keepCount = 0 + } + } + + case parser.TRIM_LOWER: + switch { + case math.IsInf(bucket.Upper, 1): + // Special case for +Inf upper bound + if rhs <= bucket.Lower { + keepCount = bucket.Count + } else { + keepCount = 0 + } + case math.IsInf(bucket.Lower, -1): + // Special case for -Inf lower bound + if rhs >= bucket.Upper { + keepCount = 0 + } else { + keepCount = bucket.Count * 0.5 + } + default: + switch { + case bucket.Lower >= rhs: + keepCount = bucket.Count + case bucket.Upper > rhs: + fraction := (bucket.Upper - rhs) / (bucket.Upper - bucket.Lower) + keepCount = bucket.Count * fraction + default: + keepCount = 0 + } + } + } + + return 
keepCount, bucketMidpoint +} + +func computeBucketTrim(op parser.ItemType, bucket histogram.Bucket[float64], rhs float64, isPostive, isCustomBucket bool) (float64, float64) { + if isCustomBucket { + return processCustomBucket(bucket, rhs, op) + } + return computeExponentialTrim(bucket, rhs, isPostive, op) +} + +// Helper function to trim native histogram buckets. +func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser.ItemType) { + updatedCount := 0.0 + origSum := trimmedHist.Sum + removedSum := 0.0 + hasPositive, hasNegative := false, false + isCustomBucket := trimmedHist.UsesCustomBuckets() + + // Calculate the fraction to keep for buckets that contain the trim value + // For TRIM_UPPER, we keep observations below the trim point (rhs) + switch op { + case parser.TRIM_UPPER: + for i, iter := 0, trimmedHist.PositiveBucketIterator(); iter.Next(); i++ { + hasPositive = true + bucket := iter.At() + var keepCount, bucketMidpoint float64 + keepCount, bucketMidpoint = computeBucketTrim(op, bucket, rhs, true, isCustomBucket) + + // Bucket is entirely below the trim point - keep all + switch { + case bucket.Upper <= rhs: + updatedCount += bucket.Count + case bucket.Lower < rhs: + // Bucket contains the trim point - interpolate + removedCount := bucket.Count - keepCount + removedMid := bucketMidpoint + removedSum += removedCount * removedMid + + updatedCount += keepCount + trimmedHist.PositiveBuckets[i] = keepCount + default: + if !isCustomBucket { + bucketMidpoint = math.Sqrt(bucket.Lower * bucket.Upper) + } + removedSum += bucket.Count * bucketMidpoint + // Bucket is entirely above the trim point - discard + trimmedHist.PositiveBuckets[i] = 0 + } + } + + for i, iter := 0, trimmedHist.NegativeBucketIterator(); iter.Next(); i++ { + hasNegative = true + bucket := iter.At() + var keepCount, bucketMidpoint float64 + keepCount, bucketMidpoint = computeBucketTrim(op, bucket, rhs, false, isCustomBucket) + + switch { + case bucket.Upper <= rhs: + 
updatedCount += bucket.Count + case bucket.Lower < rhs: + removedCount := bucket.Count - keepCount + removedMid := bucketMidpoint + removedSum += removedCount * removedMid + + trimmedHist.NegativeBuckets[i] = keepCount + updatedCount += keepCount + default: + bucketMidpoint = math.Sqrt(bucket.Lower * bucket.Upper) + removedSum += bucket.Count * bucketMidpoint + trimmedHist.NegativeBuckets[i] = 0 + } + } + + // For TRIM_LOWER, we keep observations above the trim point (rhs) + case parser.TRIM_LOWER: + for i, iter := 0, trimmedHist.PositiveBucketIterator(); iter.Next(); i++ { + hasPositive = true + bucket := iter.At() + var keepCount, bucketMidpoint float64 + keepCount, bucketMidpoint = computeBucketTrim(op, bucket, rhs, true, isCustomBucket) + + switch { + case bucket.Lower >= rhs: + updatedCount += bucket.Count + case bucket.Upper > rhs: + removedCount := bucket.Count - keepCount + removedMid := bucketMidpoint + removedSum += removedCount * removedMid + + trimmedHist.PositiveBuckets[i] = keepCount + updatedCount += keepCount + default: + if !isCustomBucket { + bucketMidpoint = math.Sqrt(bucket.Lower * bucket.Upper) + } + removedSum += bucket.Count * bucketMidpoint + trimmedHist.PositiveBuckets[i] = 0 + } + } + + for i, iter := 0, trimmedHist.NegativeBucketIterator(); iter.Next(); i++ { + hasNegative = true + bucket := iter.At() + var keepCount, bucketMidpoint float64 + keepCount, bucketMidpoint = computeBucketTrim(op, bucket, rhs, false, isCustomBucket) + switch { + case bucket.Lower >= rhs: + updatedCount += bucket.Count + case bucket.Upper > rhs: + removedCount := bucket.Count - keepCount + removedMid := bucketMidpoint + removedSum += removedCount * removedMid + + trimmedHist.NegativeBuckets[i] = keepCount + updatedCount += keepCount + default: + bucketMidpoint = math.Sqrt(bucket.Lower * bucket.Upper) + removedSum += bucket.Count * bucketMidpoint + trimmedHist.NegativeBuckets[i] = 0 + } + } + } + + // Handle the zero count bucket + if trimmedHist.ZeroCount > 0 { 
+ zeroBucket := trimmedHist.ZeroBucket() + zLower := zeroBucket.Lower + zUpper := zeroBucket.Upper + + switch op { + case parser.TRIM_UPPER: + switch { + case rhs < zLower: + trimmedHist.ZeroCount = 0 + case rhs > zUpper: + updatedCount += trimmedHist.ZeroCount + default: + fraction := (rhs - zLower) / (zUpper - zLower) + keepCount := trimmedHist.ZeroCount * fraction + trimmedHist.ZeroCount = keepCount + updatedCount += keepCount + } + + case parser.TRIM_LOWER: + switch { + case rhs > zUpper: + trimmedHist.ZeroCount = 0 + case rhs < zLower: + updatedCount += trimmedHist.ZeroCount + default: + fraction := (zUpper - rhs) / (zUpper - zLower) + keepCount := trimmedHist.ZeroCount * fraction + trimmedHist.ZeroCount = keepCount + updatedCount += keepCount + } + } + } + + // Apply new sum + newSum := origSum - removedSum + + // Clamp correction + if !hasNegative && newSum < 0 { + newSum = 0 + } + if !hasPositive && newSum > 0 { + newSum = 0 + } + + // Update the histogram's count and sum + trimmedHist.Count = updatedCount + trimmedHist.Sum = newSum + + trimmedHist.Compact(0) +} + +func computeExponentialTrim(bucket histogram.Bucket[float64], rhs float64, isPositive bool, op parser.ItemType) (float64, float64) { + var fraction, bucketMidpoint, keepCount float64 + + logLower := math.Log2(math.Abs(bucket.Lower)) + logUpper := math.Log2(math.Abs(bucket.Upper)) + logRHS := math.Log2(math.Abs(rhs)) + + switch op { + case parser.TRIM_UPPER: + if isPositive { + fraction = (logRHS - logLower) / (logUpper - logLower) + bucketMidpoint = math.Sqrt(bucket.Lower * rhs) + } else { + fraction = 1 - ((logRHS - logUpper) / (logLower - logUpper)) + bucketMidpoint = -math.Sqrt(math.Abs(bucket.Lower) * math.Abs(rhs)) + } + + case parser.TRIM_LOWER: + if isPositive { + fraction = (logUpper - logRHS) / (logUpper - logLower) + bucketMidpoint = math.Sqrt(rhs * bucket.Upper) + } else { + fraction = (logRHS - logUpper) / (logLower - logUpper) + bucketMidpoint = -math.Sqrt(math.Abs(rhs) * 
math.Abs(bucket.Upper)) + } + } + + keepCount = bucket.Count * fraction + + return keepCount, bucketMidpoint +} + // vectorElemBinop evaluates a binary operation between two Vector elements. func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram, pos posrange.PositionRange) (res float64, resH *histogram.FloatHistogram, keep bool, info, err error) { switch { @@ -3172,6 +3467,8 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram return lhs, nil, lhs <= rhs, nil, nil case parser.ATAN2: return math.Atan2(lhs, rhs), nil, true, nil, nil + case parser.TRIM_LOWER, parser.TRIM_UPPER: + return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("float", parser.ItemTypeStr[op], "float", pos) } } case hlhs == nil && hrhs != nil: @@ -3179,7 +3476,7 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram switch op { case parser.MUL: return 0, hrhs.Copy().Mul(lhs).Compact(0), true, nil, nil - case parser.ADD, parser.SUB, parser.DIV, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: + case parser.ADD, parser.SUB, parser.DIV, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.TRIM_LOWER, parser.TRIM_UPPER, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("float", parser.ItemTypeStr[op], "histogram", pos) } } @@ -3190,6 +3487,14 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram return 0, hlhs.Copy().Mul(rhs).Compact(0), true, nil, nil case parser.DIV: return 0, hlhs.Copy().Div(rhs).Compact(0), true, nil, nil + case parser.TRIM_UPPER: + trimmedHist := hlhs.Copy() + trimHistogram(trimmedHist, rhs, op) + return 0, trimmedHist, true, nil, nil + case parser.TRIM_LOWER: + trimmedHist := hlhs.Copy() + trimHistogram(trimmedHist, rhs, op) + return 0, trimmedHist, true, nil, nil case parser.ADD, 
parser.SUB, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("histogram", parser.ItemTypeStr[op], "float", pos) } @@ -3230,7 +3535,7 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram case parser.NEQ: // This operation expects that both histograms are compacted. return 0, hlhs, !hlhs.Equals(hrhs), nil, nil - case parser.MUL, parser.DIV, parser.POW, parser.MOD, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: + case parser.MUL, parser.DIV, parser.POW, parser.MOD, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2, parser.TRIM_LOWER, parser.TRIM_UPPER: return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("histogram", parser.ItemTypeStr[op], "histogram", pos) } } diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index 71ab6ed4b3..7ac16f3c08 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -98,6 +98,8 @@ EQLC EQL_REGEX GTE GTR +TRIM_UPPER +TRIM_LOWER LAND LOR LSS @@ -200,7 +202,7 @@ START_METRIC_SELECTOR // Operators are listed with increasing precedence. 
%left LOR %left LAND LUNLESS -%left EQLC GTE GTR LSS LTE NEQ +%left EQLC GTE GTR LSS LTE NEQ TRIM_UPPER TRIM_LOWER %left ADD SUB %left MUL DIV MOD ATAN2 %right POW @@ -291,6 +293,8 @@ binary_expr : expr ADD bin_modifier expr { $$ = yylex.(*parser).newBinar | expr EQLC bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } | expr GTE bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } | expr GTR bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } + | expr TRIM_UPPER bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } + | expr TRIM_LOWER bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } | expr LAND bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } | expr LOR bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } | expr LSS bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index d20460ed5b..17a001e11b 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -78,69 +78,71 @@ const EQLC = 57385 const EQL_REGEX = 57386 const GTE = 57387 const GTR = 57388 -const LAND = 57389 -const LOR = 57390 -const LSS = 57391 -const LTE = 57392 -const LUNLESS = 57393 -const MOD = 57394 -const MUL = 57395 -const NEQ = 57396 -const NEQ_REGEX = 57397 -const POW = 57398 -const SUB = 57399 -const AT = 57400 -const ATAN2 = 57401 -const operatorsEnd = 57402 -const aggregatorsStart = 57403 -const AVG = 57404 -const BOTTOMK = 57405 -const COUNT = 57406 -const COUNT_VALUES = 57407 -const GROUP = 57408 -const MAX = 57409 -const MIN = 57410 -const QUANTILE = 57411 -const STDDEV = 57412 -const STDVAR = 57413 -const SUM = 57414 -const TOPK = 57415 -const LIMITK = 57416 -const LIMIT_RATIO = 57417 -const aggregatorsEnd = 57418 -const keywordsStart = 57419 -const BOOL 
= 57420 -const BY = 57421 -const GROUP_LEFT = 57422 -const GROUP_RIGHT = 57423 -const FILL = 57424 -const FILL_LEFT = 57425 -const FILL_RIGHT = 57426 -const IGNORING = 57427 -const OFFSET = 57428 -const SMOOTHED = 57429 -const ANCHORED = 57430 -const ON = 57431 -const WITHOUT = 57432 -const keywordsEnd = 57433 -const preprocessorStart = 57434 -const START = 57435 -const END = 57436 -const STEP = 57437 -const RANGE = 57438 -const preprocessorEnd = 57439 -const counterResetHintsStart = 57440 -const UNKNOWN_COUNTER_RESET = 57441 -const COUNTER_RESET = 57442 -const NOT_COUNTER_RESET = 57443 -const GAUGE_TYPE = 57444 -const counterResetHintsEnd = 57445 -const startSymbolsStart = 57446 -const START_METRIC = 57447 -const START_SERIES_DESCRIPTION = 57448 -const START_EXPRESSION = 57449 -const START_METRIC_SELECTOR = 57450 -const startSymbolsEnd = 57451 +const TRIM_UPPER = 57389 +const TRIM_LOWER = 57390 +const LAND = 57391 +const LOR = 57392 +const LSS = 57393 +const LTE = 57394 +const LUNLESS = 57395 +const MOD = 57396 +const MUL = 57397 +const NEQ = 57398 +const NEQ_REGEX = 57399 +const POW = 57400 +const SUB = 57401 +const AT = 57402 +const ATAN2 = 57403 +const operatorsEnd = 57404 +const aggregatorsStart = 57405 +const AVG = 57406 +const BOTTOMK = 57407 +const COUNT = 57408 +const COUNT_VALUES = 57409 +const GROUP = 57410 +const MAX = 57411 +const MIN = 57412 +const QUANTILE = 57413 +const STDDEV = 57414 +const STDVAR = 57415 +const SUM = 57416 +const TOPK = 57417 +const LIMITK = 57418 +const LIMIT_RATIO = 57419 +const aggregatorsEnd = 57420 +const keywordsStart = 57421 +const BOOL = 57422 +const BY = 57423 +const GROUP_LEFT = 57424 +const GROUP_RIGHT = 57425 +const FILL = 57426 +const FILL_LEFT = 57427 +const FILL_RIGHT = 57428 +const IGNORING = 57429 +const OFFSET = 57430 +const SMOOTHED = 57431 +const ANCHORED = 57432 +const ON = 57433 +const WITHOUT = 57434 +const keywordsEnd = 57435 +const preprocessorStart = 57436 +const START = 57437 +const END = 57438 +const 
STEP = 57439 +const RANGE = 57440 +const preprocessorEnd = 57441 +const counterResetHintsStart = 57442 +const UNKNOWN_COUNTER_RESET = 57443 +const COUNTER_RESET = 57444 +const NOT_COUNTER_RESET = 57445 +const GAUGE_TYPE = 57446 +const counterResetHintsEnd = 57447 +const startSymbolsStart = 57448 +const START_METRIC = 57449 +const START_SERIES_DESCRIPTION = 57450 +const START_EXPRESSION = 57451 +const START_METRIC_SELECTOR = 57452 +const startSymbolsEnd = 57453 var yyToknames = [...]string{ "$end", @@ -189,6 +191,8 @@ var yyToknames = [...]string{ "EQL_REGEX", "GTE", "GTR", + "TRIM_UPPER", + "TRIM_LOWER", "LAND", "LOR", "LSS", @@ -265,369 +269,372 @@ var yyExca = [...]int16{ 1, -1, -2, 0, -1, 44, - 1, 161, - 10, 161, - 24, 161, + 1, 163, + 10, 163, + 24, 163, -2, 0, -1, 75, - 2, 204, - 15, 204, - 79, 204, - 90, 204, - -2, 115, - -1, 76, - 2, 205, - 15, 205, - 79, 205, - 90, 205, - -2, 116, - -1, 77, 2, 206, 15, 206, - 79, 206, - 90, 206, - -2, 118, - -1, 78, + 81, 206, + 92, 206, + -2, 117, + -1, 76, 2, 207, 15, 207, - 79, 207, - 90, 207, - -2, 119, - -1, 79, + 81, 207, + 92, 207, + -2, 118, + -1, 77, 2, 208, 15, 208, - 79, 208, - 90, 208, - -2, 123, - -1, 80, + 81, 208, + 92, 208, + -2, 120, + -1, 78, 2, 209, 15, 209, - 79, 209, - 90, 209, - -2, 128, - -1, 81, + 81, 209, + 92, 209, + -2, 121, + -1, 79, 2, 210, 15, 210, - 79, 210, - 90, 210, - -2, 130, - -1, 82, + 81, 210, + 92, 210, + -2, 125, + -1, 80, 2, 211, 15, 211, - 79, 211, - 90, 211, - -2, 132, - -1, 83, + 81, 211, + 92, 211, + -2, 130, + -1, 81, 2, 212, 15, 212, - 79, 212, - 90, 212, - -2, 133, - -1, 84, + 81, 212, + 92, 212, + -2, 132, + -1, 82, 2, 213, 15, 213, - 79, 213, - 90, 213, + 81, 213, + 92, 213, -2, 134, - -1, 85, + -1, 83, 2, 214, 15, 214, - 79, 214, - 90, 214, + 81, 214, + 92, 214, -2, 135, - -1, 86, + -1, 84, 2, 215, 15, 215, - 79, 215, - 90, 215, + 81, 215, + 92, 215, -2, 136, - -1, 87, + -1, 85, 2, 216, 15, 216, - 79, 216, - 90, 216, - -2, 140, - -1, 88, + 81, 216, + 92, 216, + -2, 137, + 
-1, 86, 2, 217, 15, 217, - 79, 217, - 90, 217, - -2, 141, - -1, 140, - 41, 288, - 42, 288, - 52, 288, - 53, 288, - 57, 288, + 81, 217, + 92, 217, + -2, 138, + -1, 87, + 2, 218, + 15, 218, + 81, 218, + 92, 218, + -2, 142, + -1, 88, + 2, 219, + 15, 219, + 81, 219, + 92, 219, + -2, 143, + -1, 142, + 41, 290, + 42, 290, + 54, 290, + 55, 290, + 59, 290, -2, 22, - -1, 258, - 9, 273, - 12, 273, - 13, 273, - 18, 273, - 19, 273, - 25, 273, - 41, 273, - 47, 273, - 48, 273, - 51, 273, - 57, 273, - 62, 273, - 63, 273, - 64, 273, - 65, 273, - 66, 273, - 67, 273, - 68, 273, - 69, 273, - 70, 273, - 71, 273, - 72, 273, - 73, 273, - 74, 273, - 75, 273, - 79, 273, - 82, 273, - 83, 273, - 84, 273, - 86, 273, - 87, 273, - 88, 273, - 90, 273, - 93, 273, - 94, 273, - 95, 273, - 96, 273, + -1, 262, + 9, 275, + 12, 275, + 13, 275, + 18, 275, + 19, 275, + 25, 275, + 41, 275, + 49, 275, + 50, 275, + 53, 275, + 59, 275, + 64, 275, + 65, 275, + 66, 275, + 67, 275, + 68, 275, + 69, 275, + 70, 275, + 71, 275, + 72, 275, + 73, 275, + 74, 275, + 75, 275, + 76, 275, + 77, 275, + 81, 275, + 84, 275, + 85, 275, + 86, 275, + 88, 275, + 89, 275, + 90, 275, + 92, 275, + 95, 275, + 96, 275, + 97, 275, + 98, 275, -2, 0, - -1, 259, - 9, 273, - 12, 273, - 13, 273, - 18, 273, - 19, 273, - 25, 273, - 41, 273, - 47, 273, - 48, 273, - 51, 273, - 57, 273, - 62, 273, - 63, 273, - 64, 273, - 65, 273, - 66, 273, - 67, 273, - 68, 273, - 69, 273, - 70, 273, - 71, 273, - 72, 273, - 73, 273, - 74, 273, - 75, 273, - 79, 273, - 82, 273, - 83, 273, - 84, 273, - 86, 273, - 87, 273, - 88, 273, - 90, 273, - 93, 273, - 94, 273, - 95, 273, - 96, 273, + -1, 263, + 9, 275, + 12, 275, + 13, 275, + 18, 275, + 19, 275, + 25, 275, + 41, 275, + 49, 275, + 50, 275, + 53, 275, + 59, 275, + 64, 275, + 65, 275, + 66, 275, + 67, 275, + 68, 275, + 69, 275, + 70, 275, + 71, 275, + 72, 275, + 73, 275, + 74, 275, + 75, 275, + 76, 275, + 77, 275, + 81, 275, + 84, 275, + 85, 275, + 86, 275, + 88, 275, + 89, 275, + 90, 275, + 92, 275, + 95, 
275, + 96, 275, + 97, 275, + 98, 275, -2, 0, } const yyPrivate = 57344 -const yyLast = 1224 +const yyLast = 1241 var yyAct = [...]int16{ - 61, 363, 190, 429, 351, 436, 431, 293, 247, 201, - 98, 51, 147, 193, 369, 96, 231, 412, 413, 370, - 132, 133, 68, 130, 73, 163, 194, 131, 443, 444, - 445, 446, 134, 135, 256, 253, 254, 255, 257, 258, - 259, 129, 70, 426, 123, 425, 124, 127, 391, 342, - 157, 458, 223, 198, 447, 389, 415, 128, 126, 345, - 451, 129, 125, 197, 414, 465, 398, 138, 379, 140, - 6, 103, 105, 106, 346, 107, 108, 109, 110, 111, - 112, 113, 114, 115, 116, 199, 117, 118, 122, 104, - 347, 136, 343, 46, 124, 127, 389, 133, 334, 251, - 397, 200, 149, 377, 192, 128, 126, 199, 134, 129, - 125, 198, 141, 333, 420, 396, 119, 121, 120, 123, - 186, 197, 395, 200, 203, 208, 209, 210, 211, 212, - 213, 181, 376, 419, 430, 204, 204, 204, 204, 204, - 204, 204, 182, 199, 185, 227, 205, 205, 205, 205, - 205, 205, 205, 216, 219, 215, 204, 341, 214, 200, - 137, 117, 139, 122, 339, 385, 237, 205, 239, 464, - 384, 249, 226, 2, 3, 4, 5, 91, 290, 225, - 340, 123, 289, 280, 250, 383, 364, 338, 124, 127, - 284, 119, 121, 120, 275, 195, 196, 288, 218, 128, - 126, 204, 460, 129, 125, 205, 280, 278, 158, 105, - 374, 217, 205, 286, 287, 423, 243, 204, 241, 114, - 115, 124, 127, 117, 373, 122, 104, 372, 205, 222, - 143, 437, 128, 126, 124, 127, 129, 125, 65, 242, - 149, 240, 337, 142, 42, 128, 126, 418, 64, 129, - 125, 285, 252, 119, 121, 120, 365, 366, 260, 261, - 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, - 272, 273, 274, 344, 371, 127, 367, 368, 198, 283, - 375, 124, 127, 282, 378, 128, 126, 281, 197, 129, - 203, 204, 128, 126, 135, 204, 129, 125, 198, 380, - 65, 204, 205, 144, 7, 409, 205, 408, 197, 407, - 64, 406, 205, 164, 165, 166, 167, 168, 169, 170, - 171, 172, 173, 174, 175, 176, 177, 178, 202, 232, - 199, 233, 89, 156, 417, 65, 387, 405, 463, 233, - 404, 189, 102, 224, 403, 64, 200, 204, 90, 388, - 390, 10, 392, 124, 127, 393, 394, 462, 205, 402, - 461, 93, 124, 127, 
128, 126, 401, 89, 129, 125, - 400, 235, 399, 128, 126, 416, 410, 129, 125, 235, - 8, 234, 236, 90, 44, 59, 204, 411, 43, 234, - 236, 92, 422, 188, 187, 1, 179, 205, 424, 155, - 428, 154, 230, 432, 433, 434, 150, 229, 74, 335, - 439, 438, 441, 440, 449, 450, 148, 435, 58, 452, - 228, 206, 207, 448, 336, 57, 296, 56, 386, 100, - 204, 69, 453, 454, 9, 9, 309, 455, 99, 55, - 457, 205, 315, 124, 127, 162, 421, 150, 97, 295, - 99, 54, 459, 53, 128, 126, 238, 148, 129, 125, - 97, 100, 153, 204, 466, 146, 52, 152, 95, 50, - 100, 311, 312, 100, 205, 313, 160, 220, 49, 161, - 151, 48, 159, 326, 47, 60, 297, 299, 301, 302, - 303, 314, 316, 319, 320, 321, 322, 323, 327, 328, - 246, 456, 298, 300, 304, 305, 306, 307, 308, 310, - 317, 332, 331, 318, 296, 348, 101, 324, 325, 329, - 330, 245, 244, 291, 309, 198, 94, 442, 248, 191, - 315, 350, 251, 294, 292, 197, 62, 295, 349, 145, - 0, 0, 353, 354, 352, 359, 361, 358, 360, 355, - 356, 357, 362, 0, 0, 0, 0, 199, 0, 311, - 312, 0, 0, 313, 0, 0, 0, 0, 0, 0, - 0, 326, 0, 200, 297, 299, 301, 302, 303, 314, - 316, 319, 320, 321, 322, 323, 327, 328, 0, 0, - 298, 300, 304, 305, 306, 307, 308, 310, 317, 332, - 331, 318, 0, 0, 0, 324, 325, 329, 330, 65, - 0, 0, 63, 91, 0, 66, 427, 0, 25, 64, - 0, 0, 221, 0, 0, 67, 0, 353, 354, 352, - 359, 361, 358, 360, 355, 356, 357, 362, 0, 0, - 0, 89, 0, 0, 0, 0, 0, 21, 22, 0, - 0, 23, 0, 0, 0, 0, 0, 90, 0, 0, - 0, 0, 75, 76, 77, 78, 79, 80, 81, 82, - 83, 84, 85, 86, 87, 88, 0, 0, 0, 13, - 0, 0, 16, 17, 18, 0, 27, 41, 40, 0, - 33, 0, 0, 34, 35, 71, 72, 65, 45, 0, - 63, 91, 0, 66, 0, 0, 25, 64, 0, 0, - 0, 0, 0, 67, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 89, - 0, 0, 0, 0, 0, 21, 22, 0, 0, 23, - 0, 0, 0, 0, 0, 90, 0, 0, 0, 0, - 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, - 85, 86, 87, 88, 0, 0, 0, 13, 0, 0, - 16, 17, 18, 0, 27, 41, 40, 0, 33, 0, - 0, 34, 35, 71, 72, 65, 0, 0, 63, 91, - 0, 66, 0, 0, 25, 64, 0, 0, 0, 0, - 0, 67, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 89, 0, 0, - 0, 0, 0, 21, 22, 
0, 0, 23, 0, 0, - 0, 0, 0, 90, 0, 0, 0, 0, 75, 76, - 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, - 87, 88, 0, 0, 0, 13, 0, 0, 16, 17, - 18, 0, 27, 41, 40, 0, 33, 20, 91, 34, - 35, 71, 72, 25, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 21, 22, 0, 0, 23, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 11, 12, 14, - 15, 19, 24, 26, 28, 29, 30, 31, 32, 36, - 37, 0, 0, 0, 13, 0, 0, 16, 17, 18, - 0, 27, 41, 40, 0, 33, 20, 42, 34, 35, - 38, 39, 25, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 61, 369, 194, 435, 357, 442, 437, 299, 251, 205, + 98, 51, 149, 197, 73, 96, 235, 260, 375, 376, + 134, 261, 68, 135, 133, 132, 449, 450, 451, 452, + 418, 419, 198, 137, 136, 257, 258, 259, 262, 263, + 165, 70, 126, 129, 131, 125, 432, 431, 397, 125, + 348, 159, 236, 227, 237, 130, 128, 395, 395, 131, + 127, 404, 464, 453, 421, 202, 457, 140, 420, 142, + 6, 385, 103, 105, 106, 201, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 138, 119, + 120, 124, 104, 119, 239, 124, 46, 203, 436, 135, + 126, 129, 151, 383, 382, 230, 238, 240, 229, 255, + 136, 143, 370, 130, 128, 204, 380, 131, 127, 121, + 123, 122, 190, 121, 123, 122, 207, 212, 213, 214, + 215, 216, 217, 185, 379, 65, 189, 208, 208, 208, + 208, 208, 208, 208, 186, 64, 378, 231, 209, 209, + 209, 209, 209, 209, 209, 220, 223, 218, 208, 219, + 347, 345, 145, 139, 65, 141, 351, 89, 241, 209, + 243, 144, 426, 253, 64, 2, 3, 4, 5, 125, + 349, 352, 291, 346, 344, 90, 249, 286, 247, 237, + 202, 425, 254, 252, 290, 203, 443, 255, 281, 353, + 201, 284, 289, 288, 287, 208, 103, 105, 137, 209, + 286, 246, 245, 204, 160, 129, 209, 292, 293, 116, + 117, 208, 203, 119, 120, 124, 104, 130, 128, 239, + 146, 131, 209, 152, 226, 244, 222, 202, 91, 42, + 204, 238, 240, 150, 151, 196, 343, 201, 7, 221, + 415, 342, 202, 121, 123, 122, 100, 256, 414, 413, + 371, 372, 201, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + 373, 374, 377, 350, 203, 340, 381, 412, 99, 
391, + 384, 469, 158, 10, 390, 43, 207, 208, 97, 411, + 339, 208, 204, 93, 202, 386, 242, 208, 209, 389, + 468, 100, 209, 467, 201, 410, 471, 409, 209, 166, + 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 203, 408, 199, 200, + 470, 407, 393, 406, 405, 59, 296, 466, 403, 102, + 295, 92, 1, 208, 204, 394, 396, 183, 398, 126, + 129, 399, 400, 402, 209, 294, 126, 129, 152, 234, + 401, 8, 130, 128, 233, 44, 131, 127, 150, 130, + 128, 422, 416, 131, 127, 74, 148, 232, 388, 58, + 99, 100, 208, 417, 155, 65, 57, 56, 428, 154, + 97, 193, 55, 209, 430, 64, 434, 387, 95, 438, + 439, 440, 153, 100, 424, 164, 445, 444, 447, 446, + 455, 456, 157, 441, 156, 458, 54, 89, 69, 454, + 53, 9, 9, 52, 302, 50, 208, 392, 459, 460, + 429, 162, 224, 461, 315, 90, 463, 209, 126, 129, + 321, 49, 163, 48, 427, 192, 191, 301, 465, 126, + 129, 130, 128, 161, 47, 131, 127, 60, 250, 208, + 472, 462, 130, 128, 354, 101, 131, 127, 248, 297, + 209, 317, 318, 210, 211, 319, 94, 448, 195, 300, + 62, 147, 0, 332, 0, 0, 303, 305, 307, 308, + 309, 320, 322, 325, 326, 327, 328, 329, 333, 334, + 0, 0, 304, 306, 310, 311, 312, 313, 314, 316, + 323, 338, 337, 324, 302, 0, 423, 330, 331, 335, + 336, 0, 228, 0, 315, 0, 0, 0, 0, 0, + 321, 0, 0, 0, 298, 126, 129, 301, 0, 0, + 0, 126, 129, 0, 0, 0, 0, 0, 130, 128, + 0, 0, 131, 127, 130, 128, 0, 0, 131, 127, + 0, 317, 318, 0, 0, 319, 0, 0, 0, 0, + 0, 341, 0, 332, 0, 0, 303, 305, 307, 308, + 309, 320, 322, 325, 326, 327, 328, 329, 333, 334, + 0, 0, 304, 306, 310, 311, 312, 313, 314, 316, + 323, 338, 337, 324, 0, 126, 129, 330, 331, 335, + 336, 65, 0, 0, 63, 91, 0, 66, 130, 128, + 25, 64, 131, 127, 225, 0, 0, 67, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 89, 0, 0, 0, 0, 0, 0, 0, 21, 22, 0, 0, 23, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 11, 12, 14, 15, - 19, 24, 26, 28, 29, 30, 31, 32, 36, 37, - 123, 0, 0, 13, 0, 0, 16, 17, 18, 0, - 27, 41, 40, 0, 33, 0, 0, 34, 35, 38, - 39, 123, 0, 0, 0, 0, 0, 103, 105, 106, - 0, 107, 108, 109, 110, 
111, 112, 113, 114, 115, - 116, 0, 117, 118, 122, 104, 0, 0, 103, 105, - 106, 0, 107, 108, 109, 0, 111, 112, 113, 114, - 115, 116, 382, 117, 118, 122, 104, 0, 0, 65, - 0, 123, 119, 121, 120, 189, 65, 0, 0, 64, - 0, 381, 189, 0, 0, 0, 64, 0, 0, 0, - 0, 0, 0, 119, 121, 120, 0, 0, 103, 105, - 106, 89, 107, 108, 0, 0, 111, 112, 89, 114, - 115, 116, 180, 117, 118, 122, 104, 90, 0, 65, - 0, 0, 0, 0, 90, 189, 65, 188, 187, 64, - 0, 0, 279, 0, 188, 187, 64, 123, 0, 0, - 0, 0, 0, 119, 121, 120, 0, 0, 0, 0, - 0, 89, 0, 0, 0, 206, 207, 0, 89, 0, - 0, 0, 206, 207, 103, 105, 0, 90, 0, 0, - 0, 0, 0, 0, 90, 114, 115, 188, 187, 117, - 118, 122, 104, 0, 188, 187, 0, 0, 0, 0, + 0, 90, 0, 0, 0, 0, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 0, 0, 0, 13, 0, 0, 16, 17, 18, 0, + 27, 41, 40, 0, 33, 0, 0, 34, 35, 71, + 72, 65, 45, 0, 63, 91, 0, 66, 356, 0, + 25, 64, 0, 0, 0, 355, 0, 67, 0, 359, + 360, 358, 365, 367, 364, 366, 361, 362, 363, 368, + 0, 0, 0, 89, 0, 0, 0, 0, 0, 0, + 0, 21, 22, 0, 0, 23, 0, 0, 0, 0, + 0, 90, 0, 0, 0, 0, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 0, 0, 0, 13, 0, 0, 16, 17, 18, 0, + 27, 41, 40, 0, 33, 0, 0, 34, 35, 71, + 72, 65, 0, 0, 63, 91, 0, 66, 433, 0, + 25, 64, 0, 0, 0, 0, 0, 67, 0, 359, + 360, 358, 365, 367, 364, 366, 361, 362, 363, 368, + 0, 0, 0, 89, 0, 0, 206, 0, 0, 0, + 0, 21, 22, 65, 0, 23, 0, 0, 0, 193, + 0, 90, 0, 64, 0, 0, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 0, 0, 0, 13, 0, 89, 16, 17, 18, 0, + 27, 41, 40, 0, 33, 20, 91, 34, 35, 71, + 72, 25, 0, 90, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 192, 191, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 184, 0, 0, + 0, 0, 21, 22, 65, 0, 23, 0, 0, 0, + 193, 210, 211, 0, 64, 0, 0, 11, 12, 14, + 15, 19, 24, 26, 28, 29, 30, 31, 32, 36, + 37, 0, 0, 0, 13, 0, 89, 16, 17, 18, + 0, 27, 41, 40, 0, 33, 20, 42, 34, 35, + 38, 39, 25, 0, 90, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 192, 191, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 183, 184, 0, 0, 119, - 121, 120, 276, 277, + 
0, 0, 0, 21, 22, 0, 0, 23, 0, 0, + 0, 0, 187, 188, 0, 0, 0, 0, 11, 12, + 14, 15, 19, 24, 26, 28, 29, 30, 31, 32, + 36, 37, 125, 0, 0, 13, 0, 0, 16, 17, + 18, 0, 27, 41, 40, 0, 33, 0, 0, 34, + 35, 38, 39, 125, 0, 0, 0, 0, 0, 103, + 105, 106, 0, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 0, 119, 120, 124, 104, + 103, 105, 106, 0, 107, 108, 109, 110, 111, 0, + 113, 114, 115, 116, 117, 118, 0, 119, 120, 124, + 104, 0, 0, 125, 0, 0, 121, 123, 122, 0, + 65, 0, 0, 0, 0, 0, 193, 0, 125, 0, + 64, 0, 0, 0, 0, 0, 0, 121, 123, 122, + 103, 105, 106, 0, 107, 108, 109, 110, 0, 0, + 113, 114, 89, 116, 117, 118, 105, 119, 120, 124, + 104, 65, 0, 0, 0, 0, 0, 285, 116, 117, + 90, 64, 119, 0, 124, 104, 0, 0, 0, 0, + 192, 191, 0, 0, 0, 0, 0, 121, 123, 122, + 0, 0, 0, 89, 0, 0, 0, 0, 0, 0, + 0, 0, 121, 123, 122, 0, 0, 0, 210, 211, + 0, 90, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 192, 191, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 282, + 283, } var yyPact = [...]int16{ - 68, 294, 934, 934, 688, 855, -1000, -1000, -1000, 231, + 68, 238, 954, 954, 702, 873, -1000, -1000, -1000, 226, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 448, -1000, 340, -1000, 996, -1000, -1000, -1000, + -1000, -1000, 388, -1000, 347, -1000, 1018, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 5, 18, 279, -1000, -1000, 776, -1000, 776, 164, - -1000, 228, 215, 288, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 1, 18, 193, -1000, -1000, 792, -1000, 792, 225, + -1000, 156, 147, 215, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 445, -1000, -1000, 460, -1000, -1000, 397, 329, -1000, - -1000, 26, -1000, -53, -53, -53, -53, -53, -53, -53, - -53, -53, -53, -53, -53, 
-53, -53, -53, -53, 1120, - -1000, -1000, 102, 326, 1077, 1077, 1077, 1077, 1077, 1077, - 279, -58, -1000, 196, 196, 600, -1000, 30, 321, 105, - -15, -1000, 157, 150, 1077, 400, -1000, -1000, 327, 335, - -1000, -1000, 436, -1000, 216, -1000, 214, 516, 776, -1000, - -47, -51, -41, -1000, 776, 776, 776, 776, 776, 776, - 776, 776, 776, 776, 776, 776, 776, 776, 776, -1000, - -1000, -1000, 1127, 272, 268, 264, 5, -1000, -1000, 1077, - -1000, 236, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 269, - 269, 176, -1000, 5, -1000, 1077, 228, 215, 233, 233, - -15, -15, -15, -15, -1000, -1000, -1000, 512, -1000, -1000, - 91, -1000, 996, -1000, -1000, -1000, -1000, 402, -1000, 404, - -1000, 162, -1000, -1000, -1000, -1000, -1000, 155, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 23, 66, 33, -1000, -1000, - -1000, 514, 167, 171, 171, 171, 196, 196, 196, 196, - 105, 105, 1133, 1133, 1133, 1067, 1017, 1133, 1133, 1067, - 105, 105, 1133, 105, 167, -1000, 212, 209, 195, 1077, - -15, 110, 81, 1077, 321, 46, -1000, -1000, -1000, 1070, - -1000, 163, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 366, -1000, -1000, 392, -1000, -1000, 420, 288, -1000, + -1000, 27, -1000, -40, -40, -40, -40, -40, -40, -40, + -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, + -40, 915, -1000, -1000, 243, 834, 1101, 1101, 1101, 1101, + 1101, 1101, 193, -58, -1000, 234, 234, 612, -1000, 31, + 510, 35, -14, -1000, 86, 83, 1101, 367, -1000, -1000, + 50, 185, -1000, -1000, 286, -1000, 210, -1000, 186, 181, + 792, -1000, -49, -70, -44, -1000, 792, 792, 792, 792, + 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, + 792, 792, 792, -1000, -1000, -1000, 1142, 189, 188, 187, + 1, -1000, -1000, 1101, -1000, 167, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 228, 228, 344, -1000, 1, -1000, 1101, + 156, 147, 173, 173, -14, -14, -14, -14, -1000, -1000, + -1000, 522, -1000, -1000, 278, -1000, 1018, -1000, -1000, -1000, + -1000, 574, -1000, 231, -1000, 159, -1000, -1000, -1000, -1000, + 
-1000, 158, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 24, + 154, 140, -1000, -1000, -1000, 701, 1104, 97, 97, 97, + 234, 234, 234, 234, 35, 35, 165, 165, 165, 165, + 165, 1089, 1039, 165, 165, 1089, 35, 35, 165, 35, + 1104, -1000, 131, 119, 101, 1101, -14, 82, 81, 1101, + 510, 49, -1000, -1000, -1000, 386, -1000, 287, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 776, 1077, -1000, -1000, -1000, -1000, - -1000, -1000, 36, 36, 22, 36, 83, 83, 98, 49, - -1000, -1000, 366, 364, 360, 353, 338, 334, 331, 305, - 303, 301, 299, -1000, 291, -67, -65, -1000, -1000, -1000, - -1000, -1000, 42, 34, 1077, 312, -1000, -1000, 240, -1000, - 112, -1000, -1000, -1000, 424, -1000, 996, 193, -1000, -1000, - -1000, 36, -1000, 19, 17, 599, -1000, -1000, -1000, 77, - 289, 289, 289, 269, 217, 217, 77, 217, 77, -71, - 32, 229, 171, 171, -1000, -1000, 53, -1000, 1077, -1000, - -1000, -1000, -1000, -1000, -1000, 36, 36, -1000, -1000, -1000, - 36, -1000, -1000, -1000, -1000, -1000, -1000, 289, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 29, -1000, - -1000, 1077, 180, -1000, -1000, -1000, 336, -1000, -1000, 147, - -1000, 44, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 792, 1101, -1000, -1000, -1000, -1000, -1000, -1000, 38, 38, + 22, 38, 93, 93, 346, 44, -1000, -1000, 338, 337, + 335, 331, 311, 309, 293, 281, 253, 252, 244, -1000, + 126, -56, -54, -1000, -1000, -1000, -1000, -1000, 46, 42, + 1101, 504, -1000, -1000, 407, -1000, 170, -1000, -1000, -1000, + 432, -1000, 1018, 418, -1000, -1000, -1000, 38, -1000, 21, + 20, 791, -1000, -1000, -1000, 39, 56, 56, 56, 228, + 182, 182, 39, 182, 39, -75, 41, 155, 97, 97, + -1000, -1000, 59, -1000, 1101, -1000, -1000, -1000, -1000, -1000, + -1000, 38, 
38, -1000, -1000, -1000, 38, -1000, -1000, -1000, + -1000, -1000, -1000, 56, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 40, -1000, -1000, 1101, 325, -1000, + -1000, -1000, 289, -1000, -1000, 318, -1000, 295, -1000, -1000, + -1000, -1000, -1000, } var yyPgo = [...]int16{ - 0, 539, 12, 536, 7, 16, 533, 431, 22, 529, - 10, 527, 24, 351, 380, 526, 15, 523, 19, 14, - 522, 516, 8, 515, 4, 5, 501, 3, 6, 13, - 500, 26, 2, 485, 484, 23, 208, 482, 481, 479, - 93, 478, 477, 27, 476, 1, 42, 469, 11, 466, - 453, 451, 445, 439, 427, 425, 418, 385, 0, 408, - 9, 396, 395, 388, + 0, 491, 12, 490, 7, 16, 489, 428, 22, 488, + 10, 487, 14, 293, 371, 486, 15, 479, 19, 18, + 478, 475, 8, 474, 4, 5, 471, 3, 6, 13, + 468, 32, 2, 467, 464, 25, 214, 463, 453, 452, + 96, 451, 442, 24, 441, 1, 41, 435, 11, 433, + 430, 426, 415, 402, 397, 396, 389, 345, 0, 385, + 9, 357, 352, 295, } var yyR1 = [...]int8{ @@ -635,32 +642,33 @@ var yyR1 = [...]int8{ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 34, 34, 34, 34, 35, 35, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, - 38, 38, 38, 38, 38, 36, 39, 39, 52, 52, - 44, 44, 44, 44, 37, 37, 37, 37, 37, 37, - 18, 18, 18, 18, 17, 17, 17, 4, 4, 4, - 45, 45, 41, 43, 43, 42, 42, 42, 53, 60, - 49, 49, 50, 51, 33, 33, 33, 9, 9, 47, - 55, 55, 55, 55, 55, 55, 56, 57, 57, 57, - 46, 46, 46, 1, 1, 1, 2, 2, 2, 2, - 2, 2, 2, 14, 14, 7, 7, 7, 7, 7, + 38, 38, 38, 38, 38, 38, 38, 36, 39, 39, + 52, 52, 44, 44, 44, 44, 37, 37, 37, 37, + 37, 37, 18, 18, 18, 18, 17, 17, 17, 4, + 4, 4, 45, 45, 41, 43, 43, 42, 42, 42, + 53, 60, 49, 49, 50, 51, 33, 33, 33, 9, + 9, 47, 55, 55, 55, 55, 55, 55, 56, 57, + 57, 57, 46, 46, 46, 1, 1, 1, 2, 2, + 2, 2, 2, 2, 2, 14, 14, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 13, 13, 13, 13, - 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, - 63, 21, 21, 21, 21, 20, 20, 20, 20, 20, - 20, 20, 20, 20, 30, 30, 30, 22, 22, 22, - 22, 23, 23, 23, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 
24, 25, 25, 26, 26, 26, - 11, 11, 11, 11, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 6, 6, + 7, 7, 7, 7, 7, 7, 7, 7, 13, 13, + 13, 13, 15, 15, 15, 16, 16, 16, 16, 16, + 16, 16, 63, 21, 21, 21, 21, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 30, 30, 30, 22, + 22, 22, 22, 23, 23, 23, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 25, 25, 26, + 26, 26, 11, 11, 11, 11, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 8, 8, 5, 5, 5, 5, - 48, 48, 29, 29, 31, 31, 32, 32, 28, 27, - 27, 54, 10, 19, 19, 61, 61, 61, 61, 61, - 61, 61, 61, 61, 61, 12, 12, 58, 58, 58, - 58, 58, 58, 58, 58, 58, 58, 58, 58, 59, + 6, 6, 6, 6, 6, 6, 8, 8, 5, 5, + 5, 5, 48, 48, 29, 29, 31, 31, 32, 32, + 28, 27, 27, 54, 10, 19, 19, 61, 61, 61, + 61, 61, 61, 61, 61, 61, 61, 12, 12, 58, + 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, + 58, 59, } var yyR2 = [...]int8{ @@ -668,132 +676,135 @@ var yyR2 = [...]int8{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 1, 0, 1, 3, 3, - 1, 1, 3, 3, 1, 3, 3, 3, 5, 5, - 3, 4, 2, 1, 3, 1, 2, 1, 1, 1, - 3, 4, 2, 3, 2, 3, 1, 2, 3, 1, - 3, 3, 2, 2, 3, 5, 3, 1, 1, 4, - 6, 5, 6, 5, 4, 3, 2, 2, 1, 1, - 3, 4, 2, 3, 1, 2, 3, 3, 1, 3, - 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, + 4, 4, 4, 4, 4, 4, 4, 1, 0, 1, + 3, 3, 1, 1, 3, 3, 1, 3, 3, 3, + 5, 5, 3, 4, 2, 1, 3, 1, 2, 1, + 1, 1, 3, 4, 2, 3, 2, 3, 1, 2, + 3, 1, 3, 3, 2, 2, 3, 5, 3, 1, + 1, 4, 6, 5, 6, 5, 4, 3, 2, 2, + 1, 1, 3, 4, 2, 3, 1, 2, 3, 3, + 1, 3, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 3, 4, 2, 0, - 3, 1, 2, 3, 3, 1, 3, 3, 2, 1, - 2, 0, 3, 2, 1, 1, 3, 1, 3, 4, - 1, 3, 5, 5, 1, 1, 1, 4, 3, 3, - 2, 3, 1, 2, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 4, 3, 3, 1, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 3, 4, + 2, 0, 3, 1, 2, 3, 3, 1, 3, 3, + 2, 1, 2, 0, 3, 2, 1, 1, 3, 1, + 3, 4, 1, 3, 5, 5, 1, 1, 1, 4, + 3, 3, 2, 3, 1, 2, 3, 3, 3, 3, + 3, 3, 3, 
3, 3, 3, 3, 4, 3, 3, + 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, - 1, 1, 1, 0, 1, 1, 2, 3, 3, 4, - 4, 6, 7, 4, 1, 1, 1, 1, 2, 3, - 3, 3, 3, 3, 3, 3, 3, 6, 1, 3, + 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, + 1, 2, 1, 1, 1, 0, 1, 1, 2, 3, + 3, 4, 4, 6, 7, 4, 1, 1, 1, 1, + 2, 3, 3, 3, 3, 3, 3, 3, 3, 6, + 1, 3, } var yyChk = [...]int16{ - -1000, -62, 105, 106, 107, 108, 2, 10, -14, -7, - -13, 62, 63, 79, 64, 65, 82, 83, 84, 66, - 12, 47, 48, 51, 67, 18, 68, 86, 69, 70, - 71, 72, 73, 90, 93, 94, 74, 75, 95, 96, - 88, 87, 13, -63, -14, 10, -40, -34, -38, -41, + -1000, -62, 107, 108, 109, 110, 2, 10, -14, -7, + -13, 64, 65, 81, 66, 67, 84, 85, 86, 68, + 12, 49, 50, 53, 69, 18, 70, 88, 71, 72, + 73, 74, 75, 92, 95, 96, 76, 77, 97, 98, + 90, 89, 13, -63, -14, 10, -40, -34, -38, -41, -47, -48, -49, -50, -51, -53, -54, -55, -56, -57, -33, -58, -3, 12, 19, 9, 15, 25, -8, -7, - -46, 95, 96, -12, -59, 62, 63, 64, 65, 66, - 67, 68, 69, 70, 71, 72, 73, 74, 75, 41, - 57, 13, -57, -13, -15, 20, -16, 12, -10, 2, - 25, -21, 2, 41, 59, 42, 43, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 56, 57, 86, - 88, 87, 58, 14, 41, 57, 53, 42, 52, 56, - -35, -43, 2, 79, 90, 15, -43, -40, -58, -40, - -58, -46, 15, 15, 15, -1, 20, -2, 12, -10, - 2, 20, 7, 2, 4, 2, 4, 24, -36, -37, - -44, -39, -52, 78, -36, -36, -36, -36, -36, -36, - -36, -36, -36, -36, -36, -36, -36, -36, -36, -61, - 2, -48, -8, 95, 96, -12, -58, 68, 67, 15, - -32, -9, 2, -29, -31, 93, 94, 19, 9, 41, - 57, -60, 2, -58, -48, -8, 95, 96, -58, -58, - -58, -58, -58, -58, -43, -35, -18, 15, 2, -18, - -42, 22, -40, 22, 22, 22, 22, -58, 20, 7, - 2, -5, 2, 4, 54, 44, 55, -5, 20, -16, - 25, 2, 25, 2, -20, 5, -30, -22, 12, -29, - -31, 16, -40, 82, 83, 84, 85, 89, 80, 81, + -46, 97, 98, -12, -59, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 41, + 
59, 13, -57, -13, -15, 20, -16, 12, -10, 2, + 25, -21, 2, 41, 61, 42, 43, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 58, + 59, 88, 90, 89, 60, 14, 41, 59, 55, 42, + 54, 58, -35, -43, 2, 81, 92, 15, -43, -40, + -58, -40, -58, -46, 15, 15, 15, -1, 20, -2, + 12, -10, 2, 20, 7, 2, 4, 2, 4, 24, + -36, -37, -44, -39, -52, 80, -36, -36, -36, -36, + -36, -36, -36, -36, -36, -36, -36, -36, -36, -36, + -36, -36, -36, -61, 2, -48, -8, 97, 98, -12, + -58, 70, 69, 15, -32, -9, 2, -29, -31, 95, + 96, 19, 9, 41, 59, -60, 2, -58, -48, -8, + 97, 98, -58, -58, -58, -58, -58, -58, -43, -35, + -18, 15, 2, -18, -42, 22, -40, 22, 22, 22, + 22, -58, 20, 7, 2, -5, 2, 4, 56, 44, + 57, -5, 20, -16, 25, 2, 25, 2, -20, 5, + -30, -22, 12, -29, -31, 16, -40, 84, 85, 86, + 87, 91, 82, 83, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, - -40, -40, -40, -40, -40, -48, 95, 96, -12, 15, - -58, 15, 15, 15, -58, 15, -29, -29, 21, 6, - 2, -17, 22, -4, -6, 25, 2, 62, 78, 63, - 79, 64, 65, 66, 80, 81, 82, 83, 84, 12, - 85, 47, 48, 51, 67, 18, 68, 86, 89, 69, - 70, 71, 72, 73, 93, 94, 59, 74, 75, 95, - 96, 88, 87, 22, 7, 7, 20, -2, 25, 2, - 25, 2, 26, 26, -31, 26, 41, 57, -23, 24, - 17, -24, 30, 28, 29, 35, 36, 37, 33, 31, - 34, 32, 38, -45, 15, -45, -45, -18, -18, -19, - -18, -19, 15, 15, 15, -58, 22, 22, -58, 22, - -60, 21, 2, 22, 7, 2, -40, -58, -28, 19, - -28, 26, -28, -22, -22, 24, 17, 2, 17, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - -48, -8, 84, 83, 22, 22, -58, 22, 7, 21, - 2, 22, -4, 22, -28, 26, 26, 17, -24, -27, - 57, -28, -32, -32, -32, -29, -25, 14, -25, -27, - -25, -27, -11, 99, 100, 101, 102, 22, -48, -45, - -45, 7, -58, -28, -28, -28, -26, -32, 22, -58, - 22, 24, 21, 2, 22, 21, -32, + -40, -48, 97, 98, -12, 15, -58, 15, 15, 15, + -58, 15, -29, -29, 21, 6, 2, -17, 22, -4, + -6, 25, 2, 64, 80, 65, 81, 66, 67, 68, + 82, 83, 84, 85, 86, 12, 87, 49, 50, 53, + 69, 18, 70, 88, 91, 71, 72, 73, 74, 75, + 95, 96, 61, 76, 77, 97, 98, 90, 89, 22, + 7, 7, 20, -2, 25, 
2, 25, 2, 26, 26, + -31, 26, 41, 59, -23, 24, 17, -24, 30, 28, + 29, 35, 36, 37, 33, 31, 34, 32, 38, -45, + 15, -45, -45, -18, -18, -19, -18, -19, 15, 15, + 15, -58, 22, 22, -58, 22, -60, 21, 2, 22, + 7, 2, -40, -58, -28, 19, -28, 26, -28, -22, + -22, 24, 17, 2, 17, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, -48, -8, 86, 85, + 22, 22, -58, 22, 7, 21, 2, 22, -4, 22, + -28, 26, 26, 17, -24, -27, 59, -28, -32, -32, + -32, -29, -25, 14, -25, -27, -25, -27, -11, 101, + 102, 103, 104, 22, -48, -45, -45, 7, -58, -28, + -28, -28, -26, -32, 22, -58, 22, 24, 21, 2, + 22, 21, -32, } var yyDef = [...]int16{ - 0, -2, 149, 149, 0, 0, 7, 6, 1, 149, - 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, - 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, - 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, - 144, 145, 0, 2, -2, 3, 4, 8, 9, 10, + 0, -2, 151, 151, 0, 0, 7, 6, 1, 151, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, + 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, + 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, + 146, 147, 0, 2, -2, 3, 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 0, 124, 260, 261, 0, 271, 0, 98, - 99, 142, 143, 0, 298, -2, -2, -2, -2, -2, - -2, -2, -2, -2, -2, -2, -2, -2, -2, 254, - 255, 0, 5, 113, 0, 148, 151, 0, 155, 159, - 272, 160, 164, 46, 46, 46, 46, 46, 46, 46, - 46, 46, 46, 46, 46, 46, 46, 46, 46, 0, - 82, 83, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 25, 26, 0, 0, 0, 72, 0, 22, 96, - -2, 97, 0, 0, 0, 0, 102, 104, 0, 108, - 112, 146, 0, 152, 0, 158, 0, 163, 0, 45, - 54, 50, 51, 47, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 80, - 81, 275, 0, 0, 0, 0, 284, 285, 286, 0, - 84, 0, 86, 266, 267, 87, 88, 262, 263, 0, - 0, 0, 95, 79, 287, 0, 0, 0, 289, 290, - 291, 292, 293, 294, 23, 24, 27, 0, 63, 28, - 0, 74, 76, 78, 299, 295, 296, 0, 100, 0, - 105, 0, 111, 256, 257, 258, 259, 0, 147, 150, - 153, 156, 154, 157, 162, 165, 167, 170, 174, 175, - 176, 0, 29, 0, 0, 0, 0, 0, -2, -2, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 
276, 0, 0, 0, 0, - 288, 0, 0, 0, 0, 0, 264, 265, 89, 0, - 94, 0, 62, 65, 67, 68, 69, 218, 219, 220, - 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, - 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, - 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, - 251, 252, 253, 73, 77, 0, 101, 103, 106, 110, - 107, 109, 0, 0, 0, 0, 0, 0, 0, 0, - 180, 182, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 55, 0, 56, 57, 48, 49, 52, - 274, 53, 0, 0, 0, 0, 277, 278, 0, 85, - 0, 91, 93, 60, 0, 66, 75, 0, 166, 268, - 168, 0, 171, 0, 0, 0, 178, 183, 179, 0, + 21, 22, 0, 126, 262, 263, 0, 273, 0, 100, + 101, 144, 145, 0, 300, -2, -2, -2, -2, -2, + -2, -2, -2, -2, -2, -2, -2, -2, -2, 256, + 257, 0, 5, 115, 0, 150, 153, 0, 157, 161, + 274, 162, 166, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 0, 84, 85, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 25, 26, 0, 0, 0, 74, 0, + 22, 98, -2, 99, 0, 0, 0, 0, 104, 106, + 0, 110, 114, 148, 0, 154, 0, 160, 0, 165, + 0, 47, 56, 52, 53, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 279, 280, 0, 283, 0, 90, - 92, 61, 64, 297, 169, 0, 0, 177, 181, 184, - 0, 270, 185, 186, 187, 188, 189, 0, 190, 191, - 192, 193, 194, 200, 201, 202, 203, 70, 0, 58, - 59, 0, 0, 172, 173, 269, 0, 198, 71, 0, - 281, 0, 196, 199, 282, 195, 197, + 0, 0, 0, 82, 83, 277, 0, 0, 0, 0, + 286, 287, 288, 0, 86, 0, 88, 268, 269, 89, + 90, 264, 265, 0, 0, 0, 97, 81, 289, 0, + 0, 0, 291, 292, 293, 294, 295, 296, 23, 24, + 27, 0, 65, 28, 0, 76, 78, 80, 301, 297, + 298, 0, 102, 0, 107, 0, 113, 258, 259, 260, + 261, 0, 149, 152, 155, 158, 156, 159, 164, 167, + 169, 172, 176, 177, 178, 0, 29, 0, 0, 0, + 0, 0, -2, -2, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 278, 0, 0, 0, 0, 290, 0, 0, 0, + 0, 0, 266, 267, 91, 0, 96, 0, 64, 67, + 69, 70, 71, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, + 247, 248, 249, 250, 251, 252, 253, 254, 255, 75, + 79, 0, 
103, 105, 108, 112, 109, 111, 0, 0, + 0, 0, 0, 0, 0, 0, 182, 184, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 57, + 0, 58, 59, 50, 51, 54, 276, 55, 0, 0, + 0, 0, 279, 280, 0, 87, 0, 93, 95, 62, + 0, 68, 77, 0, 168, 270, 170, 0, 173, 0, + 0, 0, 180, 185, 181, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 281, 282, 0, 285, 0, 92, 94, 63, 66, 299, + 171, 0, 0, 179, 183, 186, 0, 272, 187, 188, + 189, 190, 191, 0, 192, 193, 194, 195, 196, 202, + 203, 204, 205, 72, 0, 60, 61, 0, 0, 174, + 175, 271, 0, 200, 73, 0, 283, 0, 198, 201, + 284, 197, 199, } var yyTok1 = [...]int8{ @@ -811,7 +822,7 @@ var yyTok2 = [...]int8{ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, - 102, 103, 104, 105, 106, 107, 108, 109, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, } var yyTok3 = [...]int8{ @@ -1294,14 +1305,24 @@ yydefault: { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } + case 45: + yyDollar = yyS[yypt-4 : yypt+1] + { + yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) + } case 46: + yyDollar = yyS[yypt-4 : yypt+1] + { + yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) + } + case 48: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.node = &BinaryExpr{ VectorMatching: &VectorMatching{Card: CardOneToOne}, } } - case 47: + case 49: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &BinaryExpr{ @@ -1309,56 +1330,56 @@ yydefault: ReturnBool: true, } } - case 48: + case 50: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings } - case 49: + case 51: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings 
yyVAL.node.(*BinaryExpr).VectorMatching.On = true } - case 52: + case 54: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardManyToOne yyVAL.node.(*BinaryExpr).VectorMatching.Include = yyDollar[3].strings } - case 53: + case 55: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardOneToMany yyVAL.node.(*BinaryExpr).VectorMatching.Include = yyDollar[3].strings } - case 55: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.node = yyDollar[1].node - fill := yyDollar[3].node.(*NumberLiteral).Val - yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill - yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill - } - case 56: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.node = yyDollar[1].node - fill := yyDollar[3].node.(*NumberLiteral).Val - yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill - } case 57: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node fill := yyDollar[3].node.(*NumberLiteral).Val + yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill } case 58: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.node = yyDollar[1].node + fill := yyDollar[3].node.(*NumberLiteral).Val + yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill + } + case 59: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.node = yyDollar[1].node + fill := yyDollar[3].node.(*NumberLiteral).Val + yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill + } + case 60: yyDollar = yyS[yypt-5 : yypt+1] { yyVAL.node = yyDollar[1].node @@ -1367,7 +1388,7 @@ yydefault: yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill_left yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill_right } - case 59: + case 61: yyDollar = yyS[yypt-5 : yypt+1] { fill_right := yyDollar[3].node.(*NumberLiteral).Val @@ -1375,44 +1396,44 @@ yydefault: 
yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill_left yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill_right } - case 60: + case 62: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.strings = yyDollar[2].strings } - case 61: + case 63: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.strings = yyDollar[2].strings } - case 62: + case 64: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.strings = []string{} } - case 63: + case 65: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("grouping opts", "\"(\"") yyVAL.strings = nil } - case 64: + case 66: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.strings = append(yyDollar[1].strings, yyDollar[3].item.Val) } - case 65: + case 67: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.strings = []string{yyDollar[1].item.Val} } - case 66: + case 68: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("grouping opts", "\",\" or \")\"") yyVAL.strings = yyDollar[1].strings } - case 67: + case 69: yyDollar = yyS[yypt-1 : yypt+1] { if !model.UTF8Validation.IsValidLabelName(yyDollar[1].item.Val) { @@ -1420,7 +1441,7 @@ yydefault: } yyVAL.item = yyDollar[1].item } - case 68: + case 70: yyDollar = yyS[yypt-1 : yypt+1] { unquoted := yylex.(*parser).unquoteString(yyDollar[1].item.Val) @@ -1431,18 +1452,18 @@ yydefault: yyVAL.item.Pos++ yyVAL.item.Val = unquoted } - case 69: + case 71: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("grouping opts", "label") yyVAL.item = Item{} } - case 70: + case 72: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[2].node.(*NumberLiteral) } - case 71: + case 73: yyDollar = yyS[yypt-4 : yypt+1] { nl := yyDollar[3].node.(*NumberLiteral) @@ -1452,7 +1473,7 @@ yydefault: nl.PosRange.Start = yyDollar[2].item.Pos yyVAL.node = nl } - case 72: + case 74: yyDollar = yyS[yypt-2 : yypt+1] { fn, exist := getFunction(yyDollar[1].item.Val, yylex.(*parser).functions) @@ -1471,38 +1492,38 @@ yydefault: }, } } - case 73: + case 75: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[2].node 
} - case 74: + case 76: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.node = Expressions{} } - case 75: + case 77: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = append(yyDollar[1].node.(Expressions), yyDollar[3].node.(Expr)) } - case 76: + case 78: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = Expressions{yyDollar[1].node.(Expr)} } - case 77: + case 79: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).addParseErrf(yyDollar[2].item.PositionRange(), "trailing commas not allowed in function call args") yyVAL.node = yyDollar[1].node } - case 78: + case 80: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &ParenExpr{Expr: yyDollar[2].node.(Expr), PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item)} } - case 79: + case 81: yyDollar = yyS[yypt-1 : yypt+1] { if numLit, ok := yyDollar[1].node.(*NumberLiteral); ok { @@ -1516,7 +1537,7 @@ yydefault: } yyVAL.node = yyDollar[1].node } - case 80: + case 82: yyDollar = yyS[yypt-3 : yypt+1] { if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok { @@ -1527,41 +1548,41 @@ yydefault: yylex.(*parser).addOffsetExpr(yyDollar[1].node, yyDollar[3].node.(*DurationExpr)) yyVAL.node = yyDollar[1].node } - case 81: + case 83: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("offset", "number, duration, step(), or range()") yyVAL.node = yyDollar[1].node } - case 82: + case 84: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).setAnchored(yyDollar[1].node) } - case 83: + case 85: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).setSmoothed(yyDollar[1].node) } - case 84: + case 86: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).setTimestamp(yyDollar[1].node, yyDollar[3].float) yyVAL.node = yyDollar[1].node } - case 85: + case 87: yyDollar = yyS[yypt-5 : yypt+1] { yylex.(*parser).setAtModifierPreprocessor(yyDollar[1].node, yyDollar[3].item) yyVAL.node = yyDollar[1].node } - case 86: + case 88: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("@", "timestamp") yyVAL.node = yyDollar[1].node } - case 89: + case 91: 
yyDollar = yyS[yypt-4 : yypt+1] { var errMsg string @@ -1591,7 +1612,7 @@ yydefault: EndPos: yylex.(*parser).lastClosing, } } - case 90: + case 92: yyDollar = yyS[yypt-6 : yypt+1] { var rangeNl time.Duration @@ -1613,7 +1634,7 @@ yydefault: EndPos: yyDollar[6].item.Pos + 1, } } - case 91: + case 93: yyDollar = yyS[yypt-5 : yypt+1] { var rangeNl time.Duration @@ -1628,31 +1649,31 @@ yydefault: EndPos: yyDollar[5].item.Pos + 1, } } - case 92: + case 94: yyDollar = yyS[yypt-6 : yypt+1] { yylex.(*parser).unexpected("subquery selector", "\"]\"") yyVAL.node = yyDollar[1].node } - case 93: + case 95: yyDollar = yyS[yypt-5 : yypt+1] { yylex.(*parser).unexpected("subquery selector", "number, duration, step(), range(), or \"]\"") yyVAL.node = yyDollar[1].node } - case 94: + case 96: yyDollar = yyS[yypt-4 : yypt+1] { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"") yyVAL.node = yyDollar[1].node } - case 95: + case 97: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("subquery or range selector", "number, duration, step(), or range()") yyVAL.node = yyDollar[1].node } - case 96: + case 98: yyDollar = yyS[yypt-2 : yypt+1] { if nl, ok := yyDollar[2].node.(*NumberLiteral); ok { @@ -1665,7 +1686,7 @@ yydefault: yyVAL.node = &UnaryExpr{Op: yyDollar[1].item.Typ, Expr: yyDollar[2].node.(Expr), StartPos: yyDollar[1].item.Pos} } } - case 97: + case 99: yyDollar = yyS[yypt-2 : yypt+1] { vs := yyDollar[2].node.(*VectorSelector) @@ -1674,7 +1695,7 @@ yydefault: yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 98: + case 100: yyDollar = yyS[yypt-1 : yypt+1] { vs := &VectorSelector{ @@ -1685,14 +1706,14 @@ yydefault: yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 99: + case 101: yyDollar = yyS[yypt-1 : yypt+1] { vs := yyDollar[1].node.(*VectorSelector) yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 100: + case 102: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1700,7 +1721,7 @@ 
yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item), } } - case 101: + case 103: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1708,7 +1729,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[4].item), } } - case 102: + case 104: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1716,7 +1737,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[2].item), } } - case 103: + case 105: yyDollar = yyS[yypt-3 : yypt+1] { if yyDollar[1].matchers != nil { @@ -1725,144 +1746,144 @@ yydefault: yyVAL.matchers = yyDollar[1].matchers } } - case 104: + case 106: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher} } - case 105: + case 107: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label matching", "\",\" or \"}\"") yyVAL.matchers = yyDollar[1].matchers } - case 106: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) - } - case 107: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) - } case 108: - yyDollar = yyS[yypt-1 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.matcher = yylex.(*parser).newMetricNameMatcher(yyDollar[1].item) + yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) } case 109: yyDollar = yyS[yypt-3 : yypt+1] { - yylex.(*parser).unexpected("label matching", "string") - yyVAL.matcher = nil + yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) } case 110: + yyDollar = yyS[yypt-1 : yypt+1] + { + yyVAL.matcher = yylex.(*parser).newMetricNameMatcher(yyDollar[1].item) + } + case 111: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label matching", "string") yyVAL.matcher = nil } - case 111: + case 112: + yyDollar = yyS[yypt-3 : 
yypt+1] + { + yylex.(*parser).unexpected("label matching", "string") + yyVAL.matcher = nil + } + case 113: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label matching", "label matching operator") yyVAL.matcher = nil } - case 112: + case 114: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label matching", "identifier or \"}\"") yyVAL.matcher = nil } - case 113: + case 115: yyDollar = yyS[yypt-2 : yypt+1] { b := labels.NewBuilder(yyDollar[2].labels) b.Set(labels.MetricName, yyDollar[1].item.Val) yyVAL.labels = b.Labels() } - case 114: + case 116: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.labels = yyDollar[1].labels } - case 146: + case 148: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.labels = labels.New(yyDollar[2].lblList...) } - case 147: + case 149: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.labels = labels.New(yyDollar[2].lblList...) } - case 148: + case 150: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.labels = labels.New() } - case 149: + case 151: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.labels = labels.New() } - case 150: + case 152: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label) } - case 151: + case 153: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.lblList = []labels.Label{yyDollar[1].label} } - case 152: + case 154: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\",\" or \"}\"") yyVAL.lblList = yyDollar[1].lblList } - case 153: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} - } - case 154: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} - } case 155: - yyDollar = yyS[yypt-1 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.label = labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val} + yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, 
Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } case 156: yyDollar = yyS[yypt-3 : yypt+1] { - yylex.(*parser).unexpected("label set", "string") - yyVAL.label = labels.Label{} + yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } case 157: + yyDollar = yyS[yypt-1 : yypt+1] + { + yyVAL.label = labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val} + } + case 158: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 158: + case 159: + yyDollar = yyS[yypt-3 : yypt+1] + { + yylex.(*parser).unexpected("label set", "string") + yyVAL.label = labels.Label{} + } + case 160: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } - case 159: + case 161: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } - case 160: + case 162: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).generatedParserResult = &seriesDescription{ @@ -1870,33 +1891,33 @@ yydefault: values: yyDollar[2].series, } } - case 161: + case 163: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.series = []SequenceValue{} } - case 162: + case 164: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) 
} - case 163: + case 165: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.series = yyDollar[1].series } - case 164: + case 166: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } - case 165: + case 167: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Omitted: true}} } - case 166: + case 168: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1904,12 +1925,12 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true}) } } - case 167: + case 169: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } - case 168: + case 170: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1918,7 +1939,7 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float}) } } - case 169: + case 171: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1928,12 +1949,12 @@ yydefault: yyDollar[1].float += yyDollar[2].float } } - case 170: + case 172: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}} } - case 171: + case 173: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1943,7 +1964,7 @@ yydefault: //$1 += $2 } } - case 172: + case 174: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1952,7 +1973,7 @@ yydefault: } yyVAL.series = val } - case 173: + case 175: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1961,7 +1982,7 @@ yydefault: } yyVAL.series = val } - case 174: + case 176: yyDollar = yyS[yypt-1 : yypt+1] { if yyDollar[1].item.Val != "stale" { @@ -1969,130 +1990,130 @@ yydefault: } yyVAL.float = math.Float64frombits(value.StaleNaN) } - case 177: + case 179: yyDollar = yyS[yypt-4 : yypt+1] { 
yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } - case 178: + case 180: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } - case 179: + case 181: yyDollar = yyS[yypt-3 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } - case 180: + case 182: yyDollar = yyS[yypt-2 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } - case 181: + case 183: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors)) } - case 182: + case 184: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.descriptors = yyDollar[1].descriptors } - case 183: + case 185: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. buckets:[5 10 7]") } - case 184: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["schema"] = yyDollar[3].int - } - case 185: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["sum"] = yyDollar[3].float - } case 186: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["count"] = yyDollar[3].float + yyVAL.descriptors["schema"] = yyDollar[3].int } case 187: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["z_bucket"] = yyDollar[3].float + yyVAL.descriptors["sum"] = yyDollar[3].float } case 188: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float + yyVAL.descriptors["count"] = yyDollar[3].float } case 189: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set + 
yyVAL.descriptors["z_bucket"] = yyDollar[3].float } case 190: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set + yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float } case 191: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["offset"] = yyDollar[3].int + yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set } case 192: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set + yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set } case 193: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["n_offset"] = yyDollar[3].int + yyVAL.descriptors["offset"] = yyDollar[3].int } case 194: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item + yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set } case 195: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = yyDollar[2].bucket_set + yyVAL.descriptors = yylex.(*parser).newMap() + yyVAL.descriptors["n_offset"] = yyDollar[3].int } case 196: yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = yyDollar[2].bucket_set + yyVAL.descriptors = yylex.(*parser).newMap() + yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item } case 197: + yyDollar = yyS[yypt-4 : yypt+1] + { + yyVAL.bucket_set = yyDollar[2].bucket_set + } + case 198: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.bucket_set = yyDollar[2].bucket_set + } + case 199: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) } - case 198: + case 200: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.bucket_set = []float64{yyDollar[1].float} } - case 260: + case 262: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &NumberLiteral{ @@ -2100,7 
+2121,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 261: + case 263: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -2115,12 +2136,12 @@ yydefault: Duration: true, } } - case 262: + case 264: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } - case 263: + case 265: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -2131,17 +2152,17 @@ yydefault: } yyVAL.float = dur.Seconds() } - case 264: + case 266: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = yyDollar[2].float } - case 265: + case 267: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = -yyDollar[2].float } - case 268: + case 270: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -2150,17 +2171,17 @@ yydefault: yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 269: + case 271: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.int = -int64(yyDollar[2].uint) } - case 270: + case 272: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.int = int64(yyDollar[1].uint) } - case 271: + case 273: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &StringLiteral{ @@ -2168,7 +2189,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 272: + case 274: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.item = Item{ @@ -2177,12 +2198,12 @@ yydefault: Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), } } - case 273: + case 275: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.strings = nil } - case 275: + case 277: yyDollar = yyS[yypt-1 : yypt+1] { nl := yyDollar[1].node.(*NumberLiteral) @@ -2193,7 +2214,7 @@ yydefault: } yyVAL.node = nl } - case 276: + case 278: yyDollar = yyS[yypt-2 : yypt+1] { nl := yyDollar[2].node.(*NumberLiteral) @@ -2208,7 +2229,7 @@ yydefault: nl.PosRange.Start = yyDollar[1].item.Pos yyVAL.node = nl } - case 277: + case 279: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2217,7 +2238,7 @@ yydefault: EndPos: yyDollar[3].item.PositionRange().End, } } - case 278: 
+ case 280: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2226,7 +2247,7 @@ yydefault: EndPos: yyDollar[3].item.PositionRange().End, } } - case 279: + case 281: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2239,7 +2260,7 @@ yydefault: StartPos: yyDollar[1].item.Pos, } } - case 280: + case 282: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2252,7 +2273,7 @@ yydefault: StartPos: yyDollar[1].item.Pos, } } - case 281: + case 283: yyDollar = yyS[yypt-6 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2263,7 +2284,7 @@ yydefault: RHS: yyDollar[5].node.(Expr), } } - case 282: + case 284: yyDollar = yyS[yypt-7 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2279,7 +2300,7 @@ yydefault: }, } } - case 283: + case 285: yyDollar = yyS[yypt-4 : yypt+1] { de := yyDollar[3].node.(*DurationExpr) @@ -2294,7 +2315,7 @@ yydefault: } yyVAL.node = yyDollar[3].node } - case 287: + case 289: yyDollar = yyS[yypt-1 : yypt+1] { nl := yyDollar[1].node.(*NumberLiteral) @@ -2305,7 +2326,7 @@ yydefault: } yyVAL.node = nl } - case 288: + case 290: yyDollar = yyS[yypt-2 : yypt+1] { switch expr := yyDollar[2].node.(type) { @@ -2338,25 +2359,25 @@ yydefault: break } } - case 289: + case 291: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) yyVAL.node = &DurationExpr{Op: ADD, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 290: + case 292: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) yyVAL.node = &DurationExpr{Op: SUB, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 291: + case 293: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) yyVAL.node = &DurationExpr{Op: MUL, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 292: + case 294: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) @@ 
-2367,7 +2388,7 @@ yydefault: } yyVAL.node = &DurationExpr{Op: DIV, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 293: + case 295: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) @@ -2378,13 +2399,13 @@ yydefault: } yyVAL.node = &DurationExpr{Op: MOD, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 294: + case 296: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) yyVAL.node = &DurationExpr{Op: POW, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 295: + case 297: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2393,7 +2414,7 @@ yydefault: EndPos: yyDollar[3].item.PositionRange().End, } } - case 296: + case 298: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2402,7 +2423,7 @@ yydefault: EndPos: yyDollar[3].item.PositionRange().End, } } - case 297: + case 299: yyDollar = yyS[yypt-6 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2413,7 +2434,7 @@ yydefault: RHS: yyDollar[5].node.(Expr), } } - case 299: + case 301: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[2].node.(Expr)) diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 7149985767..8aa9e9dcbe 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -189,21 +189,23 @@ var ItemTypeStr = map[ItemType]string{ TIMES: "x", SPACE: "", - SUB: "-", - ADD: "+", - MUL: "*", - MOD: "%", - DIV: "/", - EQLC: "==", - NEQ: "!=", - LTE: "<=", - LSS: "<", - GTE: ">=", - GTR: ">", - EQL_REGEX: "=~", - NEQ_REGEX: "!~", - POW: "^", - AT: "@", + SUB: "-", + ADD: "+", + MUL: "*", + MOD: "%", + DIV: "/", + EQLC: "==", + NEQ: "!=", + LTE: "<=", + LSS: "<", + GTE: ">=", + GTR: ">", + TRIM_UPPER: "/", + EQL_REGEX: "=~", + NEQ_REGEX: "!~", + POW: "^", + AT: "@", } func init() { @@ -446,6 +448,9 @@ func lexStatements(l *Lexer) stateFn { if t := l.peek(); t == '=' { l.next() l.emit(LTE) + 
} else if t := l.peek(); t == '/' { + l.next() + l.emit(TRIM_UPPER) } else { l.emit(LSS) } @@ -453,6 +458,9 @@ func lexStatements(l *Lexer) stateFn { if t := l.peek(); t == '=' { l.next() l.emit(GTE) + } else if t := l.peek(); t == '/' { + l.next() + l.emit(TRIM_LOWER) } else { l.emit(GTR) } diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index fd4b1f4178..57219c8981 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1872,3 +1872,58 @@ eval instant at 1m irate(nhcb_add_bucket[2m]) * 60 expect no_warn expect no_info {} {{schema:-53 sum:56 count:15 custom_values:[2 3 4 6] buckets:[1 0 1 5 8] counter_reset_hint:gauge}} + +# Test native histogram with trim operators ("/": TRIM_LOWER) +load 1m + h_test {{schema:0 sum:123.75 count:34 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1 2]}} + h_test_2 {{schema:2 sum:12.8286080906 count:28 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 3 1]}} + cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} + zero_bucket {{schema:0 sum:-6.75 z_bucket:5 z_bucket_w:0.01 buckets:[2 3] n_buckets:[1 2 3]}} + +# Native Histogram: Exponential Bucket Interpolation Tests +eval instant at 1m h_test_2 / 1.13 +{__name__="h_test_2"} {{schema:2 count:14.589417818876296 sum:-1.5258511531197865 z_bucket_w:0.001 offset:1 buckets:[0.589417818876296 4 7 3]}} + +eval instant at 1m h_test_2 >/ -1.3 +{__name__="h_test_2"} {{schema:2 count:25.54213947904476 sum:13.099057472672072 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 1.54213947904476]}} + +eval instant at 1m h_test_2 / 2 +{__name__="h_test"} {{count:24 sum:113.14339828220179 z_bucket_w:0.001 offset:2 buckets:[8 16]}} + +eval instant at 1m h_test >/ -1 +{__name__="h_test"} {{count:32 sum:120.92157287525382 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1]}} + +eval instant at 1m 
h_test / 13 +{__name__="cbh"} {{schema:-53 count:5.6 sum:92.5 custom_values:[5 10 15 20] offset:2 buckets:[1.6 3 1]}} + +eval instant at 1m cbh / 15 +{__name__="cbh"} {{schema:-53 count:4 sum:72.5 custom_values:[5 10 15 20] offset:3 buckets:[3 1]}} + +# Zero Bucket Edge Case: Interpolation Around Zero +eval instant at 1m zero_bucket / 0 +{__name__="zero_bucket"} {{count:7.5 sum:-18.77081528017131 z_bucket:2.5 z_bucket_w:0.01 buckets:[2 3]}} + +clear diff --git a/web/ui/mantine-ui/src/promql/ast.ts b/web/ui/mantine-ui/src/promql/ast.ts index 9f8c5cb102..f301b0a5c2 100644 --- a/web/ui/mantine-ui/src/promql/ast.ts +++ b/web/ui/mantine-ui/src/promql/ast.ts @@ -40,6 +40,8 @@ export enum binaryOperatorType { neq = "!=", gtr = ">", lss = "<", + trimUpper = "/", gte = ">=", lte = "<=", and = "and", diff --git a/web/ui/mantine-ui/src/promql/utils.ts b/web/ui/mantine-ui/src/promql/utils.ts index 2addeed8ab..54d4e97555 100644 --- a/web/ui/mantine-ui/src/promql/utils.ts +++ b/web/ui/mantine-ui/src/promql/utils.ts @@ -37,6 +37,8 @@ const binOpPrecedence = { [binaryOperatorType.lss]: 4, [binaryOperatorType.gte]: 4, [binaryOperatorType.lte]: 4, + [binaryOperatorType.trimLower]: 4, + [binaryOperatorType.trimUpper]: 4, [binaryOperatorType.and]: 5, [binaryOperatorType.or]: 6, [binaryOperatorType.unless]: 5, diff --git a/web/ui/module/codemirror-promql/src/complete/hybrid.ts b/web/ui/module/codemirror-promql/src/complete/hybrid.ts index 84c101b43c..37c3d5b625 100644 --- a/web/ui/module/codemirror-promql/src/complete/hybrid.ts +++ b/web/ui/module/codemirror-promql/src/complete/hybrid.ts @@ -60,6 +60,8 @@ import { LimitK, LimitRatio, CountValues, + TrimLower, + TrimUpper, } from '@prometheus-io/lezer-promql'; import { Completion, CompletionContext, CompletionResult } from '@codemirror/autocomplete'; import { EditorState } from '@codemirror/state'; @@ -408,7 +410,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode, pos: num // BinaryExpr( ..., Gtr , ... 
) // When the `bool` keyword is present, then the expression looks like this: // BinaryExpr( ..., Gtr , BoolModifier(...), ... ) - if (containsAtLeastOneChild(parent, Eql, Gte, Gtr, Lte, Lss, Neq) && !containsAtLeastOneChild(parent, BoolModifier)) { + if (containsAtLeastOneChild(parent, Eql, Gte, Gtr, TrimLower, TrimUpper, Lte, Lss, Neq) && !containsAtLeastOneChild(parent, BoolModifier)) { result.push({ kind: ContextKind.Bool }); } } @@ -579,6 +581,8 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode, pos: num case Eql: case Gte: case Gtr: + case TrimLower: + case TrimUpper: case Lte: case Lss: case And: diff --git a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts index 3670fffff7..9c6439e176 100644 --- a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts +++ b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts @@ -26,6 +26,8 @@ export const binOpTerms = [ { label: '>=' }, { label: '>' }, { label: '<' }, + { label: '/' }, { label: '<=' }, { label: '!=' }, { label: 'atan2' }, diff --git a/web/ui/module/codemirror-promql/src/parser/parser.ts b/web/ui/module/codemirror-promql/src/parser/parser.ts index f6e6fa50b6..bfd4acba22 100644 --- a/web/ui/module/codemirror-promql/src/parser/parser.ts +++ b/web/ui/module/codemirror-promql/src/parser/parser.ts @@ -49,12 +49,14 @@ import { StepInvariantExpr, SubqueryExpr, Topk, + TrimLower, + TrimUpper, UnaryExpr, Unless, UnquotedLabelMatcher, VectorSelector, } from '@prometheus-io/lezer-promql'; -import { containsAtLeastOneChild } from './path-finder'; +import { containsAtLeastOneChild, containsChild } from './path-finder'; import { getType } from './type'; import { buildLabelMatchers } from './matcher'; import { EditorState } from '@codemirror/state'; @@ -214,17 +216,25 @@ export class Parser { const lt = this.checkAST(lExpr); const rt = this.checkAST(rExpr); const boolModifierUsed = node.getChild(BoolModifier); 
- const isComparisonOperator = containsAtLeastOneChild(node, Eql, Neq, Lte, Lss, Gte, Gtr); + + const isComparisonOperator = containsAtLeastOneChild(node, Eql, Neq, Lte, Lss, Gte, Gtr, TrimLower, TrimUpper); + const isTrimLowerOperator = containsChild(node, TrimLower); + const isTrimUpperOperator = containsChild(node, TrimUpper); const isSetOperator = containsAtLeastOneChild(node, And, Or, Unless); - // BOOL modifier check if (boolModifierUsed) { - if (!isComparisonOperator) { + if (!isComparisonOperator || isTrimLowerOperator || isTrimUpperOperator) { this.addDiagnostic(node, 'bool modifier can only be used on comparison operators'); } } else { if (isComparisonOperator && lt === ValueType.scalar && rt === ValueType.scalar) { - this.addDiagnostic(node, 'comparisons between scalars must use BOOL modifier'); + if (isTrimLowerOperator) { + this.addDiagnostic(node, 'operator ">/" not allowed for Scalar operations'); + } else if (isTrimUpperOperator) { + this.addDiagnostic(node, 'operator "=" } Gtr { ">" } + TrimUpper { "/" } EqlRegex { "=~" } EqlSingle { "=" } NeqRegex { "!~" } diff --git a/web/ui/module/lezer-promql/test/expression.txt b/web/ui/module/lezer-promql/test/expression.txt index 109eb7af15..0fe5f3d918 100644 --- a/web/ui/module/lezer-promql/test/expression.txt +++ b/web/ui/module/lezer-promql/test/expression.txt @@ -716,3 +716,31 @@ rate(caddy_http_requests_total[5m] smoothed) ==> PromQL(FunctionCall(FunctionIdentifier(Rate),FunctionCallBody(SmoothedExpr(MatrixSelector(VectorSelector(Identifier),NumberDurationLiteralInDurationContext),Smoothed)))) + +# TrimUpper binary operator + +metric1 + +PromQL( + BinaryExpr( + VectorSelector(Identifier), + TrimUpper, + VectorSelector(Identifier) + ) +) + +# TrimLower binary operator + +metric1 >/ metric2 + +==> + +PromQL( + BinaryExpr( + VectorSelector(Identifier), + TrimLower, + VectorSelector(Identifier) + ) +) From 8528d5c44600ad3d294e836efcd0e91f8a6f97fc Mon Sep 17 00:00:00 2001 From: sujal shah Date: Thu, 27 Mar 
2025 04:24:18 +0530 Subject: [PATCH 03/39] promql: Implement / operators for trimming native histograms. This implements the TRIM_UPPER (/) operators that allow removing observations below or above a threshold from a histogram. The implementation zeros out buckets outside the desired range. It also recalculates the sum, including only bucket counts within the specified threshold range. Fixes #14651. Signed-off-by: sujal shah --- promql/engine.go | 204 +++++++----------- .../testdata/native_histograms.test | 53 +++-- 2 files changed, 119 insertions(+), 138 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 749352c23d..c076f60411 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3140,101 +3140,83 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { panic(fmt.Errorf("operator %q not allowed for Scalar operations", op)) } -// processCustomBucket handles custom bucket processing for histogram trimming. -// It returns the count to keep and the bucket midpoint for sum calculations. -func processCustomBucket( - bucket histogram.Bucket[float64], - rhs float64, - op parser.ItemType, -) (keepCount, bucketMidpoint float64) { - // Midpoint calculation - switch { - case math.IsInf(bucket.Lower, -1): - // First bucket: no lower bound, assume midpoint is near upper bound. - bucketMidpoint = bucket.Upper - case math.IsInf(bucket.Upper, 1): - bucketMidpoint = bucket.Lower - default: - bucketMidpoint = (bucket.Lower + bucket.Upper) / 2 +func handleInfinityBuckets(b histogram.Bucket[float64], le float64) (float64, float64) { + var underCount, bucketMidpoint float64 + + if math.IsInf(b.Lower, -1) { + switch { + case le >= b.Upper: + // le is greater than or equal to upper bound. Full count applies + underCount = b.Count + bucketMidpoint = b.Upper + case le < 0: + // le is negative and less than zero — nothing to keep. 
+ underCount = b.Count * 0.5 + bucketMidpoint = b.Upper + default: + // Interpolating with treated lower bound as 0 (linear) + fraction := le / b.Upper + underCount = b.Count * fraction + bucketMidpoint = le / 2 + } + return underCount, bucketMidpoint } - // Fractional keepCount calculation - switch op { - case parser.TRIM_UPPER: - switch { - case math.IsInf(bucket.Lower, -1): - // Special case for -Inf lower bound - if rhs >= bucket.Upper { - // Trim point is above bucket upper bound, keep all - keepCount = bucket.Count - } else { - // Trim point is within bucket or below, keep none - keepCount = 0 - } - case math.IsInf(bucket.Upper, 1): - // Special case for +Inf upper bound - if rhs <= bucket.Lower { - // Trim point is below bucket lower bound, keep none - keepCount = 0 - } else { - // Trim point is within the bucket, keep a portion - // Since we can't interpolate with +Inf, assume keep half for simplicity - // Another approach would be to use a different interpolation scheme - keepCount = bucket.Count * 0.5 - } - default: - // Normal case - finite bounds - switch { - case bucket.Upper <= rhs: - // Bucket entirely below trim point - keep all - keepCount = bucket.Count - case bucket.Lower < rhs: - // Bucket contains trim point - interpolate - fraction := (rhs - bucket.Lower) / (bucket.Upper - bucket.Lower) - keepCount = bucket.Count * fraction - default: - // Bucket entirely above trim point - discard - keepCount = 0 - } - } - - case parser.TRIM_LOWER: - switch { - case math.IsInf(bucket.Upper, 1): - // Special case for +Inf upper bound - if rhs <= bucket.Lower { - keepCount = bucket.Count - } else { - keepCount = 0 - } - case math.IsInf(bucket.Lower, -1): - // Special case for -Inf lower bound - if rhs >= bucket.Upper { - keepCount = 0 - } else { - keepCount = bucket.Count * 0.5 - } - default: - switch { - case bucket.Lower >= rhs: - keepCount = bucket.Count - case bucket.Upper > rhs: - fraction := (bucket.Upper - rhs) / (bucket.Upper - bucket.Lower) - keepCount 
= bucket.Count * fraction - default: - keepCount = 0 - } + if math.IsInf(b.Upper, 1) { + if le <= b.Lower { + underCount = 0 + bucketMidpoint = b.Lower + } else { + underCount = b.Count * 0.5 + bucketMidpoint = b.Lower } + return underCount, bucketMidpoint } - - return keepCount, bucketMidpoint + return underCount, bucketMidpoint } -func computeBucketTrim(op parser.ItemType, bucket histogram.Bucket[float64], rhs float64, isPostive, isCustomBucket bool) (float64, float64) { - if isCustomBucket { - return processCustomBucket(bucket, rhs, op) +// computeSplit calculates the portion of the bucket's count <= le (trim point). +func computeSplit(b histogram.Bucket[float64], le float64, isPositive, isCustom bool) float64 { + if le <= b.Lower { + return 0 } - return computeExponentialTrim(bucket, rhs, isPostive, op) + if le >= b.Upper { + return b.Count + } + + var fraction float64 + switch { + case isCustom || (b.Lower <= 0 && b.Upper >= 0): + fraction = (le - b.Lower) / (b.Upper - b.Lower) + default: + // Exponential interpolation + logLower := math.Log2(math.Abs(b.Lower)) + logUpper := math.Log2(math.Abs(b.Upper)) + logV := math.Log2(math.Abs(le)) + + if isPositive { + fraction = (logV - logLower) / (logUpper - logLower) + } else { + fraction = 1 - ((logV - logUpper) / (logLower - logUpper)) + } + } + + underCount := b.Count * fraction + return underCount +} + +func computeBucketTrim(op parser.ItemType, b histogram.Bucket[float64], rhs float64, isPositive, isCustomBucket bool) (float64, float64) { + if math.IsInf(b.Lower, -1) || math.IsInf(b.Upper, 1) { + return handleInfinityBuckets(b, rhs) + } + + product := math.Abs(b.Lower) * math.Abs(rhs) + underCount := computeSplit(b, rhs, isPositive, isCustomBucket) + if op == parser.TRIM_UPPER { + return underCount, computeMidpoint(b, product, isCustomBucket, isPositive) + } + product = math.Abs(rhs) * math.Abs(b.Upper) + return b.Count - underCount, computeMidpoint(b, product, isCustomBucket, isPositive) } // Helper function to 
trim native histogram buckets. @@ -3252,8 +3234,7 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser for i, iter := 0, trimmedHist.PositiveBucketIterator(); iter.Next(); i++ { hasPositive = true bucket := iter.At() - var keepCount, bucketMidpoint float64 - keepCount, bucketMidpoint = computeBucketTrim(op, bucket, rhs, true, isCustomBucket) + keepCount, bucketMidpoint := computeBucketTrim(op, bucket, rhs, true, isCustomBucket) // Bucket is entirely below the trim point - keep all switch { @@ -3280,8 +3261,7 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser for i, iter := 0, trimmedHist.NegativeBucketIterator(); iter.Next(); i++ { hasNegative = true bucket := iter.At() - var keepCount, bucketMidpoint float64 - keepCount, bucketMidpoint = computeBucketTrim(op, bucket, rhs, false, isCustomBucket) + keepCount, bucketMidpoint := computeBucketTrim(op, bucket, rhs, false, isCustomBucket) switch { case bucket.Upper <= rhs: @@ -3305,8 +3285,7 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser for i, iter := 0, trimmedHist.PositiveBucketIterator(); iter.Next(); i++ { hasPositive = true bucket := iter.At() - var keepCount, bucketMidpoint float64 - keepCount, bucketMidpoint = computeBucketTrim(op, bucket, rhs, true, isCustomBucket) + keepCount, bucketMidpoint := computeBucketTrim(op, bucket, rhs, true, isCustomBucket) switch { case bucket.Lower >= rhs: @@ -3330,8 +3309,8 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser for i, iter := 0, trimmedHist.NegativeBucketIterator(); iter.Next(); i++ { hasNegative = true bucket := iter.At() - var keepCount, bucketMidpoint float64 - keepCount, bucketMidpoint = computeBucketTrim(op, bucket, rhs, false, isCustomBucket) + keepCount, bucketMidpoint := computeBucketTrim(op, bucket, rhs, false, isCustomBucket) + switch { case bucket.Lower >= rhs: updatedCount += bucket.Count @@ -3403,36 +3382,19 @@ func 
trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser trimmedHist.Compact(0) } -func computeExponentialTrim(bucket histogram.Bucket[float64], rhs float64, isPositive bool, op parser.ItemType) (float64, float64) { - var fraction, bucketMidpoint, keepCount float64 - - logLower := math.Log2(math.Abs(bucket.Lower)) - logUpper := math.Log2(math.Abs(bucket.Upper)) - logRHS := math.Log2(math.Abs(rhs)) - - switch op { - case parser.TRIM_UPPER: +func computeMidpoint(b histogram.Bucket[float64], product float64, isCustom, isPositive bool) float64 { + midpoint := func(product float64, isPositive bool) float64 { if isPositive { - fraction = (logRHS - logLower) / (logUpper - logLower) - bucketMidpoint = math.Sqrt(bucket.Lower * rhs) - } else { - fraction = 1 - ((logRHS - logUpper) / (logLower - logUpper)) - bucketMidpoint = -math.Sqrt(math.Abs(bucket.Lower) * math.Abs(rhs)) - } - - case parser.TRIM_LOWER: - if isPositive { - fraction = (logUpper - logRHS) / (logUpper - logLower) - bucketMidpoint = math.Sqrt(rhs * bucket.Upper) - } else { - fraction = (logRHS - logUpper) / (logLower - logUpper) - bucketMidpoint = -math.Sqrt(math.Abs(rhs) * math.Abs(bucket.Upper)) + return math.Sqrt(product) } + return -math.Sqrt(product) } - keepCount = bucket.Count * fraction + if isCustom { + return (b.Lower + b.Upper) / 2 + } - return keepCount, bucketMidpoint + return midpoint(product, isPositive) } // vectorElemBinop evaluates a binary operation between two Vector elements. 
diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 57219c8981..8e3856f666 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1878,52 +1878,71 @@ load 1m h_test {{schema:0 sum:123.75 count:34 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1 2]}} h_test_2 {{schema:2 sum:12.8286080906 count:28 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 3 1]}} cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} + cbh_has_neg {{schema:-53 sum:172.5 count:15 custom_values:[-10 5 10 15 20] buckets:[2 1 6 4 3 1]}} zero_bucket {{schema:0 sum:-6.75 z_bucket:5 z_bucket_w:0.01 buckets:[2 3] n_buckets:[1 2 3]}} -# Native Histogram: Exponential Bucket Interpolation Tests +# Exponential buckets: trim uses exponential interpolation if cutoff is inside a bucket eval instant at 1m h_test_2 / 1.13 -{__name__="h_test_2"} {{schema:2 count:14.589417818876296 sum:-1.5258511531197865 z_bucket_w:0.001 offset:1 buckets:[0.589417818876296 4 7 3]}} + h_test_2 {{schema:2 count:14.589417818876296 sum:-1.5258511531197865 z_bucket_w:0.001 offset:1 buckets:[0.589417818876296 4 7 3]}} eval instant at 1m h_test_2 >/ -1.3 -{__name__="h_test_2"} {{schema:2 count:25.54213947904476 sum:13.099057472672072 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 1.54213947904476]}} + h_test_2 {{schema:2 count:25.54213947904476 sum:13.099057472672072 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 1.54213947904476]}} eval instant at 1m h_test_2 / 2 -{__name__="h_test"} {{count:24 sum:113.14339828220179 z_bucket_w:0.001 offset:2 buckets:[8 16]}} + h_test{} {{count:24 sum:113.14339828220179 z_bucket_w:0.001 offset:2 buckets:[8 16]}} eval instant at 1m h_test >/ -1 -{__name__="h_test"} {{count:32 sum:120.92157287525382 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1]}} + h_test{} {{count:32 
sum:120.92157287525382 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1]}} eval instant at 1m h_test / 13 -{__name__="cbh"} {{schema:-53 count:5.6 sum:92.5 custom_values:[5 10 15 20] offset:2 buckets:[1.6 3 1]}} + cbh{} {{schema:-53 count:5.6 sum:92.5 custom_values:[5 10 15 20] offset:2 buckets:[1.6 3 1]}} eval instant at 1m cbh / 15 -{__name__="cbh"} {{schema:-53 count:4 sum:72.5 custom_values:[5 10 15 20] offset:3 buckets:[3 1]}} + cbh{} {{schema:-53 count:4 sum:72.5 custom_values:[5 10 15 20] offset:3 buckets:[3 1]}} + +# Custom buckets: trim uses linear interpolation if cutoff is inside a bucket +eval instant at 1m cbh / 0 -{__name__="zero_bucket"} {{count:7.5 sum:-18.77081528017131 z_bucket:2.5 z_bucket_w:0.01 buckets:[2 3]}} + zero_bucket{} {{count:7.5 sum:-18.77081528017131 z_bucket:2.5 z_bucket_w:0.01 buckets:[2 3]}} clear From 25bd93f9ef02abca268a4315e6ace9671b347ffc Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Thu, 22 Jan 2026 15:41:02 +0200 Subject: [PATCH 04/39] Address PR comments Signed-off-by: Linas Medziunas --- cmd/prometheus/testdata/features.json | 2 + promql/engine.go | 160 ++++++++++++++------------ 2 files changed, 86 insertions(+), 76 deletions(-) diff --git a/cmd/prometheus/testdata/features.json b/cmd/prometheus/testdata/features.json index 4c893daae2..69cb853185 100644 --- a/cmd/prometheus/testdata/features.json +++ b/cmd/prometheus/testdata/features.json @@ -142,6 +142,8 @@ "=~": true, ">": true, ">=": true, + ">/": true, + "= b.Upper: - // le is greater than or equal to upper bound. Full count applies + // le is greater than or equal to upper bound. Full count applies. underCount = b.Count bucketMidpoint = b.Upper case le < 0: @@ -3154,7 +3154,7 @@ func handleInfinityBuckets(b histogram.Bucket[float64], le float64) (float64, fl underCount = b.Count * 0.5 bucketMidpoint = b.Upper default: - // Interpolating with treated lower bound as 0 (linear) + // Interpolating with treated lower bound as 0 (linear). 
fraction := le / b.Upper underCount = b.Count * fraction bucketMidpoint = le / 2 @@ -3175,24 +3175,24 @@ func handleInfinityBuckets(b histogram.Bucket[float64], le float64) (float64, fl return underCount, bucketMidpoint } -// computeSplit calculates the portion of the bucket's count <= le (trim point). -func computeSplit(b histogram.Bucket[float64], le float64, isPositive, isCustom bool) float64 { - if le <= b.Lower { +// computeSplit calculates the portion of the bucket's count <= rhs (trim point). +func computeSplit(b histogram.Bucket[float64], rhs float64, isPositive, isLinear bool) float64 { + if rhs <= b.Lower { return 0 } - if le >= b.Upper { + if rhs >= b.Upper { return b.Count } var fraction float64 switch { - case isCustom || (b.Lower <= 0 && b.Upper >= 0): - fraction = (le - b.Lower) / (b.Upper - b.Lower) + case isLinear: + fraction = (rhs - b.Lower) / (b.Upper - b.Lower) default: - // Exponential interpolation + // Exponential interpolation. logLower := math.Log2(math.Abs(b.Lower)) logUpper := math.Log2(math.Abs(b.Upper)) - logV := math.Log2(math.Abs(le)) + logV := math.Log2(math.Abs(rhs)) if isPositive { fraction = (logV - logLower) / (logUpper - logLower) @@ -3201,8 +3201,7 @@ func computeSplit(b histogram.Bucket[float64], le float64, isPositive, isCustom } } - underCount := b.Count * fraction - return underCount + return b.Count * fraction } func computeBucketTrim(op parser.ItemType, b histogram.Bucket[float64], rhs float64, isPositive, isCustomBucket bool) (float64, float64) { @@ -3210,12 +3209,14 @@ func computeBucketTrim(op parser.ItemType, b histogram.Bucket[float64], rhs floa return handleInfinityBuckets(b, rhs) } - product := math.Abs(b.Lower) * math.Abs(rhs) underCount := computeSplit(b, rhs, isPositive, isCustomBucket) + if op == parser.TRIM_UPPER { + product := math.Abs(b.Lower) * math.Abs(rhs) return underCount, computeMidpoint(b, product, isCustomBucket, isPositive) } - product = math.Abs(rhs) * math.Abs(b.Upper) + + product := 
math.Abs(rhs) * math.Abs(b.Upper) return b.Count - underCount, computeMidpoint(b, product, isCustomBucket, isPositive) } @@ -3227,24 +3228,39 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser hasPositive, hasNegative := false, false isCustomBucket := trimmedHist.UsesCustomBuckets() - // Calculate the fraction to keep for buckets that contain the trim value - // For TRIM_UPPER, we keep observations below the trim point (rhs) switch op { case parser.TRIM_UPPER: + // Calculate the fraction to keep for buckets that contain the trim value. + // For TRIM_UPPER, we keep observations below the trim point (rhs). + // Example: histogram / float. for i, iter := 0, trimmedHist.PositiveBucketIterator(); iter.Next(); i++ { - hasPositive = true bucket := iter.At() + if bucket.Count == 0 { + continue + } + if isCustomBucket { + if bucket.Upper <= 0 { + hasNegative = true + } else { + if bucket.Lower < 0 { + hasNegative = true + } + hasPositive = true + } + } else { + hasPositive = true + } + keepCount, bucketMidpoint := computeBucketTrim(op, bucket, rhs, true, isCustomBucket) switch { @@ -3292,8 +3327,7 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser updatedCount += bucket.Count case bucket.Upper > rhs: removedCount := bucket.Count - keepCount - removedMid := bucketMidpoint - removedSum += removedCount * removedMid + removedSum += removedCount * bucketMidpoint trimmedHist.PositiveBuckets[i] = keepCount updatedCount += keepCount @@ -3307,8 +3341,12 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser } for i, iter := 0, trimmedHist.NegativeBucketIterator(); iter.Next(); i++ { - hasNegative = true bucket := iter.At() + if bucket.Count == 0 { + continue + } + hasNegative = true + keepCount, bucketMidpoint := computeBucketTrim(op, bucket, rhs, false, isCustomBucket) switch { @@ -3316,8 +3354,7 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser 
updatedCount += bucket.Count case bucket.Upper > rhs: removedCount := bucket.Count - keepCount - removedMid := bucketMidpoint - removedSum += removedCount * removedMid + removedSum += removedCount * bucketMidpoint trimmedHist.NegativeBuckets[i] = keepCount updatedCount += keepCount @@ -3329,45 +3366,20 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser } } - // Handle the zero count bucket + // Handle the zero count bucket. if trimmedHist.ZeroCount > 0 { - zeroBucket := trimmedHist.ZeroBucket() - zLower := zeroBucket.Lower - zUpper := zeroBucket.Upper - - switch op { - case parser.TRIM_UPPER: - switch { - case rhs < zLower: - trimmedHist.ZeroCount = 0 - case rhs > zUpper: - updatedCount += trimmedHist.ZeroCount - default: - fraction := (rhs - zLower) / (zUpper - zLower) - keepCount := trimmedHist.ZeroCount * fraction - trimmedHist.ZeroCount = keepCount - updatedCount += keepCount - } - - case parser.TRIM_LOWER: - switch { - case rhs > zUpper: - trimmedHist.ZeroCount = 0 - case rhs < zLower: - updatedCount += trimmedHist.ZeroCount - default: - fraction := (zUpper - rhs) / (zUpper - zLower) - keepCount := trimmedHist.ZeroCount * fraction - trimmedHist.ZeroCount = keepCount - updatedCount += keepCount - } + keepCount := computeSplit(trimmedHist.ZeroBucket(), rhs, true, true) + if op == parser.TRIM_LOWER { + keepCount = trimmedHist.ZeroCount - keepCount } + trimmedHist.ZeroCount = keepCount + updatedCount += keepCount } - // Apply new sum + // Apply new sum. newSum := origSum - removedSum - // Clamp correction + // Clamp correction. if !hasNegative && newSum < 0 { newSum = 0 } @@ -3375,26 +3387,22 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser newSum = 0 } - // Update the histogram's count and sum + // Update the histogram's count and sum. 
trimmedHist.Count = updatedCount trimmedHist.Sum = newSum trimmedHist.Compact(0) } -func computeMidpoint(b histogram.Bucket[float64], product float64, isCustom, isPositive bool) float64 { - midpoint := func(product float64, isPositive bool) float64 { - if isPositive { - return math.Sqrt(product) - } - return -math.Sqrt(product) - } - - if isCustom { +func computeMidpoint(b histogram.Bucket[float64], product float64, isLinear, isPositive bool) float64 { + if isLinear { return (b.Lower + b.Upper) / 2 } - return midpoint(product, isPositive) + if isPositive { + return math.Sqrt(product) + } + return -math.Sqrt(product) } // vectorElemBinop evaluates a binary operation between two Vector elements. From e3264908823576b1af659ba0239f13abd8f62712 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Fri, 23 Jan 2026 10:07:52 +0200 Subject: [PATCH 05/39] Fix web UI validation Signed-off-by: Linas Medziunas --- .../codemirror-promql/src/complete/hybrid.ts | 2 +- .../module/codemirror-promql/src/parser/parser.ts | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/web/ui/module/codemirror-promql/src/complete/hybrid.ts b/web/ui/module/codemirror-promql/src/complete/hybrid.ts index 37c3d5b625..cff7457c93 100644 --- a/web/ui/module/codemirror-promql/src/complete/hybrid.ts +++ b/web/ui/module/codemirror-promql/src/complete/hybrid.ts @@ -410,7 +410,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode, pos: num // BinaryExpr( ..., Gtr , ... ) // When the `bool` keyword is present, then the expression looks like this: // BinaryExpr( ..., Gtr , BoolModifier(...), ... 
) - if (containsAtLeastOneChild(parent, Eql, Gte, Gtr, TrimLower, TrimUpper, Lte, Lss, Neq) && !containsAtLeastOneChild(parent, BoolModifier)) { + if (containsAtLeastOneChild(parent, Eql, Gte, Gtr, Lte, Lss, Neq) && !containsAtLeastOneChild(parent, BoolModifier)) { result.push({ kind: ContextKind.Bool }); } } diff --git a/web/ui/module/codemirror-promql/src/parser/parser.ts b/web/ui/module/codemirror-promql/src/parser/parser.ts index bfd4acba22..968a22c94d 100644 --- a/web/ui/module/codemirror-promql/src/parser/parser.ts +++ b/web/ui/module/codemirror-promql/src/parser/parser.ts @@ -216,24 +216,24 @@ export class Parser { const lt = this.checkAST(lExpr); const rt = this.checkAST(rExpr); const boolModifierUsed = node.getChild(BoolModifier); - - const isComparisonOperator = containsAtLeastOneChild(node, Eql, Neq, Lte, Lss, Gte, Gtr, TrimLower, TrimUpper); + const isComparisonOperator = containsAtLeastOneChild(node, Eql, Neq, Lte, Lss, Gte, Gtr); const isTrimLowerOperator = containsChild(node, TrimLower); const isTrimUpperOperator = containsChild(node, TrimUpper); const isSetOperator = containsAtLeastOneChild(node, And, Or, Unless); + // BOOL modifier check if (boolModifierUsed) { - if (!isComparisonOperator || isTrimLowerOperator || isTrimUpperOperator) { + if (!isComparisonOperator) { this.addDiagnostic(node, 'bool modifier can only be used on comparison operators'); } } else { - if (isComparisonOperator && lt === ValueType.scalar && rt === ValueType.scalar) { - if (isTrimLowerOperator) { + if (lt === ValueType.scalar && rt === ValueType.scalar) { + if (isComparisonOperator) { + this.addDiagnostic(node, 'comparisons between scalars must use BOOL modifier'); + } else if (isTrimLowerOperator) { this.addDiagnostic(node, 'operator ">/" not allowed for Scalar operations'); } else if (isTrimUpperOperator) { this.addDiagnostic(node, 'operator " Date: Tue, 27 Jan 2026 09:37:27 +0200 Subject: [PATCH 06/39] Handle infinity buckets conservatively Signed-off-by: Linas Medziunas 
--- promql/engine.go | 118 ++++++++------ .../testdata/native_histograms.test | 152 +++++++++++++++++- 2 files changed, 218 insertions(+), 52 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 24aac9d905..8c4fffd508 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3140,39 +3140,56 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { panic(fmt.Errorf("operator %q not allowed for Scalar operations", op)) } -func handleInfinityBuckets(b histogram.Bucket[float64], le float64) (float64, float64) { - var underCount, bucketMidpoint float64 - +func handleInfinityBuckets(isUpperTrim bool, b histogram.Bucket[float64], rhs float64) (underCount, bucketMidpoint float64) { + // Case 1: Bucket with lower bound (-Inf, upper] if math.IsInf(b.Lower, -1) { - switch { - case le >= b.Upper: - // le is greater than or equal to upper bound. Full count applies. - underCount = b.Count - bucketMidpoint = b.Upper - case le < 0: - // le is negative and less than zero — nothing to keep. - underCount = b.Count * 0.5 - bucketMidpoint = b.Upper - default: - // Interpolating with treated lower bound as 0 (linear). - fraction := le / b.Upper - underCount = b.Count * fraction - bucketMidpoint = le / 2 + // TRIM_UPPER (= b.Upper { + // As the rhs is greater than the upper bound, we keep the entire current bucket. + return b.Count, 0 + } + if rhs >= 0 && b.Upper > rhs && !math.IsInf(b.Upper, 1) { + // If upper is finite and positive, we treat lower as 0 (despite it de facto being -Inf). + // This is only possible with NHCB, so we can always use linear interpolation. + return b.Count * rhs / b.Upper, (rhs + b.Upper) / 2 + } + // Otherwise, we are targeting a valid trim, but as we don't know the exact distribution of values that belongs to an infinite bucket, we need to remove the entire bucket. 
+ return 0, b.Upper } - return underCount, bucketMidpoint + // TRIM_LOWER (>/) - remove values less than rhs + if rhs <= b.Lower { + // Impossible to happen because the lower bound is -Inf. Returning the entire current bucket. + return b.Count, 0 + } + if rhs >= 0 && b.Upper > rhs && !math.IsInf(b.Upper, 1) { + // If upper is finite and positive, we treat lower as 0 (despite it de facto being -Inf). + // This is only possible with NHCB, so we can always use linear interpolation. + return b.Count * (1 - rhs/b.Upper), rhs / 2 + } + // Otherwise, we are targeting a valid trim, but as we don't know the exact distribution of values that belongs to an infinite bucket, we need to remove the entire bucket. + return 0, b.Upper } + // Case 2: Bucket with upper bound [lower, +Inf) if math.IsInf(b.Upper, 1) { - if le <= b.Lower { - underCount = 0 - bucketMidpoint = b.Lower - } else { - underCount = b.Count * 0.5 - bucketMidpoint = b.Lower + if isUpperTrim { + // TRIM_UPPER (= lower and the bucket extends to +Inf, some values in this bucket could be > rhs, so we conservatively remove the entire bucket; + // when rhs < lower, all values in this bucket are >= lower > rhs, so all values should be removed. + return 0, b.Lower } - return underCount, bucketMidpoint + // TRIM_LOWER (>/) - remove values less than rhs. + if rhs <= b.Lower { + // rhs <= lower: all values in this bucket are >= lower >= rhs, so we keep the entire bucket. + return b.Count, 0 + } + // lower < rhs: we are inside the infinity bucket, but as we don't know the exact distribution of values, we conservatively remove the entire bucket. + return 0, b.Lower } - return underCount, bucketMidpoint + + panic(fmt.Errorf("one of the buckets must be infinite for handleInfinityBuckets, got %v", b)) } // computeSplit calculates the portion of the bucket's count <= rhs (trim point). 
@@ -3204,14 +3221,14 @@ func computeSplit(b histogram.Bucket[float64], rhs float64, isPositive, isLinear return b.Count * fraction } -func computeBucketTrim(op parser.ItemType, b histogram.Bucket[float64], rhs float64, isPositive, isCustomBucket bool) (float64, float64) { +func computeBucketTrim(b histogram.Bucket[float64], rhs float64, isUpperTrim, isPositive, isCustomBucket bool) (float64, float64) { if math.IsInf(b.Lower, -1) || math.IsInf(b.Upper, 1) { - return handleInfinityBuckets(b, rhs) + return handleInfinityBuckets(isUpperTrim, b, rhs) } underCount := computeSplit(b, rhs, isPositive, isCustomBucket) - if op == parser.TRIM_UPPER { + if isUpperTrim { product := math.Abs(b.Lower) * math.Abs(rhs) return underCount, computeMidpoint(b, product, isCustomBucket, isPositive) } @@ -3221,15 +3238,15 @@ func computeBucketTrim(op parser.ItemType, b histogram.Bucket[float64], rhs floa } // Helper function to trim native histogram buckets. -func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser.ItemType) { +// TODO: move trimHistogram to model/histogram/float_histogram.go (making it a method of FloatHistogram). +func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTrim bool) { updatedCount := 0.0 origSum := trimmedHist.Sum removedSum := 0.0 hasPositive, hasNegative := false, false isCustomBucket := trimmedHist.UsesCustomBuckets() - switch op { - case parser.TRIM_UPPER: + if isUpperTrim { // Calculate the fraction to keep for buckets that contain the trim value. // For TRIM_UPPER, we keep observations below the trim point (rhs). // Example: histogram / float. 
for i, iter := 0, trimmedHist.PositiveBucketIterator(); iter.Next(); i++ { @@ -3320,7 +3336,7 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser hasPositive = true } - keepCount, bucketMidpoint := computeBucketTrim(op, bucket, rhs, true, isCustomBucket) + keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, true, isCustomBucket) switch { case bucket.Lower >= rhs: @@ -3347,7 +3363,7 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser } hasNegative = true - keepCount, bucketMidpoint := computeBucketTrim(op, bucket, rhs, false, isCustomBucket) + keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, false, isCustomBucket) switch { case bucket.Lower >= rhs: @@ -3369,22 +3385,26 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, op parser // Handle the zero count bucket. if trimmedHist.ZeroCount > 0 { keepCount := computeSplit(trimmedHist.ZeroBucket(), rhs, true, true) - if op == parser.TRIM_LOWER { + if !isUpperTrim { keepCount = trimmedHist.ZeroCount - keepCount } trimmedHist.ZeroCount = keepCount updatedCount += keepCount } - // Apply new sum. - newSum := origSum - removedSum + newSum := 0.0 + if updatedCount != 0 { + // Calculate new sum only when there are at least some observations remaining. + // Otherwise, make it zero. + newSum = origSum - removedSum - // Clamp correction. - if !hasNegative && newSum < 0 { - newSum = 0 - } - if !hasPositive && newSum > 0 { - newSum = 0 + // Clamp correction. + if !hasNegative && newSum < 0 { + newSum = 0 + } + if !hasPositive && newSum > 0 { + newSum = 0 + } } // Update the histogram's count and sum. 
@@ -3459,11 +3479,11 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram return 0, hlhs.Copy().Div(rhs).Compact(0), true, nil, nil case parser.TRIM_UPPER: trimmedHist := hlhs.Copy() - trimHistogram(trimmedHist, rhs, op) + trimHistogram(trimmedHist, rhs, true) return 0, trimmedHist, true, nil, nil case parser.TRIM_LOWER: trimmedHist := hlhs.Copy() - trimHistogram(trimmedHist, rhs, op) + trimHistogram(trimmedHist, rhs, false) return 0, trimmedHist, true, nil, nil case parser.ADD, parser.SUB, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("histogram", parser.ItemTypeStr[op], "float", pos) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 8e3856f666..66542d6747 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1924,9 +1924,27 @@ eval instant at 1m cbh >/ 15 eval instant at 1m cbh / +Inf + cbh{} {{schema:-53 custom_values:[5 10 15 20]}} + +eval instant at 1m cbh / -Inf + cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} + +eval instant at 1m cbh >/ 0 + cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} + +eval instant at 1m cbh / 0 zero_bucket{} {{count:7.5 sum:-18.77081528017131 z_bucket:2.5 z_bucket_w:0.01 buckets:[2 3]}} +load 1m + cbh_one_bucket {{schema:-53 sum:100.0 count:100 buckets:[100]}} + +# Skip [-Inf; +Inf] bucket (100). +eval instant at 1m cbh_one_bucket / 10.0 + cbh_one_bucket {{schema:-53 sum:0.0 count:0 buckets:[0]}} + +# Keep [-Inf; +Inf] bucket (100). +eval instant at 1m cbh_one_bucket / +Inf + cbh_one_bucket {{schema:-53 sum:0 count:0 buckets:[0]}} + +# Keep [-Inf; +Inf] bucket (100). 
+eval instant at 1m cbh_one_bucket >/ -Inf + cbh_one_bucket {{schema:-53 sum:100 count:100 buckets:[100]}} + +# Skip [-Inf; +Inf] bucket (100). +eval instant at 1m cbh_one_bucket / -10.0 + cbh_two_buckets_split_at_zero {{schema:-53 sum:33.0 count:100 custom_values:[0] buckets:[0 100]}} + +# Skip [-Inf, 0] bucket (1). +eval instant at 1m cbh_two_buckets_split_at_zero >/ 0.0 + cbh_two_buckets_split_at_zero {{schema:-53 sum:33.0 count:100 custom_values:[0] buckets:[0 100]}} + +# Skip both buckets (1 and 100). +eval instant at 1m cbh_two_buckets_split_at_zero >/ 10.0 + cbh_two_buckets_split_at_zero {{schema:-53 sum:0.0 count:0 custom_values:[0] buckets:[0 0]}} + + +load 1m + cbh_two_buckets_split_at_positive {{schema:-53 sum:33 count:101 custom_values:[5] buckets:[1 100]}} + +# Skip (5; +Inf] bucket (100). +eval instant at 1m cbh_two_buckets_split_at_positive / -10.0 + cbh_two_buckets_split_at_positive {{schema:-53 sum:28.0 count:100 custom_values:[5] buckets:[0 100]}} + +# Keep both buckets (1 and 100). +eval instant at 1m cbh_two_buckets_split_at_positive >/ 0.0 + cbh_two_buckets_split_at_positive {{schema:-53 sum:33.0 count:101 custom_values:[5] buckets:[1 100]}} + +# Keep (5, 100] bucket (100) and 3/5 of [-Inf, 5] bucket (0.6 * 1). +eval instant at 1m cbh_two_buckets_split_at_positive >/ 2.0 + cbh_two_buckets_split_at_positive {{schema:-53 sum:32.6 count:100.6 custom_values:[5] buckets:[0.6 100]}} + +# Skip both buckets (1 and 100). +eval instant at 1m cbh_two_buckets_split_at_positive >/ 10.0 + cbh_two_buckets_split_at_positive {{schema:-53 sum:0.0 count:0 custom_values:[5] buckets:[0 0]}} + + +load 1m + cbh_two_buckets_split_at_negative {{schema:-53 sum:33 count:101 custom_values:[-5] buckets:[1 100]}} + +# Skip (-5; +Inf] bucket (100). +eval instant at 1m cbh_two_buckets_split_at_negative / -10.0 + cbh_two_buckets_split_at_negative {{schema:-53 sum:38.0 count:100 custom_values:[-5] buckets:[0 100]}} + +# Skip both buckets (1 and 100). 
+eval instant at 1m cbh_two_buckets_split_at_negative >/ -2.0 + cbh_two_buckets_split_at_negative {{schema:-53 custom_values:[-5]}} + +# Skip both buckets (1 and 100). +eval instant at 1m cbh_two_buckets_split_at_negative >/ 0.0 + cbh_two_buckets_split_at_negative {{schema:-53 custom_values:[-5]}} + +# Skip both buckets (1 and 100). +eval instant at 1m cbh_two_buckets_split_at_negative >/ 10.0 + cbh_two_buckets_split_at_negative {{schema:-53 sum:0.0 count:0 custom_values:[-5] buckets:[0 0]}} + + clear From d0b7fe402fc0ce864953efac107238de4a742043 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Tue, 27 Jan 2026 11:17:26 +0200 Subject: [PATCH 07/39] Edge test cases with Exponential Histogram Signed-off-by: Linas Medziunas --- .../promqltest/testdata/native_histograms.test | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 66542d6747..ea2400688b 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1881,6 +1881,24 @@ load 1m cbh_has_neg {{schema:-53 sum:172.5 count:15 custom_values:[-10 5 10 15 20] buckets:[2 1 6 4 3 1]}} zero_bucket {{schema:0 sum:-6.75 z_bucket:5 z_bucket_w:0.01 buckets:[2 3] n_buckets:[1 2 3]}} +eval instant at 1m h_test >/ -Inf + h_test {{schema:0 sum:123.75 count:34 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1 2]}} + +eval instant at 1m h_test / +Inf + h_test {{schema:0 z_bucket_w:0.001}} + +eval instant at 1m h_test / 0 + h_test {{schema:0 sum:120.21446609406726 count:30.5 z_bucket:0.5 z_bucket_w:0.001 buckets:[2 4 8 16]}} + +eval instant at 1m h_test Date: Tue, 9 Dec 2025 09:44:07 +0200 Subject: [PATCH 08/39] [PERF] PromQL: reuse matching data structures in VectorBinop/And/Or/Unless Signed-off-by: Linas Medziunas --- promql/engine.go | 124 +++++++++++++++++++++++++++++++++-------------- 1 file changed, 87 insertions(+), 37 
deletions(-) diff --git a/promql/engine.go b/promql/engine.go index b609dc4f0a..1c789b1a6a 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1208,10 +1208,12 @@ type EvalNodeHelper struct { lblResultBuf []byte // For binary vector matching. - rightSigs map[int]Sample - matchedSigs map[int]map[uint64]struct{} - resultMetric map[string]labels.Labels - numSigs int + rightSigs []Sample + sigsPresent []bool + matchedSigs []map[uint64]struct{} + matchedSigsPresent []bool + resultMetric map[string]labels.Labels + numSigs int // For info series matching. rightStrSigs map[string]Sample @@ -1220,6 +1222,44 @@ type EvalNodeHelper struct { enableDelayedNameRemoval bool } +func (enh *EvalNodeHelper) resetSigsPresent() []bool { + if len(enh.sigsPresent) == 0 { + enh.sigsPresent = make([]bool, enh.numSigs) + } else { + clear(enh.sigsPresent) + } + return enh.sigsPresent +} + +func (enh *EvalNodeHelper) resetMatchedSigsPresent() []bool { + if len(enh.matchedSigsPresent) == 0 { + enh.matchedSigsPresent = make([]bool, enh.numSigs) + } else { + clear(enh.matchedSigsPresent) + } + return enh.matchedSigsPresent +} + +func (enh *EvalNodeHelper) resetRightSigs() []Sample { + if enh.rightSigs == nil { + enh.rightSigs = make([]Sample, enh.numSigs) + } else { + clear(enh.rightSigs) + } + return enh.rightSigs +} + +func (enh *EvalNodeHelper) resetMatchedSigs() []map[uint64]struct{} { + if enh.matchedSigs == nil { + enh.matchedSigs = make([]map[uint64]struct{}, enh.numSigs) + } else { + for i := range enh.matchedSigs { + clear(enh.matchedSigs[i]) + } + } + return enh.matchedSigs +} + func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { if enh.lb == nil { enh.lb = labels.NewBuilder(lbls) @@ -2791,7 +2831,7 @@ func (*evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, lh } // Ordinals of signatures present on the right-hand side. 
- rightSigOrdinalsPresent := make([]bool, enh.numSigs) + rightSigOrdinalsPresent := enh.resetSigsPresent() for _, sh := range rhsh { rightSigOrdinalsPresent[sh.sigOrdinal] = true } @@ -2817,7 +2857,7 @@ func (*evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhs return enh.Out } - leftSigOrdinalsPresent := make([]bool, enh.numSigs) + leftSigOrdinalsPresent := enh.resetSigsPresent() // Add everything from the left-hand-side Vector. for i, ls := range lhs { leftSigOrdinalsPresent[lhsh[i].sigOrdinal] = true @@ -2844,7 +2884,7 @@ func (*evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatching, } // Ordinals of signatures present on the right-hand side. - rightSigOrdinalsPresent := make([]bool, enh.numSigs) + rightSigOrdinalsPresent := enh.resetSigsPresent() for _, sh := range rhsh { rightSigOrdinalsPresent[sh.sigOrdinal] = true } @@ -2876,19 +2916,16 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * } // All samples from the rhs by their join signature ordinal. - if enh.rightSigs == nil { - enh.rightSigs = make(map[int]Sample, len(enh.Out)) - } else { - clear(enh.rightSigs) - } - rightSigs := enh.rightSigs + rightSigs := enh.resetRightSigs() + rightSigsPresent := enh.resetSigsPresent() // Add all rhs samples to a map so we can easily find matches later. for i, rs := range rhs { sigOrd := rhsh[i].sigOrdinal // The rhs is guaranteed to be the 'one' side. Having multiple samples // with the same signature means that the matching is many-to-many. - if duplSample, found := rightSigs[sigOrd]; found { + if rightSigsPresent[sigOrd] { + duplSample := rightSigs[sigOrd] // oneSide represents which side of the vector represents the 'one' in the many-to-one relationship. 
oneSide := "right" if matching.Card == parser.CardOneToMany { @@ -2900,16 +2937,22 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * ";many-to-many matching not allowed: matching labels must be unique on one side", matchedLabels.String(), oneSide, rs.Metric.String(), duplSample.Metric.String()) } rightSigs[sigOrd] = rs + rightSigsPresent[sigOrd] = true } - // Tracks the matching by signature ordinals. For one-to-one operations the value is nil. - // For many-to-one the value is a set of hashes to detect duplicated result elements. - if enh.matchedSigs == nil { - enh.matchedSigs = make(map[int]map[uint64]struct{}, len(rightSigs)) + var ( + // Tracks the match-signature for one-to-one operations. + matchedSigsPresent []bool + + // Tracks the match-signature for many-to-one operations, the value is a set of signatures + // to detect duplicated result elements. + matchedSigs []map[uint64]struct{} + ) + if matching.Card == parser.CardOneToOne { + matchedSigsPresent = enh.resetMatchedSigsPresent() } else { - clear(enh.matchedSigs) + matchedSigs = enh.resetMatchedSigs() } - matchedSigs := enh.matchedSigs var lastErr error @@ -2938,26 +2981,26 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * } } - metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh) - if !ev.enableDelayedNameRemoval && returnBool { - metric = metric.DropReserved(schema.IsMetadataLabel) - } - insertedSigs, exists := matchedSigs[sigOrd] + dropMetricName := !ev.enableDelayedNameRemoval && returnBool + metric := resultMetric(ls.Metric, rs.Metric, op, matching, dropMetricName, enh) + if matching.Card == parser.CardOneToOne { - if exists { + if matchedSigsPresent[sigOrd] { ev.errorf("multiple matches for labels: many-to-one matching must be explicit (group_left/group_right)") } - matchedSigs[sigOrd] = nil // Set existence to true. 
+ matchedSigsPresent[sigOrd] = true } else { // In many-to-one matching the grouping labels have to ensure a unique metric // for the result Vector. Check whether those labels have already been added for // the same matching labels. insertSig := metric.Hash() - if !exists { - insertedSigs = map[uint64]struct{}{} - matchedSigs[sigOrd] = insertedSigs - } else if _, duplicate := insertedSigs[insertSig]; duplicate { + if matchedSigs[sigOrd] == nil { + matchedSigs[sigOrd] = map[uint64]struct{}{} + } + insertedSigs := matchedSigs[sigOrd] + + if _, duplicate := insertedSigs[insertSig]; duplicate { ev.errorf("multiple matches for labels: grouping labels must ensure unique matches") } insertedSigs[insertSig] = struct{}{} @@ -2980,8 +3023,12 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * for i, ls := range lhs { sigOrd := lhsh[i].sigOrdinal - rs, found := rightSigs[sigOrd] // Look for a match in the rhs Vector. - if !found { + var rs Sample + if rightSigsPresent[sigOrd] { + // Found a match in the rhs. + rs = rightSigs[sigOrd] + } else { + // Have to fall back to the fill value. fill := matching.FillValues.RHS if fill == nil { continue @@ -2998,8 +3045,11 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * // For any rhs samples which have not been matched, check if we need to // perform the operation with a fill value from the lhs. if fill := matching.FillValues.LHS; fill != nil { - for sigOrd, rs := range rightSigs { - if _, matched := matchedSigs[sigOrd]; matched { + for i, rs := range rhs { + sigOrd := rhsh[i].sigOrdinal + + if (len(matchedSigsPresent) > 0 && matchedSigsPresent[sigOrd]) || + (len(matchedSigs) > 0 && matchedSigs[sigOrd] != nil) { continue // Already matched. 
} ls := Sample{ @@ -3016,7 +3066,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * // resultMetric returns the metric for the given sample(s) based on the Vector // binary operation and the matching options. -func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.VectorMatching, enh *EvalNodeHelper) labels.Labels { +func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.VectorMatching, dropMetricName bool, enh *EvalNodeHelper) labels.Labels { if enh.resultMetric == nil { enh.resultMetric = make(map[string]labels.Labels, len(enh.Out)) } @@ -3034,7 +3084,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V str := string(enh.lblResultBuf) enh.resetBuilder(lhs) - if changesMetricSchema(op) { + if dropMetricName || changesMetricSchema(op) { // Setting empty Metadata causes the deletion of those if they exists. schema.Metadata{}.SetToLabels(enh.lb) } From e6c07126a69560144e816911d6e7fa3da7c8dc04 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Wed, 28 Jan 2026 15:07:24 +0200 Subject: [PATCH 09/39] Midpoint of negative bucket must be negative Signed-off-by: Linas Medziunas --- promql/engine.go | 4 ++-- .../promqltest/testdata/native_histograms.test | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 8c4fffd508..1036ed0d5e 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3310,7 +3310,7 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr trimmedHist.NegativeBuckets[i] = keepCount updatedCount += keepCount default: - bucketMidpoint = math.Sqrt(bucket.Lower * bucket.Upper) + bucketMidpoint = -math.Sqrt(bucket.Lower * bucket.Upper) removedSum += bucket.Count * bucketMidpoint trimmedHist.NegativeBuckets[i] = 0 } @@ -3375,7 +3375,7 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr trimmedHist.NegativeBuckets[i] 
= keepCount updatedCount += keepCount default: - bucketMidpoint = math.Sqrt(bucket.Lower * bucket.Upper) + bucketMidpoint = -math.Sqrt(bucket.Lower * bucket.Upper) removedSum += bucket.Count * bucketMidpoint trimmedHist.NegativeBuckets[i] = 0 } diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index ea2400688b..c62983f8b2 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1875,7 +1875,7 @@ eval instant at 1m irate(nhcb_add_bucket[2m]) * 60 # Test native histogram with trim operators ("/": TRIM_LOWER) load 1m - h_test {{schema:0 sum:123.75 count:34 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1 2]}} + h_test {{schema:0 sum:123.75 count:34 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1 2]}} h_test_2 {{schema:2 sum:12.8286080906 count:28 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 3 1]}} cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} cbh_has_neg {{schema:-53 sum:172.5 count:15 custom_values:[-10 5 10 15 20] buckets:[2 1 6 4 3 1]}} @@ -1894,7 +1894,7 @@ eval instant at 1m h_test / 0 - h_test {{schema:0 sum:120.21446609406726 count:30.5 z_bucket:0.5 z_bucket_w:0.001 buckets:[2 4 8 16]}} + h_test {{schema:0 sum:127.28553390593274 count:30.5 z_bucket:0.5 z_bucket_w:0.001 buckets:[2 4 8 16]}} eval instant at 1m h_test / 1.13 - h_test_2 {{schema:2 count:14.589417818876296 sum:-1.5258511531197865 z_bucket_w:0.001 offset:1 buckets:[0.589417818876296 4 7 3]}} + h_test_2 {{schema:2 count:14.589417818876296 sum:22.078693238664073 z_bucket_w:0.001 offset:1 buckets:[0.589417818876296 4 7 3]}} eval instant at 1m h_test_2 >/ -1.3 - h_test_2 {{schema:2 count:25.54213947904476 sum:13.099057472672072 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 1.54213947904476]}} + h_test_2 {{schema:2 count:25.54213947904476 sum:16.183479123487956 z_bucket:1 
z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 1.54213947904476]}} eval instant at 1m h_test_2 / 2 - h_test{} {{count:24 sum:113.14339828220179 z_bucket_w:0.001 offset:2 buckets:[8 16]}} + h_test{} {{count:24 sum:120.21446609406726 z_bucket_w:0.001 offset:2 buckets:[8 16]}} eval instant at 1m h_test >/ -1 - h_test{} {{count:32 sum:120.92157287525382 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1]}} + h_test{} {{count:32 sum:126.57842712474618 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1]}} eval instant at 1m h_test / 0 - zero_bucket{} {{count:7.5 sum:-18.77081528017131 z_bucket:2.5 z_bucket_w:0.01 buckets:[2 3]}} + zero_bucket{} {{count:7.5 sum:5.270815280171309 z_bucket:2.5 z_bucket_w:0.01 buckets:[2 3]}} load 1m cbh_one_bucket {{schema:-53 sum:100.0 count:100 buckets:[100]}} From b69ec3f39c59545a30480b05b9fa979809153b66 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Wed, 28 Jan 2026 15:11:10 +0200 Subject: [PATCH 10/39] Only track signs of surviving buckets Signed-off-by: Linas Medziunas --- promql/engine.go | 45 ++++++++----------- .../testdata/native_histograms.test | 6 +-- 2 files changed, 21 insertions(+), 30 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 1036ed0d5e..039c5fce7b 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3243,9 +3243,18 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr updatedCount := 0.0 origSum := trimmedHist.Sum removedSum := 0.0 - hasPositive, hasNegative := false, false isCustomBucket := trimmedHist.UsesCustomBuckets() + var hasPositive, hasNegative bool + trackBucketSigns := func(bucket histogram.Bucket[float64]) { + if bucket.Lower < 0 { + hasNegative = true + } + if bucket.Upper > 0 { + hasPositive = true + } + } + if isUpperTrim { // Calculate the fraction to keep for buckets that contain the trim value. // For TRIM_UPPER, we keep observations below the trim point (rhs). 
@@ -3255,18 +3264,6 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr if bucket.Count == 0 { continue } - if isCustomBucket { - if bucket.Upper <= 0 { - hasNegative = true - } else { - if bucket.Lower < 0 { - hasNegative = true - } - hasPositive = true - } - } else { - hasPositive = true - } keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, true, isCustomBucket) @@ -3274,10 +3271,12 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr case bucket.Upper <= rhs: // Bucket is entirely below the trim point - keep all. updatedCount += bucket.Count + trackBucketSigns(bucket) case bucket.Lower < rhs: // Bucket contains the trim point - interpolate. removedCount := bucket.Count - keepCount removedSum += removedCount * bucketMidpoint + trackBucketSigns(bucket) updatedCount += keepCount trimmedHist.PositiveBuckets[i] = keepCount @@ -3296,19 +3295,20 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr if bucket.Count == 0 { continue } - hasNegative = true keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, false, isCustomBucket) switch { case bucket.Upper <= rhs: updatedCount += bucket.Count + trackBucketSigns(bucket) case bucket.Lower < rhs: removedCount := bucket.Count - keepCount removedSum += removedCount * bucketMidpoint trimmedHist.NegativeBuckets[i] = keepCount updatedCount += keepCount + trackBucketSigns(bucket) default: bucketMidpoint = -math.Sqrt(bucket.Lower * bucket.Upper) removedSum += bucket.Count * bucketMidpoint @@ -3323,30 +3323,20 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr if bucket.Count == 0 { continue } - if isCustomBucket { - if bucket.Upper <= 0 { - hasNegative = true - } else { - if bucket.Lower < 0 { - hasNegative = true - } - hasPositive = true - } - } else { - hasPositive = true - } keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, true, 
isCustomBucket) switch { case bucket.Lower >= rhs: updatedCount += bucket.Count + trackBucketSigns(bucket) case bucket.Upper > rhs: removedCount := bucket.Count - keepCount removedSum += removedCount * bucketMidpoint trimmedHist.PositiveBuckets[i] = keepCount updatedCount += keepCount + trackBucketSigns(bucket) default: if !isCustomBucket { bucketMidpoint = math.Sqrt(bucket.Lower * bucket.Upper) @@ -3361,19 +3351,20 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr if bucket.Count == 0 { continue } - hasNegative = true keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, false, isCustomBucket) switch { case bucket.Lower >= rhs: updatedCount += bucket.Count + trackBucketSigns(bucket) case bucket.Upper > rhs: removedCount := bucket.Count - keepCount removedSum += removedCount * bucketMidpoint trimmedHist.NegativeBuckets[i] = keepCount updatedCount += keepCount + trackBucketSigns(bucket) default: bucketMidpoint = -math.Sqrt(bucket.Lower * bucket.Upper) removedSum += bucket.Count * bucketMidpoint diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index c62983f8b2..7f2585ef6e 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1897,7 +1897,7 @@ eval instant at 1m h_test >/ 0 h_test {{schema:0 sum:127.28553390593274 count:30.5 z_bucket:0.5 z_bucket_w:0.001 buckets:[2 4 8 16]}} eval instant at 1m h_test / -1 h_test{} {{count:32 sum:126.57842712474618 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1]}} eval instant at 1m h_test Date: Mon, 2 Feb 2026 15:46:32 +0200 Subject: [PATCH 11/39] Compute resulting sum based on surviving buckets Signed-off-by: Linas Medziunas --- promql/engine.go | 177 +++++++++--------- .../testdata/native_histograms.test | 62 +++--- 2 files changed, 119 insertions(+), 120 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 
039c5fce7b..2bb7cb4e4f 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3141,6 +3141,13 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { } func handleInfinityBuckets(isUpperTrim bool, b histogram.Bucket[float64], rhs float64) (underCount, bucketMidpoint float64) { + zeroIfInf := func(x float64) float64 { + if math.IsInf(x, 0) { + return 0 + } + return x + } + // Case 1: Bucket with lower bound (-Inf, upper] if math.IsInf(b.Lower, -1) { // TRIM_UPPER (/) - remove values less than rhs if rhs <= b.Lower { @@ -3168,7 +3175,7 @@ func handleInfinityBuckets(isUpperTrim bool, b histogram.Bucket[float64], rhs fl return b.Count * (1 - rhs/b.Upper), rhs / 2 } // Otherwise, we are targeting a valid trim, but as we don't know the exact distribution of values that belongs to an infinite bucket, we need to remove the entire bucket. - return 0, b.Upper + return 0, zeroIfInf(b.Upper) } // Case 2: Bucket with upper bound [lower, +Inf) @@ -3178,7 +3185,7 @@ func handleInfinityBuckets(isUpperTrim bool, b histogram.Bucket[float64], rhs fl // We don't care about lower here, because: // when rhs >= lower and the bucket extends to +Inf, some values in this bucket could be > rhs, so we conservatively remove the entire bucket; // when rhs < lower, all values in this bucket are >= lower > rhs, so all values should be removed. - return 0, b.Lower + return 0, zeroIfInf(b.Lower) } // TRIM_LOWER (>/) - remove values less than rhs. if rhs <= b.Lower { @@ -3186,7 +3193,7 @@ func handleInfinityBuckets(isUpperTrim bool, b histogram.Bucket[float64], rhs fl return b.Count, 0 } // lower < rhs: we are inside the infinity bucket, but as we don't know the exact distribution of values, we conservatively remove the entire bucket. 
- return 0, b.Lower + return 0, zeroIfInf(b.Lower) } panic(fmt.Errorf("one of the buckets must be infinite for handleInfinityBuckets, got %v", b)) @@ -3229,31 +3236,20 @@ func computeBucketTrim(b histogram.Bucket[float64], rhs float64, isUpperTrim, is underCount := computeSplit(b, rhs, isPositive, isCustomBucket) if isUpperTrim { - product := math.Abs(b.Lower) * math.Abs(rhs) - return underCount, computeMidpoint(b, product, isCustomBucket, isPositive) + return underCount, computeMidpoint(b.Lower, rhs, isPositive, isCustomBucket) } - product := math.Abs(rhs) * math.Abs(b.Upper) - return b.Count - underCount, computeMidpoint(b, product, isCustomBucket, isPositive) + return b.Count - underCount, computeMidpoint(rhs, b.Upper, isPositive, isCustomBucket) } // Helper function to trim native histogram buckets. // TODO: move trimHistogram to model/histogram/float_histogram.go (making it a method of FloatHistogram). func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTrim bool) { - updatedCount := 0.0 - origSum := trimmedHist.Sum - removedSum := 0.0 - isCustomBucket := trimmedHist.UsesCustomBuckets() - - var hasPositive, hasNegative bool - trackBucketSigns := func(bucket histogram.Bucket[float64]) { - if bucket.Lower < 0 { - hasNegative = true - } - if bucket.Upper > 0 { - hasPositive = true - } - } + var ( + updatedCount, updatedSum float64 + trimmedBuckets bool + isCustomBucket = trimmedHist.UsesCustomBuckets() + ) if isUpperTrim { // Calculate the fraction to keep for buckets that contain the trim value. @@ -3265,28 +3261,26 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr continue } - keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, true, isCustomBucket) - switch { case bucket.Upper <= rhs: // Bucket is entirely below the trim point - keep all. 
updatedCount += bucket.Count - trackBucketSigns(bucket) + bucketMidpoint := computeMidpoint(bucket.Lower, bucket.Upper, true, isCustomBucket) + updatedSum += bucketMidpoint * bucket.Count + case bucket.Lower < rhs: // Bucket contains the trim point - interpolate. - removedCount := bucket.Count - keepCount - removedSum += removedCount * bucketMidpoint - trackBucketSigns(bucket) + keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, true, isCustomBucket) updatedCount += keepCount + updatedSum += bucketMidpoint * keepCount trimmedHist.PositiveBuckets[i] = keepCount + trimmedBuckets = true + default: - if !isCustomBucket { - bucketMidpoint = math.Sqrt(bucket.Lower * bucket.Upper) - } - removedSum += bucket.Count * bucketMidpoint // Bucket is entirely above the trim point - discard. trimmedHist.PositiveBuckets[i] = 0 + trimmedBuckets = true } } @@ -3296,23 +3290,25 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr continue } - keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, false, isCustomBucket) - switch { case bucket.Upper <= rhs: + // Bucket is entirely below the trim point - keep all. updatedCount += bucket.Count - trackBucketSigns(bucket) - case bucket.Lower < rhs: - removedCount := bucket.Count - keepCount - removedSum += removedCount * bucketMidpoint + bucketMidpoint := computeMidpoint(bucket.Lower, bucket.Upper, false, isCustomBucket) + updatedSum += bucketMidpoint * bucket.Count + + case bucket.Lower < rhs: + // Bucket contains the trim point - interpolate. 
+ keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, false, isCustomBucket) - trimmedHist.NegativeBuckets[i] = keepCount updatedCount += keepCount - trackBucketSigns(bucket) + updatedSum += bucketMidpoint * keepCount + trimmedHist.NegativeBuckets[i] = keepCount + trimmedBuckets = true + default: - bucketMidpoint = -math.Sqrt(bucket.Lower * bucket.Upper) - removedSum += bucket.Count * bucketMidpoint trimmedHist.NegativeBuckets[i] = 0 + trimmedBuckets = true } } } else { // !isUpperTrim @@ -3324,25 +3320,25 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr continue } - keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, true, isCustomBucket) - switch { case bucket.Lower >= rhs: + // Bucket is entirely below the trim point - keep all. updatedCount += bucket.Count - trackBucketSigns(bucket) - case bucket.Upper > rhs: - removedCount := bucket.Count - keepCount - removedSum += removedCount * bucketMidpoint + bucketMidpoint := computeMidpoint(bucket.Lower, bucket.Upper, true, isCustomBucket) + updatedSum += bucketMidpoint * bucket.Count + + case bucket.Upper > rhs: + // Bucket contains the trim point - interpolate. 
+ keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, true, isCustomBucket) - trimmedHist.PositiveBuckets[i] = keepCount updatedCount += keepCount - trackBucketSigns(bucket) + updatedSum += bucketMidpoint * keepCount + trimmedHist.PositiveBuckets[i] = keepCount + trimmedBuckets = true + default: - if !isCustomBucket { - bucketMidpoint = math.Sqrt(bucket.Lower * bucket.Upper) - } - removedSum += bucket.Count * bucketMidpoint trimmedHist.PositiveBuckets[i] = 0 + trimmedBuckets = true } } @@ -3352,23 +3348,25 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr continue } - keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, false, isCustomBucket) - switch { case bucket.Lower >= rhs: + // Bucket is entirely below the trim point - keep all. updatedCount += bucket.Count - trackBucketSigns(bucket) - case bucket.Upper > rhs: - removedCount := bucket.Count - keepCount - removedSum += removedCount * bucketMidpoint + bucketMidpoint := computeMidpoint(bucket.Lower, bucket.Upper, false, isCustomBucket) + updatedSum += bucketMidpoint * bucket.Count + + case bucket.Upper > rhs: + // Bucket contains the trim point - interpolate. 
+ keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, false, isCustomBucket) - trimmedHist.NegativeBuckets[i] = keepCount updatedCount += keepCount - trackBucketSigns(bucket) + updatedSum += bucketMidpoint * keepCount + trimmedHist.NegativeBuckets[i] = keepCount + trimmedBuckets = true + default: - bucketMidpoint = -math.Sqrt(bucket.Lower * bucket.Upper) - removedSum += bucket.Count * bucketMidpoint trimmedHist.NegativeBuckets[i] = 0 + trimmedBuckets = true } } } @@ -3379,41 +3377,42 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr if !isUpperTrim { keepCount = trimmedHist.ZeroCount - keepCount } - trimmedHist.ZeroCount = keepCount + if trimmedHist.ZeroCount != keepCount { + trimmedHist.ZeroCount = keepCount + trimmedBuckets = true + } updatedCount += keepCount } - newSum := 0.0 - if updatedCount != 0 { - // Calculate new sum only when there are at least some observations remaining. - // Otherwise, make it zero. - newSum = origSum - removedSum + if trimmedBuckets { + // Only update the totals in case some bucket(s) were fully (or partially) trimmed. + trimmedHist.Count = updatedCount + trimmedHist.Sum = updatedSum - // Clamp correction. - if !hasNegative && newSum < 0 { - newSum = 0 - } - if !hasPositive && newSum > 0 { - newSum = 0 - } + trimmedHist.Compact(0) } - - // Update the histogram's count and sum. 
- trimmedHist.Count = updatedCount - trimmedHist.Sum = newSum - - trimmedHist.Compact(0) } -func computeMidpoint(b histogram.Bucket[float64], product float64, isLinear, isPositive bool) float64 { - if isLinear { - return (b.Lower + b.Upper) / 2 +func computeMidpoint(survivingIntervalBoundA, survivingIntervalBoundB float64, isPositive, isLinear bool) float64 { + if math.IsInf(survivingIntervalBoundA, 0) { + if math.IsInf(survivingIntervalBoundB, 0) { + return 0 + } + return survivingIntervalBoundB + } else if math.IsInf(survivingIntervalBoundB, 0) { + return survivingIntervalBoundA } - if isPositive { - return math.Sqrt(product) + if isLinear { + return (survivingIntervalBoundA + survivingIntervalBoundB) / 2 } - return -math.Sqrt(product) + + geoMean := math.Sqrt(math.Abs(survivingIntervalBoundA * survivingIntervalBoundB)) + + if isPositive { + return geoMean + } + return -geoMean } // vectorElemBinop evaluates a binary operation between two Vector elements. diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 7f2585ef6e..adf5f692e6 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1894,43 +1894,43 @@ eval instant at 1m h_test / 0 - h_test {{schema:0 sum:127.28553390593274 count:30.5 z_bucket:0.5 z_bucket_w:0.001 buckets:[2 4 8 16]}} + h_test {{schema:0 sum:120.20815280171308 count:30.5 z_bucket:0.5 z_bucket_w:0.001 buckets:[2 4 8 16]}} eval instant at 1m h_test / 1.13 - h_test_2 {{schema:2 count:14.589417818876296 sum:22.078693238664073 z_bucket_w:0.001 offset:1 buckets:[0.589417818876296 4 7 3]}} + h_test_2 {{schema:2 count:14.589417818876296 sum:22.168126492693734 z_bucket_w:0.001 offset:1 buckets:[0.589417818876296 4 7 3]}} eval instant at 1m h_test_2 >/ -1.3 - h_test_2 {{schema:2 count:25.54213947904476 sum:16.183479123487956 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 1.54213947904476]}} + h_test_2 
{{schema:2 count:25.54213947904476 sum:16.29588491217537 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 1.54213947904476]}} eval instant at 1m h_test_2 / 2 - h_test{} {{count:24 sum:120.21446609406726 z_bucket_w:0.001 offset:2 buckets:[8 16]}} + h_test{} {{count:24 sum:113.13708498984761 z_bucket_w:0.001 offset:2 buckets:[8 16]}} eval instant at 1m h_test >/ -1 - h_test{} {{count:32 sum:126.57842712474618 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1]}} + h_test{} {{count:32 sum:119.50104602052653 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1]}} eval instant at 1m h_test / 13 - cbh{} {{schema:-53 count:5.6 sum:92.5 custom_values:[5 10 15 20] offset:2 buckets:[1.6 3 1]}} + cbh{} {{schema:-53 count:5.6 sum:94.9 custom_values:[5 10 15 20] offset:2 buckets:[1.6 3 1]}} eval instant at 1m cbh / 15 # Custom buckets: trim uses linear interpolation if cutoff is inside a bucket eval instant at 1m cbh / -Inf cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} eval instant at 1m cbh >/ 0 - cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} + cbh {{schema:-53 sum:167.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} eval instant at 1m cbh / 0 - zero_bucket{} {{count:7.5 sum:5.270815280171309 z_bucket:2.5 z_bucket_w:0.01 buckets:[2 3]}} + zero_bucket{} {{count:7.5 sum:5.656854249492381 z_bucket:2.5 z_bucket_w:0.01 buckets:[2 3]}} load 1m cbh_one_bucket {{schema:-53 sum:100.0 count:100 buckets:[100]}} @@ -2014,7 +2014,7 @@ load 1m # Skip (0; +Inf] bucket (100). eval instant at 1m cbh_two_buckets_split_at_zero / -10.0 - cbh_two_buckets_split_at_zero {{schema:-53 sum:33.0 count:100 custom_values:[0] buckets:[0 100]}} + cbh_two_buckets_split_at_zero {{schema:-53 sum:0.0 count:100 custom_values:[0] buckets:[0 100]}} # Skip [-Inf, 0] bucket (1). 
eval instant at 1m cbh_two_buckets_split_at_zero >/ 0.0 - cbh_two_buckets_split_at_zero {{schema:-53 sum:33.0 count:100 custom_values:[0] buckets:[0 100]}} + cbh_two_buckets_split_at_zero {{schema:-53 sum:0.0 count:100 custom_values:[0] buckets:[0 100]}} # Skip both buckets (1 and 100). eval instant at 1m cbh_two_buckets_split_at_zero >/ 10.0 @@ -2042,11 +2042,11 @@ load 1m # Skip (5; +Inf] bucket (100). eval instant at 1m cbh_two_buckets_split_at_positive / -10.0 - cbh_two_buckets_split_at_positive {{schema:-53 sum:28.0 count:100 custom_values:[5] buckets:[0 100]}} + cbh_two_buckets_split_at_positive {{schema:-53 sum:500.0 count:100 custom_values:[5] buckets:[0 100]}} # Keep both buckets (1 and 100). eval instant at 1m cbh_two_buckets_split_at_positive >/ 0.0 - cbh_two_buckets_split_at_positive {{schema:-53 sum:33.0 count:101 custom_values:[5] buckets:[1 100]}} + cbh_two_buckets_split_at_positive {{schema:-53 sum:500.0 count:101 custom_values:[5] buckets:[1 100]}} # Keep (5, 100] bucket (100) and 3/5 of [-Inf, 5] bucket (0.6 * 1). eval instant at 1m cbh_two_buckets_split_at_positive >/ 2.0 - cbh_two_buckets_split_at_positive {{schema:-53 sum:32.6 count:100.6 custom_values:[5] buckets:[0.6 100]}} + cbh_two_buckets_split_at_positive {{schema:-53 sum:500.6 count:100.6 custom_values:[5] buckets:[0.6 100]}} # Skip both buckets (1 and 100). eval instant at 1m cbh_two_buckets_split_at_positive >/ 10.0 @@ -2078,15 +2078,15 @@ load 1m # Skip (-5; +Inf] bucket (100). eval instant at 1m cbh_two_buckets_split_at_negative / -10.0 - cbh_two_buckets_split_at_negative {{schema:-53 sum:38.0 count:100 custom_values:[-5] buckets:[0 100]}} + cbh_two_buckets_split_at_negative {{schema:-53 sum:-500 count:100 custom_values:[-5] buckets:[0 100]}} # Skip both buckets (1 and 100). 
eval instant at 1m cbh_two_buckets_split_at_negative >/ -2.0 From a3a52004ba249b89d5c58b1a4b18119479d53266 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Fri, 6 Feb 2026 13:41:51 +0200 Subject: [PATCH 12/39] Update native_histograms.test following the feedback Signed-off-by: Linas Medziunas --- .../testdata/native_histograms.test | 42 ++++++++++++++----- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index adf5f692e6..7072da8a88 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1873,13 +1873,10 @@ eval instant at 1m irate(nhcb_add_bucket[2m]) * 60 expect no_info {} {{schema:-53 sum:56 count:15 custom_values:[2 3 4 6] buckets:[1 0 1 5 8] counter_reset_hint:gauge}} + # Test native histogram with trim operators ("/": TRIM_LOWER) load 1m h_test {{schema:0 sum:123.75 count:34 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1 2]}} - h_test_2 {{schema:2 sum:12.8286080906 count:28 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 3 1]}} - cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} - cbh_has_neg {{schema:-53 sum:172.5 count:15 custom_values:[-10 5 10 15 20] buckets:[2 1 6 4 3 1]}} - zero_bucket {{schema:0 sum:-6.75 z_bucket:5 z_bucket_w:0.01 buckets:[2 3] n_buckets:[1 2 3]}} eval instant at 1m h_test >/ -Inf h_test {{schema:0 sum:123.75 count:34 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1 2]}} @@ -1900,6 +1897,17 @@ eval instant at 1m h_test / 1.4142135624 + h_test {{count:26 sum:116.50067065070982 z_bucket_w:0.001 buckets:[0 2 8 16]}} + + +load 1m + h_test_2 {{schema:2 sum:12.8286080906 count:28 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 3 1]}} + eval instant at 1m h_test_2 / -1 eval instant at 1m h_test / 13 - cbh{} {{schema:-53 count:5.6 sum:94.9 custom_values:[5 10 15 20] 
offset:2 buckets:[1.6 3 1]}} - eval instant at 1m cbh / 15 cbh{} {{schema:-53 count:4 sum:72.5 custom_values:[5 10 15 20] offset:3 buckets:[3 1]}} # Custom buckets: trim uses linear interpolation if cutoff is inside a bucket +eval instant at 1m cbh / 13 + cbh{} {{schema:-53 count:5.6 sum:94.9 custom_values:[5 10 15 20] offset:2 buckets:[1.6 3 1]}} + eval instant at 1m cbh / -Inf cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} +# Use trimming value (0) as a midpoint of lowest bucket when recomputing the sum. eval instant at 1m cbh >/ 0 cbh {{schema:-53 sum:167.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} eval instant at 1m cbh / 0 zero_bucket{} {{count:7.5 sum:5.656854249492381 z_bucket:2.5 z_bucket_w:0.01 buckets:[2 3]}} + load 1m cbh_one_bucket {{schema:-53 sum:100.0 count:100 buckets:[100]}} From 16c801d04eaa308c68b709aeed023656f9192ac1 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Mon, 9 Feb 2026 17:40:23 +0200 Subject: [PATCH 13/39] Change interpolation within zero bucket Signed-off-by: Linas Medziunas --- promql/engine.go | 51 +++++++++++++++++-- .../testdata/native_histograms.test | 8 +-- 2 files changed, 51 insertions(+), 8 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 2bb7cb4e4f..67035c640a 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3228,6 +3228,45 @@ func computeSplit(b histogram.Bucket[float64], rhs float64, isPositive, isLinear return b.Count * fraction } +func computeZeroBucketTrim(zeroBucket histogram.Bucket[float64], rhs float64, hasNegative, hasPositive, isUpperTrim bool) (float64, float64) { + var ( + lower = zeroBucket.Lower + upper = zeroBucket.Upper + ) + if !hasPositive { + upper = 0 + } + if !hasNegative { + lower = 0 + } + + var fraction, midpoint float64 + + if isUpperTrim { + if rhs <= lower { + return 0, 0 + } + if rhs >= upper { + return zeroBucket.Count, (lower + upper) / 2 + } + + fraction = (rhs - lower) / (upper - lower) + midpoint = (lower + 
rhs) / 2 + } else { // lower trim + if rhs <= lower { + return zeroBucket.Count, (lower + upper) / 2 + } + if rhs >= upper { + return 0, 0 + } + + fraction = (upper - rhs) / (upper - lower) + midpoint = (rhs + upper) / 2 + } + + return zeroBucket.Count * fraction, midpoint +} + func computeBucketTrim(b histogram.Bucket[float64], rhs float64, isUpperTrim, isPositive, isCustomBucket bool) (float64, float64) { if math.IsInf(b.Lower, -1) || math.IsInf(b.Upper, 1) { return handleInfinityBuckets(isUpperTrim, b, rhs) @@ -3249,6 +3288,7 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr updatedCount, updatedSum float64 trimmedBuckets bool isCustomBucket = trimmedHist.UsesCustomBuckets() + hasPositive, hasNegative bool ) if isUpperTrim { @@ -3260,6 +3300,7 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr if bucket.Count == 0 { continue } + hasPositive = true switch { case bucket.Upper <= rhs: @@ -3289,6 +3330,7 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr if bucket.Count == 0 { continue } + hasNegative = true switch { case bucket.Upper <= rhs: @@ -3319,6 +3361,7 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr if bucket.Count == 0 { continue } + hasPositive = true switch { case bucket.Lower >= rhs: @@ -3347,6 +3390,7 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr if bucket.Count == 0 { continue } + hasNegative = true switch { case bucket.Lower >= rhs: @@ -3373,14 +3417,13 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr // Handle the zero count bucket. 
if trimmedHist.ZeroCount > 0 { - keepCount := computeSplit(trimmedHist.ZeroBucket(), rhs, true, true) - if !isUpperTrim { - keepCount = trimmedHist.ZeroCount - keepCount - } + keepCount, bucketMidpoint := computeZeroBucketTrim(trimmedHist.ZeroBucket(), rhs, hasNegative, hasPositive, isUpperTrim) + if trimmedHist.ZeroCount != keepCount { trimmedHist.ZeroCount = keepCount trimmedBuckets = true } + updatedSum += bucketMidpoint * keepCount updatedCount += keepCount } diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 7072da8a88..fd9fbcc4d0 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1891,10 +1891,10 @@ eval instant at 1m h_test / 0 - h_test {{schema:0 sum:120.20815280171308 count:30.5 z_bucket:0.5 z_bucket_w:0.001 buckets:[2 4 8 16]}} + h_test {{schema:0 sum:120.20840280171308 count:30.5 z_bucket:0.5 z_bucket_w:0.001 buckets:[2 4 8 16]}} eval instant at 1m h_test / 0 - zero_bucket{} {{count:7.5 sum:5.656854249492381 z_bucket:2.5 z_bucket_w:0.01 buckets:[2 3]}} + zero_bucket{} {{count:7.5 sum:5.669354249492381 z_bucket:2.5 z_bucket_w:0.01 buckets:[2 3]}} load 1m From b3066144d15605d112458d11b7c0a6cc4ea7decb Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Tue, 10 Feb 2026 13:42:24 +0200 Subject: [PATCH 14/39] Add test cases trimming a biased zero bucket Signed-off-by: Linas Medziunas --- .../testdata/native_histograms.test | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index fd9fbcc4d0..6ddd71b044 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1934,6 +1934,52 @@ eval instant at 1m h_test / 0.5 + h_positive_buckets {{schema:0 count:10 sum:7.0710678118654755 z_bucket:0 z_bucket_w:0.5 buckets:[10]}} + +eval instant at 1m 
h_positive_buckets >/ 0.1 + h_positive_buckets {{schema:0 count:11.6 sum:7.551067811865476 z_bucket:1.6 z_bucket_w:0.5 buckets:[10]}} + +eval instant at 1m h_positive_buckets >/ 0 + h_positive_buckets {{schema:0 sum:8.0210678118654755 count:12 z_bucket:2 z_bucket_w:0.5 buckets:[10]}} + +eval instant at 1m h_positive_buckets / -0.5 + h_negative_buckets {{schema:0 count:2 sum:-0.5 z_bucket:2 z_bucket_w:0.5}} + +eval instant at 1m h_negative_buckets >/ -0.1 + h_negative_buckets {{schema:0 count:0.4 sum:-0.020000000000000004 z_bucket:0.4 z_bucket_w:0.5}} + +eval instant at 1m h_negative_buckets >/ 0 + h_negative_buckets {{schema:0 z_bucket_w:0.5}} + + load 1m cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} From 34f71ba96fde870e1980ba99c095452c39db4513 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Tue, 10 Feb 2026 14:32:30 +0200 Subject: [PATCH 15/39] Detect more noop trimmings Signed-off-by: Linas Medziunas --- promql/engine.go | 24 ++++++++++++------- .../testdata/native_histograms.test | 10 ++++---- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 67035c640a..6c0b7c807c 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3315,8 +3315,10 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr updatedCount += keepCount updatedSum += bucketMidpoint * keepCount - trimmedHist.PositiveBuckets[i] = keepCount - trimmedBuckets = true + if trimmedHist.PositiveBuckets[i] != keepCount { + trimmedHist.PositiveBuckets[i] = keepCount + trimmedBuckets = true + } default: // Bucket is entirely above the trim point - discard. 
@@ -3345,8 +3347,10 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr updatedCount += keepCount updatedSum += bucketMidpoint * keepCount - trimmedHist.NegativeBuckets[i] = keepCount - trimmedBuckets = true + if trimmedHist.NegativeBuckets[i] != keepCount { + trimmedHist.NegativeBuckets[i] = keepCount + trimmedBuckets = true + } default: trimmedHist.NegativeBuckets[i] = 0 @@ -3376,8 +3380,10 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr updatedCount += keepCount updatedSum += bucketMidpoint * keepCount - trimmedHist.PositiveBuckets[i] = keepCount - trimmedBuckets = true + if trimmedHist.PositiveBuckets[i] != keepCount { + trimmedHist.PositiveBuckets[i] = keepCount + trimmedBuckets = true + } default: trimmedHist.PositiveBuckets[i] = 0 @@ -3405,8 +3411,10 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr updatedCount += keepCount updatedSum += bucketMidpoint * keepCount - trimmedHist.NegativeBuckets[i] = keepCount - trimmedBuckets = true + if trimmedHist.NegativeBuckets[i] != keepCount { + trimmedHist.NegativeBuckets[i] = keepCount + trimmedBuckets = true + } default: trimmedHist.NegativeBuckets[i] = 0 diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 6ddd71b044..da763254a1 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -2016,9 +2016,9 @@ eval instant at 1m cbh / -Inf cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} -# Use trimming value (0) as a midpoint of lowest bucket when recomputing the sum. 
+# Noop eval instant at 1m cbh >/ 0 - cbh {{schema:-53 sum:167.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} + cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} eval instant at 1m cbh / -10.0 cbh_two_buckets_split_at_positive {{schema:-53 sum:500.0 count:100 custom_values:[5] buckets:[0 100]}} -# Keep both buckets (1 and 100). +# Noop. eval instant at 1m cbh_two_buckets_split_at_positive >/ 0.0 - cbh_two_buckets_split_at_positive {{schema:-53 sum:500.0 count:101 custom_values:[5] buckets:[1 100]}} + cbh_two_buckets_split_at_positive {{schema:-53 sum:33.0 count:101 custom_values:[5] buckets:[1 100]}} -# Keep (5, 100] bucket (100) and 3/5 of [-Inf, 5] bucket (0.6 * 1). +# Keep (5, +Inf] bucket (100) and 3/5 of [-Inf, 5] bucket (0.6 * 1). eval instant at 1m cbh_two_buckets_split_at_positive >/ 2.0 cbh_two_buckets_split_at_positive {{schema:-53 sum:500.6 count:100.6 custom_values:[5] buckets:[0.6 100]}} From 5c491877194ac624dc258a24f0ba4ef365acd91d Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Tue, 10 Feb 2026 14:37:13 +0200 Subject: [PATCH 16/39] Fix NHCB first bucket handling Signed-off-by: Linas Medziunas --- promql/engine.go | 4 ++-- promql/promqltest/testdata/native_histograms.test | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 6c0b7c807c..ab1f2996d8 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3159,7 +3159,7 @@ func handleInfinityBuckets(isUpperTrim bool, b histogram.Bucket[float64], rhs fl if rhs >= 0 && b.Upper > rhs && !math.IsInf(b.Upper, 1) { // If upper is finite and positive, we treat lower as 0 (despite it de facto being -Inf). // This is only possible with NHCB, so we can always use linear interpolation. 
- return b.Count * rhs / b.Upper, (rhs + b.Upper) / 2 + return b.Count * rhs / b.Upper, rhs / 2 } // Otherwise, we are targeting a valid trim, but as we don't know the exact distribution of values that belongs to an infinite bucket, we need to remove the entire bucket. return 0, zeroIfInf(b.Upper) @@ -3172,7 +3172,7 @@ func handleInfinityBuckets(isUpperTrim bool, b histogram.Bucket[float64], rhs fl if rhs >= 0 && b.Upper > rhs && !math.IsInf(b.Upper, 1) { // If upper is finite and positive, we treat lower as 0 (despite it de facto being -Inf). // This is only possible with NHCB, so we can always use linear interpolation. - return b.Count * (1 - rhs/b.Upper), rhs / 2 + return b.Count * (1 - rhs/b.Upper), (rhs + b.Upper) / 2 } // Otherwise, we are targeting a valid trim, but as we don't know the exact distribution of values that belongs to an infinite bucket, we need to remove the entire bucket. return 0, zeroIfInf(b.Upper) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index da763254a1..0e63e5b506 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -2114,7 +2114,7 @@ eval instant at 1m cbh_two_buckets_split_at_positive / 0.0 # Keep (5, +Inf] bucket (100) and 3/5 of [-Inf, 5] bucket (0.6 * 1). eval instant at 1m cbh_two_buckets_split_at_positive >/ 2.0 - cbh_two_buckets_split_at_positive {{schema:-53 sum:500.6 count:100.6 custom_values:[5] buckets:[0.6 100]}} + cbh_two_buckets_split_at_positive {{schema:-53 sum:502.1 count:100.6 custom_values:[5] buckets:[0.6 100]}} # Skip both buckets (1 and 100). 
eval instant at 1m cbh_two_buckets_split_at_positive >/ 10.0 From 0615b6af4f245afc94fef55717ce14fdf6fbe219 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Tue, 10 Feb 2026 16:02:01 +0200 Subject: [PATCH 17/39] Handle zero_bucket_only case (plus more tests) Signed-off-by: Linas Medziunas --- promql/engine.go | 4 +-- .../testdata/native_histograms.test | 35 +++++++++++++++++++ 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index ab1f2996d8..c762c1b6a5 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3233,10 +3233,10 @@ func computeZeroBucketTrim(zeroBucket histogram.Bucket[float64], rhs float64, ha lower = zeroBucket.Lower upper = zeroBucket.Upper ) - if !hasPositive { + if hasNegative && !hasPositive { upper = 0 } - if !hasNegative { + if hasPositive && !hasNegative { lower = 0 } diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 0e63e5b506..f356a18bea 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1980,6 +1980,41 @@ eval instant at 1m h_negative_buckets >/ 0 h_negative_buckets {{schema:0 z_bucket_w:0.5}} +# Exponential buckets: trim zero bucket when there are no other buckets. 
+load 1m + zero_bucket_only {{schema:0 count:5 sum:0 z_bucket:5 z_bucket_w:0.1 }} + +eval instant at 1m zero_bucket_only >/ 0.1 + zero_bucket_only {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.1 }} + +eval instant at 1m zero_bucket_only / 0.05 + zero_bucket_only {{schema:0 count:1.25 sum:0.09375000000000001 z_bucket:1.25 z_bucket_w:0.1 }} + +eval instant at 1m zero_bucket_only / 0 + zero_bucket_only {{schema:0 count:2.5 sum:0.125 z_bucket:2.5 z_bucket_w:0.1 }} + +eval instant at 1m zero_bucket_only / -0.05 + zero_bucket_only {{schema:0 count:3.75 sum:0.09375000000000001 z_bucket:3.75 z_bucket_w:0.1 }} + +eval instant at 1m zero_bucket_only / -0.1 + zero_bucket_only {{schema:0 count:5 sum:0 z_bucket:5 z_bucket_w:0.1 }} + + load 1m cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} From 538ad9613680f4553d4f7cf77812f36256161213 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Fri, 13 Feb 2026 15:39:27 +0200 Subject: [PATCH 18/39] Don't use extra digits in expected float values Signed-off-by: Linas Medziunas --- promql/promqltest/testdata/native_histograms.test | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index f356a18bea..d9eaf4e23f 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1951,7 +1951,7 @@ eval instant at 1m h_positive_buckets / -0.5 h_negative_buckets {{schema:0 count:2 sum:-0.5 z_bucket:2 z_bucket_w:0.5}} eval instant at 1m h_negative_buckets >/ -0.1 - h_negative_buckets {{schema:0 count:0.4 sum:-0.020000000000000004 z_bucket:0.4 z_bucket_w:0.5}} + h_negative_buckets {{schema:0 count:0.4 sum:-0.02 z_bucket:0.4 z_bucket_w:0.5}} eval instant at 1m h_negative_buckets >/ 0 h_negative_buckets {{schema:0 z_bucket_w:0.5}} @@ -1991,10 +1991,10 @@ eval instant at 1m zero_bucket_only / 0.05 - zero_bucket_only {{schema:0 
count:1.25 sum:0.09375000000000001 z_bucket:1.25 z_bucket_w:0.1 }} + zero_bucket_only {{schema:0 count:1.25 sum:0.09375 z_bucket:1.25 z_bucket_w:0.1 }} eval instant at 1m zero_bucket_only / 0 zero_bucket_only {{schema:0 count:2.5 sum:0.125 z_bucket:2.5 z_bucket_w:0.1 }} @@ -2003,10 +2003,10 @@ eval instant at 1m zero_bucket_only / -0.05 - zero_bucket_only {{schema:0 count:3.75 sum:0.09375000000000001 z_bucket:3.75 z_bucket_w:0.1 }} + zero_bucket_only {{schema:0 count:3.75 sum:0.09375 z_bucket:3.75 z_bucket_w:0.1 }} eval instant at 1m zero_bucket_only Date: Fri, 13 Feb 2026 15:39:42 +0200 Subject: [PATCH 19/39] Fix midpoint of NHCB 1st bucket Signed-off-by: Linas Medziunas --- promql/engine.go | 19 +++++++++++-------- .../testdata/native_histograms.test | 10 +++++----- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index c762c1b6a5..bb1536829e 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3444,21 +3444,24 @@ func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTr } } -func computeMidpoint(survivingIntervalBoundA, survivingIntervalBoundB float64, isPositive, isLinear bool) float64 { - if math.IsInf(survivingIntervalBoundA, 0) { - if math.IsInf(survivingIntervalBoundB, 0) { +func computeMidpoint(survivingIntervalLowerBound, survivingIntervalUpperBound float64, isPositive, isLinear bool) float64 { + if math.IsInf(survivingIntervalLowerBound, 0) { + if math.IsInf(survivingIntervalUpperBound, 0) { return 0 } - return survivingIntervalBoundB - } else if math.IsInf(survivingIntervalBoundB, 0) { - return survivingIntervalBoundA + if survivingIntervalUpperBound > 0 { + return survivingIntervalUpperBound / 2 + } + return survivingIntervalUpperBound + } else if math.IsInf(survivingIntervalUpperBound, 0) { + return survivingIntervalLowerBound } if isLinear { - return (survivingIntervalBoundA + survivingIntervalBoundB) / 2 + return (survivingIntervalLowerBound + survivingIntervalUpperBound) 
/ 2 } - geoMean := math.Sqrt(math.Abs(survivingIntervalBoundA * survivingIntervalBoundB)) + geoMean := math.Sqrt(math.Abs(survivingIntervalLowerBound * survivingIntervalUpperBound)) if isPositive { return geoMean diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index d9eaf4e23f..614521b3af 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -2020,24 +2020,24 @@ load 1m # Custom buckets: trim on bucket boundary without interpolation eval instant at 1m cbh / 15 cbh{} {{schema:-53 count:4 sum:72.5 custom_values:[5 10 15 20] offset:3 buckets:[3 1]}} # Custom buckets: trim uses linear interpolation if cutoff is inside a bucket eval instant at 1m cbh / 13 cbh{} {{schema:-53 count:5.6 sum:94.9 custom_values:[5 10 15 20] offset:2 buckets:[1.6 3 1]}} eval instant at 1m cbh Date: Fri, 13 Feb 2026 16:21:12 +0200 Subject: [PATCH 20/39] Fix for nhcb [-Inf; -x) bucket Signed-off-by: Linas Medziunas --- promql/engine.go | 5 ++++- promql/promqltest/testdata/native_histograms.test | 6 +++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index bb1536829e..c464d9baf0 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3156,11 +3156,14 @@ func handleInfinityBuckets(isUpperTrim bool, b histogram.Bucket[float64], rhs fl // As the rhs is greater than the upper bound, we keep the entire current bucket. return b.Count, 0 } - if rhs >= 0 && b.Upper > rhs && !math.IsInf(b.Upper, 1) { + if rhs > 0 && b.Upper > 0 && !math.IsInf(b.Upper, 1) { // If upper is finite and positive, we treat lower as 0 (despite it de facto being -Inf). // This is only possible with NHCB, so we can always use linear interpolation. 
return b.Count * rhs / b.Upper, rhs / 2 } + if b.Upper <= 0 { + return b.Count, rhs + } // Otherwise, we are targeting a valid trim, but as we don't know the exact distribution of values that belongs to an infinite bucket, we need to remove the entire bucket. return 0, zeroIfInf(b.Upper) } diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 614521b3af..07aabcfa94 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -2070,7 +2070,7 @@ eval instant at 1m cbh_has_neg / -10.0 @@ -2193,7 +2193,7 @@ eval instant at 1m cbh_two_buckets_split_at_negative / -10.0 From 86248a5cebf0c126948777a6308e4825d0855762 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Fri, 13 Feb 2026 16:43:25 +0200 Subject: [PATCH 21/39] Fix for nhcb +Inf bucket Signed-off-by: Linas Medziunas --- promql/engine.go | 11 +++++----- .../testdata/native_histograms.test | 22 +++++++++---------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index c464d9baf0..212af8db39 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3148,7 +3148,7 @@ func handleInfinityBuckets(isUpperTrim bool, b histogram.Bucket[float64], rhs fl return x } - // Case 1: Bucket with lower bound (-Inf, upper] + // Case 1: Bucket with lower bound -Inf. if math.IsInf(b.Lower, -1) { // TRIM_UPPER (/) - remove values less than rhs. - if rhs <= b.Lower { - // rhs <= lower: all values in this bucket are >= lower >= rhs, so we keep the entire bucket. - return b.Count, 0 + if rhs >= b.Lower { + return b.Count, rhs } // lower < rhs: we are inside the infinity bucket, but as we don't know the exact distribution of values, we conservatively remove the entire bucket. 
return 0, zeroIfInf(b.Lower) } - panic(fmt.Errorf("one of the buckets must be infinite for handleInfinityBuckets, got %v", b)) + panic(fmt.Errorf("one of the bounds must be infinite for handleInfinityBuckets, got %v", b)) } // computeSplit calculates the portion of the bucket's count <= rhs (trim point). diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 07aabcfa94..b54aedf4ad 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -2135,9 +2135,9 @@ eval instant at 1m cbh_two_buckets_split_at_zero >/ -10.0 eval instant at 1m cbh_two_buckets_split_at_zero >/ 0.0 cbh_two_buckets_split_at_zero {{schema:-53 sum:0.0 count:100 custom_values:[0] buckets:[0 100]}} -# Skip both buckets (1 and 100). +# Skip first bucket. eval instant at 1m cbh_two_buckets_split_at_zero >/ 10.0 - cbh_two_buckets_split_at_zero {{schema:-53 sum:0.0 count:0 custom_values:[0] buckets:[0 0]}} + cbh_two_buckets_split_at_zero {{schema:-53 sum:1000.0 count:100 custom_values:[0] buckets:[0 100]}} load 1m @@ -2171,9 +2171,9 @@ eval instant at 1m cbh_two_buckets_split_at_positive >/ 0.0 eval instant at 1m cbh_two_buckets_split_at_positive >/ 2.0 cbh_two_buckets_split_at_positive {{schema:-53 sum:502.1 count:100.6 custom_values:[5] buckets:[0.6 100]}} -# Skip both buckets (1 and 100). +# Skip first bucket. eval instant at 1m cbh_two_buckets_split_at_positive >/ 10.0 - cbh_two_buckets_split_at_positive {{schema:-53 sum:0.0 count:0 custom_values:[5] buckets:[0 0]}} + cbh_two_buckets_split_at_positive {{schema:-53 sum:1000.0 count:100 custom_values:[5] buckets:[0 100]}} load 1m @@ -2191,7 +2191,7 @@ eval instant at 1m cbh_two_buckets_split_at_negative / -10.0 cbh_two_buckets_split_at_negative {{schema:-53 sum:-500 count:100 custom_values:[-5] buckets:[0 100]}} -# Skip both buckets (1 and 100). +# Skip [-Inf, -5] bucket (1). 
eval instant at 1m cbh_two_buckets_split_at_negative >/ -2.0 - cbh_two_buckets_split_at_negative {{schema:-53 custom_values:[-5]}} + cbh_two_buckets_split_at_negative {{schema:-53 sum:-200 count:100 custom_values:[-5] buckets:[0 100]}} -# Skip both buckets (1 and 100). +# Skip [-Inf, -5] bucket (1). eval instant at 1m cbh_two_buckets_split_at_negative >/ 0.0 - cbh_two_buckets_split_at_negative {{schema:-53 custom_values:[-5]}} + cbh_two_buckets_split_at_negative {{schema:-53 sum:0.0 count:100 custom_values:[-5] buckets:[0 100]}} -# Skip both buckets (1 and 100). +# Skip [-Inf, -5] bucket (1). eval instant at 1m cbh_two_buckets_split_at_negative >/ 10.0 - cbh_two_buckets_split_at_negative {{schema:-53 sum:0.0 count:0 custom_values:[-5] buckets:[0 0]}} + cbh_two_buckets_split_at_negative {{schema:-53 sum:1000.0 count:100 custom_values:[-5] buckets:[0 100]}} clear From a448a5cfbf492b713356604982e6b28aec506983 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Tue, 17 Feb 2026 13:55:34 +0200 Subject: [PATCH 22/39] Use matching.Card == parser.CardOneToOne for slice selection Signed-off-by: Linas Medziunas --- promql/engine.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 1c789b1a6a..02b63649d1 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3048,8 +3048,8 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * for i, rs := range rhs { sigOrd := rhsh[i].sigOrdinal - if (len(matchedSigsPresent) > 0 && matchedSigsPresent[sigOrd]) || - (len(matchedSigs) > 0 && matchedSigs[sigOrd] != nil) { + if (matching.Card == parser.CardOneToOne && matchedSigsPresent[sigOrd]) || + (matching.Card != parser.CardOneToOne && matchedSigs[sigOrd] != nil) { continue // Already matched. 
} ls := Sample{ From c753252028d2e9642271d9b80727b6ae6cf94ffb Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Wed, 18 Feb 2026 15:38:27 +0200 Subject: [PATCH 23/39] Update/fix test comments Signed-off-by: Linas Medziunas --- .../promqltest/testdata/native_histograms.test | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index b54aedf4ad..efa656bd63 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1920,7 +1920,7 @@ eval instant at 1m h_test_2 >/ -1.3 eval instant at 1m h_test_2 / 10.0 load 1m cbh_two_buckets_split_at_positive {{schema:-53 sum:33 count:101 custom_values:[5] buckets:[1 100]}} -# Skip (5; +Inf] bucket (100). +# Skip (5, +Inf] bucket (100). eval instant at 1m cbh_two_buckets_split_at_positive / -10.0 cbh_two_buckets_split_at_positive {{schema:-53 sum:500.0 count:100 custom_values:[5] buckets:[0 100]}} @@ -2167,7 +2167,7 @@ eval instant at 1m cbh_two_buckets_split_at_positive >/ -10.0 eval instant at 1m cbh_two_buckets_split_at_positive >/ 0.0 cbh_two_buckets_split_at_positive {{schema:-53 sum:33.0 count:101 custom_values:[5] buckets:[1 100]}} -# Keep (5, +Inf] bucket (100) and 3/5 of [-Inf, 5] bucket (0.6 * 1). +# Keep (5, +Inf] bucket (100) and 3/5 of [0, 5] bucket (0.6 * 3.5). eval instant at 1m cbh_two_buckets_split_at_positive >/ 2.0 cbh_two_buckets_split_at_positive {{schema:-53 sum:502.1 count:100.6 custom_values:[5] buckets:[0.6 100]}} @@ -2179,11 +2179,11 @@ eval instant at 1m cbh_two_buckets_split_at_positive >/ 10.0 load 1m cbh_two_buckets_split_at_negative {{schema:-53 sum:33 count:101 custom_values:[-5] buckets:[1 100]}} -# Skip (-5; +Inf] bucket (100). +# Skip (-5, +Inf] bucket (100). 
eval instant at 1m cbh_two_buckets_split_at_negative Date: Wed, 18 Feb 2026 15:49:23 +0200 Subject: [PATCH 24/39] Add test case with join Signed-off-by: Linas Medziunas --- promql/promqltest/testdata/native_histograms.test | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index efa656bd63..b8e45d7980 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -2212,4 +2212,16 @@ eval instant at 1m cbh_two_buckets_split_at_negative >/ 10.0 cbh_two_buckets_split_at_negative {{schema:-53 sum:1000.0 count:100 custom_values:[-5] buckets:[0 100]}} +load 1m + cbh_for_join{label="a"} {{schema:-53 sum:33 count:101 custom_values:[5] buckets:[1 100]}} + cbh_for_join{label="b"} {{schema:-53 sum:66 count:202 custom_values:[5] buckets:[2 200]}} + cbh_for_join{label="c"} {{schema:-53 sum:99 count:303 custom_values:[5] buckets:[3 300]}} + float_for_join{label="a"} 1 + float_for_join{label="b"} 4 + +eval instant at 1m cbh_for_join >/ on (label) float_for_join + {label="a"} {{schema:-53 count:100.8 sum:502.4 custom_values:[5] buckets:[0.8 100]}} + {label="b"} {{schema:-53 count:200.4 sum:1001.8 custom_values:[5] buckets:[0.4 200]}} + + clear From fbb706b886fb6dbe366091afc6192fba19002d04 Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Fri, 20 Feb 2026 15:13:12 +0200 Subject: [PATCH 25/39] Documentation Signed-off-by: Linas Medziunas --- docs/querying/operators.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/docs/querying/operators.md b/docs/querying/operators.md index b15c02aedc..f822709c7c 100644 --- a/docs/querying/operators.md +++ b/docs/querying/operators.md @@ -126,6 +126,25 @@ samples. Operations involving histogram samples result in the removal of the corresponding vector elements from the output vector, flagged by an info-level annotation. 
+### Histogram trim operators + +The following binary histogram trim operators exist in Prometheus: + +* `/` (trim lower): removes all observations below a threshold value + +Histogram trim operators are defined between vector/scalar and vector/vector value pairs, +where the left-hand side is a native histogram (either exponential or NHCB), +and the right-hand side is a float threshold value. + +If the threshold value does not align with one of the bucket boundaries of the histogram, +either linear (for NHCB and the zero bucket of an exponential histogram) or exponential (for the non-zero +buckets of an exponential histogram) interpolation is applied to compute the estimated count +of observations that remain in the bucket containing the threshold. + +When some observations are trimmed, the new sum of observation values is recomputed +(approximately) from the remaining observations. + ### Comparison binary operators The following binary comparison operators exist in Prometheus: From 454a19ec19fd88fe10a559a27f6fc7fd046ef5a3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 23 Feb 2026 10:49:17 +0100 Subject: [PATCH 26/39] fix(deps): update module google.golang.org/grpc to v1.79.1 (#18166) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 21e477e952..803cf2b926 100644 --- a/go.mod +++ b/go.mod @@ -97,7 +97,7 @@ require ( golang.org/x/text v0.34.0 google.golang.org/api v0.267.0 google.golang.org/genproto/googleapis/api v0.0.0-20260217215200-42d3e9bedb6d - google.golang.org/grpc v1.78.0 + google.golang.org/grpc v1.79.1 google.golang.org/protobuf v1.36.11 k8s.io/api v0.35.1 k8s.io/apimachinery v0.35.1 @@ -151,7 +151,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect - github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e // indirect + github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect diff --git a/go.sum b/go.sum index 8683d3efe3..e288929bea 100644 --- a/go.sum +++ b/go.sum @@ -115,8 +115,8 @@ github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObk github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e h1:gt7U1Igw0xbJdyaCM5H2CnlAlPSkzrhsebQB6WQWjLA= -github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= @@ -776,8 +776,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20260217215200-42d3e9bedb6d h1: google.golang.org/genproto/googleapis/api v0.0.0-20260217215200-42d3e9bedb6d/go.mod h1:48U2I+QQUYhsFrg2SY6r+nJzeOtjey7j//WBESw+qyQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac= google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod 
h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= -google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= -google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= +google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= +google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= From 789f22b9319ac7a45b4c5f0b97767edae5454103 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 23 Feb 2026 10:49:49 +0100 Subject: [PATCH 27/39] fix(deps): update module github.com/prometheus/prometheus to v0.309.1 (#18161) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- web/ui/mantine-ui/src/promql/tools/go.mod | 3 +-- web/ui/mantine-ui/src/promql/tools/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/web/ui/mantine-ui/src/promql/tools/go.mod b/web/ui/mantine-ui/src/promql/tools/go.mod index 693b168206..af604b1964 100644 --- a/web/ui/mantine-ui/src/promql/tools/go.mod +++ b/web/ui/mantine-ui/src/promql/tools/go.mod @@ -4,13 +4,12 @@ go 1.25.5 require ( github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 - github.com/prometheus/prometheus v0.308.1 + github.com/prometheus/prometheus v0.309.1 github.com/russross/blackfriday/v2 v2.1.0 ) require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/dennwc/varint v1.0.0 // indirect diff --git a/web/ui/mantine-ui/src/promql/tools/go.sum 
b/web/ui/mantine-ui/src/promql/tools/go.sum index 0e069f5a8c..6f43b2da98 100644 --- a/web/ui/mantine-ui/src/promql/tools/go.sum +++ b/web/ui/mantine-ui/src/promql/tools/go.sum @@ -108,8 +108,8 @@ github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEo github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= -github.com/prometheus/prometheus v0.308.1 h1:ApMNI/3/es3Ze90Z7CMb+wwU2BsSYur0m5VKeqHj7h4= -github.com/prometheus/prometheus v0.308.1/go.mod h1:aHjYCDz9zKRyoUXvMWvu13K9XHOkBB12XrEqibs3e0A= +github.com/prometheus/prometheus v0.309.1 h1:jutK6eCYDpWdPTUbVbkcQsNCMO9CCkSwjQRMLds4jSo= +github.com/prometheus/prometheus v0.309.1/go.mod h1:d+dOGiVhuNDa4MaFXHVdnUBy/CzqlcNTooR8oM1wdTU= github.com/prometheus/sigv4 v0.4.1 h1:EIc3j+8NBea9u1iV6O5ZAN8uvPq2xOIUPcqCTivHuXs= github.com/prometheus/sigv4 v0.4.1/go.mod h1:eu+ZbRvsc5TPiHwqh77OWuCnWK73IdkETYY46P4dXOU= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= From 0a6fbefaf4082f3a638651b93a29373da680740f Mon Sep 17 00:00:00 2001 From: Julien Pivotto <291750+roidelapluie@users.noreply.github.com> Date: Fri, 6 Feb 2026 12:57:43 +0100 Subject: [PATCH 28/39] docs: Add AWS external_id support in sigv4 configuration This adds documentation for the external_id parameter when assuming AWS roles in the sigv4 configuration. 
Signed-off-by: Julien Pivotto <291750+roidelapluie@users.noreply.github.com> --- docs/configuration/configuration.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 1f2f9931e8..e6621f43d1 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -3125,6 +3125,10 @@ sigv4: # AWS Role ARN, an alternative to using AWS API keys. [ role_arn: ] + # AWS External ID used when assuming a role. + # Can only be used with role_arn. + [ external_id: ] + # Defines the FIPS mode for the AWS STS endpoint. # Requires Prometheus >= 2.54.0 # Note: FIPS STS selection should be configured via use_fips_sts_endpoint rather than environment variables. (The problem report that motivated this: AWS_USE_FIPS_ENDPOINT no longer works.) @@ -3336,6 +3340,10 @@ sigv4: # AWS Role ARN, an alternative to using AWS API keys. [ role_arn: ] + # AWS External ID used when assuming a role. + # Can only be used with role_arn. + [ external_id: ] + # Defines the FIPS mode for the AWS STS endpoint. # Requires Prometheus >= 2.54.0 # Note: FIPS STS selection should be configured via use_fips_sts_endpoint rather than environment variables. (The problem report that motivated this: AWS_USE_FIPS_ENDPOINT no longer works.) From 3ab867b66a9d0892ade775380b84c4f69ef84728 Mon Sep 17 00:00:00 2001 From: Julien Pivotto <291750+roidelapluie@users.noreply.github.com> Date: Fri, 20 Feb 2026 17:23:18 +0100 Subject: [PATCH 29/39] scrape: Fix race condition in scrapeFailureLogger access Remove the separate scrapeFailureLoggerMtx and use targetMtx instead for synchronizing access to scrapeFailureLogger. This fixes a data race where Sync() would read scrapeFailureLogger while holding targetMtx but SetScrapeFailureLogger() would write to it while holding a different mutex. Add regression test to catch concurrent access issues. 
Signed-off-by: Julien Pivotto <291750+roidelapluie@users.noreply.github.com> --- scrape/scrape.go | 19 +++++----------- scrape/scrape_test.go | 52 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 14 deletions(-) diff --git a/scrape/scrape.go b/scrape/scrape.go index d5a9ba72b4..b4d6907815 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -105,6 +105,7 @@ type scrapePool struct { activeTargets map[uint64]*Target droppedTargets []*Target // Subject to KeepDroppedTargets limit. droppedTargetsCount int // Count of all dropped targets. + scrapeFailureLogger FailureLogger // newLoop injection for testing purposes. injectTestNewLoop func(scrapeLoopOptions) loop @@ -112,9 +113,6 @@ type scrapePool struct { metrics *scrapeMetrics buffers *pool.Pool offsetSeed uint64 - - scrapeFailureLogger FailureLogger - scrapeFailureLoggerMtx sync.RWMutex } type labelLimits struct { @@ -224,26 +222,18 @@ func (sp *scrapePool) DroppedTargetsCount() int { } func (sp *scrapePool) SetScrapeFailureLogger(l FailureLogger) { - sp.scrapeFailureLoggerMtx.Lock() - defer sp.scrapeFailureLoggerMtx.Unlock() + sp.targetMtx.Lock() + defer sp.targetMtx.Unlock() if l != nil { l = slog.New(l).With("job_name", sp.config.JobName).Handler().(FailureLogger) } sp.scrapeFailureLogger = l - sp.targetMtx.Lock() - defer sp.targetMtx.Unlock() for _, s := range sp.loops { s.setScrapeFailureLogger(sp.scrapeFailureLogger) } } -func (sp *scrapePool) getScrapeFailureLogger() FailureLogger { - sp.scrapeFailureLoggerMtx.RLock() - defer sp.scrapeFailureLoggerMtx.RUnlock() - return sp.scrapeFailureLogger -} - // stop terminates all scrape loops and returns after they all terminated. 
func (sp *scrapePool) stop() { sp.mtx.Lock() @@ -323,6 +313,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { sp.targetMtx.Lock() forcedErr := sp.refreshTargetLimitErr() + scrapeFailureLogger := sp.scrapeFailureLogger for fp, oldLoop := range sp.loops { var cache *scrapeCache if oc := oldLoop.getCache(); reuseCache && oc != nil { @@ -364,7 +355,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { wg.Done() newLoop.setForcedError(forcedErr) - newLoop.setScrapeFailureLogger(sp.getScrapeFailureLogger()) + newLoop.setScrapeFailureLogger(scrapeFailureLogger) newLoop.run(nil) }(oldLoop, newLoop) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index cab2b2918a..7b0cd022dd 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "io" + "log/slog" "maps" "math" "net/http" @@ -6734,3 +6735,54 @@ func testDropsSeriesFromMetricRelabeling(t *testing.T, appV2 bool) { require.NoError(t, app.Commit()) } + +// noopFailureLogger is a minimal FailureLogger implementation for testing. +type noopFailureLogger struct{} + +func (noopFailureLogger) Enabled(context.Context, slog.Level) bool { return true } +func (noopFailureLogger) Handle(context.Context, slog.Record) error { return nil } +func (noopFailureLogger) WithAttrs([]slog.Attr) slog.Handler { return noopFailureLogger{} } +func (noopFailureLogger) WithGroup(string) slog.Handler { return noopFailureLogger{} } +func (noopFailureLogger) Close() error { return nil } + +// TestScrapePoolSetScrapeFailureLoggerRace is a regression test for concurrent +// access to scrapeFailureLogger. Both must use targetMtx for synchronization. 
+func TestScrapePoolSetScrapeFailureLoggerRace(t *testing.T) { + var ( + app = teststorage.NewAppendable() + cfg = &config.ScrapeConfig{ + JobName: "test", + ScrapeInterval: model.Duration(100 * time.Millisecond), + ScrapeTimeout: model.Duration(50 * time.Millisecond), + MetricNameValidationScheme: model.UTF8Validation, + MetricNameEscapingScheme: model.AllowUTF8, + } + sp, err = newScrapePool(cfg, app, nil, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) + ) + require.NoError(t, err) + defer sp.stop() + + // Create a target group with a target. + tg := &targetgroup.Group{ + Targets: []model.LabelSet{ + {model.AddressLabel: "127.0.0.1:9090"}, + }, + } + + var wg sync.WaitGroup + + wg.Go(func() { + for range 100 { + sp.SetScrapeFailureLogger(noopFailureLogger{}) + sp.SetScrapeFailureLogger(nil) + } + }) + + wg.Go(func() { + for range 100 { + sp.Sync([]*targetgroup.Group{tg}) + } + }) + + wg.Wait() +} From 8f1f1f31461d3f86386985ef309c8c5b2213b3bb Mon Sep 17 00:00:00 2001 From: Ben Kochie Date: Mon, 23 Feb 2026 15:25:09 +0100 Subject: [PATCH 30/39] Update CI (#18173) * Bump promu to latest release. * Update actions/checkout. * Update Go to 1.26.x for golangci-lint in synced repos. * Improve golangci-lint push filter for synced repos. 
Signed-off-by: SuperQ --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- .github/workflows/check_release_notes.yml | 2 +- .github/workflows/ci.yml | 28 ++++++++++----------- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/container_description.yml | 4 +-- .github/workflows/fuzzing.yml | 2 +- .github/workflows/repo_sync.yml | 2 +- .github/workflows/scorecards.yml | 2 +- Makefile.common | 2 +- scripts/golangci-lint.yml | 6 +++-- 11 files changed, 28 insertions(+), 26 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 7b835b36f8..b5d6ad3864 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -12,7 +12,7 @@ jobs: name: lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 5de3c133b9..58a4c87b96 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest if: github.repository_owner == 'prometheus' steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0 diff --git a/.github/workflows/check_release_notes.yml b/.github/workflows/check_release_notes.yml index 171af5f213..cfc4264602 100644 --- a/.github/workflows/check_release_notes.yml +++ b/.github/workflows/check_release_notes.yml @@ -20,7 +20,7 @@ jobs: # Don't run it on dependabot PRs either as humans would take control in case a bump introduces a breaking change. 
if: (github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community') && github.event.pull_request.user.login != 'dependabot[bot]' steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - env: PR_DESCRIPTION: ${{ github.event.pull_request.body }} run: | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b09b65619e..2482055fa2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,7 +18,7 @@ jobs: # should also be updated. image: quay.io/prometheus/golang-builder:1.25-base steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4 @@ -36,7 +36,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.25-base steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4 @@ -61,7 +61,7 @@ jobs: # The go version in this image should be N-1 wrt test_go. 
image: quay.io/prometheus/golang-builder:1.25-base steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - run: make build @@ -80,7 +80,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.25-base steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4 @@ -99,7 +99,7 @@ jobs: name: Go tests on Windows runs-on: windows-latest steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0 @@ -118,7 +118,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.25-base steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - run: go install ./cmd/promtool/. @@ -138,7 +138,7 @@ jobs: # should also be updated. 
image: quay.io/prometheus/golang-builder:1.25-base steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4 @@ -166,7 +166,7 @@ jobs: matrix: thread: [ 0, 1, 2 ] steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4 @@ -193,7 +193,7 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4 @@ -232,7 +232,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.25-base steps: - name: Checkout repository - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4 @@ -246,7 +246,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - name: Install Go @@ -296,7 +296,7 @@ jobs: needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all] if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: 
actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4 @@ -315,7 +315,7 @@ jobs: || (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.')) steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4 @@ -332,7 +332,7 @@ jobs: needs: [test_ui, codeql] steps: - name: Checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index ac636c1797..21539d15d4 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -24,7 +24,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false diff --git a/.github/workflows/container_description.yml b/.github/workflows/container_description.yml index 7b46e9532f..d7b879f9c7 100644 --- a/.github/workflows/container_description.yml +++ b/.github/workflows/container_description.yml @@ -18,7 +18,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. 
steps: - name: git checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - name: Set docker hub repo name @@ -42,7 +42,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. steps: - name: git checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - name: Set quay.io org name diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml index 0afcbe6f0c..f19e0ba609 100644 --- a/.github/workflows/fuzzing.yml +++ b/.github/workflows/fuzzing.yml @@ -13,7 +13,7 @@ jobs: fuzz_test: [FuzzParseMetricText, FuzzParseOpenMetric, FuzzParseMetricSelector, FuzzParseExpr] steps: - name: Checkout repository - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - name: Install Go diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml index afc589c6d7..9752c98b51 100644 --- a/.github/workflows/repo_sync.yml +++ b/.github/workflows/repo_sync.yml @@ -14,7 +14,7 @@ jobs: container: image: quay.io/prometheus/golang-builder steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - run: ./scripts/sync_repo_files.sh diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 242f0a8ae8..b54be91620 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -21,7 +21,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # 
tag=v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false diff --git a/Makefile.common b/Makefile.common index 18f20f79ab..d19d390d37 100644 --- a/Makefile.common +++ b/Makefile.common @@ -55,7 +55,7 @@ ifneq ($(shell command -v gotestsum 2> /dev/null),) endif endif -PROMU_VERSION ?= 0.17.0 +PROMU_VERSION ?= 0.18.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 16467b897e..dc8ffd02d9 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -3,6 +3,7 @@ name: golangci-lint on: push: + branches: [main, master, 'release-*'] paths: - "go.sum" - "go.mod" @@ -10,6 +11,7 @@ on: - "scripts/errcheck_excludes.txt" - ".github/workflows/golangci-lint.yml" - ".golangci.yml" + tags: ['v*'] pull_request: permissions: # added using https://github.com/step-security/secure-repo @@ -24,13 +26,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - name: Install Go uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0 with: - go-version: 1.25.x + go-version: 1.26.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' From 1a5da4fbe0b894b033ee909b17508368fd6d18a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Duboc?= Date: Tue, 24 Feb 2026 12:12:57 +0100 Subject: [PATCH 31/39] fix(discovery): apply EC2 SD endpoint and guard refreshAZIDs nils (#18133) Signed-off-by: Aurelien Duboc --- discovery/aws/ec2.go | 14 +++++++++++++- discovery/aws/lightsail.go | 7 ++++++- 2 files changed, 19 
insertions(+), 2 deletions(-) diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index 4daff43ecc..48ab411d72 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -224,7 +224,12 @@ func (d *EC2Discovery) ec2Client(ctx context.Context) (ec2Client, error) { cfg.Credentials = aws.NewCredentialsCache(assumeProvider) } - d.ec2 = ec2.NewFromConfig(cfg) + d.ec2 = ec2.NewFromConfig(cfg, func(options *ec2.Options) { + if d.cfg.Endpoint != "" { + options.BaseEndpoint = &d.cfg.Endpoint + } + options.HTTPClient = httpClient + }) return d.ec2, nil } @@ -234,8 +239,15 @@ func (d *EC2Discovery) refreshAZIDs(ctx context.Context) error { if err != nil { return err } + if azs.AvailabilityZones == nil { + d.azToAZID = make(map[string]string) + return nil + } d.azToAZID = make(map[string]string, len(azs.AvailabilityZones)) for _, az := range azs.AvailabilityZones { + if az.ZoneName == nil || az.ZoneId == nil { + continue + } d.azToAZID[*az.ZoneName] = *az.ZoneId } return nil diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go index 69a5b6625f..39e4716957 100644 --- a/discovery/aws/lightsail.go +++ b/discovery/aws/lightsail.go @@ -188,7 +188,12 @@ func (d *LightsailDiscovery) lightsailClient(ctx context.Context) (*lightsail.Cl cfg.Credentials = aws.NewCredentialsCache(assumeProvider) } - d.lightsail = lightsail.NewFromConfig(cfg) + d.lightsail = lightsail.NewFromConfig(cfg, func(options *lightsail.Options) { + if d.cfg.Endpoint != "" { + options.BaseEndpoint = &d.cfg.Endpoint + } + options.HTTPClient = httpClient + }) return d.lightsail, nil } From f312fde4a2c022e1117020f8e3cd62d5232b3d71 Mon Sep 17 00:00:00 2001 From: harsh kumar <135993950+hxrshxz@users.noreply.github.com> Date: Tue, 24 Feb 2026 17:07:20 +0530 Subject: [PATCH 32/39] test: Enable parallel execution for chunk write queue tests (#17338) * test(tsdb): Enable parallel execution for chunk write queue tests Signed-off-by: Harsh --- tsdb/chunks/chunk_write_queue_test.go | 4 ++++ 
tsdb/chunks/chunks_test.go | 2 ++ tsdb/chunks/head_chunks_test.go | 6 ++++++ tsdb/chunks/queue_test.go | 7 +++++++ 4 files changed, 19 insertions(+) diff --git a/tsdb/chunks/chunk_write_queue_test.go b/tsdb/chunks/chunk_write_queue_test.go index 489ff74210..4aab80e92b 100644 --- a/tsdb/chunks/chunk_write_queue_test.go +++ b/tsdb/chunks/chunk_write_queue_test.go @@ -27,6 +27,7 @@ import ( ) func TestChunkWriteQueue_GettingChunkFromQueue(t *testing.T) { + t.Parallel() var blockWriterWg sync.WaitGroup blockWriterWg.Add(1) @@ -55,6 +56,7 @@ func TestChunkWriteQueue_GettingChunkFromQueue(t *testing.T) { } func TestChunkWriteQueue_WritingThroughQueue(t *testing.T) { + t.Parallel() var ( gotSeriesRef HeadSeriesRef gotMint, gotMaxt int64 @@ -97,6 +99,7 @@ func TestChunkWriteQueue_WritingThroughQueue(t *testing.T) { } func TestChunkWriteQueue_WrappingAroundSizeLimit(t *testing.T) { + t.Parallel() sizeLimit := 100 unblockChunkWriterCh := make(chan struct{}, sizeLimit) @@ -183,6 +186,7 @@ func TestChunkWriteQueue_WrappingAroundSizeLimit(t *testing.T) { } func TestChunkWriteQueue_HandlerErrorViaCallback(t *testing.T) { + t.Parallel() testError := errors.New("test error") chunkWriter := func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool, bool) error { return testError diff --git a/tsdb/chunks/chunks_test.go b/tsdb/chunks/chunks_test.go index f40f996fde..db45fdf712 100644 --- a/tsdb/chunks/chunks_test.go +++ b/tsdb/chunks/chunks_test.go @@ -23,6 +23,7 @@ import ( ) func TestReaderWithInvalidBuffer(t *testing.T) { + t.Parallel() b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81}) r := &Reader{bs: []ByteSlice{b}} @@ -31,6 +32,7 @@ func TestReaderWithInvalidBuffer(t *testing.T) { } func TestWriterWithDefaultSegmentSize(t *testing.T) { + t.Parallel() chk1, err := ChunkFromSamples([]Sample{ sample{t: 10, f: 11}, sample{t: 20, f: 12}, diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index c3cbc5a618..f2d0960503 100644 
--- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -45,6 +45,7 @@ func TestMain(m *testing.M) { } func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) { + t.Parallel() hrw := createChunkDiskMapper(t, "") defer func() { require.NoError(t, hrw.Close()) @@ -181,6 +182,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) { // * The active file is not deleted even if the passed time makes it eligible to be deleted. // * Non-empty current file leads to creation of another file after truncation. func TestChunkDiskMapper_Truncate(t *testing.T) { + t.Parallel() hrw := createChunkDiskMapper(t, "") defer func() { require.NoError(t, hrw.Close()) @@ -275,6 +277,7 @@ func TestChunkDiskMapper_Truncate(t *testing.T) { // This test exposes https://github.com/prometheus/prometheus/issues/7412 where the truncation // simply deleted all empty files instead of stopping once it encountered a non-empty file. func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) { + t.Parallel() hrw := createChunkDiskMapper(t, "") defer func() { require.NoError(t, hrw.Close()) @@ -359,6 +362,7 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) { } func TestChunkDiskMapper_Truncate_WriteQueueRaceCondition(t *testing.T) { + t.Parallel() hrw := createChunkDiskMapper(t, "") t.Cleanup(func() { require.NoError(t, hrw.Close()) @@ -411,6 +415,7 @@ func TestChunkDiskMapper_Truncate_WriteQueueRaceCondition(t *testing.T) { // TestHeadReadWriter_TruncateAfterFailedIterateChunks tests for // https://github.com/prometheus/prometheus/issues/7753 func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) { + t.Parallel() hrw := createChunkDiskMapper(t, "") defer func() { require.NoError(t, hrw.Close()) @@ -442,6 +447,7 @@ func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) { } func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) { + t.Parallel() hrw := createChunkDiskMapper(t, 
"") timeRange := 0 diff --git a/tsdb/chunks/queue_test.go b/tsdb/chunks/queue_test.go index 2e3fff59a8..13558fcc16 100644 --- a/tsdb/chunks/queue_test.go +++ b/tsdb/chunks/queue_test.go @@ -62,6 +62,7 @@ func (q *writeJobQueue) assertInvariants(t *testing.T) { } func TestQueuePushPopSingleGoroutine(t *testing.T) { + t.Parallel() seed := time.Now().UnixNano() t.Log("seed:", seed) r := rand.New(rand.NewSource(seed)) @@ -115,6 +116,7 @@ func TestQueuePushPopSingleGoroutine(t *testing.T) { } func TestQueuePushBlocksOnFullQueue(t *testing.T) { + t.Parallel() queue := newWriteJobQueue(5, 5) pushTime := make(chan time.Time) @@ -152,6 +154,7 @@ func TestQueuePushBlocksOnFullQueue(t *testing.T) { } func TestQueuePopBlocksOnEmptyQueue(t *testing.T) { + t.Parallel() queue := newWriteJobQueue(5, 5) popTime := make(chan time.Time) @@ -192,6 +195,7 @@ func TestQueuePopBlocksOnEmptyQueue(t *testing.T) { } func TestQueuePopUnblocksOnClose(t *testing.T) { + t.Parallel() queue := newWriteJobQueue(5, 5) popTime := make(chan time.Time) @@ -231,6 +235,7 @@ func TestQueuePopUnblocksOnClose(t *testing.T) { } func TestQueuePopAfterCloseReturnsAllElements(t *testing.T) { + t.Parallel() const count = 10 queue := newWriteJobQueue(count, count) @@ -257,6 +262,7 @@ func TestQueuePopAfterCloseReturnsAllElements(t *testing.T) { } func TestQueuePushPopManyGoroutines(t *testing.T) { + t.Parallel() const readGoroutines = 5 const writeGoroutines = 10 const writes = 500 @@ -303,6 +309,7 @@ func TestQueuePushPopManyGoroutines(t *testing.T) { } func TestQueueSegmentIsKeptEvenIfEmpty(t *testing.T) { + t.Parallel() queue := newWriteJobQueue(1024, 64) require.True(t, queue.push(chunkWriteJob{seriesRef: 1})) From 7a9c0577272dc1e8a24fae68af730aa61807d6cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?At=C4=B1l=20Sensalduz?= <44255923+atilsensalduz@users.noreply.github.com> Date: Tue, 24 Feb 2026 15:25:00 +0300 Subject: [PATCH 33/39] Merge pull request #18180 from atilsensalduz/bugfix/tsdb-writeMetaFile-fd-leak 
fix(tsdb): close file descriptor on json.MarshalIndent failure in writeMetaFile --- tsdb/block.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/block.go b/tsdb/block.go index 118dd672ef..12d87f4ca3 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -291,7 +291,7 @@ func writeMetaFile(logger *slog.Logger, dir string, meta *BlockMeta) (int64, err jsonMeta, err := json.MarshalIndent(meta, "", "\t") if err != nil { - return 0, err + return 0, errors.Join(err, f.Close()) } n, err := f.Write(jsonMeta) From 696679e50c43339c255c5cc79bdd7b39d80a6a29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=B4me=20LOYET?= <822436+fatpat@users.noreply.github.com> Date: Wed, 19 Apr 2023 15:15:02 +0200 Subject: [PATCH 34/39] Add `storage.tsdb.retention.percentage` config MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jérôme LOYET <822436+fatpat@users.noreply.github.com> Signed-off-by: Laurent Dufresne --- cmd/prometheus/main.go | 23 ++++++++++-- tsdb/db.go | 30 +++++++++++++-- util/runtime/statfs.go | 12 ++++-- util/runtime/statfs_default.go | 13 ++++++- util/runtime/statfs_linux_386.go | 15 +++++++- util/runtime/statfs_uint32.go | 13 ++++++- util/runtime/statfs_unix_test.go | 58 +++++++++++++++++++++++++++++ util/runtime/statfs_windows.go | 56 ++++++++++++++++++++++++++++ util/runtime/statfs_windows_test.go | 49 ++++++++++++++++++++++++ web/web.go | 7 ++++ 10 files changed, 259 insertions(+), 17 deletions(-) create mode 100644 util/runtime/statfs_unix_test.go create mode 100644 util/runtime/statfs_windows.go create mode 100644 util/runtime/statfs_windows_test.go diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 763911363b..d6db8c3d36 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -770,9 +770,9 @@ func main() { cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/") if !agentMode { - if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 { + if 
cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 && cfg.tsdb.MaxPercentage == 0 { cfg.tsdb.RetentionDuration = defaultRetentionDuration - logger.Info("No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration) + logger.Info("No time, size or percentage retention was set so using the default time retention", "duration", defaultRetentionDuration) } // Check for overflows. This limits our max retention to 100y. @@ -785,6 +785,17 @@ func main() { logger.Warn("Time retention value is too high. Limiting to: " + y.String()) } + if cfg.tsdb.MaxPercentage > 100 { + cfg.tsdb.MaxPercentage = 100 + logger.Warn("Percentage retention value is too high. Limiting to: 100%") + } + if cfg.tsdb.MaxPercentage > 0 { + if prom_runtime.FsSize(localStoragePath) == 0 { + fmt.Fprintln(os.Stderr, fmt.Errorf("unable to detect total capacity of metric storage at %s, please disable retention percentage (%d%%)", localStoragePath, cfg.tsdb.MaxPercentage)) + os.Exit(2) + } + } + // Max block size settings. if cfg.tsdb.MaxBlockDuration == 0 { maxBlockDuration, err := model.ParseDuration("31d") @@ -958,6 +969,7 @@ func main() { cfg.web.Context = ctxWeb cfg.web.TSDBRetentionDuration = cfg.tsdb.RetentionDuration cfg.web.TSDBMaxBytes = cfg.tsdb.MaxBytes + cfg.web.TSDBMaxPercentage = cfg.tsdb.MaxPercentage cfg.web.TSDBDir = localStoragePath cfg.web.LocalStorage = localStorage cfg.web.Storage = fanoutStorage @@ -1371,7 +1383,7 @@ func main() { return fmt.Errorf("opening storage failed: %w", err) } - switch fsType := prom_runtime.Statfs(localStoragePath); fsType { + switch fsType := prom_runtime.FsType(localStoragePath); fsType { case "NFS_SUPER_MAGIC": logger.Warn("This filesystem is not supported and may lead to data corruption and data loss. 
Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.", "fs_type", fsType) default: @@ -1383,6 +1395,7 @@ func main() { "MinBlockDuration", cfg.tsdb.MinBlockDuration, "MaxBlockDuration", cfg.tsdb.MaxBlockDuration, "MaxBytes", cfg.tsdb.MaxBytes, + "MaxPercentage", cfg.tsdb.MaxPercentage, "NoLockfile", cfg.tsdb.NoLockfile, "RetentionDuration", cfg.tsdb.RetentionDuration, "WALSegmentSize", cfg.tsdb.WALSegmentSize, @@ -1430,7 +1443,7 @@ func main() { return fmt.Errorf("opening storage failed: %w", err) } - switch fsType := prom_runtime.Statfs(localStoragePath); fsType { + switch fsType := prom_runtime.FsType(localStoragePath); fsType { case "NFS_SUPER_MAGIC": logger.Warn(fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.") default: @@ -1950,6 +1963,7 @@ type tsdbOptions struct { MaxBlockChunkSegmentSize units.Base2Bytes RetentionDuration model.Duration MaxBytes units.Base2Bytes + MaxPercentage uint NoLockfile bool WALCompressionType compression.Type HeadChunksWriteQueueSize int @@ -1978,6 +1992,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { MaxBlockChunkSegmentSize: int64(opts.MaxBlockChunkSegmentSize), RetentionDuration: int64(time.Duration(opts.RetentionDuration) / time.Millisecond), MaxBytes: int64(opts.MaxBytes), + MaxPercentage: opts.MaxPercentage, NoLockfile: opts.NoLockfile, WALCompression: opts.WALCompressionType, HeadChunksWriteQueueSize: opts.HeadChunksWriteQueueSize, diff --git a/tsdb/db.go b/tsdb/db.go index 1d73628bfd..646ed83cd5 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -47,6 +47,7 @@ import ( "github.com/prometheus/prometheus/tsdb/wlog" "github.com/prometheus/prometheus/util/compression" "github.com/prometheus/prometheus/util/features" + prom_runtime "github.com/prometheus/prometheus/util/runtime" ) const ( @@ 
-126,6 +127,11 @@ type Options struct { // the current size of the database. MaxBytes int64 + // Maximum % of disk space to use for blocks to be retained. + // 0 or less means disabled. + // If both MaxBytes and MaxPercentage are set, percentage prevails. + MaxPercentage uint + // NoLockfile disables creation and consideration of a lock file. NoLockfile bool @@ -1983,12 +1989,30 @@ func BeyondTimeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc // BeyondSizeRetention returns those blocks which are beyond the size retention // set in the db options. func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struct{}) { - // Size retention is disabled or no blocks to work with. - maxBytes := db.getMaxBytes() - if len(blocks) == 0 || maxBytes <= 0 { + // No blocks to work with + if len(blocks) == 0 { return deletable } + maxBytes := db.getMaxBytes() + + // Max percentage prevails over max size. + if db.opts.MaxPercentage > 0 { + diskSize := prom_runtime.FsSize(db.dir) + if diskSize <= 0 { + db.logger.Warn("Unable to retrieve filesystem size of database directory, skip percentage limitation and default to fixed size limitation", "dir", db.dir) + } else { + maxBytes = int64(uint64(db.opts.MaxPercentage) * diskSize / 100) + } + } + + // Size retention is disabled. + if maxBytes <= 0 { + return deletable + } + // update MaxBytes gauge + db.metrics.maxBytes.Set(float64(maxBytes)) + deletable = make(map[ulid.ULID]struct{}) // Initializing size counter with WAL size and Head chunks diff --git a/util/runtime/statfs.go b/util/runtime/statfs.go index 98dd822e4a..b6edbd872b 100644 --- a/util/runtime/statfs.go +++ b/util/runtime/statfs.go @@ -11,12 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build openbsd || windows || netbsd || solaris +//go:build openbsd || netbsd || solaris package runtime -// Statfs returns the file system type (Unix only) -// syscall.Statfs_t isn't available on openbsd -func Statfs(path string) string { +// FsType returns the file system type or "unknown" if unsupported. +func FsType(path string) string { return "unknown" } + +// FsSize returns the file system size or 0 if unsupported. +func FsSize(path string) uint64 { + return 0 +} diff --git a/util/runtime/statfs_default.go b/util/runtime/statfs_default.go index 0cf5c2e616..de65b780f0 100644 --- a/util/runtime/statfs_default.go +++ b/util/runtime/statfs_default.go @@ -20,8 +20,7 @@ import ( "syscall" ) -// Statfs returns the file system type (Unix only). -func Statfs(path string) string { +func FsType(path string) string { // Types of file systems that may be returned by `statfs` fsTypes := map[int64]string{ 0xadf5: "ADFS_SUPER_MAGIC", @@ -67,6 +66,7 @@ func Statfs(path string) string { 0x012FF7B4: "XENIX_SUPER_MAGIC", 0x58465342: "XFS_SUPER_MAGIC", 0x012FD16D: "_XIAFS_SUPER_MAGIC", + 0x794c7630: "OVERLAYFS_SUPER_MAGIC", } var fs syscall.Statfs_t @@ -82,3 +82,12 @@ func Statfs(path string) string { } return strconv.FormatInt(localType, 16) } + +func FsSize(path string) uint64 { + var fs syscall.Statfs_t + err := syscall.Statfs(path, &fs) + if err != nil { + return 0 + } + return uint64(fs.Bsize) * fs.Blocks +} diff --git a/util/runtime/statfs_linux_386.go b/util/runtime/statfs_linux_386.go index 33dbc4c3e9..82e586dc94 100644 --- a/util/runtime/statfs_linux_386.go +++ b/util/runtime/statfs_linux_386.go @@ -20,8 +20,8 @@ import ( "syscall" ) -// Statfs returns the file system type (Unix only) -func Statfs(path string) string { +// FsType returns the file system type (Unix only). 
+func FsType(path string) string { // Types of file systems that may be returned by `statfs` fsTypes := map[int32]string{ 0xadf5: "ADFS_SUPER_MAGIC", @@ -63,6 +63,7 @@ func Statfs(path string) string { 0x012FF7B4: "XENIX_SUPER_MAGIC", 0x58465342: "XFS_SUPER_MAGIC", 0x012FD16D: "_XIAFS_SUPER_MAGIC", + 0x794c7630: "OVERLAYFS_SUPER_MAGIC", } var fs syscall.Statfs_t @@ -75,3 +76,13 @@ func Statfs(path string) string { } return strconv.Itoa(int(fs.Type)) } + +// FsSize returns the file system size (Unix only). +func FsSize(path string) uint64 { + var fs syscall.Statfs_t + err := syscall.Statfs(path, &fs) + if err != nil { + return 0 + } + return uint64(fs.Bsize) * fs.Blocks +} diff --git a/util/runtime/statfs_uint32.go b/util/runtime/statfs_uint32.go index 2fb4d70849..acffb41295 100644 --- a/util/runtime/statfs_uint32.go +++ b/util/runtime/statfs_uint32.go @@ -20,8 +20,7 @@ import ( "syscall" ) -// Statfs returns the file system type (Unix only) -func Statfs(path string) string { +func FsType(path string) string { // Types of file systems that may be returned by `statfs` fsTypes := map[uint32]string{ 0xadf5: "ADFS_SUPER_MAGIC", @@ -63,6 +62,7 @@ func Statfs(path string) string { 0x012FF7B4: "XENIX_SUPER_MAGIC", 0x58465342: "XFS_SUPER_MAGIC", 0x012FD16D: "_XIAFS_SUPER_MAGIC", + 0x794c7630: "OVERLAYFS_SUPER_MAGIC", } var fs syscall.Statfs_t @@ -75,3 +75,12 @@ func Statfs(path string) string { } return strconv.Itoa(int(fs.Type)) } + +func FsSize(path string) uint64 { + var fs syscall.Statfs_t + err := syscall.Statfs(path, &fs) + if err != nil { + return 0 + } + return uint64(fs.Bsize) * fs.Blocks +} diff --git a/util/runtime/statfs_unix_test.go b/util/runtime/statfs_unix_test.go new file mode 100644 index 0000000000..563bd1dfa6 --- /dev/null +++ b/util/runtime/statfs_unix_test.go @@ -0,0 +1,58 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows && !openbsd && !netbsd && !solaris + +package runtime + +import ( + "os" + "testing" + + "github.com/grafana/regexp" + "github.com/stretchr/testify/require" +) + +var regexpFsType = regexp.MustCompile("^[A-Z][A-Z0-9_]*_MAGIC$") + +func TestFsType(t *testing.T) { + var fsType string + + path, err := os.Getwd() + require.NoError(t, err) + + fsType = FsType(path) + require.Regexp(t, regexpFsType, fsType) + + fsType = FsType("/no/where/to/be/found") + require.Equal(t, "0", fsType) + + fsType = FsType(" %% not event a real path\n\n") + require.Equal(t, "0", fsType) +} + +func TestFsSize(t *testing.T) { + var size uint64 + + path, err := os.Getwd() + require.NoError(t, err) + + size = FsSize(path) + require.Positive(t, size) + + size = FsSize("/no/where/to/be/found") + require.Equal(t, uint64(0), size) + + size = FsSize(" %% not event a real path\n\n") + require.Equal(t, uint64(0), size) +} diff --git a/util/runtime/statfs_windows.go b/util/runtime/statfs_windows.go new file mode 100644 index 0000000000..717d4c16f1 --- /dev/null +++ b/util/runtime/statfs_windows.go @@ -0,0 +1,56 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows + +package runtime + +import ( + "os" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + dll = windows.MustLoadDLL("kernel32.dll") + getDiskFreeSpaceExW = dll.MustFindProc("GetDiskFreeSpaceExW") +) + +func FsType(path string) string { + return "unknown" +} + +func FsSize(path string) uint64 { + // Ensure the path exists. + if _, err := os.Stat(path); err != nil { + return 0 + } + + var avail int64 + var total int64 + var free int64 + // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getdiskfreespaceexa + ret, _, _ := getDiskFreeSpaceExW.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path))), + uintptr(unsafe.Pointer(&avail)), + uintptr(unsafe.Pointer(&total)), + uintptr(unsafe.Pointer(&free))) + + if ret == 0 || uint64(free) > uint64(total) { + return 0 + } + + return uint64(total) +} diff --git a/util/runtime/statfs_windows_test.go b/util/runtime/statfs_windows_test.go new file mode 100644 index 0000000000..5b65d7029e --- /dev/null +++ b/util/runtime/statfs_windows_test.go @@ -0,0 +1,49 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows + +package runtime + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFsType(t *testing.T) { + var fsType string + + path, err := os.Getwd() + require.NoError(t, err) + + fsType = FsType(path) + require.Equal(t, "unknown", fsType) + + fsType = FsType("A:\\no\\where\\to\\be\\found") + require.Equal(t, "unknown", fsType) +} + +func TestFsSize(t *testing.T) { + var size uint64 + + size = FsSize("C:\\") + require.Positive(t, size) + + size = FsSize("c:\\no\\where\\to\\be\\found") + require.Equal(t, uint64(0), size) + + size = FsSize(" %% not event a real path\n\n") + require.Equal(t, uint64(0), size) +} diff --git a/web/web.go b/web/web.go index 583492abc9..90eaf13afe 100644 --- a/web/web.go +++ b/web/web.go @@ -263,6 +263,7 @@ type Options struct { TSDBRetentionDuration model.Duration TSDBDir string TSDBMaxBytes units.Base2Bytes + TSDBMaxPercentage uint LocalStorage LocalStorage Storage storage.Storage ExemplarStorage storage.ExemplarQueryable @@ -874,6 +875,12 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) { } status.StorageRetention += h.options.TSDBMaxBytes.String() } + if h.options.TSDBMaxPercentage != 0 { + if status.StorageRetention != "" { + status.StorageRetention += " or " + } + status.StorageRetention = status.StorageRetention + strconv.FormatUint(uint64(h.options.TSDBMaxPercentage), 10) + "%" + } metrics, err := h.gatherer.Gather() if err != nil { From 971143edac5f6375fbf881e71d31f48a55233245 Mon Sep 17 00:00:00 2001 From: Laurent Dufresne Date: Fri, 13 Feb 2026 10:48:36 +0100 Subject: [PATCH 35/39] Added `Retention.Percentage` to config file with runtime config reloading Signed-off-by: Laurent Dufresne --- cmd/prometheus/main.go | 6 ++++++ config/config.go | 3 +++ config/config_test.go | 5 +++-- config/testdata/conf.good.yml | 1 + docs/configuration/configuration.md | 8 
++++++++ tsdb/db.go | 25 +++++++++++++++++-------- 6 files changed, 38 insertions(+), 10 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index d6db8c3d36..4cca65466f 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -717,6 +717,9 @@ func main() { if cfgFile.StorageConfig.TSDBConfig.Retention.Size > 0 { cfg.tsdb.MaxBytes = cfgFile.StorageConfig.TSDBConfig.Retention.Size } + if cfgFile.StorageConfig.TSDBConfig.Retention.Percentage > 0 { + cfg.tsdb.MaxPercentage = cfgFile.StorageConfig.TSDBConfig.Retention.Percentage + } } } @@ -790,6 +793,9 @@ func main() { logger.Warn("Percentage retention value is too high. Limiting to: 100%") } if cfg.tsdb.MaxPercentage > 0 { + if cfg.tsdb.MaxBytes > 0 { + logger.Warn("storage.tsdb.retention.size is ignored, because storage.tsdb.retention.percentage is specified") + } if prom_runtime.FsSize(localStoragePath) == 0 { fmt.Fprintln(os.Stderr, fmt.Errorf("unable to detect total capacity of metric storage at %s, please disable retention percentage (%d%%)", localStoragePath, cfg.tsdb.MaxPercentage)) os.Exit(2) diff --git a/config/config.go b/config/config.go index d721d7fb86..0ebebc26d5 100644 --- a/config/config.go +++ b/config/config.go @@ -1092,6 +1092,9 @@ type TSDBRetentionConfig struct { // Maximum number of bytes that can be stored for blocks. Size units.Base2Bytes `yaml:"size,omitempty"` + + // Maximum percentage of disk used for TSDB storage. + Percentage uint `yaml:"percentage,omitempty"` } // TSDBConfig configures runtime reloadable configuration options. 
diff --git a/config/config_test.go b/config/config_test.go index 968b563e1e..43c56a501f 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1737,8 +1737,9 @@ var expectedConf = &Config{ OutOfOrderTimeWindowFlag: model.Duration(30 * time.Minute), StaleSeriesCompactionThreshold: 0.5, Retention: &TSDBRetentionConfig{ - Time: model.Duration(24 * time.Hour), - Size: 1 * units.GiB, + Time: model.Duration(24 * time.Hour), + Size: 1 * units.GiB, + Percentage: 28, }, }, }, diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 96bf9e2b33..d6b1690243 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -457,6 +457,7 @@ storage: retention: time: 1d size: 1GB + percentage: 28 tracing: endpoint: "localhost:4317" diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 49b7774b5f..853f15dc4c 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -3581,6 +3581,14 @@ with this feature. # This option takes precedence over the deprecated command-line flag --storage.tsdb.retention.size. [ size: | default = 0 ] + # Maximum percent of total disk space allowed for storage of blocks. Alternative to `size` and + # behaves the same as if size was calculated by hand as a percentage of the total storage capacity. + # Prometheus will fail to start if this config is enabled, but it fails to query the total storage capacity. + # The total disk space allowed will automatically adapt to volume resize. + # If set to 0 or not set, percentage-based retention is disabled. + # + # This is an experimental feature, this behaviour could change or be removed in the future. 
+ [ percentage: | default = 0 ] ``` ### `` diff --git a/tsdb/db.go b/tsdb/db.go index 646ed83cd5..ee234db352 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -350,6 +350,7 @@ type dbMetrics struct { tombCleanTimer prometheus.Histogram blocksBytes prometheus.Gauge maxBytes prometheus.Gauge + maxPercentage prometheus.Gauge retentionDuration prometheus.Gauge staleSeriesCompactionsTriggered prometheus.Counter staleSeriesCompactionsFailed prometheus.Counter @@ -430,6 +431,10 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { Name: "prometheus_tsdb_retention_limit_bytes", Help: "Max number of bytes to be retained in the tsdb blocks, configured 0 means disabled", }) + m.maxPercentage = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "prometheus_tsdb_retention_limit_percentage", + Help: "Max percentage of total storage space to be retained in the tsdb blocks, configured 0 means disabled", + }) m.retentionDuration = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "prometheus_tsdb_retention_limit_seconds", Help: "How long to retain samples in storage.", @@ -470,6 +475,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { m.tombCleanTimer, m.blocksBytes, m.maxBytes, + m.maxPercentage, m.retentionDuration, m.staleSeriesCompactionsTriggered, m.staleSeriesCompactionsFailed, @@ -1068,6 +1074,7 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn db.metrics = newDBMetrics(db, r) maxBytes := max(opts.MaxBytes, 0) db.metrics.maxBytes.Set(float64(maxBytes)) + db.metrics.maxPercentage.Set(float64(max(opts.MaxPercentage, 0))) db.metrics.retentionDuration.Set((time.Duration(opts.RetentionDuration) * time.Millisecond).Seconds()) // Calling db.reload() calls db.reloadBlocks() which requires cmtx to be locked. 
@@ -1280,6 +1287,10 @@ func (db *DB) ApplyConfig(conf *config.Config) error { db.opts.MaxBytes = int64(conf.StorageConfig.TSDBConfig.Retention.Size) db.metrics.maxBytes.Set(float64(db.opts.MaxBytes)) } + if conf.StorageConfig.TSDBConfig.Retention.Percentage > 0 { + db.opts.MaxPercentage = conf.StorageConfig.TSDBConfig.Retention.Percentage + db.metrics.maxPercentage.Set(float64(db.opts.MaxPercentage)) + } db.retentionMtx.Unlock() } } else { @@ -1325,11 +1336,11 @@ func (db *DB) getRetentionDuration() int64 { return db.opts.RetentionDuration } -// getMaxBytes returns the current max bytes setting in a thread-safe manner. -func (db *DB) getMaxBytes() int64 { +// getRetentionSettings returns max bytes and max percentage settings in a thread-safe manner. +func (db *DB) getRetentionSettings() (int64, uint) { db.retentionMtx.RLock() defer db.retentionMtx.RUnlock() - return db.opts.MaxBytes + return db.opts.MaxBytes, db.opts.MaxPercentage } // dbAppender wraps the DB's head appender and triggers compactions on commit @@ -1994,15 +2005,15 @@ func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc return deletable } - maxBytes := db.getMaxBytes() + maxBytes, maxPercentage := db.getRetentionSettings() // Max percentage prevails over max size. 
- if db.opts.MaxPercentage > 0 { + if maxPercentage > 0 { diskSize := prom_runtime.FsSize(db.dir) if diskSize <= 0 { db.logger.Warn("Unable to retrieve filesystem size of database directory, skip percentage limitation and default to fixed size limitation", "dir", db.dir) } else { - maxBytes = int64(uint64(db.opts.MaxPercentage) * diskSize / 100) + maxBytes = int64(uint64(maxPercentage) * diskSize / 100) } } @@ -2010,8 +2021,6 @@ func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc if maxBytes <= 0 { return deletable } - // update MaxBytes gauge - db.metrics.maxBytes.Set(float64(maxBytes)) deletable = make(map[ulid.ULID]struct{}) From c76e78d0a4191e0cdf21d100e2b285a60c8a4f40 Mon Sep 17 00:00:00 2001 From: Laurent Dufresne Date: Thu, 19 Feb 2026 14:04:31 +0100 Subject: [PATCH 36/39] Added test for percentage-based retention Signed-off-by: Laurent Dufresne --- tsdb/db.go | 16 +++++++++++++++- tsdb/db_test.go | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/tsdb/db.go b/tsdb/db.go index ee234db352..b0076bed23 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -263,6 +263,9 @@ type Options struct { // StaleSeriesCompactionThreshold is a number between 0.0-1.0 indicating the % of stale series in // the in-memory Head block. If the % of stale series crosses this threshold, stale series compaction is run immediately. StaleSeriesCompactionThreshold float64 + + // FsSizeFunc is a function returning the total disk size for a given path. 
+ FsSizeFunc FsSizeFunc } type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) @@ -273,6 +276,8 @@ type BlockQuerierFunc func(b BlockReader, mint, maxt int64) (storage.Querier, er type BlockChunkQuerierFunc func(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) +type FsSizeFunc func(path string) uint64 + // DB handles reads and writes of time series falling into // a hashed partition of a seriedb. type DB struct { @@ -334,6 +339,8 @@ type DB struct { blockQuerierFunc BlockQuerierFunc blockChunkQuerierFunc BlockChunkQuerierFunc + + fsSizeFunc FsSizeFunc } type dbMetrics struct { @@ -681,6 +688,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue head: head, blockQuerierFunc: NewBlockQuerier, blockChunkQuerierFunc: NewBlockChunkQuerier, + fsSizeFunc: prom_runtime.FsSize, }, nil } @@ -1015,6 +1023,12 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn db.blockChunkQuerierFunc = opts.BlockChunkQuerierFunc } + if opts.FsSizeFunc == nil { + db.fsSizeFunc = prom_runtime.FsSize + } else { + db.fsSizeFunc = opts.FsSizeFunc + } + var wal, wbl *wlog.WL segmentSize := wlog.DefaultSegmentSize // Wal is enabled. @@ -2009,7 +2023,7 @@ func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc // Max percentage prevails over max size. 
if maxPercentage > 0 { - diskSize := prom_runtime.FsSize(db.dir) + diskSize := db.fsSizeFunc(db.dir) if diskSize <= 0 { db.logger.Warn("Unable to retrieve filesystem size of database directory, skip percentage limitation and default to fixed size limitation", "dir", db.dir) } else { diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 18e969f952..ad66945541 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -9611,3 +9611,39 @@ func TestStaleSeriesCompactionWithZeroSeries(t *testing.T) { // Should still have no blocks since there was nothing to compact. require.Empty(t, db.Blocks()) } + +func TestBeyondSizeRetentionWithPercentage(t *testing.T) { + const maxBlock = 100 + const numBytesChunks = 1024 + const diskSize = maxBlock * numBytesChunks + + opts := DefaultOptions() + opts.MaxPercentage = 10 + opts.FsSizeFunc = func(_ string) uint64 { + return uint64(diskSize) + } + + db := newTestDB(t, withOpts(opts)) + require.Zero(t, db.Head().Size()) + + blocks := make([]*Block, 0, opts.MaxPercentage+1) + for range opts.MaxPercentage { + blocks = append(blocks, &Block{ + numBytesChunks: numBytesChunks, + meta: BlockMeta{ULID: ulid.Make()}, + }) + } + + deletable := BeyondSizeRetention(db, blocks) + require.Empty(t, deletable) + + ulid := ulid.Make() + blocks = append(blocks, &Block{ + numBytesChunks: numBytesChunks, + meta: BlockMeta{ULID: ulid}, + }) + + deletable = BeyondSizeRetention(db, blocks) + require.Len(t, deletable, 1) + require.Contains(t, deletable, ulid) +} From d66944d856ca675d0eb826b058a93b1d7b07dc95 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 24 Feb 2026 16:45:49 +0100 Subject: [PATCH 37/39] chore(config): migrate config renovate.json (#18183) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- renovate.json | 73 +++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 56 insertions(+), 17 deletions(-) diff --git a/renovate.json b/renovate.json index 
c0490c5610..a2f2c75719 100644 --- a/renovate.json +++ b/renovate.json @@ -4,17 +4,27 @@ "config:recommended" ], "separateMultipleMajor": true, - "baseBranches": ["main"], + "baseBranchPatterns": [ + "main" + ], "postUpdateOptions": [ "gomodTidy", "gomodUpdateImportPaths" ], - "schedule": ["* * 21 * *"], + "schedule": [ + "* * 21 * *" + ], "timezone": "UTC", "github-actions": { - "managerFilePatterns": ["scripts/**"] + "managerFilePatterns": [ + "scripts/**" + ] }, - "prBodyNotes": ["```release-notes","NONE","```"], + "prBodyNotes": [ + "```release-notes", + "NONE", + "```" + ], "prConcurrentLimit": 20, "prHourlyLimit": 5, "packageRules": [ @@ -27,31 +37,49 @@ }, { "description": "Don't update prometheus-io namespace packages", - "matchPackageNames": ["@prometheus-io/**"], + "matchPackageNames": [ + "@prometheus-io/**" + ], "enabled": false }, { "description": "Group AWS Go dependencies", - "matchManagers": ["gomod"], - "matchPackageNames": ["github.com/aws/**"], + "matchManagers": [ + "gomod" + ], + "matchPackageNames": [ + "github.com/aws/**" + ], "groupName": "AWS Go dependencies" }, { "description": "Group Azure Go dependencies", - "matchManagers": ["gomod"], - "matchPackageNames": ["github.com/Azure/**"], + "matchManagers": [ + "gomod" + ], + "matchPackageNames": [ + "github.com/Azure/**" + ], "groupName": "Azure Go dependencies" }, { "description": "Group Kubernetes Go dependencies", - "matchManagers": ["gomod"], - "matchPackageNames": ["k8s.io/**"], + "matchManagers": [ + "gomod" + ], + "matchPackageNames": [ + "k8s.io/**" + ], "groupName": "Kubernetes Go dependencies" }, { "description": "Group OpenTelemetry Go dependencies", - "matchManagers": ["gomod"], - "matchPackageNames": ["go.opentelemetry.io/**"], + "matchManagers": [ + "gomod" + ], + "matchPackageNames": [ + "go.opentelemetry.io/**" + ], "groupName": "OpenTelemetry Go dependencies" }, { @@ -60,7 +88,10 @@ "web/ui/mantine-ui/package.json" ], "groupName": "Mantine UI", - "matchUpdateTypes": ["minor", 
"patch"], + "matchUpdateTypes": [ + "minor", + "patch" + ], "enabled": true }, { @@ -69,7 +100,10 @@ "web/ui/react-app/package.json" ], "groupName": "React App", - "matchUpdateTypes": ["minor", "patch"], + "matchUpdateTypes": [ + "minor", + "patch" + ], "enabled": true }, { @@ -78,14 +112,19 @@ "web/ui/module/**/package.json" ], "groupName": "Modules", - "matchUpdateTypes": ["minor", "patch"], + "matchUpdateTypes": [ + "minor", + "patch" + ], "enabled": true } ], "branchPrefix": "deps-update/", "vulnerabilityAlerts": { "enabled": true, - "labels": ["security-update"] + "labels": [ + "security-update" + ] }, "osvVulnerabilityAlerts": true, "dependencyDashboardApproval": false From c317f9254e099e1fe5cd7d2884d24fd2cbd4e2da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linas=20Med=C5=BEi=C5=ABnas?= Date: Wed, 25 Feb 2026 09:10:42 +0100 Subject: [PATCH 38/39] chore(histogram): Move histogram trimming code out of engine.go (#18185) Signed-off-by: Linas Medziunas --- model/histogram/float_histogram.go | 334 ++++++++++++++++++++++++++++ promql/engine.go | 339 +---------------------------- 2 files changed, 336 insertions(+), 337 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index d457d8ab25..620d185177 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -2110,3 +2110,337 @@ func (h *FloatHistogram) HasOverflow() bool { } return false } + +// TrimBuckets trims native histogram buckets. +func (h *FloatHistogram) TrimBuckets(rhs float64, isUpperTrim bool) *FloatHistogram { + var ( + trimmedHist = h.Copy() + + updatedCount, updatedSum float64 + trimmedBuckets bool + isCustomBucket = trimmedHist.UsesCustomBuckets() + hasPositive, hasNegative bool + ) + + if isUpperTrim { + // Calculate the fraction to keep for buckets that contain the trim value. + // For TRIM_UPPER, we keep observations below the trim point (rhs). + // Example: histogram / float. 
+ for i, iter := 0, trimmedHist.PositiveBucketIterator(); iter.Next(); i++ { + bucket := iter.At() + if bucket.Count == 0 { + continue + } + hasPositive = true + + switch { + case bucket.Lower >= rhs: + // Bucket is entirely below the trim point - keep all. + updatedCount += bucket.Count + bucketMidpoint := computeMidpoint(bucket.Lower, bucket.Upper, true, isCustomBucket) + updatedSum += bucketMidpoint * bucket.Count + + case bucket.Upper > rhs: + // Bucket contains the trim point - interpolate. + keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, true, isCustomBucket) + + updatedCount += keepCount + updatedSum += bucketMidpoint * keepCount + if trimmedHist.PositiveBuckets[i] != keepCount { + trimmedHist.PositiveBuckets[i] = keepCount + trimmedBuckets = true + } + + default: + trimmedHist.PositiveBuckets[i] = 0 + trimmedBuckets = true + } + } + + for i, iter := 0, trimmedHist.NegativeBucketIterator(); iter.Next(); i++ { + bucket := iter.At() + if bucket.Count == 0 { + continue + } + hasNegative = true + + switch { + case bucket.Lower >= rhs: + // Bucket is entirely below the trim point - keep all. + updatedCount += bucket.Count + bucketMidpoint := computeMidpoint(bucket.Lower, bucket.Upper, false, isCustomBucket) + updatedSum += bucketMidpoint * bucket.Count + + case bucket.Upper > rhs: + // Bucket contains the trim point - interpolate. + keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, false, isCustomBucket) + + updatedCount += keepCount + updatedSum += bucketMidpoint * keepCount + if trimmedHist.NegativeBuckets[i] != keepCount { + trimmedHist.NegativeBuckets[i] = keepCount + trimmedBuckets = true + } + + default: + trimmedHist.NegativeBuckets[i] = 0 + trimmedBuckets = true + } + } + } + + // Handle the zero count bucket. 
+ if trimmedHist.ZeroCount > 0 { + keepCount, bucketMidpoint := computeZeroBucketTrim(trimmedHist.ZeroBucket(), rhs, hasNegative, hasPositive, isUpperTrim) + + if trimmedHist.ZeroCount != keepCount { + trimmedHist.ZeroCount = keepCount + trimmedBuckets = true + } + updatedSum += bucketMidpoint * keepCount + updatedCount += keepCount + } + + if trimmedBuckets { + // Only update the totals in case some bucket(s) were fully (or partially) trimmed. + trimmedHist.Count = updatedCount + trimmedHist.Sum = updatedSum + + trimmedHist.Compact(0) + } + + return trimmedHist +} + +func handleInfinityBuckets(isUpperTrim bool, b Bucket[float64], rhs float64) (underCount, bucketMidpoint float64) { + zeroIfInf := func(x float64) float64 { + if math.IsInf(x, 0) { + return 0 + } + return x + } + + // Case 1: Bucket with lower bound -Inf. + if math.IsInf(b.Lower, -1) { + // TRIM_UPPER (= b.Upper { + // As the rhs is greater than the upper bound, we keep the entire current bucket. + return b.Count, 0 + } + if rhs > 0 && b.Upper > 0 && !math.IsInf(b.Upper, 1) { + // If upper is finite and positive, we treat lower as 0 (despite it de facto being -Inf). + // This is only possible with NHCB, so we can always use linear interpolation. + return b.Count * rhs / b.Upper, rhs / 2 + } + if b.Upper <= 0 { + return b.Count, rhs + } + // Otherwise, we are targeting a valid trim, but as we don't know the exact distribution of values that belongs to an infinite bucket, we need to remove the entire bucket. + return 0, zeroIfInf(b.Upper) + } + // TRIM_LOWER (>/) - remove values less than rhs + if rhs <= b.Lower { + // Impossible to happen because the lower bound is -Inf. Returning the entire current bucket. + return b.Count, 0 + } + if rhs >= 0 && b.Upper > rhs && !math.IsInf(b.Upper, 1) { + // If upper is finite and positive, we treat lower as 0 (despite it de facto being -Inf). + // This is only possible with NHCB, so we can always use linear interpolation. 
+ return b.Count * (1 - rhs/b.Upper), (rhs + b.Upper) / 2 + } + // Otherwise, we are targeting a valid trim, but as we don't know the exact distribution of values that belongs to an infinite bucket, we need to remove the entire bucket. + return 0, zeroIfInf(b.Upper) + } + + // Case 2: Bucket with upper bound +Inf. + if math.IsInf(b.Upper, 1) { + if isUpperTrim { + // TRIM_UPPER (= lower and the bucket extends to +Inf, some values in this bucket could be > rhs, so we conservatively remove the entire bucket; + // when rhs < lower, all values in this bucket are >= lower > rhs, so all values should be removed. + return 0, zeroIfInf(b.Lower) + } + // TRIM_LOWER (>/) - remove values less than rhs. + if rhs >= b.Lower { + return b.Count, rhs + } + // lower < rhs: we are inside the infinity bucket, but as we don't know the exact distribution of values, we conservatively remove the entire bucket. + return 0, zeroIfInf(b.Lower) + } + + panic(fmt.Errorf("one of the bounds must be infinite for handleInfinityBuckets, got %v", b)) +} + +// computeSplit calculates the portion of the bucket's count <= rhs (trim point). +func computeSplit(b Bucket[float64], rhs float64, isPositive, isLinear bool) float64 { + if rhs <= b.Lower { + return 0 + } + if rhs >= b.Upper { + return b.Count + } + + var fraction float64 + switch { + case isLinear: + fraction = (rhs - b.Lower) / (b.Upper - b.Lower) + default: + // Exponential interpolation. 
+ logLower := math.Log2(math.Abs(b.Lower)) + logUpper := math.Log2(math.Abs(b.Upper)) + logV := math.Log2(math.Abs(rhs)) + + if isPositive { + fraction = (logV - logLower) / (logUpper - logLower) + } else { + fraction = 1 - ((logV - logUpper) / (logLower - logUpper)) + } + } + + return b.Count * fraction +} + +func computeZeroBucketTrim(zeroBucket Bucket[float64], rhs float64, hasNegative, hasPositive, isUpperTrim bool) (float64, float64) { + var ( + lower = zeroBucket.Lower + upper = zeroBucket.Upper + ) + if hasNegative && !hasPositive { + upper = 0 + } + if hasPositive && !hasNegative { + lower = 0 + } + + var fraction, midpoint float64 + + if isUpperTrim { + if rhs <= lower { + return 0, 0 + } + if rhs >= upper { + return zeroBucket.Count, (lower + upper) / 2 + } + + fraction = (rhs - lower) / (upper - lower) + midpoint = (lower + rhs) / 2 + } else { // lower trim + if rhs <= lower { + return zeroBucket.Count, (lower + upper) / 2 + } + if rhs >= upper { + return 0, 0 + } + + fraction = (upper - rhs) / (upper - lower) + midpoint = (rhs + upper) / 2 + } + + return zeroBucket.Count * fraction, midpoint +} + +func computeBucketTrim(b Bucket[float64], rhs float64, isUpperTrim, isPositive, isCustomBucket bool) (float64, float64) { + if math.IsInf(b.Lower, -1) || math.IsInf(b.Upper, 1) { + return handleInfinityBuckets(isUpperTrim, b, rhs) + } + + underCount := computeSplit(b, rhs, isPositive, isCustomBucket) + + if isUpperTrim { + return underCount, computeMidpoint(b.Lower, rhs, isPositive, isCustomBucket) + } + + return b.Count - underCount, computeMidpoint(rhs, b.Upper, isPositive, isCustomBucket) +} + +func computeMidpoint(survivingIntervalLowerBound, survivingIntervalUpperBound float64, isPositive, isLinear bool) float64 { + if math.IsInf(survivingIntervalLowerBound, 0) { + if math.IsInf(survivingIntervalUpperBound, 0) { + return 0 + } + if survivingIntervalUpperBound > 0 { + return survivingIntervalUpperBound / 2 + } + return survivingIntervalUpperBound + } else 
if math.IsInf(survivingIntervalUpperBound, 0) { + return survivingIntervalLowerBound + } + + if isLinear { + return (survivingIntervalLowerBound + survivingIntervalUpperBound) / 2 + } + + geoMean := math.Sqrt(math.Abs(survivingIntervalLowerBound * survivingIntervalUpperBound)) + + if isPositive { + return geoMean + } + return -geoMean +} diff --git a/promql/engine.go b/promql/engine.go index a769310e46..9b46210cf0 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3184,337 +3184,6 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { panic(fmt.Errorf("operator %q not allowed for Scalar operations", op)) } -func handleInfinityBuckets(isUpperTrim bool, b histogram.Bucket[float64], rhs float64) (underCount, bucketMidpoint float64) { - zeroIfInf := func(x float64) float64 { - if math.IsInf(x, 0) { - return 0 - } - return x - } - - // Case 1: Bucket with lower bound -Inf. - if math.IsInf(b.Lower, -1) { - // TRIM_UPPER (= b.Upper { - // As the rhs is greater than the upper bound, we keep the entire current bucket. - return b.Count, 0 - } - if rhs > 0 && b.Upper > 0 && !math.IsInf(b.Upper, 1) { - // If upper is finite and positive, we treat lower as 0 (despite it de facto being -Inf). - // This is only possible with NHCB, so we can always use linear interpolation. - return b.Count * rhs / b.Upper, rhs / 2 - } - if b.Upper <= 0 { - return b.Count, rhs - } - // Otherwise, we are targeting a valid trim, but as we don't know the exact distribution of values that belongs to an infinite bucket, we need to remove the entire bucket. - return 0, zeroIfInf(b.Upper) - } - // TRIM_LOWER (>/) - remove values less than rhs - if rhs <= b.Lower { - // Impossible to happen because the lower bound is -Inf. Returning the entire current bucket. - return b.Count, 0 - } - if rhs >= 0 && b.Upper > rhs && !math.IsInf(b.Upper, 1) { - // If upper is finite and positive, we treat lower as 0 (despite it de facto being -Inf). 
- // This is only possible with NHCB, so we can always use linear interpolation. - return b.Count * (1 - rhs/b.Upper), (rhs + b.Upper) / 2 - } - // Otherwise, we are targeting a valid trim, but as we don't know the exact distribution of values that belongs to an infinite bucket, we need to remove the entire bucket. - return 0, zeroIfInf(b.Upper) - } - - // Case 2: Bucket with upper bound +Inf. - if math.IsInf(b.Upper, 1) { - if isUpperTrim { - // TRIM_UPPER (= lower and the bucket extends to +Inf, some values in this bucket could be > rhs, so we conservatively remove the entire bucket; - // when rhs < lower, all values in this bucket are >= lower > rhs, so all values should be removed. - return 0, zeroIfInf(b.Lower) - } - // TRIM_LOWER (>/) - remove values less than rhs. - if rhs >= b.Lower { - return b.Count, rhs - } - // lower < rhs: we are inside the infinity bucket, but as we don't know the exact distribution of values, we conservatively remove the entire bucket. - return 0, zeroIfInf(b.Lower) - } - - panic(fmt.Errorf("one of the bounds must be infinite for handleInfinityBuckets, got %v", b)) -} - -// computeSplit calculates the portion of the bucket's count <= rhs (trim point). -func computeSplit(b histogram.Bucket[float64], rhs float64, isPositive, isLinear bool) float64 { - if rhs <= b.Lower { - return 0 - } - if rhs >= b.Upper { - return b.Count - } - - var fraction float64 - switch { - case isLinear: - fraction = (rhs - b.Lower) / (b.Upper - b.Lower) - default: - // Exponential interpolation. 
- logLower := math.Log2(math.Abs(b.Lower)) - logUpper := math.Log2(math.Abs(b.Upper)) - logV := math.Log2(math.Abs(rhs)) - - if isPositive { - fraction = (logV - logLower) / (logUpper - logLower) - } else { - fraction = 1 - ((logV - logUpper) / (logLower - logUpper)) - } - } - - return b.Count * fraction -} - -func computeZeroBucketTrim(zeroBucket histogram.Bucket[float64], rhs float64, hasNegative, hasPositive, isUpperTrim bool) (float64, float64) { - var ( - lower = zeroBucket.Lower - upper = zeroBucket.Upper - ) - if hasNegative && !hasPositive { - upper = 0 - } - if hasPositive && !hasNegative { - lower = 0 - } - - var fraction, midpoint float64 - - if isUpperTrim { - if rhs <= lower { - return 0, 0 - } - if rhs >= upper { - return zeroBucket.Count, (lower + upper) / 2 - } - - fraction = (rhs - lower) / (upper - lower) - midpoint = (lower + rhs) / 2 - } else { // lower trim - if rhs <= lower { - return zeroBucket.Count, (lower + upper) / 2 - } - if rhs >= upper { - return 0, 0 - } - - fraction = (upper - rhs) / (upper - lower) - midpoint = (rhs + upper) / 2 - } - - return zeroBucket.Count * fraction, midpoint -} - -func computeBucketTrim(b histogram.Bucket[float64], rhs float64, isUpperTrim, isPositive, isCustomBucket bool) (float64, float64) { - if math.IsInf(b.Lower, -1) || math.IsInf(b.Upper, 1) { - return handleInfinityBuckets(isUpperTrim, b, rhs) - } - - underCount := computeSplit(b, rhs, isPositive, isCustomBucket) - - if isUpperTrim { - return underCount, computeMidpoint(b.Lower, rhs, isPositive, isCustomBucket) - } - - return b.Count - underCount, computeMidpoint(rhs, b.Upper, isPositive, isCustomBucket) -} - -// Helper function to trim native histogram buckets. -// TODO: move trimHistogram to model/histogram/float_histogram.go (making it a method of FloatHistogram). 
-func trimHistogram(trimmedHist *histogram.FloatHistogram, rhs float64, isUpperTrim bool) { - var ( - updatedCount, updatedSum float64 - trimmedBuckets bool - isCustomBucket = trimmedHist.UsesCustomBuckets() - hasPositive, hasNegative bool - ) - - if isUpperTrim { - // Calculate the fraction to keep for buckets that contain the trim value. - // For TRIM_UPPER, we keep observations below the trim point (rhs). - // Example: histogram / float. - for i, iter := 0, trimmedHist.PositiveBucketIterator(); iter.Next(); i++ { - bucket := iter.At() - if bucket.Count == 0 { - continue - } - hasPositive = true - - switch { - case bucket.Lower >= rhs: - // Bucket is entirely below the trim point - keep all. - updatedCount += bucket.Count - bucketMidpoint := computeMidpoint(bucket.Lower, bucket.Upper, true, isCustomBucket) - updatedSum += bucketMidpoint * bucket.Count - - case bucket.Upper > rhs: - // Bucket contains the trim point - interpolate. - keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, true, isCustomBucket) - - updatedCount += keepCount - updatedSum += bucketMidpoint * keepCount - if trimmedHist.PositiveBuckets[i] != keepCount { - trimmedHist.PositiveBuckets[i] = keepCount - trimmedBuckets = true - } - - default: - trimmedHist.PositiveBuckets[i] = 0 - trimmedBuckets = true - } - } - - for i, iter := 0, trimmedHist.NegativeBucketIterator(); iter.Next(); i++ { - bucket := iter.At() - if bucket.Count == 0 { - continue - } - hasNegative = true - - switch { - case bucket.Lower >= rhs: - // Bucket is entirely below the trim point - keep all. - updatedCount += bucket.Count - bucketMidpoint := computeMidpoint(bucket.Lower, bucket.Upper, false, isCustomBucket) - updatedSum += bucketMidpoint * bucket.Count - - case bucket.Upper > rhs: - // Bucket contains the trim point - interpolate. 
- keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, false, isCustomBucket) - - updatedCount += keepCount - updatedSum += bucketMidpoint * keepCount - if trimmedHist.NegativeBuckets[i] != keepCount { - trimmedHist.NegativeBuckets[i] = keepCount - trimmedBuckets = true - } - - default: - trimmedHist.NegativeBuckets[i] = 0 - trimmedBuckets = true - } - } - } - - // Handle the zero count bucket. - if trimmedHist.ZeroCount > 0 { - keepCount, bucketMidpoint := computeZeroBucketTrim(trimmedHist.ZeroBucket(), rhs, hasNegative, hasPositive, isUpperTrim) - - if trimmedHist.ZeroCount != keepCount { - trimmedHist.ZeroCount = keepCount - trimmedBuckets = true - } - updatedSum += bucketMidpoint * keepCount - updatedCount += keepCount - } - - if trimmedBuckets { - // Only update the totals in case some bucket(s) were fully (or partially) trimmed. - trimmedHist.Count = updatedCount - trimmedHist.Sum = updatedSum - - trimmedHist.Compact(0) - } -} - -func computeMidpoint(survivingIntervalLowerBound, survivingIntervalUpperBound float64, isPositive, isLinear bool) float64 { - if math.IsInf(survivingIntervalLowerBound, 0) { - if math.IsInf(survivingIntervalUpperBound, 0) { - return 0 - } - if survivingIntervalUpperBound > 0 { - return survivingIntervalUpperBound / 2 - } - return survivingIntervalUpperBound - } else if math.IsInf(survivingIntervalUpperBound, 0) { - return survivingIntervalLowerBound - } - - if isLinear { - return (survivingIntervalLowerBound + survivingIntervalUpperBound) / 2 - } - - geoMean := math.Sqrt(math.Abs(survivingIntervalLowerBound * survivingIntervalUpperBound)) - - if isPositive { - return geoMean - } - return -geoMean -} - // vectorElemBinop evaluates a binary operation between two Vector elements. 
func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram, pos posrange.PositionRange) (res float64, resH *histogram.FloatHistogram, keep bool, info, err error) { switch { @@ -3568,13 +3237,9 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram case parser.DIV: return 0, hlhs.Copy().Div(rhs).Compact(0), true, nil, nil case parser.TRIM_UPPER: - trimmedHist := hlhs.Copy() - trimHistogram(trimmedHist, rhs, true) - return 0, trimmedHist, true, nil, nil + return 0, hlhs.TrimBuckets(rhs, true), true, nil, nil case parser.TRIM_LOWER: - trimmedHist := hlhs.Copy() - trimHistogram(trimmedHist, rhs, false) - return 0, trimmedHist, true, nil, nil + return 0, hlhs.TrimBuckets(rhs, false), true, nil, nil case parser.ADD, parser.SUB, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("histogram", parser.ItemTypeStr[op], "float", pos) } From 333e0dc188cd58fb77c76e1a15849bd10939dc2d Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Wed, 25 Feb 2026 13:22:13 +0000 Subject: [PATCH 39/39] tests: reinforce and optimize queue_manager_test createTimeSeries (#18179) * tests: fix createTimeSeries so it does not create unnecessary load 16M samples 4k series Signed-off-by: bwplotka * addressed comments Signed-off-by: bwplotka * Apply suggestions from code review Co-authored-by: George Krajcsovits Signed-off-by: Bartlomiej Plotka --------- Signed-off-by: bwplotka Signed-off-by: Bartlomiej Plotka Co-authored-by: George Krajcsovits --- storage/remote/queue_manager_test.go | 668 ++++++++++++--------------- 1 file changed, 306 insertions(+), 362 deletions(-) diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index a4b05d387a..210a61a287 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -27,7 +27,6 @@ import ( 
"time" "github.com/gogo/protobuf/proto" - "github.com/google/go-cmp/cmp" remoteapi "github.com/prometheus/client_golang/exp/api/remote" "github.com/prometheus/client_golang/prometheus" client_testutil "github.com/prometheus/client_golang/prometheus/testutil" @@ -69,6 +68,7 @@ func newHighestTimestampMetric() *maxTimestamp { func TestBasicContentNegotiation(t *testing.T) { t.Parallel() + queueConfig := config.DefaultQueueConfig queueConfig.BatchSendDeadline = model.Duration(100 * time.Millisecond) queueConfig.MaxShards = 1 @@ -139,20 +139,8 @@ func TestBasicContentNegotiation(t *testing.T) { s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) defer s.Close() - var ( - series []record.RefSeries - metadata []record.RefMetadata - samples []record.RefSample - ) + recs := generateRecords(recCase{series: 1, samplesPerSeries: 1}) - // Generates same series in both cases. - samples, series = createTimeseries(1, 1) - metadata = createSeriesMetadata(series) - - // Apply new config. - queueConfig.Capacity = len(samples) - queueConfig.MaxSamplesPerSend = len(samples) - // For now we only ever have a single rw config in this test. conf.RemoteWriteConfigs[0].ProtobufMessage = tc.senderProtoMsg require.NoError(t, s.ApplyConfig(conf)) hash, err := toHash(writeConfig) @@ -163,18 +151,18 @@ func TestBasicContentNegotiation(t *testing.T) { c.injectErrors(tc.injectErrs) qm.SetClient(c) - qm.StoreSeries(series, 0) - qm.StoreMetadata(metadata) + qm.StoreSeries(recs.series, 0) + qm.StoreMetadata(recs.metadata) // Do we expect some data back? if !tc.expectFail { - c.expectSamples(samples, series) + c.expectSamples(recs.samples, recs.series) } else { c.expectSamples(nil, nil) } // Schedule send. - qm.Append(samples) + qm.Append(recs.samples) if !tc.expectFail { // No error expected, so wait for data. 
@@ -201,8 +189,6 @@ func TestBasicContentNegotiation(t *testing.T) { func TestSampleDelivery(t *testing.T) { t.Parallel() - // Let's create an even number of send batches, so we don't run into the - // batch timeout case. n := 3 queueConfig := config.DefaultQueueConfig @@ -221,100 +207,75 @@ func TestSampleDelivery(t *testing.T) { writeConfig, }, } - for _, tc := range []struct { - protoMsg remoteapi.WriteMessageType - name string - samples bool - exemplars bool - histograms bool - floatHistograms bool - }{ - {protoMsg: remoteapi.WriteV1MessageType, samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"}, - {protoMsg: remoteapi.WriteV1MessageType, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"}, - {protoMsg: remoteapi.WriteV1MessageType, samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only"}, - {protoMsg: remoteapi.WriteV1MessageType, samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"}, - {protoMsg: remoteapi.WriteV1MessageType, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"}, + for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { + for _, rc := range []recCase{ + {series: n, samplesPerSeries: n, histogramsPerSeries: 0, floatHistogramsPerSeries: 0, exemplarsPerSeries: 0, name: "samples only"}, + {series: n, samplesPerSeries: 0, histogramsPerSeries: n, floatHistogramsPerSeries: 0, exemplarsPerSeries: 0, name: "histograms only"}, + {series: n, samplesPerSeries: 0, histogramsPerSeries: 0, floatHistogramsPerSeries: n, exemplarsPerSeries: 0, name: "float histograms only"}, + {series: n, samplesPerSeries: 0, histogramsPerSeries: 0, floatHistogramsPerSeries: 0, exemplarsPerSeries: n, name: "exemplars only"}, + {series: n, samplesPerSeries: n, 
histogramsPerSeries: n, floatHistogramsPerSeries: n, exemplarsPerSeries: n, name: "all"}, + } { + t.Run(fmt.Sprintf("proto=%s/case=%s", protoMsg, rc.name), func(t *testing.T) { + dir := t.TempDir() + s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) + defer s.Close() - {protoMsg: remoteapi.WriteV2MessageType, samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"}, - {protoMsg: remoteapi.WriteV2MessageType, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"}, - {protoMsg: remoteapi.WriteV2MessageType, samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only"}, - {protoMsg: remoteapi.WriteV2MessageType, samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"}, - {protoMsg: remoteapi.WriteV2MessageType, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"}, - } { - t.Run(fmt.Sprintf("%s-%s", tc.protoMsg, tc.name), func(t *testing.T) { - dir := t.TempDir() - s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) - defer s.Close() + recs := generateRecords(rc) - var ( - series []record.RefSeries - metadata []record.RefMetadata - samples []record.RefSample - exemplars []record.RefExemplar - histograms []record.RefHistogramSample - floatHistograms []record.RefFloatHistogramSample - ) + var ( + series = recs.series + metadata = recs.metadata + samples = recs.samples + exemplars = recs.exemplars + histograms = recs.histograms + floatHistograms = recs.floatHistograms + ) - // Generates same series in both cases. 
- if tc.samples { - samples, series = createTimeseries(n, n) - } - if tc.exemplars { - exemplars, series = createExemplars(n, n) - } - if tc.histograms { - histograms, _, series = createHistograms(n, n, false) - } - if tc.floatHistograms { - _, floatHistograms, series = createHistograms(n, n, true) - } - metadata = createSeriesMetadata(series) + // Apply new config. + queueConfig.Capacity = n + queueConfig.MaxSamplesPerSend = n / 2 + conf.RemoteWriteConfigs[0].ProtobufMessage = protoMsg + require.NoError(t, s.ApplyConfig(conf)) + hash, err := toHash(writeConfig) + require.NoError(t, err) + qm := s.rws.queues[hash] - // Apply new config. - queueConfig.Capacity = len(samples) - queueConfig.MaxSamplesPerSend = len(samples) / 2 - // For now we only ever have a single rw config in this test. - conf.RemoteWriteConfigs[0].ProtobufMessage = tc.protoMsg - require.NoError(t, s.ApplyConfig(conf)) - hash, err := toHash(writeConfig) - require.NoError(t, err) - qm := s.rws.queues[hash] + c := NewTestWriteClient(protoMsg) + qm.SetClient(c) - c := NewTestWriteClient(tc.protoMsg) - qm.SetClient(c) + qm.StoreSeries(series, 0) + qm.StoreMetadata(metadata) - qm.StoreSeries(series, 0) - qm.StoreMetadata(metadata) + // Send first half of data. 
+ c.expectSamples(samples[:len(samples)/2], series) + c.expectExemplars(exemplars[:len(exemplars)/2], series) + c.expectHistograms(histograms[:len(histograms)/2], series) + c.expectFloatHistograms(floatHistograms[:len(floatHistograms)/2], series) + if protoMsg == remoteapi.WriteV2MessageType && len(metadata) > 0 { + c.expectMetadataForBatch(metadata, series, samples[:len(samples)/2], exemplars[:len(exemplars)/2], histograms[:len(histograms)/2], floatHistograms[:len(floatHistograms)/2]) + } + qm.Append(samples[:len(samples)/2]) + qm.AppendExemplars(exemplars[:len(exemplars)/2]) + qm.AppendHistograms(histograms[:len(histograms)/2]) + qm.AppendFloatHistograms(floatHistograms[:len(floatHistograms)/2]) + c.waitForExpectedData(t, 30*time.Second) - // Send first half of data. - c.expectSamples(samples[:len(samples)/2], series) - c.expectExemplars(exemplars[:len(exemplars)/2], series) - c.expectHistograms(histograms[:len(histograms)/2], series) - c.expectFloatHistograms(floatHistograms[:len(floatHistograms)/2], series) - if tc.protoMsg == remoteapi.WriteV2MessageType && len(metadata) > 0 { - c.expectMetadataForBatch(metadata, series, samples[:len(samples)/2], exemplars[:len(exemplars)/2], histograms[:len(histograms)/2], floatHistograms[:len(floatHistograms)/2]) - } - qm.Append(samples[:len(samples)/2]) - qm.AppendExemplars(exemplars[:len(exemplars)/2]) - qm.AppendHistograms(histograms[:len(histograms)/2]) - qm.AppendFloatHistograms(floatHistograms[:len(floatHistograms)/2]) - c.waitForExpectedData(t, 30*time.Second) - - // Send second half of data. 
- c.expectSamples(samples[len(samples)/2:], series) - c.expectExemplars(exemplars[len(exemplars)/2:], series) - c.expectHistograms(histograms[len(histograms)/2:], series) - c.expectFloatHistograms(floatHistograms[len(floatHistograms)/2:], series) - if tc.protoMsg == remoteapi.WriteV2MessageType && len(metadata) > 0 { - c.expectMetadataForBatch(metadata, series, samples[len(samples)/2:], exemplars[len(exemplars)/2:], histograms[len(histograms)/2:], floatHistograms[len(floatHistograms)/2:]) - } - qm.Append(samples[len(samples)/2:]) - qm.AppendExemplars(exemplars[len(exemplars)/2:]) - qm.AppendHistograms(histograms[len(histograms)/2:]) - qm.AppendFloatHistograms(floatHistograms[len(floatHistograms)/2:]) - c.waitForExpectedData(t, 30*time.Second) - }) + // Send second half of data. + c.expectSamples(samples[len(samples)/2:], series) + c.expectExemplars(exemplars[len(exemplars)/2:], series) + c.expectHistograms(histograms[len(histograms)/2:], series) + c.expectFloatHistograms(floatHistograms[len(floatHistograms)/2:], series) + if protoMsg == remoteapi.WriteV2MessageType && len(metadata) > 0 { + c.expectMetadataForBatch(metadata, series, samples[len(samples)/2:], exemplars[len(exemplars)/2:], histograms[len(histograms)/2:], floatHistograms[len(floatHistograms)/2:]) + } + qm.Append(samples[len(samples)/2:]) + qm.AppendExemplars(exemplars[len(exemplars)/2:]) + qm.AppendHistograms(histograms[len(histograms)/2:]) + qm.AppendFloatHistograms(floatHistograms[len(floatHistograms)/2:]) + c.waitForExpectedData(t, 30*time.Second) + }) + } } } @@ -387,24 +348,26 @@ func TestWALMetadataDelivery(t *testing.T) { }, } - num := 3 - _, series := createTimeseries(0, num) - metadata := createSeriesMetadata(series) + n := 3 + recs := generateRecords(recCase{series: n, samplesPerSeries: n}) require.NoError(t, s.ApplyConfig(conf)) hash, err := toHash(writeConfig) require.NoError(t, err) qm := s.rws.queues[hash] - c := NewTestWriteClient(remoteapi.WriteV1MessageType) + c := 
NewTestWriteClient(remoteapi.WriteV2MessageType) qm.SetClient(c) - qm.StoreSeries(series, 0) - qm.StoreMetadata(metadata) + qm.StoreSeries(recs.series, 0) + qm.StoreMetadata(recs.metadata) - require.Len(t, qm.seriesLabels, num) - require.Len(t, qm.seriesMetadata, num) + require.Len(t, qm.seriesLabels, n) + require.Len(t, qm.seriesMetadata, n) + c.expectSamples(recs.samples, recs.series) + c.expectMetadataForBatch(recs.metadata, recs.series, recs.samples, nil, nil, nil) + qm.Append(recs.samples) c.waitForExpectedData(t, 30*time.Second) } @@ -412,26 +375,24 @@ func TestSampleDeliveryTimeout(t *testing.T) { t.Parallel() for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { - // Let's send one less sample than batch size, and wait the timeout duration - n := 9 - samples, series := createTimeseries(n, n) + recs := generateRecords(recCase{series: 10, samplesPerSeries: 10}) cfg := testDefaultQueueConfig() mcfg := config.DefaultMetadataConfig cfg.MaxShards = 1 c := NewTestWriteClient(protoMsg) m := newTestQueueManager(t, cfg, mcfg, defaultFlushDeadline, c, protoMsg) - m.StoreSeries(series, 0) + m.StoreSeries(recs.series, 0) m.Start() defer m.Stop() // Send the samples twice, waiting for the samples in the meantime. 
- c.expectSamples(samples, series) - m.Append(samples) + c.expectSamples(recs.samples, recs.series) + m.Append(recs.samples) c.waitForExpectedData(t, 30*time.Second) - c.expectSamples(samples, series) - m.Append(samples) + c.expectSamples(recs.samples, recs.series) + m.Append(recs.samples) c.waitForExpectedData(t, 30*time.Second) }) } @@ -443,29 +404,16 @@ func TestSampleDeliveryOrder(t *testing.T) { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { ts := 10 n := config.DefaultQueueConfig.MaxSamplesPerSend * ts - samples := make([]record.RefSample, 0, n) - series := make([]record.RefSeries, 0, n) - for i := range n { - name := fmt.Sprintf("test_metric_%d", i%ts) - samples = append(samples, record.RefSample{ - Ref: chunks.HeadSeriesRef(i), - T: int64(i), - V: float64(i), - }) - series = append(series, record.RefSeries{ - Ref: chunks.HeadSeriesRef(i), - Labels: labels.FromStrings("__name__", name), - }) - } + recs := generateRecords(recCase{series: n, samplesPerSeries: 1}) c, m := newTestClientAndQueueManager(t, defaultFlushDeadline, protoMsg) - c.expectSamples(samples, series) - m.StoreSeries(series, 0) + c.expectSamples(recs.samples, recs.series) + m.StoreSeries(recs.series, 0) m.Start() defer m.Stop() // These should be received by the client. - m.Append(samples) + m.Append(recs.samples) c.waitForExpectedData(t, 30*time.Second) }) } @@ -483,14 +431,15 @@ func TestShutdown(t *testing.T) { mcfg := config.DefaultMetadataConfig m := newTestQueueManager(t, cfg, mcfg, deadline, c, protoMsg) + // Send 2x batch size, so we know it will need at least two sends. n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend - samples, series := createTimeseries(n, n) - m.StoreSeries(series, 0) + recs := generateRecords(recCase{series: n / 1000, samplesPerSeries: 1000}) + m.StoreSeries(recs.series, 0) m.Start() // Append blocks to guarantee delivery, so we do it in the background. 
go func() { - m.Append(samples) + m.Append(recs.samples) }() synctest.Wait() @@ -547,33 +496,35 @@ func TestSeriesReset(t *testing.T) { func TestReshard(t *testing.T) { t.Parallel() + for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { size := 10 // Make bigger to find more races. nSeries := 6 - nSamples := config.DefaultQueueConfig.Capacity * size - samples, series := createTimeseries(nSamples, nSeries) + samplesPerSeries := config.DefaultQueueConfig.Capacity * size + recs := generateRecords(recCase{series: nSeries, samplesPerSeries: samplesPerSeries}) + t.Logf("about to send %v samples", len(recs.samples)) cfg := config.DefaultQueueConfig cfg.MaxShards = 1 c := NewTestWriteClient(protoMsg) m := newTestQueueManager(t, cfg, config.DefaultMetadataConfig, defaultFlushDeadline, c, protoMsg) - c.expectSamples(samples, series) - m.StoreSeries(series, 0) + c.expectSamples(recs.samples, recs.series) + m.StoreSeries(recs.series, 0) m.Start() defer m.Stop() go func() { - for i := 0; i < len(samples); i += config.DefaultQueueConfig.Capacity { - sent := m.Append(samples[i : i+config.DefaultQueueConfig.Capacity]) + for i := 0; i < len(recs.samples); i += config.DefaultQueueConfig.Capacity { + sent := m.Append(recs.samples[i : i+config.DefaultQueueConfig.Capacity]) require.True(t, sent, "samples not sent") time.Sleep(100 * time.Millisecond) } }() - for i := 1; i < len(samples)/config.DefaultQueueConfig.Capacity; i++ { + for i := 1; i < len(recs.samples)/config.DefaultQueueConfig.Capacity; i++ { m.shards.stop() m.shards.start(i) time.Sleep(100 * time.Millisecond) @@ -627,7 +578,7 @@ func TestReshardPartialBatch(t *testing.T) { t.Parallel() for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { - samples, series := createTimeseries(1, 10) + recs := 
generateRecords(recCase{series: 1, samplesPerSeries: 10}) c := NewTestBlockedWriteClient() @@ -639,14 +590,14 @@ func TestReshardPartialBatch(t *testing.T) { cfg.BatchSendDeadline = model.Duration(batchSendDeadline) m := newTestQueueManager(t, cfg, mcfg, flushDeadline, c, protoMsg) - m.StoreSeries(series, 0) + m.StoreSeries(recs.series, 0) m.Start() for range 100 { done := make(chan struct{}) go func() { - m.Append(samples) + m.Append(recs.samples) time.Sleep(batchSendDeadline) m.shards.stop() m.shards.start(1) @@ -672,7 +623,7 @@ func TestReshardPartialBatch(t *testing.T) { func TestQueueFilledDeadlock(t *testing.T) { for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { - samples, series := createTimeseries(50, 1) + recs := generateRecords(recCase{series: 50, samplesPerSeries: 1}) c := NewNopWriteClient() @@ -686,7 +637,7 @@ func TestQueueFilledDeadlock(t *testing.T) { cfg.BatchSendDeadline = model.Duration(batchSendDeadline) m := newTestQueueManager(t, cfg, mcfg, flushDeadline, c, protoMsg) - m.StoreSeries(series, 0) + m.StoreSeries(recs.series, 0) m.Start() defer m.Stop() @@ -694,7 +645,7 @@ func TestQueueFilledDeadlock(t *testing.T) { done := make(chan struct{}) go func() { time.Sleep(batchSendDeadline) - m.Append(samples) + m.Append(recs.samples) done <- struct{}{} }() select { @@ -784,7 +735,7 @@ func TestDisableReshardOnRetry(t *testing.T) { defer onStoreCalled() var ( - fakeSamples, fakeSeries = createTimeseries(100, 100) + recs = generateRecords(recCase{series: 100, samplesPerSeries: 100}) cfg = config.DefaultQueueConfig mcfg = config.DefaultMetadataConfig @@ -807,14 +758,14 @@ func TestDisableReshardOnRetry(t *testing.T) { ) m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, 0, newPool(), newHighestTimestampMetric(), nil, false, false, false, 
remoteapi.WriteV1MessageType) - m.StoreSeries(fakeSeries, 0) + m.StoreSeries(recs.series, 0) // Attempt to samples while the manager is running. We immediately stop the // manager after the recoverable error is generated to prevent the manager // from resharding itself. m.Start() { - m.Append(fakeSamples) + m.Append(recs.samples) select { case <-onStoredContext.Done(): @@ -840,35 +791,132 @@ func TestDisableReshardOnRetry(t *testing.T) { }, time.Minute, retryAfter, "shouldReshard should have been re-enabled") } -func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([]record.RefSample, []record.RefSeries) { - samples := make([]record.RefSample, 0, numSamples) - series := make([]record.RefSeries, 0, numSeries) - lb := labels.NewScratchBuilder(1 + len(extraLabels)) - for i := range numSeries { - name := fmt.Sprintf("test_metric_%d", i) - for j := range numSamples { - samples = append(samples, record.RefSample{ - Ref: chunks.HeadSeriesRef(i), - T: int64(j), - V: float64(i), - }) - } - // Create Labels that is name of series plus any extra labels supplied. 
- lb.Reset() - lb.Add(labels.MetricName, name) - rand.Shuffle(len(extraLabels), func(i, j int) { - extraLabels[i], extraLabels[j] = extraLabels[j], extraLabels[i] - }) - for _, l := range extraLabels { - lb.Add(l.Name, l.Value) - } - lb.Sort() - series = append(series, record.RefSeries{ - Ref: chunks.HeadSeriesRef(i), - Labels: lb.Labels(), - }) +type recCase struct { + name string + + series int + samplesPerSeries int + histogramsPerSeries int + floatHistogramsPerSeries int + exemplarsPerSeries int + + extraLabels []labels.Label + + labelsFn func(lb *labels.ScratchBuilder, i int) labels.Labels + tsFn func(i, j int) int64 +} + +type records struct { + series []record.RefSeries + samples []record.RefSample + histograms []record.RefHistogramSample + floatHistograms []record.RefFloatHistogramSample + exemplars []record.RefExemplar + metadata []record.RefMetadata +} + +func newTestHist(i int) *histogram.Histogram { + return &histogram.Histogram{ + Schema: 2, + ZeroThreshold: 1e-128, + ZeroCount: 0, + Count: 2, + Sum: 0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{int64(i) + 1}, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{int64(-i) - 1}, + } +} + +func generateRecords(c recCase) (ret records) { + ret.series = make([]record.RefSeries, c.series) + ret.metadata = make([]record.RefMetadata, c.series) + ret.samples = make([]record.RefSample, c.series*c.samplesPerSeries) + ret.histograms = make([]record.RefHistogramSample, c.series*c.histogramsPerSeries) + ret.floatHistograms = make([]record.RefFloatHistogramSample, c.series*c.floatHistogramsPerSeries) + ret.exemplars = make([]record.RefExemplar, c.series*c.exemplarsPerSeries) + + if c.labelsFn == nil { + c.labelsFn = func(lb *labels.ScratchBuilder, i int) labels.Labels { + // Create series with labels that contains name of series plus any extra labels supplied. 
+ name := fmt.Sprintf("test_metric_%d", i) + lb.Reset() + lb.Add(model.MetricNameLabel, name) + for _, l := range c.extraLabels { + lb.Add(l.Name, l.Value) + } + lb.Sort() + return lb.Labels() + } + } + if c.tsFn == nil { + c.tsFn = func(_, j int) int64 { return int64(j) } + } + + lb := labels.NewScratchBuilder(1 + len(c.extraLabels)) + for i := range ret.series { + ret.series[i] = record.RefSeries{ + Ref: chunks.HeadSeriesRef(i), + Labels: c.labelsFn(&lb, i), + } + ret.metadata[i] = record.RefMetadata{ + Ref: chunks.HeadSeriesRef(i), + Type: uint8(record.Counter), + Unit: "unit text", + Help: "help text", + } + for j := range c.samplesPerSeries { + ret.samples[i*c.samplesPerSeries+j] = record.RefSample{ + Ref: chunks.HeadSeriesRef(i), + T: c.tsFn(i, j), + V: float64(i), + } + } + h := newTestHist(i) + for j := range c.histogramsPerSeries { + ret.histograms[i*c.histogramsPerSeries+j] = record.RefHistogramSample{ + Ref: chunks.HeadSeriesRef(i), + T: c.tsFn(i, j), + H: h, + } + } + for j := range c.floatHistogramsPerSeries { + ret.floatHistograms[i*c.floatHistogramsPerSeries+j] = record.RefFloatHistogramSample{ + Ref: chunks.HeadSeriesRef(i), + T: c.tsFn(i, j), + FH: h.ToFloat(nil), + } + } + for j := range c.exemplarsPerSeries { + ret.exemplars[i*c.exemplarsPerSeries+j] = record.RefExemplar{ + Ref: chunks.HeadSeriesRef(i), + T: c.tsFn(i, j), + V: float64(i), + Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i)), + } + } + } + return ret +} + +// BenchmarkGenerateRecords checks data generator performance. +// Recommended CLI: +/* + export bench=genRecs && go test ./storage/remote/... \ + -run '^$' -bench '^BenchmarkGenerateRecords' \ + -benchtime 1s -count 6 -cpu 2 -timeout 999m -benchmem \ + | tee ${bench}.txt +*/ +func BenchmarkGenerateRecords(b *testing.B) { + n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend + + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + // This will generate 16M samples and 4k series. 
+ generateRecords(recCase{series: n, samplesPerSeries: n}) } - return samples, series } func createProtoTimeseriesWithOld(numSamples, baseTs int64) []prompb.TimeSeries { @@ -895,88 +943,6 @@ func createProtoTimeseriesWithOld(numSamples, baseTs int64) []prompb.TimeSeries return samples } -func createExemplars(numExemplars, numSeries int) ([]record.RefExemplar, []record.RefSeries) { - exemplars := make([]record.RefExemplar, 0, numExemplars) - series := make([]record.RefSeries, 0, numSeries) - for i := range numSeries { - name := fmt.Sprintf("test_metric_%d", i) - for j := range numExemplars { - e := record.RefExemplar{ - Ref: chunks.HeadSeriesRef(i), - T: int64(j), - V: float64(i), - Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i)), - } - exemplars = append(exemplars, e) - } - series = append(series, record.RefSeries{ - Ref: chunks.HeadSeriesRef(i), - Labels: labels.FromStrings("__name__", name), - }) - } - return exemplars, series -} - -func createHistograms(numSamples, numSeries int, floatHistogram bool) ([]record.RefHistogramSample, []record.RefFloatHistogramSample, []record.RefSeries) { - histograms := make([]record.RefHistogramSample, 0, numSamples) - floatHistograms := make([]record.RefFloatHistogramSample, 0, numSamples) - series := make([]record.RefSeries, 0, numSeries) - for i := range numSeries { - name := fmt.Sprintf("test_metric_%d", i) - for j := range numSamples { - hist := &histogram.Histogram{ - Schema: 2, - ZeroThreshold: 1e-128, - ZeroCount: 0, - Count: 2, - Sum: 0, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, - PositiveBuckets: []int64{int64(i) + 1}, - NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, - NegativeBuckets: []int64{int64(-i) - 1}, - } - - if floatHistogram { - fh := record.RefFloatHistogramSample{ - Ref: chunks.HeadSeriesRef(i), - T: int64(j), - FH: hist.ToFloat(nil), - } - floatHistograms = append(floatHistograms, fh) - } else { - h := record.RefHistogramSample{ - Ref: chunks.HeadSeriesRef(i), - 
T: int64(j), - H: hist, - } - histograms = append(histograms, h) - } - } - series = append(series, record.RefSeries{ - Ref: chunks.HeadSeriesRef(i), - Labels: labels.FromStrings("__name__", name), - }) - } - if floatHistogram { - return nil, floatHistograms, series - } - return histograms, nil, series -} - -func createSeriesMetadata(series []record.RefSeries) []record.RefMetadata { - metas := make([]record.RefMetadata, 0, len(series)) - - for _, s := range series { - metas = append(metas, record.RefMetadata{ - Ref: s.Ref, - Type: uint8(record.Counter), - Unit: "unit text", - Help: "help text", - }) - } - return metas -} - func getSeriesIDFromRef(r record.RefSeries) string { return r.Labels.String() } @@ -1419,7 +1385,7 @@ func BenchmarkSampleSend(b *testing.B) { const numSamples = 1 const numSeries = 10000 - samples, series := createTimeseries(numSamples, numSeries, extraLabels...) + recs := generateRecords(recCase{series: numSeries, samplesPerSeries: numSamples, extraLabels: extraLabels}) c := NewNopWriteClient() @@ -1433,7 +1399,7 @@ func BenchmarkSampleSend(b *testing.B) { for _, format := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { b.Run(string(format), func(b *testing.B) { m := newTestQueueManager(b, cfg, mcfg, defaultFlushDeadline, c, format) - m.StoreSeries(series, 0) + m.StoreSeries(recs.series, 0) // These should be received by the client. m.Start() @@ -1441,8 +1407,8 @@ func BenchmarkSampleSend(b *testing.B) { b.ResetTimer() for i := 0; b.Loop(); i++ { - m.Append(samples) - m.UpdateSeriesSegment(series, i+1) // simulate what wlog.Watcher.garbageCollectSeries does + m.Append(recs.samples) + m.UpdateSeriesSegment(recs.series, i+1) // simulate what wlog.Watcher.garbageCollectSeries does m.SeriesReset(i + 1) } // Do not include shutdown @@ -1484,7 +1450,7 @@ func BenchmarkStoreSeries(b *testing.B) { // numSeries chosen to be big enough that StoreSeries dominates creating a new queue manager. 
 	const numSeries = 1000
-	_, series := createTimeseries(0, numSeries, extraLabels...)
+	recs := generateRecords(recCase{series: numSeries, samplesPerSeries: 0, extraLabels: extraLabels})
 
 	for _, tc := range testCases {
 		b.Run(tc.name, func(b *testing.B) {
@@ -1499,7 +1465,7 @@ func BenchmarkStoreSeries(b *testing.B) {
 				m.externalLabels = tc.externalLabels
 				m.relabelConfigs = tc.relabelConfigs
 
-				m.StoreSeries(series, 0)
+				m.StoreSeries(recs.series, 0)
 			}
 		})
 	}
@@ -2009,7 +1975,25 @@ func TestDropOldTimeSeries(t *testing.T) {
 			size := 10
 			nSeries := 6
 			nSamples := config.DefaultQueueConfig.Capacity * size
-			samples, newSamples, series := createTimeseriesWithOldSamples(nSamples, nSeries)
+			pastRecs := generateRecords(recCase{
+				series:           nSeries,
+				samplesPerSeries: (nSamples / nSeries) / 2, // Half data is past.
+				tsFn: func(_, j int) int64 {
+					past := timestamp.FromTime(time.Now().Add(-5 * time.Minute))
+					return past + int64(j)
+				},
+			})
+			newRecs := generateRecords(recCase{
+				series:           nSeries,
+				samplesPerSeries: (nSamples / nSeries) / 2, // Half data is new.
+				tsFn: func(_, j int) int64 {
+					return time.Now().UnixMilli() + int64(j)
+				},
+			})
+
+			series := pastRecs.series // Series is the same for both old and new.
+			newSamples := newRecs.samples
+			samples := append(pastRecs.samples, newRecs.samples...)
 
 			c := NewTestWriteClient(protoMsg)
 			c.expectSamples(newSamples, series)
@@ -2038,10 +2022,14 @@ func TestIsSampleOld(t *testing.T) {
 // Simulates scenario in which remote write endpoint is down and a subset of samples is dropped due to age limit while backoffing.
func TestSendSamplesWithBackoffWithSampleAgeLimit(t *testing.T) { + const ( + maxSamplesPerSend = 10 + maxLabels = 9 + ) + t.Parallel() for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { - maxSamplesPerSend := 10 sampleAgeLimit := time.Second * 2 cfg := config.DefaultQueueConfig @@ -2063,18 +2051,38 @@ func TestSendSamplesWithBackoffWithSampleAgeLimit(t *testing.T) { m.Start() batchID := 0 - expectedSamples := map[string][]prompb.Sample{} - appendData := func(numberOfSeries int, timeAdd time.Duration, shouldBeDropped bool) { t.Log(">>>> Appending series ", numberOfSeries, " as batch ID ", batchID, " with timeAdd ", timeAdd, " and should be dropped ", shouldBeDropped) - samples, series := createTimeseriesWithRandomLabelCount(strconv.Itoa(batchID), numberOfSeries, timeAdd, 9) - m.StoreSeries(series, batchID) - sent := m.Append(samples) + + // Use a fixed rand source so tests are consistent. 
+ r := rand.New(rand.NewSource(99)) + + recs := generateRecords(recCase{ + series: numberOfSeries, + samplesPerSeries: 1, + tsFn: func(_, _ int) int64 { + return time.Now().Add(timeAdd).UnixMilli() + }, + labelsFn: func(lb *labels.ScratchBuilder, i int) labels.Labels { + lb.Reset() + labelsCount := r.Intn(maxLabels) + lb.Add("__name__", "batch_"+strconv.Itoa(batchID)+"_id_"+strconv.Itoa(i)) + for j := 1; j < labelsCount+1; j++ { + // same for both name and value + label := "batch_" + strconv.Itoa(batchID) + "_label_" + strconv.Itoa(j) + lb.Add(label, label) + } + return lb.Labels() + }, + }) + + m.StoreSeries(recs.series, batchID) + sent := m.Append(recs.samples) require.True(t, sent, "samples not sent") if !shouldBeDropped { - for _, s := range samples { - tsID := getSeriesIDFromRef(series[s.Ref]) - expectedSamples[tsID] = append(c.expectedSamples[tsID], prompb.Sample{ + for _, s := range recs.samples { + tsID := getSeriesIDFromRef(recs.series[s.Ref]) + c.expectedSamples[tsID] = append(c.expectedSamples[tsID], prompb.Sample{ Timestamp: s.T, Value: s.V, }) @@ -2084,95 +2092,30 @@ func TestSendSamplesWithBackoffWithSampleAgeLimit(t *testing.T) { } timeShift := -time.Millisecond * 5 + // Inject RW error. c.SetReturnError(RecoverableError{context.DeadlineExceeded, defaultBackoff}) - + // Send current samples in various intervals. appendData(maxSamplesPerSend/2, timeShift, true) time.Sleep(sampleAgeLimit) appendData(maxSamplesPerSend/2, timeShift, true) time.Sleep(sampleAgeLimit / 10) appendData(maxSamplesPerSend/2, timeShift, true) time.Sleep(2 * sampleAgeLimit) + // Eventually all the above data must be ignored as 2x sampleAgeLimit passed. + + // Now send, quickly re-enable RW target and send another batch. + // We should expect all the data from those two below batches. 
appendData(2*maxSamplesPerSend, timeShift, false) time.Sleep(sampleAgeLimit / 2) c.SetReturnError(nil) appendData(5, timeShift, false) m.Stop() - if diff := cmp.Diff(expectedSamples, c.receivedSamples); diff != "" { - t.Errorf("mismatch (-want +got):\n%s", diff) - } + require.Equal(t, c.expectedSamples, c.receivedSamples) }) } } -func createTimeseriesWithRandomLabelCount(id string, seriesCount int, timeAdd time.Duration, maxLabels int) ([]record.RefSample, []record.RefSeries) { - samples := []record.RefSample{} - series := []record.RefSeries{} - // use a fixed rand source so tests are consistent - r := rand.New(rand.NewSource(99)) - for i := range seriesCount { - s := record.RefSample{ - Ref: chunks.HeadSeriesRef(i), - T: time.Now().Add(timeAdd).UnixMilli(), - V: r.Float64(), - } - samples = append(samples, s) - labelsCount := r.Intn(maxLabels) - lb := labels.NewScratchBuilder(1 + labelsCount) - lb.Add("__name__", "batch_"+id+"_id_"+strconv.Itoa(i)) - for j := 1; j < labelsCount+1; j++ { - // same for both name and value - label := "batch_" + id + "_label_" + strconv.Itoa(j) - lb.Add(label, label) - } - series = append(series, record.RefSeries{ - Ref: chunks.HeadSeriesRef(i), - Labels: lb.Labels(), - }) - } - return samples, series -} - -func createTimeseriesWithOldSamples(numSamples, numSeries int, extraLabels ...labels.Label) ([]record.RefSample, []record.RefSample, []record.RefSeries) { - newSamples := make([]record.RefSample, 0, numSamples) - samples := make([]record.RefSample, 0, numSamples) - series := make([]record.RefSeries, 0, numSeries) - lb := labels.NewScratchBuilder(1 + len(extraLabels)) - for i := range numSeries { - name := fmt.Sprintf("test_metric_%d", i) - // We create half of the samples in the past. 
- past := timestamp.FromTime(time.Now().Add(-5 * time.Minute)) - for j := 0; j < numSamples/2; j++ { - samples = append(samples, record.RefSample{ - Ref: chunks.HeadSeriesRef(i), - T: past + int64(j), - V: float64(i), - }) - } - for j := 0; j < numSamples/2; j++ { - sample := record.RefSample{ - Ref: chunks.HeadSeriesRef(i), - T: time.Now().UnixMilli() + int64(j), - V: float64(i), - } - samples = append(samples, sample) - newSamples = append(newSamples, sample) - } - // Create Labels that is name of series plus any extra labels supplied. - lb.Reset() - lb.Add(labels.MetricName, name) - for _, l := range extraLabels { - lb.Add(l.Name, l.Value) - } - lb.Sort() - series = append(series, record.RefSeries{ - Ref: chunks.HeadSeriesRef(i), - Labels: lb.Labels(), - }) - } - return samples, newSamples, series -} - func filterTsLimit(limit int64, ts prompb.TimeSeries) bool { return limit > ts.Samples[0].Timestamp } @@ -2662,7 +2605,7 @@ func TestHighestTimestampOnAppend(t *testing.T) { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { nSamples := 11 * config.DefaultQueueConfig.Capacity nSeries := 3 - samples, series := createTimeseries(nSamples, nSeries) + recs := generateRecords(recCase{series: nSeries, samplesPerSeries: nSamples / nSeries}) _, m := newTestClientAndQueueManager(t, defaultFlushDeadline, protoMsg) m.Start() @@ -2670,13 +2613,14 @@ func TestHighestTimestampOnAppend(t *testing.T) { require.Equal(t, 0.0, m.metrics.highestTimestamp.Get()) - m.StoreSeries(series, 0) - require.True(t, m.Append(samples)) + m.StoreSeries(recs.series, 0) + require.True(t, m.Append(recs.samples)) // Check that Append sets the highest timestamp correctly. - highestTs := float64((nSamples - 1) / 1000) - require.Greater(t, highestTs, 0.0) - require.Equal(t, highestTs, m.metrics.highestTimestamp.Get()) + // NOTE: generateRecords yields nSamples/nSeries samples (36666), with timestamp. + // This gives the highest timestamp of 36666/1000 (seconds). 
+ const expectedHighestTsSeconds = 36.0 + require.Equal(t, expectedHighestTsSeconds, m.metrics.highestTimestamp.Get()) }) } }