# Minimal valid case: an empty histogram.
load 5m
  empty_histogram {{}}

eval instant at 1m empty_histogram
  {__name__="empty_histogram"} {{}}

eval instant at 1m histogram_count(empty_histogram)
  {} 0

eval instant at 1m histogram_sum(empty_histogram)
  {} 0

eval instant at 1m histogram_avg(empty_histogram)
  {} NaN

eval instant at 1m histogram_fraction(-Inf, +Inf, empty_histogram)
  {} NaN

eval instant at 1m histogram_fraction(0, 8, empty_histogram)
  {} NaN

clear

# buckets:[1 2 1] means 1 observation in the 1st bucket, 2 observations in the 2nd and 1 observation in the 3rd (total 4).
load 5m
  single_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}

# histogram_count extracts the count property from the histogram.
eval instant at 1m histogram_count(single_histogram)
  {} 4

# histogram_sum extracts the sum property from the histogram.
eval instant at 1m histogram_sum(single_histogram)
  {} 5

# histogram_avg calculates the average from sum and count properties.
eval instant at 1m histogram_avg(single_histogram)
  {} 1.25

# We expect half of the values to fall in the range 1 < x <= 2.
eval instant at 1m histogram_fraction(1, 2, single_histogram)
  {} 0.5

# We expect all values to fall in the range 0 < x <= 8.
eval instant at 1m histogram_fraction(0, 8, single_histogram)
  expect no_info
  {} 1

# Median is 1.414213562373095 (2**2**-1, or sqrt(2)) due to
# exponential interpolation, i.e. the "midpoint" within range 1 < x <=
# 2 is assumed where the bucket boundary would be if we increased the
# resolution of the histogram by one step.
eval instant at 1m histogram_quantile(0.5, single_histogram)
  expect no_info
  {} 1.414213562373095

eval instant at 1m histogram_quantiles(single_histogram, "q", 0.5)
  expect no_info
  {q="0.5"} 1.414213562373095

clear

# Repeat the same histogram 10 times.
load 5m
  multi_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}x10 {{schema:0 sum:5 count:4 buckets:[1 2 1]}}+{{}}x10

eval instant at 5m histogram_count(multi_histogram)
  {} 4

eval instant at 5m histogram_sum(multi_histogram)
  {} 5

eval instant at 5m histogram_avg(multi_histogram)
  {} 1.25

eval instant at 5m histogram_fraction(1, 2, multi_histogram)
  {} 0.5

# See explanation for exponential interpolation above.
eval instant at 5m histogram_quantile(0.5, multi_histogram)
  {} 1.414213562373095

# Each entry should look the same as the first.
eval instant at 50m histogram_count(multi_histogram)
  {} 4

eval instant at 50m histogram_sum(multi_histogram)
  {} 5

eval instant at 50m histogram_avg(multi_histogram)
  {} 1.25

eval instant at 50m histogram_fraction(1, 2, multi_histogram)
  {} 0.5

# See explanation for exponential interpolation above.
eval instant at 50m histogram_quantile(0.5, multi_histogram)
  {} 1.414213562373095

clear

# Accumulate the histogram addition for 10 iterations, offset is a bucket position where offset:0 is always the bucket
# with an upper limit of 1 and offset:1 is the bucket which follows to the right. Negative offsets represent bucket
# positions for upper limits <1 (tending toward zero), where offset:-1 is the bucket to the left of offset:0.
load 5m
  incr_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{sum:2 count:1 buckets:[1] offset:1}}x10

eval instant at 5m histogram_count(incr_histogram)
  {} 5

eval instant at 5m histogram_sum(incr_histogram)
  {} 6

eval instant at 5m histogram_avg(incr_histogram)
  {} 1.2

# We expect 3/5ths of the values to fall in the range 1 < x <= 2.
eval instant at 5m histogram_fraction(1, 2, incr_histogram)
  {} 0.6

# See explanation for exponential interpolation above.
eval instant at 5m histogram_quantile(0.5, incr_histogram)
  {} 1.414213562373095

eval instant at 50m incr_histogram
  {__name__="incr_histogram"} {{count:14 sum:24 buckets:[1 12 1]}}

eval instant at 50m histogram_count(incr_histogram)
  {} 14

eval instant at 50m histogram_sum(incr_histogram)
  {} 24

eval instant at 50m histogram_avg(incr_histogram)
  {} 1.7142857142857142

# We expect 12/14ths of the values to fall in the range 1 < x <= 2.
eval instant at 50m histogram_fraction(1, 2, incr_histogram)
  {} 0.8571428571428571

# See explanation for exponential interpolation above.
eval instant at 50m histogram_quantile(0.5, incr_histogram)
  {} 1.414213562373095

# Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum.
eval instant at 50m rate(incr_histogram[10m])
  expect no_warn
  {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}

# Calculate the 50th percentile of observations over the last 10m.
# See explanation for exponential interpolation above.
eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m]))
  expect no_warn
  {} 1.414213562373095

clear

# Schema represents the histogram resolution, different schema have compatible bucket boundaries, e.g.:
# 0: 1 2 4 8 16 32 64 (higher resolution)
# -1: 1 4 16 64 (lower resolution)
#
# Histograms can be merged as long as the histogram to the right is same resolution or higher.
load 5m
  low_res_histogram {{schema:-1 sum:4 count:1 buckets:[1] offset:1}}+{{schema:0 sum:4 count:4 buckets:[2 2] offset:1}}x1

eval instant at 5m low_res_histogram
  {__name__="low_res_histogram"} {{schema:-1 count:5 sum:8 offset:1 buckets:[5]}}

eval instant at 5m histogram_count(low_res_histogram)
  {} 5

eval instant at 5m histogram_sum(low_res_histogram)
  {} 8

eval instant at 5m histogram_avg(low_res_histogram)
  {} 1.6

# We expect all values to fall into the lower-resolution bucket with the range 1 < x <= 4.
eval instant at 5m histogram_fraction(1, 4, low_res_histogram)
  {} 1

clear

# z_bucket:1 means there is one observation in the zero bucket and z_bucket_w:0.5 means the zero bucket has the range
# 0 < x <= 0.5. Sum and count are expected to represent all observations in the histogram, including those in the zero bucket.
load 5m
  single_zero_histogram {{schema:0 z_bucket:1 z_bucket_w:0.5 sum:0.25 count:1}}

eval instant at 1m histogram_count(single_zero_histogram)
  {} 1

eval instant at 1m histogram_sum(single_zero_histogram)
  {} 0.25

eval instant at 1m histogram_avg(single_zero_histogram)
  {} 0.25

# When only the zero bucket is populated, or there are negative buckets, the distribution is assumed to be equally
# distributed around zero; i.e. that there are an equal number of positive and negative observations. Therefore the
# entire distribution must lie within the full range of the zero bucket, in this case: -0.5 < x <= +0.5.
eval instant at 1m histogram_fraction(-0.5, 0.5, single_zero_histogram)
  {} 1

# Half of the observations are estimated to be zero, as this is the midpoint between -0.5 and +0.5.
eval instant at 1m histogram_quantile(0.5, single_zero_histogram)
  {} 0

clear

# Let's turn single_histogram upside-down.
load 5m
  negative_histogram {{schema:0 sum:-5 count:4 n_buckets:[1 2 1]}}

eval instant at 1m histogram_count(negative_histogram)
  {} 4

eval instant at 1m histogram_sum(negative_histogram)
  {} -5

eval instant at 1m histogram_avg(negative_histogram)
  {} -1.25

# We expect half of the values to fall in the range -2 < x <= -1.
eval instant at 1m histogram_fraction(-2, -1, negative_histogram)
  {} 0.5

# Exponential interpolation works the same as for positive buckets, just mirrored.
eval instant at 1m histogram_quantile(0.5, negative_histogram)
  {} -1.414213562373095

clear

# Two histogram samples.
load 5m
  two_samples_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}} {{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}

# We expect to see the newest sample.
eval instant at 5m histogram_count(two_samples_histogram)
  {} 4

eval instant at 5m histogram_sum(two_samples_histogram)
  {} -4

eval instant at 5m histogram_avg(two_samples_histogram)
  {} -1

eval instant at 5m histogram_fraction(-2, -1, two_samples_histogram)
  {} 0.5

# See explanation for exponential interpolation above.
eval instant at 5m histogram_quantile(0.5, two_samples_histogram)
  {} -1.414213562373095

clear

# Add two histograms with negated data.
load 5m
  balanced_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}x1

eval instant at 5m histogram_count(balanced_histogram)
  {} 8

eval instant at 5m histogram_sum(balanced_histogram)
  {} 0

eval instant at 5m histogram_avg(balanced_histogram)
  {} 0

eval instant at 5m histogram_fraction(0, 4, balanced_histogram)
  {} 0.5

# If the quantile happens to be located in a span of empty buckets, the actually returned value is the lower bound of
# the first populated bucket after the span of empty buckets.
eval instant at 5m histogram_quantile(0.5, balanced_histogram)
  {} 0.5

clear

# Add histogram to test sum(last_over_time) regression
load 5m
  incr_sum_histogram{number="1"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:1 count:1 buckets:[1]}}x10
  incr_sum_histogram{number="2"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:2 count:1 buckets:[1]}}x10

eval instant at 50m histogram_sum(sum(incr_sum_histogram))
  {} 30

eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m])))
  {} 30

clear

# Apply rate function to histogram.
load 15s
  histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100

eval instant at 5m rate(histogram_rate[45s])
  expect no_warn
  {} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}

eval range from 5m to 5m30s step 30s rate(histogram_rate[45s])
  expect no_warn
  {} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1

clear

# Apply count and sum function to histogram.
load 10m
  histogram_count_sum_2 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1

eval instant at 10m histogram_count(histogram_count_sum_2)
  {} 24

eval instant at 10m histogram_sum(histogram_count_sum_2)
  {} 100

clear

# Apply stddev and stdvar function to histogram with {1, 2, 3, 4} (low res).
load 10m
  histogram_stddev_stdvar_1 {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}x1

eval instant at 10m histogram_stddev(histogram_stddev_stdvar_1)
  {} 1.0787993180043811

eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_1)
  {} 1.163807968526718

clear

# Apply stddev and stdvar function to histogram with {1, 1, 1, 1} (high res).
load 10m
  histogram_stddev_stdvar_2 {{schema:8 count:10 sum:10 buckets:[1 2 3 4]}}x1

eval instant at 10m histogram_stddev(histogram_stddev_stdvar_2)
  {} 0.0048960313898237465

eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_2)
  {} 2.3971123370139447e-05

clear

# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9}.
load 10m
  histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1

eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3)
  {} 42.94723640026

eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3)
  {} 1844.4651144196398

clear

# Apply stddev and stdvar function to histogram with {-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3}.
load 10m
  histogram_stddev_stdvar_4 {{schema:0 count:10 sum:-112946 z_bucket:0 n_buckets:[0 0 1 1 1 0 1 1 0 0 3 0 0 0 1 0 0 1]}}x1

eval instant at 10m histogram_stddev(histogram_stddev_stdvar_4)
  {} 27556.344499842

eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_4)
  {} 759352122.1939945

clear

# Apply stddev and stdvar function to histogram with {-10x10}.
load 10m
  histogram_stddev_stdvar_5 {{schema:0 count:10 sum:-100 z_bucket:0 n_buckets:[0 0 0 0 10]}}x1

eval instant at 10m histogram_stddev(histogram_stddev_stdvar_5)
  {} 1.3137084989848

eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_5)
  {} 1.725830020304794

clear

# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, NaN}.
load 10m
  histogram_stddev_stdvar_6 {{schema:3 count:7 sum:NaN z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1

eval instant at 10m histogram_stddev(histogram_stddev_stdvar_6)
  {} NaN

eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_6)
  {} NaN

clear

# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, Inf}.
load 10m
  histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1

eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7)
  {} Inf

eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7)
  {} Inf

clear

# Apply quantile function to histogram with all positive buckets with zero bucket.
load 10m
  histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1

eval instant at 10m histogram_quantile(1.001, histogram_quantile_1)
  expect warn
  {} Inf

eval instant at 10m histogram_quantile(1, histogram_quantile_1)
  expect no_warn
  {} 16

# The following quantiles are within a bucket. Exponential
# interpolation is applied (rather than linear, as it is done for
# classic histograms), leading to slightly different quantile values.
eval instant at 10m histogram_quantile(0.99, histogram_quantile_1)
  expect no_warn
  {} 15.67072476139083

eval instant at 10m histogram_quantile(0.9, histogram_quantile_1)
  expect no_warn
  {} 12.99603834169977

eval instant at 10m histogram_quantile(0.6, histogram_quantile_1)
  expect no_warn
  {} 4.594793419988138

eval instant at 10m histogram_quantile(0.5, histogram_quantile_1)
  expect no_warn
  {} 1.5874010519681994

# Linear interpolation within the zero bucket after all.
eval instant at 10m histogram_quantile(0.1, histogram_quantile_1)
  expect no_warn
  {} 0.0006

eval instant at 10m histogram_quantile(0, histogram_quantile_1)
  expect no_warn
  {} 0

eval instant at 10m histogram_quantile(-1, histogram_quantile_1)
  expect warn
  {} -Inf

clear

# Apply quantile function to histogram with all negative buckets with zero bucket.
load 10m
  histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1

eval instant at 10m histogram_quantile(1.001, histogram_quantile_2)
  expect warn
  {} Inf

eval instant at 10m histogram_quantile(1, histogram_quantile_2)
  expect no_warn
  {} 0

# Again, the quantile values here are slightly different from what
# they would be with linear interpolation. Note that quantiles
# ending up in the zero bucket are linearly interpolated after all.
eval instant at 10m histogram_quantile(0.99, histogram_quantile_2)
  expect no_warn
  {} -0.00006

eval instant at 10m histogram_quantile(0.9, histogram_quantile_2)
  expect no_warn
  {} -0.0006

eval instant at 10m histogram_quantile(0.5, histogram_quantile_2)
  expect no_warn
  {} -1.5874010519681996

eval instant at 10m histogram_quantile(0.1, histogram_quantile_2)
  expect no_warn
  {} -12.996038341699768

eval instant at 10m histogram_quantile(0, histogram_quantile_2)
  expect no_warn
  {} -16

eval instant at 10m histogram_quantile(-1, histogram_quantile_2)
  expect warn
  {} -Inf

clear

# Apply quantile function to histogram with both positive and negative
# buckets with zero bucket.
# First positive buckets with exponential interpolation.
load 10m
  histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1

eval instant at 10m histogram_quantile(1.001, histogram_quantile_3)
  expect warn
  {} Inf

eval instant at 10m histogram_quantile(1, histogram_quantile_3)
  expect no_warn
  {} 16

eval instant at 10m histogram_quantile(0.99, histogram_quantile_3)
  expect no_warn
  {} 15.34822590920423

eval instant at 10m histogram_quantile(0.9, histogram_quantile_3)
  expect no_warn
  {} 10.556063286183155

eval instant at 10m histogram_quantile(0.7, histogram_quantile_3)
  expect no_warn
  {} 1.2030250360821164

# Linear interpolation in the zero bucket, symmetrically centered around
# the zero point.
eval instant at 10m histogram_quantile(0.55, histogram_quantile_3)
  expect no_warn
  {} 0.0006

eval instant at 10m histogram_quantile(0.5, histogram_quantile_3)
  expect no_warn
  {} 0

eval instant at 10m histogram_quantile(0.45, histogram_quantile_3)
  expect no_warn
  {} -0.0006

# Finally negative buckets with mirrored exponential interpolation.
eval instant at 10m histogram_quantile(0.3, histogram_quantile_3)
  expect no_warn
  {} -1.2030250360821169

eval instant at 10m histogram_quantile(0.1, histogram_quantile_3)
  expect no_warn
  {} -10.556063286183155

eval instant at 10m histogram_quantile(0.01, histogram_quantile_3)
  expect no_warn
  {} -15.34822590920423

eval instant at 10m histogram_quantile(0, histogram_quantile_3)
  expect no_warn
  {} -16

eval instant at 10m histogram_quantile(-1, histogram_quantile_3)
  expect warn
  {} -Inf

clear

# Try different schemas. (The interpolation logic must not depend on the schema.)
clear

load 1m
  var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 buckets:[0 5]}}
  var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 buckets:[0 5]}}
  var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 buckets:[0 5]}}

eval instant at 1m histogram_quantile(0.5, var_res_histogram)
  {schema="-1"} 2.0
  {schema="0"} 1.4142135623730951
  {schema="+1"} 1.189207

eval instant at 1m histogram_fraction(0, 2, var_res_histogram{schema="-1"})
  {schema="-1"} 0.5

eval instant at 1m histogram_fraction(0, 1.4142135623730951, var_res_histogram{schema="0"})
  {schema="0"} 0.5

eval instant at 1m histogram_fraction(0, 1.189207, var_res_histogram{schema="+1"})
  {schema="+1"} 0.5

# The same as above, but one bucket "further to the right".
clear

load 1m
  var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 buckets:[0 0 5]}}
  var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 buckets:[0 0 5]}}
  var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 buckets:[0 0 5]}}

eval instant at 1m histogram_quantile(0.5, var_res_histogram)
  {schema="-1"} 8.0
  {schema="0"} 2.82842712474619
  {schema="+1"} 1.6817928305074292

eval instant at 1m histogram_fraction(0, 8, var_res_histogram{schema="-1"})
  {schema="-1"} 0.5

eval instant at 1m histogram_fraction(0, 2.82842712474619, var_res_histogram{schema="0"})
  {schema="0"} 0.5

eval instant at 1m histogram_fraction(0, 1.6817928305074292, var_res_histogram{schema="+1"})
  {schema="+1"} 0.5

# And everything again but for negative buckets.
clear

load 1m
  var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 n_buckets:[0 5]}}
  var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 n_buckets:[0 5]}}
  var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 n_buckets:[0 5]}}

eval instant at 1m histogram_quantile(0.5, var_res_histogram)
  {schema="-1"} -2.0
  {schema="0"} -1.4142135623730951
  {schema="+1"} -1.189207

eval instant at 1m histogram_fraction(-2, 0, var_res_histogram{schema="-1"})
  {schema="-1"} 0.5

eval instant at 1m histogram_fraction(-1.4142135623730951, 0, var_res_histogram{schema="0"})
  {schema="0"} 0.5

eval instant at 1m histogram_fraction(-1.189207, 0, var_res_histogram{schema="+1"})
  {schema="+1"} 0.5

clear

load 1m
  var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 n_buckets:[0 0 5]}}
  var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 n_buckets:[0 0 5]}}
  var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 n_buckets:[0 0 5]}}

eval instant at 1m histogram_quantile(0.5, var_res_histogram)
  {schema="-1"} -8.0
  {schema="0"} -2.82842712474619
  {schema="+1"} -1.6817928305074292

eval instant at 1m histogram_fraction(-8, 0, var_res_histogram{schema="-1"})
  {schema="-1"} 0.5

eval instant at 1m histogram_fraction(-2.82842712474619, 0, var_res_histogram{schema="0"})
  {schema="0"} 0.5

eval instant at 1m histogram_fraction(-1.6817928305074292, 0, var_res_histogram{schema="+1"})
  {schema="+1"} 0.5

# Apply fraction function to empty histogram.
load 10m
  histogram_fraction_1 {{}}x1

eval instant at 10m histogram_fraction(3.1415, 42, histogram_fraction_1)
  {} NaN

clear

# Apply fraction function to histogram with positive and zero buckets.
load 10m
  histogram_fraction_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1

eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_2)
  {} 1

eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_2)
  {} 0

eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_2)
  {} 0

eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_2)
  {} 0.16666666666666666

# Note that this result and the one above add up to 1.
eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2)
  {} 0.8333333333333334

# We are in the zero bucket, resulting in linear interpolation
eval instant at 10m histogram_fraction(0, 0.0005, histogram_fraction_2)
  {} 0.08333333333333333

# Demonstrate that the inverse operation with histogram_quantile yields
# the original value with the non-trivial result above.
eval instant at 10m histogram_quantile(0.08333333333333333, histogram_fraction_2)
  {} 0.0005

eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2)
  {} 0

eval instant at 10m histogram_fraction(1, 2, histogram_fraction_2)
  {} 0.25

# More non-trivial results with interpolation involved below, including
# some round-trips via histogram_quantile to prove that the inverse
# operation leads to the same results.
eval instant at 10m histogram_fraction(0, 1.5, histogram_fraction_2)
  {} 0.4795739585136224

eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_2)
  {} 0.10375937481971091

eval instant at 10m histogram_fraction(1, 8, histogram_fraction_2)
  {} 0.3333333333333333

eval instant at 10m histogram_fraction(0, 6, histogram_fraction_2)
  {} 0.6320802083934297

eval instant at 10m histogram_quantile(0.6320802083934297, histogram_fraction_2)
  {} 6

eval instant at 10m histogram_fraction(1, 6, histogram_fraction_2)
  {} 0.29874687506009634

eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_2)
  {} 0.15250624987980724

eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_2)
  {} 0

eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_2)
  {} 0

eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_2)
  {} 0

eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_2)
  {} 0

eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_2)
  {} 0

eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_2)
  {} 0

eval instant at 10m histogram_fraction(0, 0, histogram_fraction_2)
  {} 0

eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_2)
  {} 0

eval instant at 10m histogram_fraction(42, 42, histogram_fraction_2)
  {} 0

eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_2)
  {} 0

eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_2)
  {} NaN

eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_2)
  {} NaN

eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_2)
  {} NaN

eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_2)
  {} 1

# Apply fraction function to histogram with negative and zero buckets.
load 10m
  histogram_fraction_3 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1

eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_3)
  {} 0

eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_3)
  {} 1

eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_3)
  {} 0.16666666666666666

eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_3)
  {} 0

eval instant at 10m histogram_fraction(-0.0005, 0, histogram_fraction_3)
  {} 0.08333333333333333

eval instant at 10m histogram_fraction(-inf, -0.0005, histogram_fraction_3)
  {} 0.9166666666666666

eval instant at 10m histogram_quantile(0.9166666666666666, histogram_fraction_3)
  {} -0.0005

eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_3)
  {} 0

eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_3)
  {} 0.8333333333333334

eval instant at 10m histogram_fraction(1, 2, histogram_fraction_3)
  {} 0

eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_3)
  {} 0

eval instant at 10m histogram_fraction(1, 8, histogram_fraction_3)
  {} 0

eval instant at 10m histogram_fraction(1, 6, histogram_fraction_3)
  {} 0

eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_3)
  {} 0

eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_3)
  {} 0.25

eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_3)
  {} 0.10375937481971091

eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_3)
  {} 0.3333333333333333

eval instant at 10m histogram_fraction(-inf, -6, histogram_fraction_3)
  {} 0.36791979160657035

eval instant at 10m histogram_quantile(0.36791979160657035, histogram_fraction_3)
  {} -6

eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_3)
  {} 0.29874687506009634

eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_3)
  {} 0.15250624987980724

eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_3)
  {} 0

eval instant at 10m histogram_fraction(0, 0, histogram_fraction_3)
  {} 0

eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_3)
  {} 0

eval instant at 10m histogram_fraction(42, 42, histogram_fraction_3)
  {} 0

eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_3)
  {} 0

eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_3)
  {} NaN

eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_3)
  {} NaN

eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_3)
  {} NaN

eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_3)
  {} 1

clear

# Apply fraction function to histogram with both positive, negative and zero buckets.
load 10m
  histogram_fraction_4 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1

eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_4)
  {} 0.5

eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_4)
  {} 0.5

eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_4)
  {} 0.08333333333333333

eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_4)
  {} 0.08333333333333333

eval instant at 10m histogram_fraction(-0.0005, 0.0005, histogram_fraction_4)
  {} 0.08333333333333333

eval instant at 10m histogram_fraction(-inf, 0.0005, histogram_fraction_4)
  {} 0.5416666666666666

eval instant at 10m histogram_quantile(0.5416666666666666, histogram_fraction_4)
  {} 0.0005

eval instant at 10m histogram_fraction(-inf, -0.0005, histogram_fraction_4)
  {} 0.4583333333333333

eval instant at 10m histogram_quantile(0.4583333333333333, histogram_fraction_4)
  {} -0.0005

eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_4)
  {} 0.4166666666666667

eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_4)
  {} 0.4166666666666667

eval instant at 10m histogram_fraction(1, 2, histogram_fraction_4)
  {} 0.125

eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_4)
  {} 0.051879687409855414

eval instant at 10m histogram_fraction(1, 8, histogram_fraction_4)
  {} 0.16666666666666666

eval instant at 10m histogram_fraction(1, 6, histogram_fraction_4)
  {} 0.14937343753004825

eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_4)
  {} 0.07625312493990366

eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_4)
  {} 0.125

eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_4)
  {} 0.051879687409855456

eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_4)
  {} 0.16666666666666666

eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_4)
  {} 0.14937343753004817

eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_4)
  {} 0.07625312493990362

eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_4)
  {} 0

eval instant at 10m histogram_fraction(0, 0, histogram_fraction_4)
  {} 0

eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_4)
  {} 0

eval instant at 10m histogram_fraction(42, 42, histogram_fraction_4)
  {} 0

eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_4)
  {} 0

eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_4)
  {} NaN

eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_4)
  {} NaN

eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_4)
  {} NaN

eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4)
  {} 1

eval instant at 10m histogram_sum(scalar(histogram_fraction(-Inf, +Inf, sum(histogram_fraction_4))) * histogram_fraction_4)
  {} 100

# Apply multiplication and division operator to histogram.
load 10m
  histogram_mul_div {{schema:0 count:30 sum:33 z_bucket:3 z_bucket_w:0.001 buckets:[3 3 3] n_buckets:[6 6 6]}}x1
  float_series_3 3+0x1
  float_series_0 0+0x1

eval instant at 10m histogram_mul_div*3
  expect no_info
  {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}

eval instant at 10m histogram_mul_div*-1
  expect no_info
  {} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}}

eval instant at 10m -histogram_mul_div
  expect no_info
  {} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}}

eval instant at 10m histogram_mul_div*-3
  expect no_info
  {} {{schema:0 count:-90 sum:-99 z_bucket:-9 z_bucket_w:0.001 buckets:[-9 -9 -9] n_buckets:[-18 -18 -18]}}

eval instant at 10m 3*histogram_mul_div
  expect no_info
  {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}

eval instant at 10m histogram_mul_div*float_series_3
  expect no_info
  {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}

eval instant at 10m float_series_3*histogram_mul_div
  expect no_info
  {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}

eval instant at 10m histogram_mul_div/3
  expect no_info
  {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}}

eval instant at 10m histogram_mul_div/-3
  expect no_info
  {} {{schema:0 count:-10 sum:-11 z_bucket:-1 z_bucket_w:0.001 buckets:[-1 -1 -1] n_buckets:[-2 -2 -2]}}

eval instant at 10m histogram_mul_div/float_series_3
  expect no_info
  {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}}

eval instant at 10m histogram_mul_div*0
  expect no_info
  {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}

eval instant at 10m 0*histogram_mul_div
  expect no_info
  {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}

eval instant at 10m histogram_mul_div*float_series_0
  expect no_info
  {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}

eval instant at 10m float_series_0*histogram_mul_div
  expect no_info
  {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}

eval instant at 10m histogram_mul_div/0
  expect no_info
  {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}}

eval instant at 10m histogram_mul_div/float_series_0
  expect no_info
  {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}}

eval instant at 10m histogram_mul_div*0/0
  expect no_info
  {} {{schema:0 count:NaN sum:NaN z_bucket_w:0.001 z_bucket:NaN}}

eval instant at 10m histogram_mul_div*histogram_mul_div
  expect info

eval instant at 10m histogram_mul_div/histogram_mul_div
  expect info

eval instant at 10m float_series_3/histogram_mul_div
  expect info

eval instant at 10m 0/histogram_mul_div
  expect info

clear

# Apply binary operators to mixed histogram and float samples.
# TODO:(NeerajGartia21) move these tests to their respective locations when tests from engine_test.go are moved here.
load 10m
  histogram_sample {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
  float_sample 0x1

eval instant at 10m float_sample+histogram_sample
  expect info

eval instant at 10m histogram_sample+float_sample
  expect info

eval instant at 10m float_sample-histogram_sample
  expect info

eval instant at 10m histogram_sample-float_sample
  expect info

# Counter reset only noticeable in a single bucket.
load 5m
  reset_in_bucket {{schema:0 count:4 sum:5 buckets:[1 2 1]}} {{schema:0 count:5 sum:6 buckets:[1 1 3]}} {{schema:0 count:6 sum:7 buckets:[1 2 3]}}

eval instant at 10m increase(reset_in_bucket[15m])
  expect no_warn
  {} {{count:9 sum:10.5 buckets:[1.5 3 4.5]}}

# The following two test the "fast path" where only sum and count is decoded.
eval instant at 10m histogram_count(increase(reset_in_bucket[15m])) expect no_warn {} 9 eval instant at 10m histogram_sum(increase(reset_in_bucket[15m])) expect no_warn {} 10.5 clear # Test native histograms with custom buckets. load 5m custom_buckets_histogram {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}}x10 eval instant at 5m histogram_fraction(5, 10, custom_buckets_histogram) {} 0.5 eval instant at 5m histogram_quantile(0.5, custom_buckets_histogram) {} 7.5 eval instant at 5m sum(custom_buckets_histogram) {} {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}} clear # Test 'this native histogram metric is not a counter' warning for rate load 30s some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}} # Test the case where we only have two points for rate eval instant at 30s rate(some_metric[1m]) expect warn msg: PromQL warning: this native histogram metric is not a counter: "some_metric" {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} # Test the case where we have more than two points for rate eval instant at 1m rate(some_metric[1m30s]) expect warn msg: PromQL warning: this native histogram metric is not a counter: "some_metric" {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} clear # Test rate() over mixed exponential and custom buckets. load 30s some_metric {{schema:0 sum:1 count:1 buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} # Start and end with exponential, with custom in the middle. 
eval instant at 1m rate(some_metric[1m30s]) expect warn msg: PromQL warning: vector contains a mix of histograms with exponential and custom buckets schemas for metric name "some_metric" # Should produce no results. # Start and end with custom, with exponential in the middle. eval instant at 1m30s rate(some_metric[1m30s]) expect warn msg: PromQL warning: vector contains a mix of histograms with exponential and custom buckets schemas for metric name "some_metric" # Should produce no results. # Start with custom, end with exponential. Return the exponential histogram divided by 48. # (The 1st sample is the NHCB with count:1. It is mostly ignored with the exception of the # count, which means the rate calculation extrapolates until the count hits 0.) eval instant at 1m rate(some_metric[1m]) expect no_warn {} {{count:0.08333333333333333 sum:0.10416666666666666 counter_reset_hint:gauge buckets:[0.020833333333333332 0.041666666666666664 0.020833333333333332]}} # Start with exponential, end with custom. Return the custom buckets histogram divided by 30. # (With the 2nd sample having a count of 1, the extrapolation to zero lands exactly at the # left boundary of the range, so no extrapolation limitation needed.) eval instant at 30s rate(some_metric[1m]) expect no_warn {} {{schema:-53 sum:0.03333333333333333 count:0.03333333333333333 custom_values:[5 10] buckets:[0.03333333333333333]}} clear # Histogram with constant buckets. load 1m const_histogram {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} # There is no change to the bucket count over time, thus rate is 0 in each bucket. # However native histograms do not represent empty buckets, so here the zeros are implicit. 
eval instant at 5m rate(const_histogram[5m]) expect no_warn {} {{schema:0 sum:0 count:0}} # Zero buckets mean no observations, thus the denominator in the average is 0 # leading to 0/0, which is NaN. eval instant at 5m histogram_avg(rate(const_histogram[5m])) expect no_warn {} NaN # Zero buckets mean no observations, so count is 0. eval instant at 5m histogram_count(rate(const_histogram[5m])) expect no_warn {} 0.0 # Zero buckets mean no observations and empty histogram has a sum of 0 by definition. eval instant at 5m histogram_sum(rate(const_histogram[5m])) expect no_warn {} 0.0 # Zero buckets mean no observations, thus the denominator in the fraction is 0, # leading to 0/0, which is NaN. eval instant at 5m histogram_fraction(0.0, 1.0, rate(const_histogram[5m])) expect no_warn {} NaN # Workaround to calculate the observation count corresponding to NaN fraction. eval instant at 5m histogram_count(rate(const_histogram[5m])) == 0.0 or histogram_fraction(0.0, 1.0, rate(const_histogram[5m])) * histogram_count(rate(const_histogram[5m])) expect no_warn {} 0.0 # Zero buckets mean no observations, so there is no value that observations fall below, # which means that any quantile is a NaN. eval instant at 5m histogram_quantile(1.0, rate(const_histogram[5m])) expect no_warn {} NaN # Zero buckets mean no observations, so there is no standard deviation. eval instant at 5m histogram_stddev(rate(const_histogram[5m])) expect no_warn {} NaN # Zero buckets mean no observations, so there is no standard variance. eval instant at 5m histogram_stdvar(rate(const_histogram[5m])) expect no_warn {} NaN clear # Test mixing exponential and custom buckets. 
load 6m metric{series="exponential"} {{sum:4 count:3 buckets:[1 2 1]}} _ {{sum:4 count:3 buckets:[1 2 1]}} metric{series="other-exponential"} {{sum:3 count:2 buckets:[1 1 1]}} _ {{sum:3 count:2 buckets:[1 1 1]}} metric{series="custom"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} metric{series="other-custom"} _ {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} # T=0: only exponential # T=6: only custom # T=12: mixed, should be ignored and emit a warning eval range from 0 to 12m step 6m sum(metric) expect warn {} {{sum:7 count:5 buckets:[2 3 2]}} {{schema:-53 sum:16 count:3 custom_values:[5 10] buckets:[1 2]}} _ eval range from 0 to 12m step 6m avg(metric) expect warn {} {{sum:3.5 count:2.5 buckets:[1 1.5 1]}} {{schema:-53 sum:8 count:1.5 custom_values:[5 10] buckets:[0.5 1]}} _ clear # Test mismatched custom bucket boundaries. load 6m metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[10] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[5] buckets:[1]}} metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[2 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} eval range from 0 to 12m step 6m sum(metric) expect no_warn {} {{schema:-53 count:2 sum:2 custom_values:[10] buckets:[2]}} {{schema:-53 sum:2 count:2 custom_values:[5 10] buckets:[2]}} {{schema:-53 count:3 sum:3 custom_values:[5] buckets:[3]}} eval range from 0 to 12m step 6m avg(metric) expect no_warn {} {{schema:-53 count:1 sum:1 custom_values:[10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 count:1 sum:1 custom_values:[5] buckets:[1]}} # Test 
mismatched boundaries with additional aggregation operators eval range from 0 to 12m step 6m count(metric) {} 2 2 3 eval range from 0 to 12m step 6m group(metric) {} 1 1 1 eval range from 0 to 12m step 6m count(limitk(1, metric)) {} 1 1 1 eval range from 0 to 12m step 6m limitk(3, metric) metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[10] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[5] buckets:[1]}} metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[2 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} eval range from 0 to 12m step 6m limit_ratio(1, metric) metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[10] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[5] buckets:[1]}} metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[2 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} # Test mismatched schemas with and/or eval range from 0 to 12m step 6m metric{series="1"} and ignoring(series) metric{series="2"} metric{series="1"} _ _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} eval range from 0 to 12m step 6m metric{series="1"} or ignoring(series) metric{series="2"} metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[10] buckets:[1]}} _ _ # Test mismatched boundaries with arithmetic binary operators eval range from 0 to 12m step 6m metric{series="2"} + ignoring (series) 
metric{series="3"} expect info msg:PromQL info: mismatched custom buckets were reconciled during addition {} {{schema:-53 count:2 sum:2 custom_values:[10] buckets:[2]}} _ {{schema:-53 count:2 sum:2 custom_values:[5] buckets:[2]}} eval range from 0 to 12m step 6m metric{series="2"} - ignoring (series) metric{series="3"} expect info msg:PromQL info: mismatched custom buckets were reconciled during subtraction {} {{schema:-53 custom_values:[10] counter_reset_hint:gauge}} _ {{schema:-53 custom_values:[5] counter_reset_hint:gauge}} clear # Test mismatched boundaries with comparison binary operators load 6m metric1 {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} metric2 {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} eval range from 0 to 6m step 6m metric1 == metric2 expect no_info metric1{} _ {{schema:-53 count:1 sum:1 custom_values:[5 10] buckets:[1]}} eval range from 0 to 6m step 6m metric1 != metric2 expect no_info metric1{} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ eval range from 0 to 6m step 6m metric2 > metric2 expect info clear load 6m nhcb_metric {{schema:-53 sum:1 count:1 custom_values:[5] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} # If evaluating at 12m, the first two NHCBs have the same custom values # while the 3rd one has different ones. 
eval instant at 12m sum_over_time(nhcb_metric[13m]) expect no_warn expect info msg: PromQL info: mismatched custom buckets were reconciled during aggregation {} {{schema:-53 count:3 sum:3 custom_values:[5] buckets:[3]}} eval instant at 12m avg_over_time(nhcb_metric[13m]) expect no_warn expect info msg: PromQL info: mismatched custom buckets were reconciled during aggregation {} {{schema:-53 count:1 sum:1 custom_values:[5] buckets:[1]}} eval instant at 12m last_over_time(nhcb_metric[13m]) expect no_warn nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} eval instant at 12m count_over_time(nhcb_metric[13m]) expect no_warn {} 3 eval instant at 12m present_over_time(nhcb_metric[13m]) expect no_warn {} 1 eval instant at 12m changes(nhcb_metric[13m]) expect no_warn {} 1 eval instant at 12m delta(nhcb_metric[13m]) expect warn msg: PromQL warning: this native histogram metric is not a gauge: "nhcb_metric" {} {{schema:-53 custom_values:[5]}} eval instant at 12m increase(nhcb_metric[13m]) expect no_warn {} {{schema:-53 custom_values:[5]}} eval instant at 12m rate(nhcb_metric[13m]) expect no_warn {} {{schema:-53 custom_values:[5] }} eval instant at 12m resets(nhcb_metric[13m]) expect no_warn {} 0 # Now doing the same again, but at 18m, where the first NHCB has # different custom_values compared to the other two. 
eval instant at 18m sum_over_time(nhcb_metric[13m]) expect no_warn {} {{schema:-53 count:3 sum:3 custom_values:[5] buckets:[3]}} eval instant at 18m avg_over_time(nhcb_metric[13m]) expect no_warn {} {{schema:-53 count:1 sum:1 custom_values:[5] buckets:[1]}} eval instant at 18m last_over_time(nhcb_metric[13m]) expect no_warn nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} eval instant at 18m count_over_time(nhcb_metric[13m]) expect no_warn {} 3 eval instant at 18m present_over_time(nhcb_metric[13m]) expect no_warn {} 1 eval instant at 18m changes(nhcb_metric[13m]) expect no_warn {} 1 eval instant at 18m delta(nhcb_metric[13m]) expect warn msg: PromQL warning: this native histogram metric is not a gauge: "nhcb_metric" expect info msg: PromQL info: mismatched custom buckets were reconciled during subtraction {} {{schema:-53 custom_values:[5]}} eval instant at 18m increase(nhcb_metric[13m]) expect no_warn {} {{schema:-53 custom_values:[5]}} eval instant at 18m rate(nhcb_metric[13m]) expect no_warn {} {{schema:-53 custom_values:[5]}} eval instant at 18m resets(nhcb_metric[13m]) expect no_warn {} 0 clear load 1m metric{group="just-floats", series="1"} 2 metric{group="just-floats", series="2"} 3 metric{group="just-exponential-histograms", series="1"} {{sum:3 count:4 buckets:[1 2 1]}} metric{group="just-exponential-histograms", series="2"} {{sum:2 count:3 buckets:[1 1 1]}} metric{group="just-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} metric{group="just-custom-histograms", series="2"} {{schema:-53 sum:3 count:4 custom_values:[2] buckets:[7]}} metric{group="floats-and-histograms", series="1"} 2 metric{group="floats-and-histograms", series="2"} {{sum:2 count:3 buckets:[1 1 1]}} metric{group="exponential-and-custom-histograms", series="1"} {{sum:2 count:3 buckets:[1 1 1]}} metric{group="exponential-and-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} 
metric{group="mismatched-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} metric{group="mismatched-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[10] buckets:[1]}} eval instant at 0 sum by (group) (metric) expect warn {group="just-floats"} 5 {group="just-exponential-histograms"} {{sum:5 count:7 buckets:[2 3 2]}} {group="just-custom-histograms"} {{schema:-53 sum:4 count:5 custom_values:[2] buckets:[8]}} {group="mismatched-custom-histograms"} {{schema:-53 count:2 sum:2 custom_values:[10] buckets:[2]}} clear # Test native histograms with sum, count, avg. load 10m histogram_sum{idx="0"} {{schema:0 count:25 sum:3.1 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}}x1 histogram_sum{idx="1"} {{schema:0 count:41 sum:1e100 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 histogram_sum{idx="2"} {{schema:0 count:41 sum:-1e100 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 histogram_sum{idx="3"} {{schema:1 count:0 sum:1.3 z_bucket:3 z_bucket_w:0.001 buckets:[2 4 2 3 2 2] n_buckets:[1 2 5 3 8 1 1 1 1 6 3]}}x1 histogram_sum_float{idx="0"} 42.0x1 eval instant at 10m sum(histogram_sum) expect no_warn {} {{schema:0 count:107 sum:4.4 z_bucket:17 z_bucket_w:0.001 buckets:[5 14 7 7 3 2 2] n_buckets:[3 13 19 6 17 18 0 0 0 10 10 4]}} eval instant at 10m sum({idx="0"}) expect warn eval instant at 10m sum(histogram_sum{idx="0"} + ignoring(idx) histogram_sum{idx="3"}) expect no_warn {} {{schema:0 count:25 sum:4.4 z_bucket:7 z_bucket_w:0.001 buckets:[3 8 5 3 1] n_buckets:[3 11 11 2 3 18]}} # Plain addition doesn't use Kahan summation, so operations involving very large magnitudes # (±1e+100) lose precision. The smaller values are absorbed, leading to an incorrect result. 
# eval instant at 10m sum(histogram_sum{idx="0"} + ignoring(idx) histogram_sum{idx="1"} + ignoring(idx) histogram_sum{idx="2"} + ignoring(idx) histogram_sum{idx="3"}) # expect no_warn # {} {{schema:0 count:107 sum:4.4 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}} eval instant at 10m count(histogram_sum) expect no_warn {} 4 eval instant at 10m avg(histogram_sum) expect no_warn {} {{schema:0 count:26.75 sum:1.1 z_bucket:4.25 z_bucket_w:0.001 buckets:[1.25 3.5 1.75 1.75 0.75 0.5 0.5] n_buckets:[0.75 3.25 4.75 1.5 4.25 4.5 0 0 0 2.5 2.5 1]}} clear # Test native histograms with incremental avg calculation. # Very large floats involved trigger incremental avg calculation, as direct avg calculation would overflow float64. load 10m histogram_avg_incremental{idx="0"} {{schema:0 count:1.7976931348623157e+308 sum:5.30921651659898 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[1.78264E+50 1.78264E+215 1.78264E+219 3363.5121756487] n_buckets:[1178.20696291113 731.697776280323 715.201503759399 1386.11378876781 855.572553278132]}}x1 histogram_avg_incremental{idx="1"} {{schema:0 count:1e308 sum:0.961118537914768 z_bucket:0.76342771 z_bucket_w:0.001 buckets:[0.76342771 0.76342771 0.76342771 195.70084087969] n_buckets:[421.30382970055 0 450441.779]}}x1 histogram_avg_incremental{idx="2"} {{schema:0 count:1e-6 sum:1.62091361305318 z_bucket:1.9592258 z_bucket_w:0.001 buckets:[1.9592258 1.9592258 1.9592258 1135.74692279] n_buckets:[0 4504.41779 588.599358265103 40.3760942760943]}}x1 histogram_avg_incremental{idx="3"} {{schema:0 count:1e-6 sum:0.865089463758091 z_bucket:7.69805412 z_bucket_w:0.001 buckets:[2.258E+220 2.258E+220 2.3757689E+217 1078.68071312804] n_buckets:[349.905284031261 0 0 0.161173466838949 588.599358]}}x1 histogram_avg_incremental{idx="4"} {{schema:0 count:1e-6 sum:0.323055185914577 z_bucket:458.90154 z_bucket_w:0.001 buckets:[7.69805412 7.69805412 2.258E+220 3173.28218135701]}}x1 histogram_avg_incremental{idx="5"}
{{schema:0 count:1e-6 sum:0.951811357687154 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[458.90154 458.90154 7.69805412 2178.35] n_buckets:[2054.92644438789 844.560108898123]}}x1 histogram_avg_incremental{idx="6"} {{schema:0 count:1e-6 sum:0 z_bucket:5 z_bucket_w:0.001 buckets:[0 0 1.78264E+219 376.770478890989]}}x1 histogram_avg_incremental{idx="7"} {{schema:0 count:1e-6 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 458.90154 250325.5] n_buckets:[0 0.0000000011353 0 608.697257]}}x1 # This test fails due to float64 rounding in the incremental average calculation. # For large intermediate means (e.g. ~1e99), multiplying by a fractional weight like (n-1)/n # produces values such as 2.0000000000000002e99 instead of the mathematically exact 2e99. # While the relative error is tiny, subtracting nearly equal high-magnitude values later # results in a large absolute error. The outcome also depends on the (effectively random) order # in which input series are processed, which makes the test flaky.
# histogram_avg_incremental_2{idx="0"} {{schema:0 count:1.7976931348623157e+308 sum:5.3 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[1.78264E+50 1.78264E+215 1.78264E+219 3363.5121756487] n_buckets:[1178.20696291113 731.697776280323 715.201503759399 1386.11378876781 855.572553278132]}}x1 # histogram_avg_incremental_2{idx="1"} {{schema:0 count:1e308 sum:1e100 z_bucket:0.76342771 z_bucket_w:0.001 buckets:[0.76342771 0.76342771 0.76342771 195.70084087969] n_buckets:[421.30382970055 0 450441.779]}}x1 # histogram_avg_incremental_2{idx="2"} {{schema:0 count:1e-6 sum:1 z_bucket:1.9592258 z_bucket_w:0.001 buckets:[1.9592258 1.9592258 1.9592258 1135.74692279] n_buckets:[0 4504.41779 588.599358265103 40.3760942760943]}}x1 # histogram_avg_incremental_2{idx="3"} {{schema:0 count:1e-6 sum:-1e100 z_bucket:7.69805412 z_bucket_w:0.001 buckets:[2.258E+220 2.258E+220 2.3757689E+217 1078.68071312804] n_buckets:[349.905284031261 0 0 0.161173466838949 588.599358]}}x1 # histogram_avg_incremental_2{idx="4"} {{schema:0 count:1e-6 sum:1 z_bucket:458.90154 z_bucket_w:0.001 buckets:[7.69805412 7.69805412 2.258E+220 3173.28218135701]}}x1 # histogram_avg_incremental_2{idx="5"} {{schema:0 count:1e-6 sum:1 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[458.90154 458.90154 7.69805412 2178.35] n_buckets:[2054.92644438789 844.560108898123]}}x1 # histogram_avg_incremental_2{idx="6"} {{schema:0 count:1e-6 sum:0 z_bucket:5 z_bucket_w:0.001 buckets:[0 0 1.78264E+219 376.770478890989]}}x1 # histogram_avg_incremental_2{idx="7"} {{schema:0 count:1e-6 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 458.90154 250325.5] n_buckets:[0 0.0000000011353 0 608.697257]}}x1 eval instant at 10m avg(histogram_avg_incremental) {} {{schema:0 count:3.497116418577895e+307 sum:1.2539005843658437 z_bucket:4.4566e49 z_bucket_w:0.001 buckets:[2.8225e+219 2.822522283e+219 3.271129711125e+219 32728.442914086805] n_buckets:[500.5428151288539 760.0844593974477 56468.19748275306 254.4185391888429 180.5214889097665]}} # This test 
doesn't work, see the load section above for reasoning. # eval instant at 10m avg(histogram_avg_incremental_2) # {} {{schema:0 count:3.497116418577895e+307 sum:1.0375 z_bucket:4.4566e49 z_bucket_w:0.001 buckets:[2.8225e+219 2.822522283e+219 3.271129711125e+219 32728.442914086805] n_buckets:[500.5428151288539 760.0844593974477 56468.19748275306 254.4185391888429 180.5214889097665]}} clear # Test native histograms with sum_over_time, avg_over_time. load 1m histogram_sum_over_time {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:1 count:0}} histogram_sum_over_time_2 {{schema:0 count:1e10 sum:5.30921651659898 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[1.78264E+50 1.78264E+215 1.78264E+219 3363.5121756487] n_buckets:[1178.20696291113 731.697776280323 715.201503759399 1386.11378876781 855.572553278132]}} {{schema:0 count:1e-6 sum:0.961118537914768 z_bucket:0.76342771 z_bucket_w:0.001 buckets:[0.76342771 0.76342771 0.76342771 195.70084087969] n_buckets:[421.30382970055 0 450441.779]}} {{schema:0 count:1e-6 sum:1.62091361305318 z_bucket:1.9592258 z_bucket_w:0.001 buckets:[1.9592258 1.9592258 1.9592258 1135.74692279] n_buckets:[0 4504.41779 588.599358265103 40.3760942760943]}} {{schema:0 count:1e-6 sum:0.865089463758091 z_bucket:7.69805412 z_bucket_w:0.001 buckets:[2.258E+220 2.258E+220 2.3757689E+217 1078.68071312804] n_buckets:[349.905284031261 0 0 0.161173466838949 588.599358]}} {{schema:0 count:1e-6 sum:0.323055185914577 z_bucket:458.90154 z_bucket_w:0.001 buckets:[7.69805412 7.69805412 2.258E+220 3173.28218135701]}} {{schema:0 count:1e-6 sum:0.951811357687154 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[458.90154 458.90154 7.69805412 2178.35] 
n_buckets:[2054.92644438789 844.560108898123]}} {{schema:0 count:1e-6 sum:0 z_bucket:5 z_bucket_w:0.001 buckets:[0 0 1.78264E+219 376.770478890989]}} {{schema:0 count:1e-6 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 458.90154 250325.5] n_buckets:[0 0.0000000011353 0 608.697257]}} histogram_sum_over_time_3 {{schema:0 count:1 sum:1}} {{schema:0 count:2 sum:1e100}} {{schema:0 count:3 sum:1}} {{schema:0 count:4 sum:-1e100}} histogram_sum_over_time_4 {{schema:0 count:1 sum:5.3}} {{schema:0 count:2 sum:1e100}} {{schema:0 count:3 sum:1}} {{schema:0 count:4 sum:-1e100}} {{schema:0 count:5 sum:2}} {{schema:0 count:6 sum:1e50}} {{schema:0 count:7 sum:-1e50}} histogram_sum_over_time_incremental {{schema:0 count:1.7976931348623157e+308 sum:5.30921651659898 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[1.78264E+50 1.78264E+215 1.78264E+219 3363.5121756487] n_buckets:[1178.20696291113 731.697776280323 715.201503759399 1386.11378876781 855.572553278132]}} {{schema:0 count:1e308 sum:0.961118537914768 z_bucket:0.76342771 z_bucket_w:0.001 buckets:[0.76342771 0.76342771 0.76342771 195.70084087969] n_buckets:[421.30382970055 0 450441.779]}} {{schema:0 count:1e-6 sum:1.62091361305318 z_bucket:1.9592258 z_bucket_w:0.001 buckets:[1.9592258 1.9592258 1.9592258 1135.74692279] n_buckets:[0 4504.41779 588.599358265103 40.3760942760943]}} {{schema:0 count:1e-6 sum:0.865089463758091 z_bucket:7.69805412 z_bucket_w:0.001 buckets:[2.258E+220 2.258E+220 2.3757689E+217 1078.68071312804] n_buckets:[349.905284031261 0 0 0.161173466838949 588.599358]}} {{schema:0 count:1e-6 sum:0.323055185914577 z_bucket:458.90154 z_bucket_w:0.001 buckets:[7.69805412 7.69805412 2.258E+220 3173.28218135701]}} {{schema:0 count:1e-6 sum:0.951811357687154 z_bucket:1.78264e50 z_bucket_w:0.001 buckets:[458.90154 458.90154 7.69805412 2178.35] n_buckets:[2054.92644438789 844.560108898123]}} {{schema:0 count:1e-6 sum:0 z_bucket:5 z_bucket_w:0.001 buckets:[0 0 1.78264E+219 376.770478890989]}} {{schema:0 count:1e-6 sum:0 
z_bucket:0 z_bucket_w:0.001 buckets:[0 0 458.90154 250325.5] n_buckets:[0 0.0000000011353 0 608.697257]}} histogram_sum_over_time_incremental_2 {{schema:0 count:1.7976931348623157e+308 sum:5.3}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:2}} {{schema:0 count:1e-6 sum:0}} {{schema:0 count:1e-6 sum:0}} histogram_sum_over_time_incremental_3 {{schema:0 count:1.7976931348623157e+308 sum:5.3}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:1e100}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:0}} histogram_sum_over_time_incremental_4 {{schema:0 count:1.7976931348623157e+308 sum:5.3}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:1e50}} {{schema:0 count:1e-6 sum:-1e50}} {{schema:0 count:1e-6 sum:0}} histogram_sum_over_time_incremental_6 {{schema:0 count:1.7976931348623157e+308 sum:1}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:-1e100}} # Kahan summation only compensates reliably across two magnitude scales. In the following inputs, the # series contains three distinct magnitude groups (≈1, ≈1e50, and ≈1e100). When these magnitudes # are interleaved, rounding error can't be fully compensated, causing smaller values to be lost. # However, when values are ordered so that cancellation within one magnitude group # occurs first, followed by cancellation of the next group, the outcome remains accurate.
# histogram_sum_over_time_5 {{schema:0 count:1 sum:5.3}} {{schema:0 count:2 sum:1e100}} {{schema:0 count:3 sum:1}} {{schema:0 count:4 sum:1e50}} {{schema:0 count:5 sum:2}} {{schema:0 count:6 sum:-1e100}} {{schema:0 count:7 sum:-1e50}} # histogram_sum_over_time_incremental_5 {{schema:0 count:1.7976931348623157e+308 sum:5.3}} {{schema:0 count:1e308 sum:1e100}} {{schema:0 count:1e-6 sum:1e50}} {{schema:0 count:1e-6 sum:1}} {{schema:0 count:1e-6 sum:-1e100}} {{schema:0 count:1e-6 sum:-1e50}} {{schema:0 count:1e-6 sum:0}} eval instant at 3m sum_over_time(histogram_sum_over_time[4m:1m]) {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}} eval instant at 3m avg_over_time(histogram_sum_over_time[4m:1m]) {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}} eval instant at 7m sum_over_time(histogram_sum_over_time_2[8m:1m]) {} {{schema:0 count:10000000000.000008 sum:10.03120467492675 z_bucket:3.56528e+50 z_bucket_w:0.001 buckets:[2.258e+220 2.2580178264e+220 2.6169037689e+220 261827.54331269444] n_buckets:[4004.342521030831 6080.675675179582 451745.57986202446 2035.3483135107433 1444.171911278132]}} eval instant at 7m avg_over_time(histogram_sum_over_time_2[8m:1m]) {} {{schema:0 count:1250000000.000001 sum:1.2539005843658437 z_bucket:4.4566e49 z_bucket_w:0.001 buckets:[2.8225e+219 2.822522283e+219 3.271129711125e+219 32728.442914086805] n_buckets:[500.5428151288539 760.0844593974477 56468.19748275306 254.4185391888429 180.5214889097665]}} eval instant at 3m sum_over_time(histogram_sum_over_time_3[4m:1m]) {} {{schema:0 count:10 sum:2}} eval instant at 3m avg_over_time(histogram_sum_over_time_3[4m:1m]) {} {{schema:0 count:2.5 sum:0.5}} eval instant at 6m sum_over_time(histogram_sum_over_time_4[7m:1m]) {} {{schema:0 count:28 sum:8.3}} eval instant at 6m avg_over_time(histogram_sum_over_time_4[7m:1m]) 
{} {{schema:0 count:4 sum:1.1857142857142857}} # These tests don't work, see the load section above for reasoning. # eval instant at 6m sum_over_time(histogram_sum_over_time_5[7m:1m]) # {} {{schema:0 count:28 sum:8.3}} # # eval instant at 6m avg_over_time(histogram_sum_over_time_5[7m:1m]) # {} {{schema:0 count:4 sum:1.1857142857142857}} eval instant at 7m sum_over_time(histogram_sum_over_time_incremental[8m:1m]) {} {{schema:0 count:Inf sum:10.03120467492675 z_bucket:3.56528e+50 z_bucket_w:0.001 buckets:[2.258e+220 2.2580178264e+220 2.6169037689e+220 261827.54331269444] n_buckets:[4004.342521030831 6080.675675179582 451745.57986202446 2035.3483135107433 1444.171911278132]}} eval instant at 7m avg_over_time(histogram_sum_over_time_incremental[8m:1m]) {} {{schema:0 count:3.497116418577895e+307 sum:1.2539005843658437 z_bucket:4.4566e49 z_bucket_w:0.001 buckets:[2.8225e+219 2.822522283e+219 3.271129711125e+219 32728.442914086805] n_buckets:[500.5428151288539 760.0844593974477 56468.19748275306 254.4185391888429 180.5214889097665]}} eval instant at 6m sum_over_time(histogram_sum_over_time_incremental_2[7m:1m]) {} {{schema:0 count:Inf sum:8.3}} eval instant at 6m avg_over_time(histogram_sum_over_time_incremental_2[7m:1m]) {} {{schema:0 count:3.9967044783747367e+307 sum:1.1857142857142857}} eval instant at 6m sum_over_time(histogram_sum_over_time_incremental_3[7m:1m]) {} {{schema:0 count:Inf sum:6.3}} eval instant at 6m avg_over_time(histogram_sum_over_time_incremental_3[7m:1m]) {} {{schema:0 count:3.9967044783747367e+307 sum:0.9}} eval instant at 6m sum_over_time(histogram_sum_over_time_incremental_4[7m:1m]) {} {{schema:0 count:Inf sum:6.3}} eval instant at 6m avg_over_time(histogram_sum_over_time_incremental_4[7m:1m]) {} {{schema:0 count:3.9967044783747367e+307 sum:0.9}} # These tests don't work, see the load section above for reasoning. 
# eval instant at 6m sum_over_time(histogram_sum_over_time_incremental_5[7m:1m]) # {} {{schema:0 count:Inf sum:6.3}} # # eval instant at 6m avg_over_time(histogram_sum_over_time_incremental_5[7m:1m]) # {} {{schema:0 count:3.9967044783747367e+307 sum:0.9}} eval instant at 3m sum_over_time(histogram_sum_over_time_incremental_6[4m:1m]) {} {{schema:0 count:Inf sum:2}} eval instant at 3m avg_over_time(histogram_sum_over_time_incremental_6[4m:1m]) {} {{schema:0 count:6.99423283715579e+307 sum:0.5}} clear # Test avg_over_time with a single histogram sample (regression test for division by zero bug). load 1m single_histogram_sample {{schema:3 sum:5 count:4 buckets:[1 2 1]}} single_nhcb_sample {{schema:-53 sum:1 count:5 custom_values:[5 10] buckets:[1 4]}} # avg_over_time should return the histogram unchanged when there's only one sample, not Inf/NaN. eval instant at 0m avg_over_time(single_histogram_sample[1m]) {} {{schema:3 sum:5 count:4 buckets:[1 2 1]}} # Test with native histogram with custom buckets (NHCB). eval instant at 0m avg_over_time(single_nhcb_sample[1m]) {} {{schema:-53 sum:1 count:5 custom_values:[5 10] buckets:[1 4]}} clear # Test native histograms with sub operator. 
load 10m histogram_sub_1{idx="0"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 histogram_sub_1{idx="1"} {{schema:0 count:11 sum:1234.5 z_bucket:3 z_bucket_w:0.001 buckets:[0 2 1] n_buckets:[0 0 3 2]}}x1 histogram_sub_2{idx="0"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 histogram_sub_2{idx="1"} {{schema:1 count:11 sum:1234.5 z_bucket:3 z_bucket_w:0.001 buckets:[0 2 1] n_buckets:[0 0 3 2]}}x1 histogram_sub_3{idx="0"} {{schema:1 count:11 sum:1234.5 z_bucket:3 z_bucket_w:0.001 buckets:[0 2 1] n_buckets:[0 0 3 2]}}x1 histogram_sub_3{idx="1"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 eval instant at 10m histogram_sub_1{idx="0"} - ignoring(idx) histogram_sub_1{idx="1"} {} {{schema:0 count:30 sum:1111.1 z_bucket:2 z_bucket_w:0.001 buckets:[1 1 0 2 1 1 1] n_buckets:[0 1 1 0 7 0 0 0 0 5 5 2]}} eval instant at 10m histogram_sub_2{idx="0"} - ignoring(idx) histogram_sub_2{idx="1"} {} {{schema:0 count:30 sum:1111.1 z_bucket:2 z_bucket_w:0.001 buckets:[1 0 1 2 1 1 1] n_buckets:[0 -2 2 2 7 0 0 0 0 5 5 2]}} eval instant at 10m histogram_sub_3{idx="0"} - ignoring(idx) histogram_sub_3{idx="1"} {} {{schema:0 count:-30 sum:-1111.1 z_bucket:-2 z_bucket_w:0.001 buckets:[-1 0 -1 -2 -1 -1 -1] n_buckets:[0 2 -2 -2 -7 0 0 0 0 -5 -5 -2]}} clear # Test native histograms with last_over_time subquery load 2m http_request_duration_seconds{pod="nginx-1"} {{schema:0 count:3 sum:14.00 buckets:[1 2]}}x20 eval range from 0s to 60s step 15s last_over_time({__name__="http_request_duration_seconds"} @ start()[1h:1m] offset 1m16s) {__name__="http_request_duration_seconds", pod="nginx-1"} {{count:3 sum:14 buckets:[1 2]}}x4 clear # Test native histogram quantile and fraction when the native histogram with exponential # buckets has NaN observations. 
load 1m histogram_nan{case="100% NaNs"} {{schema:0 count:0 sum:0}} {{schema:0 count:3 sum:NaN}} histogram_nan{case="20% NaNs"} {{schema:0 count:0 sum:0}} {{schema:0 count:15 sum:NaN buckets:[12]}} eval instant at 1m histogram_quantile(1, histogram_nan) expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is NaN for metric name "histogram_nan" {case="100% NaNs"} NaN {case="20% NaNs"} NaN eval instant at 1m histogram_quantile(0.81, histogram_nan) expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is NaN for metric name "histogram_nan" {case="100% NaNs"} NaN {case="20% NaNs"} NaN eval instant at 1m histogram_quantiles(histogram_nan, "q", 0.81) expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is NaN for metric name "histogram_nan" {case="100% NaNs", q="0.81"} NaN {case="20% NaNs", q="0.81"} NaN eval instant at 1m histogram_quantile(0.8, histogram_nan{case="100% NaNs"}) expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is NaN for metric name "histogram_nan" {case="100% NaNs"} NaN eval instant at 1m histogram_quantile(0.8, histogram_nan{case="20% NaNs"}) expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is skewed higher for metric name "histogram_nan" {case="20% NaNs"} 1 eval instant at 1m histogram_quantile(0.4, histogram_nan{case="100% NaNs"}) expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is NaN for metric name "histogram_nan" {case="100% NaNs"} NaN # histogram_quantile and histogram_fraction equivalence if quantile is not NaN eval instant at 1m histogram_quantile(0.4, histogram_nan{case="20% NaNs"}) expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is skewed higher for metric name "histogram_nan" {case="20% NaNs"} 0.7071067811865475 eval instant at 1m histogram_fraction(-Inf, 0.7071067811865475, histogram_nan) 
expect info msg: PromQL info: input to histogram_fraction has NaN observations, which are excluded from all fractions for metric name "histogram_nan" {case="100% NaNs"} 0.0 {case="20% NaNs"} 0.4 eval instant at 1m histogram_fraction(-Inf, +Inf, histogram_nan) expect info msg: PromQL info: input to histogram_fraction has NaN observations, which are excluded from all fractions for metric name "histogram_nan" {case="100% NaNs"} 0.0 {case="20% NaNs"} 0.8 clear # Tests to demonstrate how an extrapolation below zero is prevented for both float counters and native counter histograms. # Note that the float counter behaves the same as the histogram count after `increase`. load 1m metric{type="histogram"} {{schema:0 count:15 sum:25 buckets:[5 10]}} {{schema:0 count:2490 sum:75 buckets:[15 2475]}}x55 metric{type="counter"} 15 2490x55 # End of range coincides with sample. Zero point of count is reached within the range. # Note that the 2nd bucket has an exaggerated increase of 2479.939393939394 (although # it has a value of only 2475 at the end of the range). eval instant at 55m increase(metric[90m]) expect no_warn {type="histogram"} {{count:2490 sum:50.303030303030305 counter_reset_hint:gauge buckets:[10.06060606060606 2479.939393939394]}} {type="counter"} 2490 # End of range does not coincide with sample. Zero point of count is reached within the range. # The 2nd bucket again has an exaggerated increase, but it is less obvious because of the # right-side extrapolation. eval instant at 54m30s increase(metric[90m]) expect no_warn {type="histogram"} {{count:2512.9166666666665 sum:50.76599326599326 counter_reset_hint:gauge buckets:[10.153198653198652 2502.7634680134674]}} {type="counter"} 2512.9166666666665 # End of range coincides with sample. Zero point of count is reached outside of (i.e. before) the range. 
# This means no change of extrapolation is required for the histogram count (and neither for the float counter), # however, the 2nd bucket's extrapolation will reach zero within the range. The overestimation is visible # easily here because the last sample in the range coincides with the boundary, where the 2nd bucket has # a value of 2475 but has increased by 2476.2045454545455 according to the returned result. eval instant at 55m increase(metric[55m15s]) expect no_warn {type="histogram"} {{count:2486.25 sum:50.227272727272734 counter_reset_hint:gauge buckets:[10.045454545454547 2476.2045454545455]}} {type="counter"} 2486.25 # End of range does not coincide with sample. Zero point of count is reached outside of (i.e. before) the range. # This means no change of extrapolation is required for the histogram count (and neither for the float counter), # however, the 2nd bucket's extrapolation will reach zero within the range. eval instant at 54m30s increase(metric[54m45s]) expect no_warn {type="histogram"} {{count:2509.375 sum:50.69444444444444 counter_reset_hint:gauge buckets:[10.13888888888889 2499.236111111111]}} {type="counter"} 2509.375 # Try the same, but now extract just the histogram count via `histogram_count`. eval instant at 55m histogram_count(increase(metric[90m])) expect no_warn {type="histogram"} 2490 eval instant at 54m30s histogram_count(increase(metric[90m])) expect no_warn {type="histogram"} 2512.9166666666665 eval instant at 55m histogram_count(increase(metric[55m15s])) expect no_warn {type="histogram"} 2486.25 eval instant at 54m30s histogram_count(increase(metric[54m45s])) expect no_warn {type="histogram"} 2509.375 clear # Test counter reset hint adjustment in subtraction and aggregation, including _over_time. load 5m metric{id="1"} {{schema:0 sum:4 count:4 buckets:[1 2 1]}}x10 metric{id="2"} {{schema:0 sum:4 count:4 buckets:[1 2 1]}}x10 # Unary minus turns counters into gauges. 
eval instant at 5m -metric expect no_warn expect no_info {id="1"} {{count:-4 sum:-4 counter_reset_hint:gauge buckets:[-1 -2 -1]}} {id="2"} {{count:-4 sum:-4 counter_reset_hint:gauge buckets:[-1 -2 -1]}} # Subtraction results in gauges, even if the result is not negative. eval instant at 5m metric - 0.5 * metric expect no_warn expect no_info {id="1"} {{count:2 sum:2 counter_reset_hint:gauge buckets:[0.5 1 0.5]}} {id="2"} {{count:2 sum:2 counter_reset_hint:gauge buckets:[0.5 1 0.5]}} # Subtraction results in gauges, now with actually negative result. eval instant at 5m metric - 2 * metric expect no_warn expect no_info {id="1"} {{count:-4 sum:-4 counter_reset_hint:gauge buckets:[-1 -2 -1]}} {id="2"} {{count:-4 sum:-4 counter_reset_hint:gauge buckets:[-1 -2 -1]}} # sum and avg of counters yield a counter. eval instant at 5m sum(metric) expect no_warn expect no_info {} {{count:8 sum:8 counter_reset_hint:not_reset buckets:[2 4 2]}} eval instant at 5m avg(metric) expect no_warn expect no_info {} {{count:4 sum:4 counter_reset_hint:not_reset buckets:[1 2 1]}} clear # Note that with all the series below, we never get counter_reset_hint:reset # as a result because of https://github.com/prometheus/prometheus/issues/15346 . # Therefore, all the tests only look at the hints gauge, not_reset, and unknown. 
load 1m metric{type="gauge"} {{sum:4 count:4 counter_reset_hint:gauge buckets:[1 2 1]}}+{{sum:2 count:3 counter_reset_hint:gauge buckets:[1 1 1]}}x10 metric{type="counter"} {{sum:6 count:5 buckets:[2 2 1]}}+{{sum:2 count:3 buckets:[1 1 1]}}x10 metric{type="counter_with_reset"} {{sum:6 count:5 buckets:[2 2 1]}}+{{sum:2 count:3 buckets:[1 1 1]}}x5 {{sum:4 count:4 buckets:[1 2 1]}}+{{sum:2 count:3 buckets:[1 1 1]}}x5 mixed {{sum:6 count:5 buckets:[2 2 1]}}+{{sum:2 count:3 buckets:[1 1 1]}}x4 {{sum:4 count:4 counter_reset_hint:gauge buckets:[1 2 1]}} {{sum:6 count:5 buckets:[2 2 1]}}+{{sum:2 count:3 buckets:[1 1 1]}}x4 {{sum:4 count:4 buckets:[1 2 1]}}+{{sum:2 count:3 buckets:[1 1 1]}}x5 # Mix of gauge and not_reset results in gauge. eval instant at 3m sum(metric) expect no_warn expect no_info {} {{count:41 sum:34 counter_reset_hint:gauge buckets:[14 15 12]}} eval instant at 3m avg(metric) expect no_warn expect no_info {} {{count:13.666666666666668 sum:11.333333333333334 counter_reset_hint:gauge buckets:[4.666666666666667 5 4]}} eval instant at 5m sum_over_time(mixed[3m]) expect no_warn expect no_info {} {{count:35 sum:30 counter_reset_hint:gauge buckets:[12 13 10]}} eval instant at 5m avg_over_time(mixed[3m]) expect no_warn expect no_info {} {{count:11.666666666666666 sum:10 counter_reset_hint:gauge buckets:[4 4.333333333333334 3.333333333333333]}} # Mix of gauge, not_reset, and unknown results in gauge. 
eval instant at 6m sum(metric) expect no_warn expect no_info {} {{count:49 sum:38 counter_reset_hint:gauge buckets:[16 18 15]}} eval instant at 6m avg(metric) expect no_warn expect no_info {} {{count:16.333333333333332 sum:12.666666666666666 counter_reset_hint:gauge buckets:[5.333333333333334 6 5]}} eval instant at 14m sum_over_time(mixed[10m]) expect no_warn expect no_info {} {{count:93 sum:82 counter_reset_hint:gauge buckets:[31 36 26]}} eval instant at 14m avg_over_time(mixed[10m]) expect no_warn expect no_info {} {{count:9.3 sum:8.2 counter_reset_hint:gauge buckets:[3.1 3.6 2.6]}} # Only not_reset results in not_reset. eval instant at 3m sum(metric{type=~"counter.*"}) expect no_warn expect no_info {} {{count:28 sum:24 counter_reset_hint:not_reset buckets:[10 10 8]}} eval instant at 3m avg(metric{type=~"counter.*"}) expect no_warn expect no_info {} {{count:14 sum:12 counter_reset_hint:not_reset buckets:[5 5 4]}} eval instant at 3m sum_over_time(mixed[3m]) expect no_warn expect no_info {} {{count:33 sum:30 counter_reset_hint:not_reset buckets:[12 12 9]}} eval instant at 3m avg_over_time(mixed[3m]) expect no_warn expect no_info {} {{count:11 sum:10 counter_reset_hint:not_reset buckets:[4 4 3]}} # Mix of not_reset and unknown results in unknown. 
eval instant at 6m sum(metric{type=~"counter.*"}) expect no_warn expect no_info {} {{count:27 sum:22 counter_reset_hint:unknown buckets:[9 10 8]}} eval instant at 6m avg(metric{type=~"counter.*"}) expect no_warn expect no_info {} {{count:13.5 sum:11 counter_reset_hint:unknown buckets:[4.5 5 4]}} eval instant at 15m sum_over_time(mixed[10m]) expect no_warn expect no_info {} {{count:105 sum:90 counter_reset_hint:unknown buckets:[35 40 30]}} eval instant at 15m avg_over_time(mixed[10m]) expect no_warn expect no_info {} {{count:10.5 sum:9 counter_reset_hint:unknown buckets:[3.5 4 3]}} # To finally test the warning about a direct counter reset collisions, we can # utilize the HistogramStatsIterator (by calling histogram_count()). This # special iterator does counter reset detection on the fly and therefore # is able to create the counter reset hint "reset", which we can then mix # with the "not_reset" hint in the test and provoke the warning. eval instant at 6m histogram_count(sum(metric)) expect warn msg:PromQL warning: conflicting counter resets during histogram aggregation expect no_info {} 49 eval instant at 6m histogram_count(avg(metric)) expect warn msg:PromQL warning: conflicting counter resets during histogram aggregation expect no_info {} 16.333333333333332 eval instant at 14m histogram_count(sum_over_time(mixed[10m])) expect warn msg:PromQL warning: conflicting counter resets during histogram aggregation expect no_info {} 93 eval instant at 14m histogram_count(avg_over_time(mixed[10m])) expect warn msg:PromQL warning: conflicting counter resets during histogram aggregation expect no_info {} 9.3 # In the following two tests, the first sample has hint "not_reset" # and the second has "reset". This tests if the conflict is detected # between the first two samples, too. 
eval instant at 11m histogram_count(sum_over_time(mixed[2m])) expect warn msg:PromQL warning: conflicting counter resets during histogram aggregation expect no_info {} 21 eval instant at 11m histogram_count(avg_over_time(mixed[2m])) expect warn msg:PromQL warning: conflicting counter resets during histogram aggregation expect no_info {} 10.5 # Test histogram_quantile annotations. load 1m nonmonotonic_bucket{le="0.1"} 0+2x10 nonmonotonic_bucket{le="1"} 0+1x10 nonmonotonic_bucket{le="10"} 0+5x10 nonmonotonic_bucket{le="100"} 0+4x10 nonmonotonic_bucket{le="1000"} 0+9x10 nonmonotonic_bucket{le="+Inf"} 0+8x10 myHistogram1{abe="0.1"} 0+2x10 myHistogram2{le="Hello World"} 0+2x10 mixedHistogram{le="0.1"} 0+2x10 mixedHistogram{le="1"} 0+3x10 mixedHistogram{} {{schema:0 count:10 sum:50 buckets:[1 2 3]}} eval instant at 1m histogram_quantile(0.5, nonmonotonic_bucket) expect info msg: PromQL info: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name "nonmonotonic_bucket" {} 8.5 eval instant at 1m histogram_quantile(0.5, myHistogram1) expect warn msg: PromQL warning: bucket label "le" is missing or has a malformed value of "" for metric name "myHistogram1" eval instant at 1m histogram_quantile(0.5, myHistogram2) expect warn msg: PromQL warning: bucket label "le" is missing or has a malformed value of "Hello World" for metric name "myHistogram2" eval instant at 1m histogram_quantile(0.5, mixedHistogram) expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "mixedHistogram" eval instant at 1m histogram_quantiles(mixedHistogram, "q", 0.5) expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "mixedHistogram" clear # A counter reset only in a bucket. Sub-queries still need to detect # it via explicit counter reset detection. 
This test also runs it with # histogram_count in the expression to make sure that the # HistogramStatsIterator is not used. (The latter fails to correctly # do the counter resets because Seek is used with sub-queries. And the # explicit counter reset detection done with sub-queries cannot access # the buckets anymore, if HistogramStatsIterator is used.) load 1m h{} {{schema:0 count:1 sum:10 buckets:[1]}}+{{}}x20 {{schema:0 count:1 sum:10 buckets:[0 1]}}+{{}}x20 # Both evals below should yield the same value for the count. eval instant at 41m histogram_count(increase(h[40m:9m])) {} 1.4814814814814814 eval instant at 41m increase(h[40m:9m]) {} {{count:1.4814814814814814 sum:14.814814814814813 counter_reset_hint:gauge offset:1 buckets:[1.4814814814814814]}} clear load 1m reset{timing="late"} {{schema:0 sum:1 count:0 buckets:[1 1 1]}} {{schema:0 sum:1 count:2 buckets:[1 1 1]}} {{schema:0 sum:1 count:3 buckets:[1 1 1]}} {{schema:0 sum:1 count:2 buckets:[1 1 1]}} reset{timing="early"} {{schema:0 sum:1 count:3 buckets:[1 1 1]}} {{schema:0 sum:1 count:2 buckets:[1 1 1]}} {{schema:0 sum:1 count:2 buckets:[1 1 1]}} {{schema:0 sum:1 count:3 buckets:[1 1 1]}} # Trigger an annotation about conflicting counter resets by going through the # HistogramStatsIterator, which creates counter reset hints on the fly. eval instant at 5m histogram_count(sum_over_time(reset{timing="late"}[5m])) expect warn msg: PromQL warning: conflicting counter resets during histogram aggregation {timing="late"} 7 eval instant at 5m histogram_count(sum(reset)) expect warn msg: PromQL warning: conflicting counter resets during histogram aggregation {} 5 eval instant at 5m histogram_count(avg(reset)) expect warn msg: PromQL warning: conflicting counter resets during histogram aggregation {} 2.5 # No annotation with the right timing. 
eval instant at 30s histogram_count(sum(reset)) expect no_warn {} 3 eval instant at 30s histogram_count(avg(reset)) expect no_warn {} 1.5 # Ensure that the annotation does not happen with rate. eval instant at 5m histogram_count(rate(reset{timing="late"}[5m])) expect no_warn {timing="late"} 0.0175 clear # Test edge cases of HistogramStatsIterator detection. # We access the same series multiple times within the same expression, # once with and once without HistogramStatsIterator. The results here # at least prove that we do not use HistogramStatsIterator where we # should not. load 1m histogram {{schema:0 count:10 sum:50 counter_reset_hint:gauge buckets:[1 2 3 4]}} eval instant at 1m histogram_count(histogram unless histogram_quantile(0.5, histogram) < 3) {} 10 eval instant at 1m histogram_quantile(0.5, histogram unless histogram_count(histogram) == 0) {} 3.1748021039363987 eval instant at 1m histogram_quantiles(histogram unless histogram_count(histogram) == 0, "q", 0.5) {q="0.5"} 3.1748021039363987 clear # Regression test for: # https://github.com/prometheus/prometheus/issues/14172 # https://github.com/prometheus/prometheus/issues/15177 load 1m mixed_metric1 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}} 4 5 {{schema:0 sum:18 count:10 buckets:[3 4 3]}} mixed_metric2 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}} # The order of the float vs native histograms is preserved. eval range from 0 to 8m step 1m mixed_metric1 mixed_metric1{} 1 2 3 {{count:4 sum:5 buckets:[1 2 1]}} {{count:6 sum:8 buckets:[1 4 1]}} 4 5 {{schema:0 sum:18 count:10 buckets:[3 4 3]}} {{schema:0 sum:18 count:10 buckets:[3 4 3]}} eval range from 0 to 5m step 1m mixed_metric2 mixed_metric2 1 2 3 {{count:4 sum:5 buckets:[1 2 1]}} {{count:6 sum:8 buckets:[1 4 1]}} {{count:6 sum:8 buckets:[1 4 1]}} clear # Test native histograms with custom buckets, reconciling mismatched bounds. 
load 1m nhcb_add_buckets {{schema:-53 sum:55 count:15 custom_values:[2 4 6] buckets:[1 2 5 7]}} {{schema:-53 sum:555 count:450 custom_values:[1 2 3 4 5 6 7 8] buckets:[10 20 30 40 50 60 70 80 90]}} eval instant at 1m irate(nhcb_add_buckets[2m]) * 60 expect no_warn expect info msg: PromQL info: mismatched custom buckets were reconciled during subtraction {} {{schema:-53 sum:500 count:435 custom_values:[2 4 6] buckets:[29 68 105 233]}} load 1m nhcb_remove_buckets {{schema:-53 sum:55 count:45 custom_values:[1 2 3 4 5 6 7 8] buckets:[1 2 3 4 5 6 7 8 9]}} {{schema:-53 sum:5560 count:1000 custom_values:[3 5 7] buckets:[100 200 300 400]}} eval instant at 1m irate(nhcb_remove_buckets[2m]) * 60 expect no_warn expect info msg: PromQL info: mismatched custom buckets were reconciled during subtraction {} {{schema:-53 sum:5505 count:955 custom_values:[3 5 7] buckets:[94 191 287 383]}} clear # Test native histograms with custom buckets, reconciling mismatched bounds, with counter reset in one bucket. 
load 1m nhcb_add_bucket {{schema:-53 sum:55 count:15 custom_values:[2 4 6] buckets:[1 2 5 7]}} {{schema:-53 sum:56 count:15 custom_values:[2 3 4 6] buckets:[1 0 1 5 8]}} eval instant at 1m irate(nhcb_add_bucket[2m]) * 60 expect no_warn expect no_info {} {{schema:-53 sum:56 count:15 custom_values:[2 3 4 6] buckets:[1 0 1 5 8] counter_reset_hint:gauge}} # Test native histogram with trim operators ("/": TRIM_LOWER) load 1m h_test {{schema:0 sum:123.75 count:34 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1 2]}} eval instant at 1m h_test >/ -Inf h_test {{schema:0 sum:123.75 count:34 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1 2]}} eval instant at 1m h_test / +Inf h_test {{schema:0 z_bucket_w:0.001}} eval instant at 1m h_test / 0 h_test {{schema:0 sum:120.20840280171308 count:30.5 z_bucket:0.5 z_bucket_w:0.001 buckets:[2 4 8 16]}} eval instant at 1m h_test / 1.4142135624 h_test {{count:26 sum:116.50067065070982 z_bucket_w:0.001 buckets:[0 2 8 16]}} load 1m h_test_2 {{schema:2 sum:12.8286080906 count:28 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 3 1]}} eval instant at 1m h_test_2 / 1.13 h_test_2 {{schema:2 count:14.589417818876296 sum:22.168126492693734 z_bucket_w:0.001 offset:1 buckets:[0.589417818876296 4 7 3]}} eval instant at 1m h_test_2 >/ -1.3 h_test_2 {{schema:2 count:25.54213947904476 sum:16.29588491217537 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 1.54213947904476]}} eval instant at 1m h_test_2 / 2 h_test{} {{count:24 sum:113.13708498984761 z_bucket_w:0.001 offset:2 buckets:[8 16]}} eval instant at 1m h_test >/ -1 h_test{} {{count:32 sum:119.50104602052653 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1]}} eval instant at 1m h_test / 0.5 h_positive_buckets {{schema:0 count:10 sum:7.0710678118654755 z_bucket:0 z_bucket_w:0.5 buckets:[10]}} eval instant at 1m h_positive_buckets >/ 0.1 h_positive_buckets {{schema:0 count:11.6 sum:7.551067811865476 z_bucket:1.6 z_bucket_w:0.5 
buckets:[10]}} eval instant at 1m h_positive_buckets >/ 0 h_positive_buckets {{schema:0 sum:8.0210678118654755 count:12 z_bucket:2 z_bucket_w:0.5 buckets:[10]}} eval instant at 1m h_positive_buckets / -0.5 h_negative_buckets {{schema:0 count:2 sum:-0.5 z_bucket:2 z_bucket_w:0.5}} eval instant at 1m h_negative_buckets >/ -0.1 h_negative_buckets {{schema:0 count:0.4 sum:-0.02 z_bucket:0.4 z_bucket_w:0.5}} eval instant at 1m h_negative_buckets >/ 0 h_negative_buckets {{schema:0 z_bucket_w:0.5}} # Exponential buckets: trim zero bucket when there are no other buckets. load 1m zero_bucket_only {{schema:0 count:5 sum:0 z_bucket:5 z_bucket_w:0.1 }} eval instant at 1m zero_bucket_only >/ 0.1 zero_bucket_only {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.1 }} eval instant at 1m zero_bucket_only / 0.05 zero_bucket_only {{schema:0 count:1.25 sum:0.09375 z_bucket:1.25 z_bucket_w:0.1 }} eval instant at 1m zero_bucket_only / 0 zero_bucket_only {{schema:0 count:2.5 sum:0.125 z_bucket:2.5 z_bucket_w:0.1 }} eval instant at 1m zero_bucket_only / -0.05 zero_bucket_only {{schema:0 count:3.75 sum:0.09375 z_bucket:3.75 z_bucket_w:0.1 }} eval instant at 1m zero_bucket_only / -0.1 zero_bucket_only {{schema:0 count:5 sum:0 z_bucket:5 z_bucket_w:0.1 }} load 1m cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} # Custom buckets: trim on bucket boundary without interpolation eval instant at 1m cbh / 15 cbh{} {{schema:-53 count:4 sum:72.5 custom_values:[5 10 15 20] offset:3 buckets:[3 1]}} # Custom buckets: trim uses linear interpolation if cutoff is inside a bucket eval instant at 1m cbh / 13 cbh{} {{schema:-53 count:5.6 sum:94.9 custom_values:[5 10 15 20] offset:2 buckets:[1.6 3 1]}} eval instant at 1m cbh / +Inf cbh{} {{schema:-53 custom_values:[5 10 15 20]}} eval instant at 1m cbh / -Inf cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} # Noop eval instant at 1m cbh >/ 0 cbh {{schema:-53 sum:172.5 count:15 
custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} eval instant at 1m cbh / 0 zero_bucket{} {{count:7.5 sum:5.669354249492381 z_bucket:2.5 z_bucket_w:0.01 buckets:[2 3]}} load 1m cbh_one_bucket {{schema:-53 sum:100.0 count:100 buckets:[100]}} # Skip [-Inf; +Inf] bucket (100). eval instant at 1m cbh_one_bucket / 10.0 cbh_one_bucket {{schema:-53 sum:0.0 count:0 buckets:[0]}} # Keep [-Inf; +Inf] bucket (100). eval instant at 1m cbh_one_bucket / +Inf cbh_one_bucket {{schema:-53 sum:0 count:0 buckets:[0]}} # Keep [-Inf; +Inf] bucket (100). eval instant at 1m cbh_one_bucket >/ -Inf cbh_one_bucket {{schema:-53 sum:100 count:100 buckets:[100]}} # Skip [-Inf; +Inf] bucket (100). eval instant at 1m cbh_one_bucket / -10.0 cbh_two_buckets_split_at_zero {{schema:-53 sum:0.0 count:100 custom_values:[0] buckets:[0 100]}} # Skip [-Inf, 0] bucket (1). eval instant at 1m cbh_two_buckets_split_at_zero >/ 0.0 cbh_two_buckets_split_at_zero {{schema:-53 sum:0.0 count:100 custom_values:[0] buckets:[0 100]}} # Skip first bucket. eval instant at 1m cbh_two_buckets_split_at_zero >/ 10.0 cbh_two_buckets_split_at_zero {{schema:-53 sum:1000.0 count:100 custom_values:[0] buckets:[0 100]}} load 1m cbh_two_buckets_split_at_positive {{schema:-53 sum:33 count:101 custom_values:[5] buckets:[1 100]}} # Skip (5, +Inf] bucket (100). eval instant at 1m cbh_two_buckets_split_at_positive / -10.0 cbh_two_buckets_split_at_positive {{schema:-53 sum:500.0 count:100 custom_values:[5] buckets:[0 100]}} # Noop. eval instant at 1m cbh_two_buckets_split_at_positive >/ 0.0 cbh_two_buckets_split_at_positive {{schema:-53 sum:33.0 count:101 custom_values:[5] buckets:[1 100]}} # Keep (5, +Inf] bucket (100) and 3/5 of [0, 5] bucket (0.6 * 3.5). eval instant at 1m cbh_two_buckets_split_at_positive >/ 2.0 cbh_two_buckets_split_at_positive {{schema:-53 sum:502.1 count:100.6 custom_values:[5] buckets:[0.6 100]}} # Skip first bucket. 
eval instant at 1m cbh_two_buckets_split_at_positive >/ 10.0 cbh_two_buckets_split_at_positive {{schema:-53 sum:1000.0 count:100 custom_values:[5] buckets:[0 100]}} load 1m cbh_two_buckets_split_at_negative {{schema:-53 sum:33 count:101 custom_values:[-5] buckets:[1 100]}} # Skip (-5, +Inf] bucket (100). eval instant at 1m cbh_two_buckets_split_at_negative / -10.0 cbh_two_buckets_split_at_negative {{schema:-53 sum:-500 count:100 custom_values:[-5] buckets:[0 100]}} # Skip [-Inf, -5] bucket (1). eval instant at 1m cbh_two_buckets_split_at_negative >/ -2.0 cbh_two_buckets_split_at_negative {{schema:-53 sum:-200 count:100 custom_values:[-5] buckets:[0 100]}} # Skip [-Inf, -5] bucket (1). eval instant at 1m cbh_two_buckets_split_at_negative >/ 0.0 cbh_two_buckets_split_at_negative {{schema:-53 sum:0.0 count:100 custom_values:[-5] buckets:[0 100]}} # Skip [-Inf, -5] bucket (1). eval instant at 1m cbh_two_buckets_split_at_negative >/ 10.0 cbh_two_buckets_split_at_negative {{schema:-53 sum:1000.0 count:100 custom_values:[-5] buckets:[0 100]}} # Verify there is no interference from skip buckets optimization: eval instant at 1m histogram_sum(cbh_two_buckets_split_at_negative >/ 10.0) {} 1000 eval instant at 1m histogram_count(cbh_two_buckets_split_at_negative >/ 10.0) {} 100 load 1m cbh_for_join{label="a"} {{schema:-53 sum:33 count:101 custom_values:[5] buckets:[1 100]}} cbh_for_join{label="b"} {{schema:-53 sum:66 count:202 custom_values:[5] buckets:[2 200]}} cbh_for_join{label="c"} {{schema:-53 sum:99 count:303 custom_values:[5] buckets:[3 300]}} float_for_join{label="a"} 1 float_for_join{label="b"} 4 eval instant at 1m cbh_for_join >/ on (label) float_for_join {label="a"} {{schema:-53 count:100.8 sum:502.4 custom_values:[5] buckets:[0.8 100]}} {label="b"} {{schema:-53 count:200.4 sum:1001.8 custom_values:[5] buckets:[0.4 200]}} clear