Merge branch 'main' into krajo/intern-custom-values

This commit is contained in:
György Krajcsovits 2025-05-20 08:23:15 +02:00
commit 772d5ab433
No known key found for this signature in database
GPG Key ID: 47A8F9CE80FD7C7F
75 changed files with 3651 additions and 1425 deletions

View File

@ -2,6 +2,7 @@ formatters:
enable:
- gci
- gofumpt
- goimports
settings:
gci:
sections:
@ -10,6 +11,9 @@ formatters:
- prefix(github.com/prometheus/prometheus)
gofumpt:
extra-rules: true
goimports:
local-prefixes:
- github.com/prometheus/prometheus
issues:
max-issues-per-linter: 0

View File

@ -1,12 +1,25 @@
# Changelog
## unreleased
## 3.4.0 / 2025-05-17
* [CHANGE] Make setting out-of-order native histograms feature (`--enable-feature=ooo-native-histograms`) a no-op. Out-of-order native histograms are now always enabled when `out_of_order_time_window` is greater than zero and `--enable-feature=native-histograms` is set. #16207
* [CHANGE] Config: Make setting out-of-order native histograms feature (`--enable-feature=ooo-native-histograms`) a no-op. Out-of-order native histograms are now always enabled when `out_of_order_time_window` is greater than zero and `--enable-feature=native-histograms` is set. #16207
* [FEATURE] OTLP translate: Add feature flag for optionally translating OTel explicit bucket histograms into native histograms with custom buckets. #15850
* [FEATURE] OTLP translate: Add option to receive OTLP metrics without translating names or attributes. #16441
* [FEATURE] PromQL: allow arithmetic operations in durations in PromQL parser. #16249
* [FEATURE] OTLP receiver: Add primitive support for ingesting OTLP delta metrics as-is. #16360
* [ENHANCEMENT] PromQL: histogram_fraction for bucket histograms. #16095
* [ENHANCEMENT] TSDB: add `prometheus_tsdb_wal_replay_unknown_refs_total` and `prometheus_tsdb_wbl_replay_unknown_refs_total` metrics to track unknown series references during WAL/WBL replay. #16166
* [BUGFIX] TSDB: fix unknown series errors and possible lost data during WAL replay when series are removed from the head due to inactivity and reappear before the next WAL checkpoint. #16060
* [ENHANCEMENT] Scraping: Add config option for escaping scheme request. #16066
* [ENHANCEMENT] Config: Add global config option for convert_classic_histograms_to_nhcb. #16226
* [ENHANCEMENT] Alerting: make batch size configurable (`--alertmanager.notification-batch-size`). #16254
* [PERF] Kubernetes SD: make endpointSlice discovery more efficient. #16433
* [BUGFIX] Config: Fix auto-reload on changes to rule and scrape config files. #16340
* [BUGFIX] Scraping: Skip native histogram series if ingestion is disabled. #16218
* [BUGFIX] TSDB: Handle metadata/tombstones/exemplars for duplicate series during WAL replay. #16231
* [BUGFIX] TSDB: Avoid processing exemplars outside the valid time range during WAL replay. #16242
* [BUGFIX] Promtool: Add feature flags for PromQL features. #16443
* [BUGFIX] Rules: correct logging of alert name & template data. #15093
* [BUGFIX] PromQL: Use arithmetic mean for `histogram_stddev()` and `histogram_stdvar()`. #16444
## 3.3.0 / 2025-04-15

View File

@ -62,6 +62,7 @@ SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v2.1.5
GOLANGCI_FMT_OPTS ?=
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@ -156,9 +157,13 @@ $(GOTEST_DIR):
@mkdir -p $@
.PHONY: common-format
common-format:
common-format: $(GOLANGCI_LINT)
@echo ">> formatting code"
$(GO) fmt $(pkgs)
ifdef GOLANGCI_LINT
@echo ">> formatting code with golangci-lint"
$(GOLANGCI_LINT) fmt $(GOLANGCI_FMT_OPTS)
endif
.PHONY: common-vet
common-vet:
@ -248,8 +253,8 @@ $(PROMU):
cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
rm -r $(PROMU_TMP)
.PHONY: proto
proto:
.PHONY: common-proto
common-proto:
@echo ">> generating code from proto files"
@./scripts/genproto.sh

View File

@ -13,7 +13,7 @@ Please see [the v2.55 RELEASE.md](https://github.com/prometheus/prometheus/blob/
| v3.1 | 2024-12-17 | Bryan Boreham (GitHub: @bboreham) |
| v3.2 | 2025-01-28 | Jan Fajerski (GitHub: @jan--f) |
| v3.3 | 2025-03-11 | Ayoub Mrini (Github: @machine424) |
| v3.4 | 2025-04-22 | Jan-Otto Kröpke (Github: @jkroepke)|
| v3.4 | 2025-04-29 | Jan-Otto Kröpke (Github: @jkroepke)|
| v3.5 LTS | 2025-06-03 | Bryan Boreham (GitHub: @bboreham) |
| v3.6 | 2025-07-15 | **volunteer welcome** |

View File

@ -1 +1 @@
3.3.0
3.4.0

View File

@ -30,6 +30,7 @@ import (
goregexp "regexp" //nolint:depguard // The Prometheus client library requires us to pass a regexp from this package.
"runtime"
"runtime/debug"
"slices"
"strconv"
"strings"
"sync"
@ -289,6 +290,9 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
// See proposal: https://github.com/prometheus/proposals/pull/48
c.web.NativeOTLPDeltaIngestion = true
logger.Info("Enabling native ingestion of delta OTLP metrics, storing the raw sample values without conversion. WARNING: Delta support is in an early stage of development. The ingestion and querying process is likely to change over time.")
case "type-and-unit-labels":
c.scrape.EnableTypeAndUnitLabels = true
logger.Info("Experimental type and unit labels enabled")
default:
logger.Warn("Unknown option for --enable-feature", "option", o)
}
@ -1921,10 +1925,8 @@ func (p *rwProtoMsgFlagParser) Set(opt string) error {
if err := t.Validate(); err != nil {
return err
}
for _, prev := range *p.msgs {
if prev == t {
return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs)
}
if slices.Contains(*p.msgs, t) {
return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs)
}
*p.msgs = append(*p.msgs, t)
return nil

View File

@ -685,12 +685,11 @@ func TestRuntimeGOGCConfig(t *testing.T) {
name: "empty config file",
expectedGOGC: 75,
},
// the GOGC env var is ignored in this case, see https://github.com/prometheus/prometheus/issues/16334
/* {
{
name: "empty config file with GOGC env var set",
gogcEnvVar: "66",
expectedGOGC: 66,
}, */
},
{
name: "gogc set through config",
config: `
@ -719,15 +718,14 @@ runtime:`,
gogcEnvVar: "88",
expectedGOGC: 88,
},
// the GOGC env var is ignored in this case, see https://github.com/prometheus/prometheus/issues/16334
/* {
name: "unrelated config and GOGC env var set",
config: `
global:
scrape_interval: 500ms`,
gogcEnvVar: "80",
expectedGOGC: 80,
}, */
{
name: "unrelated config and GOGC env var set",
config: `
global:
scrape_interval: 500ms`,
gogcEnvVar: "80",
expectedGOGC: 80,
},
} {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()

View File

@ -21,6 +21,7 @@ import (
"net/url"
"os"
"path/filepath"
"slices"
"sort"
"strconv"
"strings"
@ -174,7 +175,7 @@ var (
DefaultRuntimeConfig = RuntimeConfig{
// Go runtime tuning.
GoGC: 75,
GoGC: getGoGC(),
}
// DefaultScrapeConfig is the default scrape configuration.
@ -384,8 +385,6 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
// We have to restore it here.
if c.Runtime.isZero() {
c.Runtime = DefaultRuntimeConfig
// Use the GOGC env var value if the runtime section is empty.
c.Runtime.GoGC = getGoGCEnv()
}
for _, rf := range c.RuleFiles {
@ -651,6 +650,8 @@ func (c *GlobalConfig) isZero() bool {
!c.AlwaysScrapeClassicHistograms
}
const DefaultGoGCPercentage = 75
// RuntimeConfig configures the values for the process behavior.
type RuntimeConfig struct {
// The Go garbage collection target percentage.
@ -1109,13 +1110,11 @@ func (v *AlertmanagerAPIVersion) UnmarshalYAML(unmarshal func(interface{}) error
return err
}
for _, supportedVersion := range SupportedAlertmanagerAPIVersions {
if *v == supportedVersion {
return nil
}
if !slices.Contains(SupportedAlertmanagerAPIVersions, *v) {
return fmt.Errorf("expected Alertmanager api version to be one of %v but got %v", SupportedAlertmanagerAPIVersions, *v)
}
return fmt.Errorf("expected Alertmanager api version to be one of %v but got %v", SupportedAlertmanagerAPIVersions, *v)
return nil
}
const (
@ -1495,7 +1494,7 @@ func fileErr(filename string, err error) error {
return fmt.Errorf("%q: %w", filePath(filename), err)
}
func getGoGCEnv() int {
func getGoGC() int {
goGCEnv := os.Getenv("GOGC")
// If the GOGC env var is set, use the same logic as upstream Go.
if goGCEnv != "" {
@ -1508,7 +1507,7 @@ func getGoGCEnv() int {
return i
}
}
return DefaultRuntimeConfig.GoGC
return DefaultGoGCPercentage
}
type translationStrategyOption string

View File

@ -20,6 +20,7 @@ import (
"log/slog"
"os"
"reflect"
"slices"
"strings"
"sync"
"time"
@ -38,8 +39,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
// Required to get the GCP auth provider working.
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // Required to get the GCP auth provider working.
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
@ -210,18 +210,9 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if _, ok := allowedSelectors[c.Role]; !ok {
return fmt.Errorf("invalid role: %q, expecting one of: pod, service, endpoints, endpointslice, node or ingress", c.Role)
}
var allowed bool
for _, role := range allowedSelectors[c.Role] {
if role == string(selector.Role) {
allowed = true
break
}
}
if !allowed {
if !slices.Contains(allowedSelectors[c.Role], string(selector.Role)) {
return fmt.Errorf("%s role supports only %s selectors", c.Role, strings.Join(allowedSelectors[c.Role], ", "))
}
_, err := fields.ParseSelector(selector.Field)
if err != nil {
return err

View File

@ -306,13 +306,13 @@ func (m *Manager) startProvider(ctx context.Context, p *Provider) {
// cleaner cleans resources associated with provider.
func (m *Manager) cleaner(p *Provider) {
m.targetsMtx.Lock()
p.mu.RLock()
m.targetsMtx.Lock()
for s := range p.subs {
delete(m.targets, poolKey{s, p.name})
}
p.mu.RUnlock()
m.targetsMtx.Unlock()
p.mu.RUnlock()
if p.done != nil {
p.done()
}
@ -413,9 +413,9 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group {
n := map[string]int{}
m.mtx.RLock()
m.targetsMtx.Lock()
for _, p := range m.providers {
p.mu.RLock()
m.targetsMtx.Lock()
for s := range p.subs {
// Send empty lists for subs without any targets to make sure old stale targets are dropped by consumers.
// See: https://github.com/prometheus/prometheus/issues/12858 for details.
@ -430,9 +430,9 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group {
}
}
}
m.targetsMtx.Unlock()
p.mu.RUnlock()
}
m.targetsMtx.Unlock()
m.mtx.RUnlock()
for setName, v := range n {

View File

@ -11,7 +11,7 @@ to an external service. Whenever the alert expression results in one or more
vector elements at a given point in time, the alert counts as active for these
elements' label sets.
### Defining alerting rules
## Defining alerting rules
Alerting rules are configured in Prometheus in the same way as [recording
rules](recording_rules.md).
@ -54,7 +54,7 @@ values can be templated.
The `annotations` clause specifies a set of informational labels that can be used to store longer additional information such as alert descriptions or runbook links. The annotation values can be templated.
#### Templating
### Templating
Label and annotation values can be templated using [console
templates](https://prometheus.io/docs/visualization/consoles). The `$labels`
@ -93,7 +93,7 @@ groups:
description: "{{ $labels.instance }} has a median request latency above 1s (current value: {{ $value }}s)"
```
### Inspecting alerts during runtime
## Inspecting alerts during runtime
To manually inspect which alerts are active (pending or firing), navigate to
the "Alerts" tab of your Prometheus instance. This will show you the exact
@ -105,7 +105,7 @@ The sample value is set to `1` as long as the alert is in the indicated active
(pending or firing) state, and the series is marked stale when this is no
longer the case.
### Sending alert notifications
## Sending alert notifications
Prometheus's alerting rules are good at figuring what is broken *right now*, but
they are not a fully-fledged notification solution. Another layer is needed to
@ -114,6 +114,6 @@ on top of the simple alert definitions. In Prometheus's ecosystem, the
[Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) takes on this
role. Thus, Prometheus may be configured to periodically send information about
alert states to an Alertmanager instance, which then takes care of dispatching
the right notifications.
the right notifications.
Prometheus can be [configured](configuration.md) to automatically discover available
Alertmanager instances through its service discovery integrations.

View File

@ -80,9 +80,9 @@ global:
[ rule_query_offset: <duration> | default = 0s ]
# The labels to add to any time series or alerts when communicating with
# external systems (federation, remote storage, Alertmanager).
# Environment variable references `${var}` or `$var` are replaced according
# to the values of the current environment variables.
# external systems (federation, remote storage, Alertmanager).
# Environment variable references `${var}` or `$var` are replaced according
# to the values of the current environment variables.
# References to undefined variables are replaced by the empty string.
# The `$` character can be escaped by using `$$`.
external_labels:
@ -195,7 +195,7 @@ otlp:
# It preserves all special character like dots and won't append special suffixes for metric
# unit and type.
#
# WARNING: The "NoTranslation" setting has significant known risks and limitations (see https://prometheus.io/docs/practices/naming/
# WARNING: The "NoTranslation" setting has significant known risks and limitations (see https://prometheus.io/docs/practices/naming/
# for details):
# * Impaired UX when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling configuration).
# * Series collisions which in the best case may result in OOO errors, in the worst case a silently malformed
@ -484,26 +484,26 @@ metric_relabel_configs:
# that will be kept in memory. 0 means no limit.
[ keep_dropped_targets: <int> | default = 0 ]
# Specifies the validation scheme for metric and label names. Either blank or
# Specifies the validation scheme for metric and label names. Either blank or
# "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, and
# underscores.
[ metric_name_validation_scheme: <string> | default "utf8" ]
# Specifies the character escaping scheme that will be requested when scraping
# for metric and label names that do not conform to the legacy Prometheus
# character set. Available options are:
# character set. Available options are:
# * `allow-utf-8`: Full UTF-8 support, no escaping needed.
# * `underscores`: Escape all legacy-invalid characters to underscores.
# * `dots`: Escapes dots to `_dot_`, underscores to `__`, and all other
# legacy-invalid characters to underscores.
# * `values`: Prepend the name with `U__` and replace all invalid
# characters with their unicode value, surrounded by underscores. Single
# underscores are replaced with double underscores.
# underscores are replaced with double underscores.
# e.g. "U__my_2e_dotted_2e_name".
# If this value is left blank, Prometheus will default to `allow-utf-8` if the
# validation scheme for the current scrape config is set to utf8, or
# `underscores` if the validation scheme is set to `legacy`.
[ metric_name_validation_scheme: <string> | default "utf8" ]
[ metric_name_escaping_scheme: <string> | default "utf8" ]
# Limit on total number of positive and negative buckets allowed in a single
# native histogram. The resolution of a histogram with more buckets will be
@ -517,7 +517,7 @@ metric_relabel_configs:
# reduced as much as possible until it is within the limit.
# To set an upper limit for the schema (equivalent to "scale" in OTel's
# exponential histograms), use the following factor limits:
#
#
# +----------------------------+----------------------------+
# | growth factor | resulting schema AKA scale |
# +----------------------------+----------------------------+
@ -547,7 +547,7 @@ metric_relabel_configs:
# +----------------------------+----------------------------+
# | 1.002 | 8 |
# +----------------------------+----------------------------+
#
#
# 0 results in the smallest supported factor (which is currently ~1.0027 or
# schema 8, but might change in the future).
[ native_histogram_min_bucket_factor: <float> | default = 0 ]
@ -564,7 +564,7 @@ Where `<job_name>` must be unique across all scrape configurations.
A `http_config` allows configuring HTTP requests.
```
```yaml
# Sets the `Authorization` header on every request with the
# configured username and password.
# username and username_file are mutually exclusive.
@ -795,7 +795,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_consul_address`: the address of the target
* `__meta_consul_dc`: the datacenter name for the target
* `__meta_consul_health`: the health status of the service
* `__meta_consul_partition`: the admin partition name where the service is registered
* `__meta_consul_partition`: the admin partition name where the service is registered
* `__meta_consul_metadata_<key>`: each node metadata key value of the target
* `__meta_consul_node`: the node name defined for the target
* `__meta_consul_service_address`: the service address of the target
@ -942,7 +942,7 @@ host: <string>
[ host_networking_host: <string> | default = "localhost" ]
# Sort all non-nil networks in ascending order based on network name and
# get the first network if the container has multiple networks defined,
# get the first network if the container has multiple networks defined,
# thus avoiding collecting duplicate targets.
[ match_first_network: <boolean> | default = true ]
@ -1258,7 +1258,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
#### `loadbalancer`
The `loadbalancer` role discovers one target per Octavia loadbalancer with a
The `loadbalancer` role discovers one target per Octavia loadbalancer with a
`PROMETHEUS` listener. The target address defaults to the VIP address
of the load balancer.
@ -1471,7 +1471,7 @@ and serves as an interface to plug in custom service discovery mechanisms.
It reads a set of files containing a list of zero or more
`<static_config>`s. Changes to all defined files are detected via disk watches
and applied immediately.
and applied immediately.
While those individual files are watched for changes,
the parent directory is also watched implicitly. This is to handle [atomic
@ -1984,7 +1984,7 @@ See below for the configuration options for Kuma MonitoringAssignment discovery:
# Address of the Kuma Control Plane's MADS xDS server.
server: <string>
# Client id is used by Kuma Control Plane to compute Monitoring Assignment for specific Prometheus backend.
# Client id is used by Kuma Control Plane to compute Monitoring Assignment for specific Prometheus backend.
# This is useful when migrating between multiple Prometheus backends, or having separate backend for each Mesh.
# When not specified, system hostname/fqdn will be used if available, if not `prometheus` will be used.
[ client_id: <string> ]
@ -2082,7 +2082,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_linode_status`: the status of the linode instance
* `__meta_linode_tags`: a list of tags of the linode instance joined by the tag separator
* `__meta_linode_group`: the display group a linode instance is a member of
* `__meta_linode_gpus`: the number of GPU's of the linode instance
* `__meta_linode_gpus`: the number of GPU's of the linode instance
* `__meta_linode_hypervisor`: the virtualization software powering the linode instance
* `__meta_linode_backups`: the backup service status of the linode instance
* `__meta_linode_specs_disk_bytes`: the amount of storage space the linode instance has access to
@ -2603,7 +2603,7 @@ input to a subsequent relabeling step), use the `__tmp` label name prefix. This
prefix is guaranteed to never be used by Prometheus itself.
```yaml
# The source_labels tells the rule what labels to fetch from the series. Any
# The source_labels tells the rule what labels to fetch from the series. Any
# labels which do not exist get a blank value (""). Their content is concatenated
# using the configured separator and matched against the configured regular expression
# for the replace, keep, and drop actions.
@ -2894,7 +2894,7 @@ write_relabel_configs:
# For the `io.prometheus.write.v2.Request` message, this option is noop (always true).
[ send_native_histograms: <boolean> | default = false ]
# When enabled, remote-write will resolve the URL host name via DNS, choose one of the IP addresses at random, and connect to it.
# When enabled, remote-write will resolve the URL host name via DNS, choose one of the IP addresses at random, and connect to it.
# When disabled, remote-write relies on Go's standard behavior, which is to try to connect to each address in turn.
# The connection timeout applies to the whole operation, i.e. in the latter case it is spread over all attempts.
# This is an experimental feature, and its behavior might still change, or even get removed.
@ -2927,7 +2927,7 @@ azuread:
# Azure User-assigned Managed identity.
[ managed_identity:
[ client_id: <string> ] ]
[ client_id: <string> ] ]
# Azure OAuth.
[ oauth:
@ -3055,8 +3055,8 @@ with this feature.
# that is within the out-of-order window, or (b) too-old, i.e. not in-order
# and before the out-of-order window.
#
# When out_of_order_time_window is greater than 0, it also affects experimental agent. It allows
# the agent's WAL to accept out-of-order samples that fall within the specified time window relative
# When out_of_order_time_window is greater than 0, it also affects experimental agent. It allows
# the agent's WAL to accept out-of-order samples that fall within the specified time window relative
# to the timestamp of the last appended sample for the same series.
[ out_of_order_time_window: <duration> | default = 0s ]
```

View File

@ -27,7 +27,7 @@ Generic placeholders are defined as follows:
A valid example file can be found [here](/documentation/examples/web-config.yml).
```
```yaml
tls_server_config:
# Certificate and key files for server to use to authenticate to client.
cert_file: <filename>

View File

@ -34,7 +34,7 @@ When the file is syntactically valid, the checker prints a textual
representation of the parsed rules to standard output and then exits with
a `0` return status.
If there are any syntax errors or invalid input arguments, it prints an error
If there are any syntax errors or invalid input arguments, it prints an error
message to standard error and exits with a `1` return status.
## Recording rules
@ -71,7 +71,8 @@ groups:
```
### `<rule_group>`
```
```yaml
# The name of the group. Must be unique within a file.
name: <string>
@ -98,7 +99,7 @@ rules:
The syntax for recording rules is:
```
```yaml
# The name of the time series to output to. Must be a valid metric name.
record: <string>
@ -114,7 +115,7 @@ labels:
The syntax for alerting rules is:
```
```yaml
# The name of the alert. Must be a valid label value.
alert: <string>
@ -143,7 +144,7 @@ annotations:
See also the
[best practices for naming metrics created by recording rules](https://prometheus.io/docs/practices/rules/#recording-rules).
# Limiting alerts and series
## Limiting alerts and series
A limit for alerts produced by alerting rules and series produced by recording rules
can be configured per-group. When the limit is exceeded, _all_ series produced
@ -152,9 +153,9 @@ the rule, active, pending, or inactive, are cleared as well. The event will be
recorded as an error in the evaluation, and as such no stale markers are
written.
# Rule query offset
## Rule query offset
This is useful to ensure the underlying metrics have been received and stored in Prometheus. Metric availability delays are more likely to occur when Prometheus is running as a remote write target due to the nature of distributed systems, but can also occur when there's anomalies with scraping and/or short evaluation intervals.
# Failed rule evaluations due to slow evaluation
## Failed rule evaluations due to slow evaluation
If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group.
If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group.

View File

@ -13,7 +13,7 @@ templating](https://golang.org/pkg/text/template/) system.
## Simple alert field templates
```
```yaml
alert: InstanceDown
expr: up == 0
for: 5m
@ -33,7 +33,7 @@ console instead.
This displays a list of instances, and whether they are up:
```go
```
{{ range query "up" }}
{{ .Labels.instance }} {{ .Value }}
{{ end }}
@ -43,7 +43,7 @@ The special `.` variable contains the value of the current sample for each loop
## Display one value
```go
```
{{ with query "some_metric{instance='someinstance'}" }}
{{ . | first | value | humanize }}
{{ end }}
@ -58,7 +58,7 @@ formatting of results, and linking to the [expression browser](https://prometheu
## Using console URL parameters
```go
```
{{ with printf "node_memory_MemTotal{job='node',instance='%s'}" .Params.instance | query }}
{{ . | first | value | humanize1024 }}B
{{ end }}
@ -95,7 +95,7 @@ powerful when combined with
[console library](template_reference.md#console-templates) support, allowing
sharing of templates across consoles.
```go
```
{{/* Define the template */}}
{{define "myTemplate"}}
do something
@ -107,7 +107,7 @@ sharing of templates across consoles.
Templates are limited to one argument. The `args` function can be used to wrap multiple arguments.
```go
```
{{define "myMultiArgTemplate"}}
First argument: {{.arg0}}
Second argument: {{.arg1}}

View File

@ -17,8 +17,8 @@ The primary data structure for dealing with time series data is the sample, defi
```go
type sample struct {
Labels map[string]string
Value interface{}
Labels map[string]string
Value interface{}
}
```

View File

@ -23,7 +23,7 @@ Exemplar storage is implemented as a fixed size circular buffer that stores exem
`--enable-feature=memory-snapshot-on-shutdown`
This takes a snapshot of the chunks that are in memory along with the series information when shutting down and stores it on disk. This will reduce the startup time since the memory state can now be restored with this snapshot
This takes a snapshot of the chunks that are in memory along with the series information when shutting down and stores it on disk. This will reduce the startup time since the memory state can now be restored with this snapshot
and m-mapped chunks, while a WAL replay from disk is only needed for the parts of the WAL that are not part of the snapshot.
## Extra scrape metrics
@ -183,7 +183,7 @@ This state is periodically ([`max_stale`][d2c]) cleared of inactive series.
Enabling this _can_ have negative impact on performance, because the in-memory
state is mutex guarded. Cumulative-only OTLP requests are not affected.
### PromQL arithmetic expressions in time durations
## PromQL arithmetic expressions in time durations
`--enable-feature=promql-duration-expr`
@ -203,7 +203,7 @@ The following operators are supported:
* `+` - addition
* `-` - subtraction
* `*` - multiplication
* `*` - multiplication
* `/` - division
* `%` - modulo
* `^` - exponentiation
@ -227,7 +227,7 @@ When enabled, allows for the native ingestion of delta OTLP metrics, storing the
Currently, the StartTimeUnixNano field is ignored, and deltas are given the unknown metric metadata type.
Delta support is in a very early stage of development and the ingestion and querying process may change over time. For the open proposal see [prometheus/proposals#48](https://github.com/prometheus/proposals/pull/48).
Delta support is in a very early stage of development and the ingestion and querying process may change over time. For the open proposal see [prometheus/proposals#48](https://github.com/prometheus/proposals/pull/48).
### Querying
@ -246,4 +246,29 @@ These may not work well if the `<range>` is not a multiple of the collection int
* It is difficult to figure out whether a metric has delta or cumulative temporality, since there's no indication of temporality in metric names or labels. For now, if you are ingesting a mix of delta and cumulative metrics we advise you to explicitly add your own labels to distinguish them. In the future, we plan to introduce type labels to consistently distinguish metric types and potentially make PromQL functions type-aware (e.g. providing warnings when cumulative-only functions are used with delta metrics).
* If there are multiple samples being ingested at the same timestamp, only one of the points is kept - the samples are **not** summed together (this is how Prometheus works in general - duplicate timestamp samples are rejected). Any aggregation will have to be done before sending samples to Prometheus.
* If there are multiple samples being ingested at the same timestamp, only one of the points is kept - the samples are **not** summed together (this is how Prometheus works in general - duplicate timestamp samples are rejected). Any aggregation will have to be done before sending samples to Prometheus.
## Type and Unit Labels
`--enable-feature=type-and-unit-labels`
When enabled, Prometheus will start injecting additional, reserved `__type__`
and `__unit__` labels as designed in the [PROM-39 proposal](https://github.com/prometheus/proposals/pull/39).
Those labels are sourced from the metadata structures of the existing scrape and ingestion formats
like OpenMetrics Text, Prometheus Text, Prometheus Proto, Remote Write 2 and OTLP. All the user provided labels with
`__type__` and `__unit__` will be overridden.
The PromQL layer will handle those labels the same way `__name__` is handled, e.g. dropped
on certain operations like `-` or `+` and affected by the `promql-delayed-name-removal` feature.
This feature enables important metadata information to be accessible directly with samples and PromQL layer.
It's especially useful for users who:
* Want to be able to select metrics based on type or unit.
* Want to handle cases of series with the same metric name and different type and units.
e.g. native histogram migrations or OpenTelemetry metrics from OTLP endpoint, without translation.
In the future, more [work is planned](https://github.com/prometheus/prometheus/issues/16610) that will depend on this, e.g. a rich PromQL UX that helps
when wrong types are used with the wrong functions, automatic renames, delta types and more.

View File

@ -200,7 +200,7 @@ To record the time series resulting from this expression into a new metric
called `job_instance_mode:node_cpu_seconds:avg_rate5m`, create a file
with the following recording rule and save it as `prometheus.rules.yml`:
```
```yaml
groups:
- name: cpu-node
rules:

View File

@ -11,52 +11,52 @@ This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0
## Flags
- The following feature flags have been removed and they have been added to the
- The following feature flags have been removed and they have been added to the
default behavior of Prometheus v3:
- `promql-at-modifier`
- `promql-negative-offset`
- `new-service-discovery-manager`
- `expand-external-labels`
- Environment variable references `${var}` or `$var` in external label values
are replaced according to the values of the current environment variables.
- Environment variable references `${var}` or `$var` in external label values
are replaced according to the values of the current environment variables.
- References to undefined variables are replaced by the empty string.
The `$` character can be escaped by using `$$`.
- `no-default-scrape-port`
- Prometheus v3 will no longer add ports to scrape targets according to the
- Prometheus v3 will no longer add ports to scrape targets according to the
specified scheme. Target will now appear in labels as configured.
- If you rely on scrape targets like
`https://example.com/metrics` or `http://example.com/metrics` to be
represented as `https://example.com/metrics:443` and
- If you rely on scrape targets like
`https://example.com/metrics` or `http://example.com/metrics` to be
represented as `https://example.com/metrics:443` and
`http://example.com/metrics:80` respectively, add them to your target URLs
- `agent`
- Instead use the dedicated `--agent` CLI flag.
- `remote-write-receiver`
- Instead use the dedicated `--web.enable-remote-write-receiver` CLI flag to enable the remote write receiver.
- `auto-gomemlimit`
- Prometheus v3 will automatically set `GOMEMLIMIT` to match the Linux
container memory limit. If there is no container limit, or the process is
running outside of containers, the system memory total is used. To disable
- Prometheus v3 will automatically set `GOMEMLIMIT` to match the Linux
container memory limit. If there is no container limit, or the process is
running outside of containers, the system memory total is used. To disable
this, `--no-auto-gomemlimit` is available.
- `auto-gomaxprocs`
- Prometheus v3 will automatically set `GOMAXPROCS` to match the Linux
- Prometheus v3 will automatically set `GOMAXPROCS` to match the Linux
container CPU quota. To disable this, `--no-auto-gomaxprocs` is available.
Prometheus v3 will log a warning if you continue to pass these to
Prometheus v3 will log a warning if you continue to pass these to
`--enable-feature`.
## Configuration
- The scrape job level configuration option `scrape_classic_histograms` has been
renamed to `always_scrape_classic_histograms`. If you use the
`--enable-feature=native-histograms` feature flag to ingest native histograms
and you also want to ingest classic histograms that an endpoint might expose
along with native histograms, be sure to add this configuration or change your
- The scrape job level configuration option `scrape_classic_histograms` has been
renamed to `always_scrape_classic_histograms`. If you use the
`--enable-feature=native-histograms` feature flag to ingest native histograms
and you also want to ingest classic histograms that an endpoint might expose
along with native histograms, be sure to add this configuration or change your
configuration from the old name.
- The `http_config.enable_http2` in `remote_write` items default has been
changed to `false`. In Prometheus v2 the remote write http client would
default to use http2. In order to parallelize multiple remote write queues
- The `http_config.enable_http2` in `remote_write` items default has been
changed to `false`. In Prometheus v2 the remote write http client would
default to use http2. In order to parallelize multiple remote write queues
across multiple sockets it's preferable not to default to http2.
If you prefer to use http2 for remote write you must now set
If you prefer to use http2 for remote write you must now set
`http_config.enable_http2: true` in your `remote_write` configuration section.
## PromQL
@ -137,7 +137,7 @@ may now fail if this fallback protocol is not specified.
### TSDB format and downgrade
The TSDB format has been changed slightly in Prometheus v2.55 in preparation for changes
The TSDB format has been changed slightly in Prometheus v2.55 in preparation for changes
to the index format. Consequently, a Prometheus v3 TSDB can only be read by a
Prometheus v2.55 or newer. Keep that in mind when upgrading to v3 -- you will be only
able to downgrade to v2.55, not lower, without losing your TSDB persistent data.
@ -147,8 +147,8 @@ confirm Prometheus works as expected, before upgrading to v3.
### TSDB storage contract
TSDB compatible storage is now expected to return results matching the specified
selectors. This might impact some third party implementations, most likely
TSDB compatible storage is now expected to return results matching the specified
selectors. This might impact some third party implementations, most likely
implementing `remote_read`.
This contract is not explicitly enforced, but can cause undefined behavior.
@ -179,7 +179,7 @@ scrape_configs:
```
### Log message format
Prometheus v3 has adopted `log/slog` over the previous `go-kit/log`. This
Prometheus v3 has adopted `log/slog` over the previous `go-kit/log`. This
results in a change of log message format. An example of the old log format is:
```
@ -198,19 +198,19 @@ time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.co
```
### `le` and `quantile` label values
In Prometheus v3, the values of the `le` label of classic histograms and the
In Prometheus v3, the values of the `le` label of classic histograms and the
`quantile` label of summaries are normalized upon ingestion. In Prometheus v2
the value of these labels depended on the scrape protocol (protobuf vs text
format) in some situations. This led to label values changing based on the
scrape protocol. E.g. a metric exposed as `my_classic_hist{le="1"}` would be
ingested as `my_classic_hist{le="1"}` via the text format, but as
`my_classic_hist{le="1.0"}` via protobuf. This changed the identity of the
the value of these labels depended on the scrape protocol (protobuf vs text
format) in some situations. This led to label values changing based on the
scrape protocol. E.g. a metric exposed as `my_classic_hist{le="1"}` would be
ingested as `my_classic_hist{le="1"}` via the text format, but as
`my_classic_hist{le="1.0"}` via protobuf. This changed the identity of the
metric and caused problems when querying the metric.
In Prometheus v3 these label values will always be normalized to a float like
representation. I.e. the above example will always result in
`my_classic_hist{le="1.0"}` being ingested into prometheus, no matter via which
protocol. The effect of this change is that alerts, recording rules and
dashboards that directly reference label values as whole numbers such as
In Prometheus v3 these label values will always be normalized to a float like
representation. I.e. the above example will always result in
`my_classic_hist{le="1.0"}` being ingested into prometheus, no matter via which
protocol. The effect of this change is that alerts, recording rules and
dashboards that directly reference label values as whole numbers such as
`le="1"` will stop working.
Ways to deal with this change either globally or on a per metric basis:
@ -236,11 +236,11 @@ This should **only** be applied to metrics that currently produce such labels.
```
### Disallow configuring Alertmanager with the v1 API
Prometheus 3 no longer supports Alertmanager's v1 API. Effectively Prometheus 3
Prometheus 3 no longer supports Alertmanager's v1 API. Effectively Prometheus 3
requires [Alertmanager 0.16.0](https://github.com/prometheus/alertmanager/releases/tag/v0.16.0) or later. Users with older Alertmanager
versions or configurations that use `alerting: alertmanagers: [api_version: v1]`
versions or configurations that use `alerting: alertmanagers: [api_version: v1]`
need to upgrade Alertmanager and change their configuration to use `api_version: v2`.
# Prometheus 2.0 migration guide
## Prometheus 2.0 migration guide
For the Prometheus 1.8 to 2.0 please refer to the [Prometheus v2.55 documentation](https://prometheus.io/docs/prometheus/2.55/migration/).
For the migration guide from Prometheus 1.8 to 2.0 please refer to the [Prometheus v2.55 documentation](https://prometheus.io/docs/prometheus/2.55/migration/).

View File

@ -32,7 +32,7 @@ will be returned in the data field.
The JSON response envelope format is as follows:
```
```json
{
"status": "success" | "error",
"data": <data>,
@ -96,7 +96,7 @@ query that may breach server-side URL character limits.
The `data` section of the query result has the following format:
```
```json
{
"resultType": "matrix" | "vector" | "scalar" | "string",
"result": <value>
@ -110,8 +110,11 @@ formats](#expression-query-result-formats).
The following example evaluates the expression `up` at the time
`2015-07-01T20:10:51.781Z`:
```bash
curl 'http://localhost:9090/api/v1/query?query=up&time=2015-07-01T20:10:51.781Z'
```
```json
$ curl 'http://localhost:9090/api/v1/query?query=up&time=2015-07-01T20:10:51.781Z'
{
"status" : "success",
"data" : {
@ -163,7 +166,7 @@ query that may breach server-side URL character limits.
The `data` section of the query result has the following format:
```
```json
{
"resultType": "matrix",
"result": <value>
@ -176,8 +179,11 @@ format](#range-vectors).
The following example evaluates the expression `up` over a 30-second range with
a query resolution of 15 seconds.
```bash
curl 'http://localhost:9090/api/v1/query_range?query=up&start=2015-07-01T20:10:30.781Z&end=2015-07-01T20:11:00.781Z&step=15s'
```
```json
$ curl 'http://localhost:9090/api/v1/query_range?query=up&start=2015-07-01T20:10:30.781Z&end=2015-07-01T20:11:00.781Z&step=15s'
{
"status" : "success",
"data" : {
@ -233,8 +239,11 @@ The `data` section of the query result is a string containing the formatted quer
The following example formats the expression `foo/bar`:
```bash
curl 'http://localhost:9090/api/v1/format_query?query=foo/bar'
```
```json
$ curl 'http://localhost:9090/api/v1/format_query?query=foo/bar'
{
"status" : "success",
"data" : "foo / bar"
@ -264,8 +273,11 @@ The `data` section of the query result is a string containing the AST of the par
The following example parses the expression `foo/bar`:
```bash
curl 'http://localhost:9090/api/v1/parse_query?query=foo/bar'
```
```json
$ curl 'http://localhost:9090/api/v1/parse_query?query=foo/bar'
{
"data" : {
"bool" : false,
@ -343,8 +355,11 @@ contain the label name/value pairs which identify each series.
The following example returns all series that match either of the selectors
`up` or `process_start_time_seconds{job="prometheus"}`:
```bash
curl -g 'http://localhost:9090/api/v1/series?' --data-urlencode 'match[]=up' --data-urlencode 'match[]=process_start_time_seconds{job="prometheus"}'
```
```json
$ curl -g 'http://localhost:9090/api/v1/series?' --data-urlencode 'match[]=up' --data-urlencode 'match[]=process_start_time_seconds{job="prometheus"}'
{
"status" : "success",
"data" : [
@ -389,8 +404,11 @@ The `data` section of the JSON response is a list of string label names.
Here is an example.
```bash
curl 'localhost:9090/api/v1/labels'
```
```json
$ curl 'localhost:9090/api/v1/labels'
{
"status": "success",
"data": [
@ -439,8 +457,11 @@ The `data` section of the JSON response is a list of string label values.
This example queries for all label values for the `http_status_code` label:
```bash
curl http://localhost:9090/api/v1/label/http_status_code/values
```
```json
$ curl http://localhost:9090/api/v1/label/http_status_code/values
{
"status" : "success",
"data" : [
@ -462,8 +483,11 @@ Label names can optionally be encoded using the Values Escaping method, and is n
This example queries for all label values for the `http.status_code` label:
```bash
curl http://localhost:9090/api/v1/label/U__http_2e_status_code/values
```
```json
$ curl http://localhost:9090/api/v1/label/U__http_2e_status_code/values
{
"status" : "success",
"data" : [
@ -489,8 +513,11 @@ URL query parameters:
- `start=<rfc3339 | unix_timestamp>`: Start timestamp.
- `end=<rfc3339 | unix_timestamp>`: End timestamp.
```bash
curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metric_total&start=2020-09-14T15:22:25.479Z&end=2020-09-14T15:23:25.479Z'
```
```json
$ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metric_total&start=2020-09-14T15:22:25.479Z&end=2020-09-14T15:23:25.479Z'
{
"status": "success",
"data": [
@ -556,7 +583,7 @@ is explained in detail in its own section below.
Range vectors are returned as result type `matrix`. The corresponding
`result` property has the following format:
```
```json
[
{
"metric": { "<label_name>": "<label_value>", ... },
@ -578,7 +605,7 @@ and [`sort_by_label`](functions.md#sort_by_label) have no effect for range vecto
Instant vectors are returned as result type `vector`. The corresponding
`result` property has the following format:
```
```json
[
{
"metric": { "<label_name>": "<label_value>", ... },
@ -600,7 +627,7 @@ is used.
Scalar results are returned as result type `scalar`. The corresponding
`result` property has the following format:
```
```json
[ <unix_time>, "<scalar_value>" ]
```
@ -609,7 +636,7 @@ Scalar results are returned as result type `scalar`. The corresponding
String results are returned as result type `string`. The corresponding
`result` property has the following format:
```
```json
[ <unix_time>, "<string_value>" ]
```
@ -620,7 +647,7 @@ The `<histogram>` placeholder used above is formatted as follows.
_Note that native histograms are an experimental feature, and the format below
might still change._
```
```json
{
"count": "<count_of_observations>",
"sum": "<sum_of_observations>",
@ -654,8 +681,11 @@ Dropped targets are subject to `keep_dropped_targets` limit, if set.
`labels` represents the label set after relabeling has occurred.
`discoveredLabels` represent the unmodified labels retrieved during service discovery before relabeling has occurred.
```bash
curl http://localhost:9090/api/v1/targets
```
```json
$ curl http://localhost:9090/api/v1/targets
{
"status": "success",
"data": {
@ -704,9 +734,12 @@ The `state` query parameter allows the caller to filter by active or dropped tar
Note that an empty array is still returned for targets that are filtered out.
Other values are ignored.
```bash
curl 'http://localhost:9090/api/v1/targets?state=active'
```
```json
$ curl 'http://localhost:9090/api/v1/targets?state=active'
{
"status": "success",
"data": {
"activeTargets": [
@ -737,9 +770,12 @@ $ curl 'http://localhost:9090/api/v1/targets?state=active'
The `scrapePool` query parameter allows the caller to filter by scrape pool name.
```bash
curl 'http://localhost:9090/api/v1/targets?scrapePool=node_exporter'
```
```json
$ curl 'http://localhost:9090/api/v1/targets?scrapePool=node_exporter'
{
"status": "success",
"data": {
"activeTargets": [
@ -792,9 +828,11 @@ URL query parameters:
- `group_limit=<number>`: The `group_limit` parameter allows you to specify a limit for the number of rule groups that is returned in a single response. If the total number of rule groups exceeds the specified `group_limit` value, the response will include a `groupNextToken` property. You can use the value of this `groupNextToken` property in subsequent requests in the `group_next_token` parameter to paginate over the remaining rule groups. The `groupNextToken` property will not be present in the final response, indicating that you have retrieved all the available rule groups. Please note that there are no guarantees regarding the consistency of the response if the rule groups are being modified during the pagination process.
- `group_next_token`: the pagination token that was returned in previous request when the `group_limit` property is set. The pagination token is used to iteratively paginate over a large number of rule groups. To use the `group_next_token` parameter, the `group_limit` parameter also need to be present. If a rule group that coincides with the next token is removed while you are paginating over the rule groups, a response with status code 400 will be returned.
```json
$ curl http://localhost:9090/api/v1/rules
```bash
curl http://localhost:9090/api/v1/rules
```
```json
{
"data": {
"groups": [
@ -857,9 +895,11 @@ guarantees as the overarching API v1.
GET /api/v1/alerts
```
```json
$ curl http://localhost:9090/api/v1/alerts
```bash
curl http://localhost:9090/api/v1/alerts
```
```json
{
"data": {
"alerts": [
@ -904,6 +944,9 @@ curl -G http://localhost:9091/api/v1/targets/metadata \
--data-urlencode 'metric=go_goroutines' \
--data-urlencode 'match_target={job="prometheus"}' \
--data-urlencode 'limit=2'
```
```json
{
"status": "success",
"data": [
@ -932,9 +975,12 @@ curl -G http://localhost:9091/api/v1/targets/metadata \
The following example returns metadata for all metrics for all targets with
label `instance="127.0.0.1:9090"`.
```json
```bash
curl -G http://localhost:9091/api/v1/targets/metadata \
--data-urlencode 'match_target={instance="127.0.0.1:9090"}'
```
```json
{
"status": "success",
"data": [
@ -983,9 +1029,11 @@ The `data` section of the query result consists of an object where each key is a
The following example returns two metrics. Note that the metric `http_requests_total` has more than one object in the list. At least one target has a value for `HELP` that do not match with the rest.
```json
```bash
curl -G http://localhost:9090/api/v1/metadata?limit=2
```
```json
{
"status": "success",
"data": {
@ -1014,9 +1062,11 @@ curl -G http://localhost:9090/api/v1/metadata?limit=2
The following example returns only one metadata entry for each metric.
```json
```bash
curl -G http://localhost:9090/api/v1/metadata?limit_per_metric=1
```
```json
{
"status": "success",
"data": {
@ -1040,9 +1090,11 @@ curl -G http://localhost:9090/api/v1/metadata?limit_per_metric=1
The following example returns metadata only for the metric `http_requests_total`.
```json
```bash
curl -G http://localhost:9090/api/v1/metadata?metric=http_requests_total
```
```json
{
"status": "success",
"data": {
@ -1073,8 +1125,11 @@ GET /api/v1/alertmanagers
Both the active and dropped Alertmanagers are part of the response.
```bash
curl http://localhost:9090/api/v1/alertmanagers
```
```json
$ curl http://localhost:9090/api/v1/alertmanagers
{
"status": "success",
"data": {
@ -1107,8 +1162,11 @@ GET /api/v1/status/config
The config is returned as dumped YAML file. Due to limitation of the YAML
library, YAML comments are not included.
```bash
curl http://localhost:9090/api/v1/status/config
```
```json
$ curl http://localhost:9090/api/v1/status/config
{
"status": "success",
"data": {
@ -1127,8 +1185,11 @@ GET /api/v1/status/flags
All values are of the result type `string`.
```bash
curl http://localhost:9090/api/v1/status/flags
```
```json
$ curl http://localhost:9090/api/v1/status/flags
{
"status": "success",
"data": {
@ -1154,8 +1215,11 @@ GET /api/v1/status/runtimeinfo
The returned values are of different types, depending on the nature of the runtime property.
```bash
curl http://localhost:9090/api/v1/status/runtimeinfo
```
```json
$ curl http://localhost:9090/api/v1/status/runtimeinfo
{
"status": "success",
"data": {
@ -1190,8 +1254,11 @@ GET /api/v1/status/buildinfo
All values are of the result type `string`.
```bash
curl http://localhost:9090/api/v1/status/buildinfo
```
```json
$ curl http://localhost:9090/api/v1/status/buildinfo
{
"status": "success",
"data": {
@ -1232,8 +1299,11 @@ The `data` section of the query result consists of:
- **memoryInBytesByLabelName** This will provide a list of the label names and memory used in bytes. Memory usage is calculated by adding the length of all values for a given label name.
- **seriesCountByLabelPair** This will provide a list of label value pairs and their series count.
```bash
curl http://localhost:9090/api/v1/status/tsdb
```
```json
$ curl http://localhost:9090/api/v1/status/tsdb
{
"status": "success",
"data": {
@ -1305,8 +1375,11 @@ GET /api/v1/status/walreplay
- **in progress**: The replay is in progress.
- **done**: The replay has finished.
```bash
curl http://localhost:9090/api/v1/status/walreplay
```
```json
$ curl http://localhost:9090/api/v1/status/walreplay
{
"status": "success",
"data": {
@ -1338,8 +1411,11 @@ URL query parameters:
- `skip_head=<bool>`: Skip data present in the head block. Optional.
```bash
curl -XPOST http://localhost:9090/api/v1/admin/tsdb/snapshot
```
```json
$ curl -XPOST http://localhost:9090/api/v1/admin/tsdb/snapshot
{
"status": "success",
"data": {
@ -1371,8 +1447,8 @@ Not mentioning both start and end times would clear all the data for the matched
Example:
```json
$ curl -X POST \
```bash
curl -X POST \
-g 'http://localhost:9090/api/v1/admin/tsdb/delete_series?match[]=up&match[]=process_start_time_seconds{job="prometheus"}'
```
@ -1392,8 +1468,8 @@ PUT /api/v1/admin/tsdb/clean_tombstones
This takes no parameters or body.
```json
$ curl -XPOST http://localhost:9090/api/v1/admin/tsdb/clean_tombstones
```bash
curl -XPOST http://localhost:9090/api/v1/admin/tsdb/clean_tombstones
```
*New in v2.1 and supports PUT from v2.9*
@ -1451,8 +1527,11 @@ GET /api/v1/notifications
Example:
```bash
curl http://localhost:9090/api/v1/notifications
```
```
$ curl http://localhost:9090/api/v1/notifications
{
"status": "success",
"data": [
@ -1477,8 +1556,11 @@ GET /api/v1/notifications/live
Example:
```bash
curl http://localhost:9090/api/v1/notifications/live
```
```
$ curl http://localhost:9090/api/v1/notifications/live
data: {
"status": "success",
"data": [

View File

@ -268,7 +268,7 @@ vector of fewer elements with aggregated values:
* `bottomk` (smallest _k_ elements by sample value)
* `topk` (largest _k_ elements by sample value)
* `limitk` (sample _k_ elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`)
* `limit_ratio` (sample a pseudo-random ratio _r_ of elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`)
* `limit_ratio` (sample a pseudo-random ratio _r_ of elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`)
* `group` (all values in the resulting vector are 1)
* `count` (count number of elements in the vector)
* `count_values` (count number of elements with the same value)

View File

@ -61,10 +61,10 @@ A Prometheus server's data directory looks something like this:
Note that a limitation of local storage is that it is not clustered or
replicated. Thus, it is not arbitrarily scalable or durable in the face of
drive or node outages and should be managed like any other single node
database.
database.
[Snapshots](querying/api.md#snapshot) are recommended for backups. Backups
made without snapshots run the risk of losing data that was recorded since
[Snapshots](querying/api.md#snapshot) are recommended for backups. Backups
made without snapshots run the risk of losing data that was recorded since
the last WAL sync, which typically happens every two hours. With proper
architecture, it is possible to retain years of data in local storage.
@ -75,14 +75,14 @@ performance, and efficiency.
For further details on file format, see [TSDB format](/tsdb/docs/format/README.md).
## Compaction
### Compaction
The initial two-hour blocks are eventually compacted into longer blocks in the background.
Compaction will create larger blocks containing data spanning up to 10% of the retention time,
or 31 days, whichever is smaller.
## Operational aspects
### Operational aspects
Prometheus has several flags that configure local storage. The most important are:
@ -134,16 +134,16 @@ will be used.
Expired block cleanup happens in the background. It may take up to two hours
to remove expired blocks. Blocks must be fully expired before they are removed.
## Right-Sizing Retention Size
### Right-Sizing Retention Size
If you are utilizing `storage.tsdb.retention.size` to set a size limit, you
will want to consider the right size for this value relative to the storage you
have allocated for Prometheus. It is wise to reduce the retention size to provide
a buffer, ensuring that older entries will be removed before the allocated storage
If you are utilizing `storage.tsdb.retention.size` to set a size limit, you
will want to consider the right size for this value relative to the storage you
have allocated for Prometheus. It is wise to reduce the retention size to provide
a buffer, ensuring that older entries will be removed before the allocated storage
for Prometheus becomes full.
At present, we recommend setting the retention size to, at most, 80-85% of your
allocated Prometheus disk space. This increases the likelihood that older entries
At present, we recommend setting the retention size to, at most, 80-85% of your
allocated Prometheus disk space. This increases the likelihood that older entries
will be removed prior to hitting any disk limitations.
## Remote storage integrations

View File

@ -336,16 +336,29 @@ func (ls Labels) Validate(f func(l Label) error) error {
return nil
}
// DropMetricName returns Labels with "__name__" removed.
// DropMetricName returns Labels with the "__name__" removed.
// Deprecated: Use DropReserved instead.
func (ls Labels) DropMetricName() Labels {
return ls.DropReserved(func(n string) bool { return n == MetricName })
}
// DropReserved returns Labels without the chosen (via shouldDropFn) reserved (starting with underscore) labels.
func (ls Labels) DropReserved(shouldDropFn func(name string) bool) Labels {
rm := 0
for i, l := range ls {
if l.Name == MetricName {
if l.Name[0] > '_' { // Stop looking if we've gone past special labels.
break
}
if shouldDropFn(l.Name) {
i := i - rm // Offsetting after removals.
if i == 0 { // Make common case fast with no allocations.
return ls[1:]
ls = ls[1:]
} else {
// Avoid modifying original Labels - use [:i:i] so that left slice would not
// have any spare capacity and append would have to allocate a new slice for the result.
ls = append(ls[:i:i], ls[i+1:]...)
}
// Avoid modifying original Labels - use [:i:i] so that left slice would not
// have any spare capacity and append would have to allocate a new slice for the result.
return append(ls[:i:i], ls[i+1:]...)
rm++
}
}
return ls

View File

@ -24,10 +24,12 @@ import (
)
const (
MetricName = "__name__"
AlertName = "alertname"
BucketLabel = "le"
InstanceName = "instance"
// MetricName is a special label name that represents a metric name.
// Deprecated: Use schema.Metadata structure and its methods.
MetricName = "__name__"
AlertName = "alertname"
BucketLabel = "le"
labelSep = '\xfe' // Used at beginning of `Bytes` return.
sep = '\xff' // Used between labels in `Bytes` and `Hash`.
@ -35,7 +37,7 @@ const (
var seps = []byte{sep} // Used with Hash, which has no WriteByte method.
// Label is a key/value pair of strings.
// Label is a key/value pair of strings.
type Label struct {
Name, Value string
}
@ -167,10 +169,8 @@ func (b *Builder) Del(ns ...string) *Builder {
// Keep removes all labels from the base except those with the given names.
func (b *Builder) Keep(ns ...string) *Builder {
b.base.Range(func(l Label) {
for _, n := range ns {
if l.Name == n {
return
}
if slices.Contains(ns, l.Name) {
return
}
b.del = append(b.del, l.Name)
})

View File

@ -554,20 +554,27 @@ func (ls Labels) ReleaseStrings(release func(string)) {
// TODO: remove these calls as there is nothing to do.
}
// DropMetricName returns Labels with "__name__" removed.
// DropMetricName returns Labels with the "__name__" removed.
// Deprecated: Use DropReserved instead.
func (ls Labels) DropMetricName() Labels {
return ls.DropReserved(func(n string) bool { return n == MetricName })
}
// DropReserved returns Labels without the chosen (via shouldDropFn) reserved (starting with underscore) labels.
func (ls Labels) DropReserved(shouldDropFn func(name string) bool) Labels {
for i := 0; i < len(ls.data); {
lName, i2 := decodeString(ls.syms, ls.data, i)
_, i2 = decodeVarint(ls.data, i2)
if lName == MetricName {
if lName[0] > '_' { // Stop looking if we've gone past special labels.
break
}
if shouldDropFn(lName) {
if i == 0 { // Make common case fast with no allocations.
ls.data = ls.data[i2:]
} else {
ls.data = ls.data[:i] + ls.data[i2:]
}
break
} else if lName[0] > MetricName[0] { // Stop looking if we've gone past.
break
continue
}
i = i2
}

View File

@ -413,21 +413,28 @@ func (ls Labels) Validate(f func(l Label) error) error {
return nil
}
// DropMetricName returns Labels with "__name__" removed.
// DropMetricName returns Labels with the "__name__" removed.
// Deprecated: Use DropReserved instead.
func (ls Labels) DropMetricName() Labels {
return ls.DropReserved(func(n string) bool { return n == MetricName })
}
// DropReserved returns Labels without the chosen (via shouldDropFn) reserved (starting with underscore) labels.
func (ls Labels) DropReserved(shouldDropFn func(name string) bool) Labels {
for i := 0; i < len(ls.data); {
lName, i2 := decodeString(ls.data, i)
size, i2 := decodeSize(ls.data, i2)
i2 += size
if lName == MetricName {
if lName[0] > '_' { // Stop looking if we've gone past special labels.
break
}
if shouldDropFn(lName) {
if i == 0 { // Make common case fast with no allocations.
ls.data = ls.data[i2:]
} else {
ls.data = ls.data[:i] + ls.data[i2:]
}
break
} else if lName[0] > MetricName[0] { // Stop looking if we've gone past.
break
continue
}
i = i2
}

View File

@ -523,11 +523,25 @@ func TestLabels_DropMetricName(t *testing.T) {
require.True(t, Equal(FromStrings("aaa", "111"), FromStrings(MetricName, "myname", "aaa", "111").DropMetricName()))
original := FromStrings("__aaa__", "111", MetricName, "myname", "bbb", "222")
check := FromStrings("__aaa__", "111", MetricName, "myname", "bbb", "222")
check := original.Copy()
require.True(t, Equal(FromStrings("__aaa__", "111", "bbb", "222"), check.DropMetricName()))
require.True(t, Equal(original, check))
}
func TestLabels_DropReserved(t *testing.T) {
shouldDropFn := func(n string) bool {
return n == MetricName || n == "__something__"
}
require.True(t, Equal(FromStrings("aaa", "111", "bbb", "222"), FromStrings("aaa", "111", "bbb", "222").DropReserved(shouldDropFn)))
require.True(t, Equal(FromStrings("aaa", "111"), FromStrings(MetricName, "myname", "aaa", "111").DropReserved(shouldDropFn)))
require.True(t, Equal(FromStrings("aaa", "111"), FromStrings(MetricName, "myname", "__something__", string(model.MetricTypeCounter), "aaa", "111").DropReserved(shouldDropFn)))
original := FromStrings("__aaa__", "111", MetricName, "myname", "bbb", "222")
check := original.Copy()
require.True(t, Equal(FromStrings("__aaa__", "111", "bbb", "222"), check.DropReserved(shouldDropFn)))
require.True(t, Equal(original, check))
}
func ScratchBuilderForBenchmark() ScratchBuilder {
// (Only relevant to -tags dedupelabels: stuff the symbol table before adding the real labels, to avoid having everything fitting into 1 byte.)
b := NewScratchBuilder(256)

View File

@ -95,12 +95,7 @@ func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
return func(s string) bool {
if len(m.setMatches) != 0 {
for _, match := range m.setMatches {
if match == s {
return true
}
}
return false
return slices.Contains(m.setMatches, s)
}
if m.prefix != "" && !strings.HasPrefix(s, m.prefix) {
return false
@ -771,16 +766,11 @@ func (m *equalMultiStringSliceMatcher) setMatches() []string {
func (m *equalMultiStringSliceMatcher) Matches(s string) bool {
if m.caseSensitive {
for _, v := range m.values {
if s == v {
return true
}
}
} else {
for _, v := range m.values {
if strings.EqualFold(s, v) {
return true
}
return slices.Contains(m.values, s)
}
for _, v := range m.values {
if strings.EqualFold(s, v) {
return true
}
}
return false

View File

@ -144,10 +144,12 @@ func benchParse(b *testing.B, data []byte, parser string) {
var newParserFn newParser
switch parser {
case "promtext":
newParserFn = NewPromParser
newParserFn = func(b []byte, st *labels.SymbolTable) Parser {
return NewPromParser(b, st, false)
}
case "promproto":
newParserFn = func(b []byte, st *labels.SymbolTable) Parser {
return NewProtobufParser(b, true, st)
return NewProtobufParser(b, true, false, st)
}
case "omtext":
newParserFn = func(b []byte, st *labels.SymbolTable) Parser {
@ -273,7 +275,7 @@ func BenchmarkCreatedTimestampPromProto(b *testing.B) {
data := createTestProtoBuf(b).Bytes()
st := labels.NewSymbolTable()
p := NewProtobufParser(data, true, st)
p := NewProtobufParser(data, true, false, st)
found := false
Inner:

View File

@ -51,11 +51,13 @@ type Parser interface {
// Type returns the metric name and type in the current entry.
// Must only be called after Next returned a type entry.
// The returned byte slices become invalid after the next call to Next.
// TODO(bwplotka): Once type-and-unit-labels stabilizes we could remove this method.
Type() ([]byte, model.MetricType)
// Unit returns the metric name and unit in the current entry.
// Must only be called after Next returned a unit entry.
// The returned byte slices become invalid after the next call to Next.
// TODO(bwplotka): Once type-and-unit-labels stabilizes we could remove this method.
Unit() ([]byte, []byte)
// Comment returns the text of the current comment.
@ -128,19 +130,20 @@ func extractMediaType(contentType, fallbackType string) (string, error) {
// An error may also be returned if fallbackType had to be used or there was some
// other error parsing the supplied Content-Type.
// If the returned parser is nil then the scrape must fail.
func New(b []byte, contentType, fallbackType string, parseClassicHistograms, skipOMCTSeries bool, st *labels.SymbolTable) (Parser, error) {
func New(b []byte, contentType, fallbackType string, parseClassicHistograms, skipOMCTSeries, enableTypeAndUnitLabels bool, st *labels.SymbolTable) (Parser, error) {
mediaType, err := extractMediaType(contentType, fallbackType)
// err may be nil or something we want to warn about.
switch mediaType {
case "application/openmetrics-text":
return NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) {
o.SkipCTSeries = skipOMCTSeries
o.skipCTSeries = skipOMCTSeries
o.enableTypeAndUnitLabels = enableTypeAndUnitLabels
}), err
case "application/vnd.google.protobuf":
return NewProtobufParser(b, parseClassicHistograms, st), err
return NewProtobufParser(b, parseClassicHistograms, enableTypeAndUnitLabels, st), err
case "text/plain":
return NewPromParser(b, st), err
return NewPromParser(b, st, enableTypeAndUnitLabels), err
default:
return nil, err
}

View File

@ -168,7 +168,7 @@ func TestNewParser(t *testing.T) {
fallbackProtoMediaType := tt.fallbackScrapeProtocol.HeaderMediaType()
p, err := New([]byte{}, tt.contentType, fallbackProtoMediaType, false, false, labels.NewSymbolTable())
p, err := New([]byte{}, tt.contentType, fallbackProtoMediaType, false, false, false, labels.NewSymbolTable())
tt.validateParser(t, p)
if tt.err == "" {
require.NoError(t, err)

View File

@ -598,7 +598,7 @@ func TestNHCBParser_NoNHCBWhenExponential(t *testing.T) {
func() (string, parserFactory, []int, parserOptions) {
factory := func(keepClassic bool) Parser {
inputBuf := createTestProtoBufHistogram(t)
return NewProtobufParser(inputBuf.Bytes(), keepClassic, labels.NewSymbolTable())
return NewProtobufParser(inputBuf.Bytes(), keepClassic, false, labels.NewSymbolTable())
}
return "ProtoBuf", factory, []int{1, 2, 3}, parserOptions{useUTF8sep: true, hasCreatedTimeStamp: true}
},
@ -612,7 +612,7 @@ func TestNHCBParser_NoNHCBWhenExponential(t *testing.T) {
func() (string, parserFactory, []int, parserOptions) {
factory := func(_ bool) Parser {
input := createTestPromHistogram()
return NewPromParser([]byte(input), labels.NewSymbolTable())
return NewPromParser([]byte(input), labels.NewSymbolTable(), false)
}
return "Prometheus", factory, []int{1}, parserOptions{}
},

View File

@ -33,6 +33,7 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/schema"
)
type openMetricsLexer struct {
@ -81,10 +82,12 @@ type OpenMetricsParser struct {
mfNameLen int // length of metric family name to get from series.
text []byte
mtype model.MetricType
val float64
ts int64
hasTS bool
start int
unit string
val float64
ts int64
hasTS bool
start int
// offsets is a list of offsets into series that describe the positions
// of the metric name and label names and values for this series.
// p.offsets[0] is the start character of the metric name.
@ -106,12 +109,14 @@ type OpenMetricsParser struct {
ignoreExemplar bool
// visitedMFName is the metric family name of the last visited metric when peeking ahead
// for _created series during the execution of the CreatedTimestamp method.
visitedMFName []byte
skipCTSeries bool
visitedMFName []byte
skipCTSeries bool
enableTypeAndUnitLabels bool
}
type openMetricsParserOptions struct {
SkipCTSeries bool
skipCTSeries bool
enableTypeAndUnitLabels bool
}
type OpenMetricsOption func(*openMetricsParserOptions)
@ -125,7 +130,15 @@ type OpenMetricsOption func(*openMetricsParserOptions)
// best-effort compatibility.
func WithOMParserCTSeriesSkipped() OpenMetricsOption {
return func(o *openMetricsParserOptions) {
o.SkipCTSeries = true
o.skipCTSeries = true
}
}
// WithOMParserTypeAndUnitLabels configures the OpenMetrics parser to inject
// the metric's type and unit into each series' label set as the __type__ and
// __unit__ labels.
func WithOMParserTypeAndUnitLabels() OpenMetricsOption {
	return func(opts *openMetricsParserOptions) {
		opts.enableTypeAndUnitLabels = true
	}
}
@ -138,9 +151,10 @@ func NewOpenMetricsParser(b []byte, st *labels.SymbolTable, opts ...OpenMetricsO
}
parser := &OpenMetricsParser{
l: &openMetricsLexer{b: b},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
skipCTSeries: options.SkipCTSeries,
l: &openMetricsLexer{b: b},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
skipCTSeries: options.skipCTSeries,
enableTypeAndUnitLabels: options.enableTypeAndUnitLabels,
}
return parser
@ -187,7 +201,7 @@ func (p *OpenMetricsParser) Type() ([]byte, model.MetricType) {
// Must only be called after Next returned a unit entry.
// The returned byte slices become invalid after the next call to Next.
func (p *OpenMetricsParser) Unit() ([]byte, []byte) {
return p.l.b[p.offsets[0]:p.offsets[1]], p.text
return p.l.b[p.offsets[0]:p.offsets[1]], []byte(p.unit)
}
// Comment returns the text of the current comment.
@ -203,16 +217,28 @@ func (p *OpenMetricsParser) Labels(l *labels.Labels) {
p.builder.Reset()
metricName := unreplace(s[p.offsets[0]-p.start : p.offsets[1]-p.start])
p.builder.Add(labels.MetricName, metricName)
m := schema.Metadata{
Name: metricName,
Type: p.mtype,
Unit: p.unit,
}
if p.enableTypeAndUnitLabels {
m.AddToLabels(&p.builder)
} else {
p.builder.Add(labels.MetricName, metricName)
}
for i := 2; i < len(p.offsets); i += 4 {
a := p.offsets[i] - p.start
b := p.offsets[i+1] - p.start
label := unreplace(s[a:b])
if p.enableTypeAndUnitLabels && !m.IsEmptyFor(label) {
// Dropping user provided metadata labels, if found in the OM metadata.
continue
}
c := p.offsets[i+2] - p.start
d := p.offsets[i+3] - p.start
value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d]))
p.builder.Add(label, value)
}
@ -283,7 +309,7 @@ func (p *OpenMetricsParser) CreatedTimestamp() int64 {
return p.ct
}
// Create a new lexer to reset the parser once this function is done executing.
// Create a new lexer and other core state details to reset the parser once this function is done executing.
resetLexer := &openMetricsLexer{
b: p.l.b,
i: p.l.i,
@ -291,15 +317,16 @@ func (p *OpenMetricsParser) CreatedTimestamp() int64 {
err: p.l.err,
state: p.l.state,
}
resetStart := p.start
resetMType := p.mtype
p.skipCTSeries = false
p.ignoreExemplar = true
savedStart := p.start
defer func() {
p.ignoreExemplar = false
p.start = savedStart
p.l = resetLexer
p.start = resetStart
p.mtype = resetMType
p.ignoreExemplar = false
}()
for {
@ -493,11 +520,11 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
case tType:
return EntryType, nil
case tUnit:
p.unit = string(p.text)
m := yoloString(p.l.b[p.offsets[0]:p.offsets[1]])
u := yoloString(p.text)
if len(u) > 0 {
if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' {
return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", u, m)
if len(p.unit) > 0 {
if !strings.HasSuffix(m, p.unit) || len(m) < len(p.unit)+1 || p.l.b[p.offsets[1]-len(p.unit)-1] != '_' {
return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", p.unit, m)
}
}
return EntryUnit, nil

View File

@ -119,356 +119,539 @@ foobar{quantile="0.99"} 150.1`
input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
input += "\n# EOF\n"
exp := []parsedEntry{
{
m: "go_gc_duration_seconds",
help: "A summary of the GC invocation durations.",
}, {
m: "go_gc_duration_seconds",
typ: model.MetricTypeSummary,
}, {
m: "go_gc_duration_seconds",
unit: "seconds",
}, {
m: `go_gc_duration_seconds{quantile="0"}`,
v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
}, {
m: `go_gc_duration_seconds{quantile="0.25"}`,
v: 7.424100000000001e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
}, {
m: `go_gc_duration_seconds{quantile="0.5",a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
}, {
m: "nohelp1",
help: "",
}, {
m: "help2",
help: "escape \\ \n \\ \" \\x chars",
}, {
m: "nounit",
unit: "",
}, {
m: `go_gc_duration_seconds{quantile="1.0",a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, {
m: `go_gc_duration_seconds_count`,
v: 99,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds_count"),
}, {
m: `some:aggregate:rate5m{a_b="c"}`,
v: 1,
lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"),
}, {
m: "go_goroutines",
help: "Number of goroutines that currently exist.",
}, {
m: "go_goroutines",
typ: model.MetricTypeGauge,
}, {
m: `go_goroutines`,
v: 33,
t: int64p(123123),
lset: labels.FromStrings("__name__", "go_goroutines"),
}, {
m: "hh",
typ: model.MetricTypeHistogram,
}, {
m: `hh_bucket{le="+Inf"}`,
v: 1,
lset: labels.FromStrings("__name__", "hh_bucket", "le", "+Inf"),
}, {
m: "gh",
typ: model.MetricTypeGaugeHistogram,
}, {
m: `gh_bucket{le="+Inf"}`,
v: 1,
lset: labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"),
}, {
m: "hhh",
typ: model.MetricTypeHistogram,
}, {
m: `hhh_bucket{le="+Inf"}`,
v: 1,
lset: labels.FromStrings("__name__", "hhh_bucket", "le", "+Inf"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4},
},
}, {
m: `hhh_count`,
v: 1,
lset: labels.FromStrings("__name__", "hhh_count"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4},
},
}, {
m: "ggh",
typ: model.MetricTypeGaugeHistogram,
}, {
m: `ggh_bucket{le="+Inf"}`,
v: 1,
lset: labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
},
}, {
m: `ggh_count`,
v: 1,
lset: labels.FromStrings("__name__", "ggh_count"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
},
}, {
m: "smr_seconds",
typ: model.MetricTypeSummary,
}, {
m: `smr_seconds_count`,
v: 2,
lset: labels.FromStrings("__name__", "smr_seconds_count"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321},
},
}, {
m: `smr_seconds_sum`,
v: 42,
lset: labels.FromStrings("__name__", "smr_seconds_sum"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321},
},
}, {
m: "ii",
typ: model.MetricTypeInfo,
}, {
m: `ii{foo="bar"}`,
v: 1,
lset: labels.FromStrings("__name__", "ii", "foo", "bar"),
}, {
m: "ss",
typ: model.MetricTypeStateset,
}, {
m: `ss{ss="foo"}`,
v: 1,
lset: labels.FromStrings("__name__", "ss", "ss", "foo"),
}, {
m: `ss{ss="bar"}`,
v: 0,
lset: labels.FromStrings("__name__", "ss", "ss", "bar"),
}, {
m: `ss{A="a"}`,
v: 0,
lset: labels.FromStrings("A", "a", "__name__", "ss"),
}, {
m: "un",
typ: model.MetricTypeUnknown,
}, {
m: "_metric_starting_with_underscore",
v: 1,
lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"),
}, {
m: "testmetric{_label_starting_with_underscore=\"foo\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"),
}, {
m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
}, {
m: "foo",
help: "Counter with and without labels to certify CT is parsed for both cases",
}, {
m: "foo",
typ: model.MetricTypeCounter,
}, {
m: "foo_total",
v: 17,
lset: labels.FromStrings("__name__", "foo_total"),
t: int64p(1520879607789),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
},
ct: 1520872607123,
}, {
m: `foo_total{a="b"}`,
v: 17.0,
lset: labels.FromStrings("__name__", "foo_total", "a", "b"),
t: int64p(1520879607789),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
},
ct: 1520872607123,
}, {
m: `foo_total{le="c"}`,
v: 21.0,
lset: labels.FromStrings("__name__", "foo_total", "le", "c"),
ct: 1520872621123,
}, {
m: `foo_total{le="1"}`,
v: 10.0,
lset: labels.FromStrings("__name__", "foo_total", "le", "1"),
}, {
m: "bar",
help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far",
}, {
m: "bar",
typ: model.MetricTypeSummary,
}, {
m: "bar_count",
v: 17.0,
lset: labels.FromStrings("__name__", "bar_count"),
ct: 1520872608124,
}, {
m: "bar_sum",
v: 324789.3,
lset: labels.FromStrings("__name__", "bar_sum"),
ct: 1520872608124,
}, {
m: `bar{quantile="0.95"}`,
v: 123.7,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"),
ct: 1520872608124,
}, {
m: `bar{quantile="0.99"}`,
v: 150.0,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"),
ct: 1520872608124,
}, {
m: "baz",
help: "Histogram with the same objective as above's summary",
}, {
m: "baz",
typ: model.MetricTypeHistogram,
}, {
m: `baz_bucket{le="0.0"}`,
v: 0,
lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"),
ct: 1520872609125,
}, {
m: `baz_bucket{le="+Inf"}`,
v: 17,
lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"),
ct: 1520872609125,
}, {
m: `baz_count`,
v: 17,
lset: labels.FromStrings("__name__", "baz_count"),
ct: 1520872609125,
}, {
m: `baz_sum`,
v: 324789.3,
lset: labels.FromStrings("__name__", "baz_sum"),
ct: 1520872609125,
}, {
m: "fizz_created",
help: "Gauge which shouldn't be parsed as CT",
}, {
m: "fizz_created",
typ: model.MetricTypeGauge,
}, {
m: `fizz_created`,
v: 17,
lset: labels.FromStrings("__name__", "fizz_created"),
}, {
m: "something",
help: "Histogram with _created between buckets and summary",
}, {
m: "something",
typ: model.MetricTypeHistogram,
}, {
m: `something_count`,
v: 18,
lset: labels.FromStrings("__name__", "something_count"),
ct: 1520430001000,
}, {
m: `something_sum`,
v: 324789.4,
lset: labels.FromStrings("__name__", "something_sum"),
ct: 1520430001000,
}, {
m: `something_bucket{le="0.0"}`,
v: 1,
lset: labels.FromStrings("__name__", "something_bucket", "le", "0.0"),
ct: 1520430001000,
}, {
m: `something_bucket{le="1"}`,
v: 2,
lset: labels.FromStrings("__name__", "something_bucket", "le", "1.0"),
ct: 1520430001000,
}, {
m: `something_bucket{le="+Inf"}`,
v: 18,
lset: labels.FromStrings("__name__", "something_bucket", "le", "+Inf"),
ct: 1520430001000,
}, {
m: "yum",
help: "Summary with _created between sum and quantiles",
}, {
m: "yum",
typ: model.MetricTypeSummary,
}, {
m: `yum_count`,
v: 20,
lset: labels.FromStrings("__name__", "yum_count"),
ct: 1520430003000,
}, {
m: `yum_sum`,
v: 324789.5,
lset: labels.FromStrings("__name__", "yum_sum"),
ct: 1520430003000,
}, {
m: `yum{quantile="0.95"}`,
v: 123.7,
lset: labels.FromStrings("__name__", "yum", "quantile", "0.95"),
ct: 1520430003000,
}, {
m: `yum{quantile="0.99"}`,
v: 150.0,
lset: labels.FromStrings("__name__", "yum", "quantile", "0.99"),
ct: 1520430003000,
}, {
m: "foobar",
help: "Summary with _created as the first line",
}, {
m: "foobar",
typ: model.MetricTypeSummary,
}, {
m: `foobar_count`,
v: 21,
lset: labels.FromStrings("__name__", "foobar_count"),
ct: 1520430004000,
}, {
m: `foobar_sum`,
v: 324789.6,
lset: labels.FromStrings("__name__", "foobar_sum"),
ct: 1520430004000,
}, {
m: `foobar{quantile="0.95"}`,
v: 123.8,
lset: labels.FromStrings("__name__", "foobar", "quantile", "0.95"),
ct: 1520430004000,
}, {
m: `foobar{quantile="0.99"}`,
v: 150.1,
lset: labels.FromStrings("__name__", "foobar", "quantile", "0.99"),
ct: 1520430004000,
}, {
m: "metric",
help: "foo\x00bar",
}, {
m: "null_byte_metric{a=\"abc\x00\"}",
v: 1,
lset: labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"),
},
for _, typeAndUnitEnabled := range []bool{false, true} {
t.Run(fmt.Sprintf("type-and-unit=%v", typeAndUnitEnabled), func(t *testing.T) {
exp := []parsedEntry{
{
m: "go_gc_duration_seconds",
help: "A summary of the GC invocation durations.",
}, {
m: "go_gc_duration_seconds",
typ: model.MetricTypeSummary,
}, {
m: "go_gc_duration_seconds",
unit: "seconds",
}, {
m: `go_gc_duration_seconds{quantile="0"}`,
v: 4.9351e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "__unit__", "seconds", "quantile", "0.0"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
),
}, {
m: `go_gc_duration_seconds{quantile="0.25"}`,
v: 7.424100000000001e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "__unit__", "seconds", "quantile", "0.25"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
),
}, {
m: `go_gc_duration_seconds{quantile="0.5",a="b"}`,
v: 8.3835e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "__unit__", "seconds", "quantile", "0.5", "a", "b"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
),
}, {
m: "nohelp1",
help: "",
}, {
m: "help2",
help: "escape \\ \n \\ \" \\x chars",
}, {
m: "nounit",
unit: "",
}, {
m: `go_gc_duration_seconds{quantile="1.0",a="b"}`,
v: 8.3835e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "1.0", "a", "b"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
),
}, {
m: `go_gc_duration_seconds_count`,
v: 99,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "go_gc_duration_seconds_count"),
),
}, {
m: `some:aggregate:rate5m{a_b="c"}`,
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"), model.MetricTypeSummary),
}, {
m: "go_goroutines",
help: "Number of goroutines that currently exist.",
}, {
m: "go_goroutines",
typ: model.MetricTypeGauge,
}, {
m: `go_goroutines`,
v: 33,
t: int64p(123123),
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_goroutines", "__type__", string(model.MetricTypeGauge)),
labels.FromStrings("__name__", "go_goroutines"),
),
}, {
m: "hh",
typ: model.MetricTypeHistogram,
}, {
m: `hh_bucket{le="+Inf"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "hh_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "hh_bucket", "le", "+Inf"),
),
}, {
m: "gh",
typ: model.MetricTypeGaugeHistogram,
}, {
m: `gh_bucket{le="+Inf"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "gh_bucket", "__type__", string(model.MetricTypeGaugeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"),
),
}, {
m: "hhh",
typ: model.MetricTypeHistogram,
}, {
m: `hhh_bucket{le="+Inf"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "hhh_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "hhh_bucket", "le", "+Inf"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4},
},
}, {
m: `hhh_count`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "hhh_count", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "hhh_count"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4},
},
}, {
m: "ggh",
typ: model.MetricTypeGaugeHistogram,
}, {
m: `ggh_bucket{le="+Inf"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ggh_bucket", "__type__", string(model.MetricTypeGaugeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
},
}, {
m: `ggh_count`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ggh_count", "__type__", string(model.MetricTypeGaugeHistogram)),
labels.FromStrings("__name__", "ggh_count"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
},
}, {
m: "smr_seconds",
typ: model.MetricTypeSummary,
}, {
m: `smr_seconds_count`,
v: 2,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "smr_seconds_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "smr_seconds_count"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321},
},
}, {
m: `smr_seconds_sum`,
v: 42,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "smr_seconds_sum", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "smr_seconds_sum"),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321},
},
}, {
m: "ii",
typ: model.MetricTypeInfo,
}, {
m: `ii{foo="bar"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ii", "__type__", string(model.MetricTypeInfo), "foo", "bar"),
labels.FromStrings("__name__", "ii", "foo", "bar"),
),
}, {
m: "ss",
typ: model.MetricTypeStateset,
}, {
m: `ss{ss="foo"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ss", "__type__", string(model.MetricTypeStateset), "ss", "foo"),
labels.FromStrings("__name__", "ss", "ss", "foo"),
),
}, {
m: `ss{ss="bar"}`,
v: 0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ss", "__type__", string(model.MetricTypeStateset), "ss", "bar"),
labels.FromStrings("__name__", "ss", "ss", "bar"),
),
}, {
m: `ss{A="a"}`,
v: 0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "ss", "__type__", string(model.MetricTypeStateset), "A", "a"),
labels.FromStrings("__name__", "ss", "A", "a"),
),
}, {
m: "un",
typ: model.MetricTypeUnknown,
}, {
m: "_metric_starting_with_underscore",
v: 1,
lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"),
}, {
m: "testmetric{_label_starting_with_underscore=\"foo\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"),
}, {
m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
}, {
m: "foo",
help: "Counter with and without labels to certify CT is parsed for both cases",
}, {
m: "foo",
typ: model.MetricTypeCounter,
}, {
m: "foo_total",
v: 17,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter)),
labels.FromStrings("__name__", "foo_total"),
),
t: int64p(1520879607789),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
},
ct: 1520872607123,
}, {
m: `foo_total{a="b"}`,
v: 17.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter), "a", "b"),
labels.FromStrings("__name__", "foo_total", "a", "b"),
),
t: int64p(1520879607789),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
},
ct: 1520872607123,
}, {
m: `foo_total{le="c"}`,
v: 21.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter), "le", "c"),
labels.FromStrings("__name__", "foo_total", "le", "c"),
),
ct: 1520872621123,
}, {
m: `foo_total{le="1"}`,
v: 10.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter), "le", "1"),
labels.FromStrings("__name__", "foo_total", "le", "1"),
),
}, {
m: "bar",
help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far",
}, {
m: "bar",
typ: model.MetricTypeSummary,
}, {
m: "bar_count",
v: 17.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "bar_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "bar_count"),
),
ct: 1520872608124,
}, {
m: "bar_sum",
v: 324789.3,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "bar_sum", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "bar_sum"),
),
ct: 1520872608124,
}, {
m: `bar{quantile="0.95"}`,
v: 123.7,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "bar", "__type__", string(model.MetricTypeSummary), "quantile", "0.95"),
labels.FromStrings("__name__", "bar", "quantile", "0.95"),
),
ct: 1520872608124,
}, {
m: `bar{quantile="0.99"}`,
v: 150.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "bar", "__type__", string(model.MetricTypeSummary), "quantile", "0.99"),
labels.FromStrings("__name__", "bar", "quantile", "0.99"),
),
ct: 1520872608124,
}, {
m: "baz",
help: "Histogram with the same objective as above's summary",
}, {
m: "baz",
typ: model.MetricTypeHistogram,
}, {
m: `baz_bucket{le="0.0"}`,
v: 0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "baz_bucket", "__type__", string(model.MetricTypeHistogram), "le", "0.0"),
labels.FromStrings("__name__", "baz_bucket", "le", "0.0"),
),
ct: 1520872609125,
}, {
m: `baz_bucket{le="+Inf"}`,
v: 17,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "baz_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"),
),
ct: 1520872609125,
}, {
m: `baz_count`,
v: 17,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "baz_count", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "baz_count"),
),
ct: 1520872609125,
}, {
m: `baz_sum`,
v: 324789.3,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "baz_sum", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "baz_sum"),
),
ct: 1520872609125,
}, {
m: "fizz_created",
help: "Gauge which shouldn't be parsed as CT",
}, {
m: "fizz_created",
typ: model.MetricTypeGauge,
}, {
m: `fizz_created`,
v: 17,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "fizz_created", "__type__", string(model.MetricTypeGauge)),
labels.FromStrings("__name__", "fizz_created"),
),
}, {
m: "something",
help: "Histogram with _created between buckets and summary",
}, {
m: "something",
typ: model.MetricTypeHistogram,
}, {
m: `something_count`,
v: 18,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_count", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "something_count"),
),
ct: 1520430001000,
}, {
m: `something_sum`,
v: 324789.4,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_sum", "__type__", string(model.MetricTypeHistogram)),
labels.FromStrings("__name__", "something_sum"),
),
ct: 1520430001000,
}, {
m: `something_bucket{le="0.0"}`,
v: 1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_bucket", "__type__", string(model.MetricTypeHistogram), "le", "0.0"),
labels.FromStrings("__name__", "something_bucket", "le", "0.0"),
),
ct: 1520430001000,
}, {
m: `something_bucket{le="1"}`,
v: 2,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_bucket", "__type__", string(model.MetricTypeHistogram), "le", "1.0"),
labels.FromStrings("__name__", "something_bucket", "le", "1.0"),
),
ct: 1520430001000,
}, {
m: `something_bucket{le="+Inf"}`,
v: 18,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "something_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"),
labels.FromStrings("__name__", "something_bucket", "le", "+Inf"),
),
ct: 1520430001000,
}, {
m: "yum",
help: "Summary with _created between sum and quantiles",
}, {
m: "yum",
typ: model.MetricTypeSummary,
}, {
m: `yum_count`,
v: 20,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "yum_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "yum_count"),
),
ct: 1520430003000,
}, {
m: `yum_sum`,
v: 324789.5,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "yum_sum", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "yum_sum"),
),
ct: 1520430003000,
}, {
m: `yum{quantile="0.95"}`,
v: 123.7,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "yum", "__type__", string(model.MetricTypeSummary), "quantile", "0.95"),
labels.FromStrings("__name__", "yum", "quantile", "0.95"),
),
ct: 1520430003000,
}, {
m: `yum{quantile="0.99"}`,
v: 150.0,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "yum", "__type__", string(model.MetricTypeSummary), "quantile", "0.99"),
labels.FromStrings("__name__", "yum", "quantile", "0.99"),
),
ct: 1520430003000,
}, {
m: "foobar",
help: "Summary with _created as the first line",
}, {
m: "foobar",
typ: model.MetricTypeSummary,
}, {
m: `foobar_count`,
v: 21,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foobar_count", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "foobar_count"),
),
ct: 1520430004000,
}, {
m: `foobar_sum`,
v: 324789.6,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foobar_sum", "__type__", string(model.MetricTypeSummary)),
labels.FromStrings("__name__", "foobar_sum"),
),
ct: 1520430004000,
}, {
m: `foobar{quantile="0.95"}`,
v: 123.8,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foobar", "__type__", string(model.MetricTypeSummary), "quantile", "0.95"),
labels.FromStrings("__name__", "foobar", "quantile", "0.95"),
),
ct: 1520430004000,
}, {
m: `foobar{quantile="0.99"}`,
v: 150.1,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "foobar", "__type__", string(model.MetricTypeSummary), "quantile", "0.99"),
labels.FromStrings("__name__", "foobar", "quantile", "0.99"),
),
ct: 1520430004000,
}, {
m: "metric",
help: "foo\x00bar",
}, {
m: "null_byte_metric{a=\"abc\x00\"}",
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"), model.MetricTypeSummary),
},
}
opts := []OpenMetricsOption{WithOMParserCTSeriesSkipped()}
if typeAndUnitEnabled {
opts = append(opts, WithOMParserTypeAndUnitLabels())
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), opts...)
got := testParse(t, p)
requireEntries(t, exp, got)
})
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
got := testParse(t, p)
requireEntries(t, exp, got)
}
func TestUTF8OpenMetricsParse(t *testing.T) {
func TestOpenMetricsParse_UTF8(t *testing.T) {
input := `# HELP "go.gc_duration_seconds" A summary of the GC invocation durations.
# TYPE "go.gc_duration_seconds" summary
# UNIT "go.gc_duration_seconds" seconds

View File

@ -32,6 +32,7 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/schema"
)
type promlexer struct {
@ -160,16 +161,19 @@ type PromParser struct {
// of the metric name and label names and values for this series.
// p.offsets[0] is the start character of the metric name.
// p.offsets[1] is the end of the metric name.
// Subsequently, p.offsets is a pair of pair of offsets for the positions
// Subsequently, p.offsets is a pair of offsets for the positions
// of the label name and value start and end characters.
offsets []int
enableTypeAndUnitLabels bool
}
// NewPromParser returns a new parser of the byte slice.
func NewPromParser(b []byte, st *labels.SymbolTable) Parser {
func NewPromParser(b []byte, st *labels.SymbolTable, enableTypeAndUnitLabels bool) Parser {
return &PromParser{
l: &promlexer{b: append(b, '\n')},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
l: &promlexer{b: append(b, '\n')},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
enableTypeAndUnitLabels: enableTypeAndUnitLabels,
}
}
@ -226,19 +230,33 @@ func (p *PromParser) Comment() []byte {
// Labels writes the labels of the current sample into the passed labels.
func (p *PromParser) Labels(l *labels.Labels) {
s := yoloString(p.series)
p.builder.Reset()
metricName := unreplace(s[p.offsets[0]-p.start : p.offsets[1]-p.start])
p.builder.Add(labels.MetricName, metricName)
m := schema.Metadata{
Name: metricName,
// NOTE(bwplotka): There is a known case where the type is wrong on a broken exposition
// (see the TestPromParse windspeed metric). Fixing it would require extra
// allocs and benchmarks. Since it was always broken, don't fix for now.
Type: p.mtype,
}
if p.enableTypeAndUnitLabels {
m.AddToLabels(&p.builder)
} else {
p.builder.Add(labels.MetricName, metricName)
}
for i := 2; i < len(p.offsets); i += 4 {
a := p.offsets[i] - p.start
b := p.offsets[i+1] - p.start
label := unreplace(s[a:b])
if p.enableTypeAndUnitLabels && !m.IsEmptyFor(label) {
// Dropping user provided metadata labels, if found in the OM metadata.
continue
}
c := p.offsets[i+2] - p.start
d := p.offsets[i+3] - p.start
value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d]))
p.builder.Add(label, value)
}

View File

@ -14,6 +14,7 @@
package textparse
import (
"fmt"
"io"
"testing"
@ -23,6 +24,45 @@ import (
"github.com/prometheus/prometheus/model/labels"
)
// typeAndUnitLabels is a readability helper for test expectations: it selects
// which label set a test case expects depending on whether the
// type-and-unit-labels feature is enabled. It returns the `enabled` set when
// typeAndUnitEnabled is true and the `disabled` set otherwise.
func typeAndUnitLabels(typeAndUnitEnabled bool, enabled, disabled labels.Labels) labels.Labels {
	if typeAndUnitEnabled {
		return enabled
	}
	return disabled
}
// todoDetectFamilySwitch exists because there's a known TODO that requires a dedicated PR and benchmarks for PROM-39.
// The OM and Prom text formats do NOT require TYPE, HELP or UNIT lines. This means that metric families can switch without
// those metadata entries, e.g.:
// ```
// TYPE go_goroutines gauge
// go_goroutines 33 # previous metric
// different_metric_total 12 # <--- different family!
// ```
// The expected type (and unit) for "different_metric_total" is obviously unknown, but it's surprisingly expensive and complex
// to write a parser that handles those cases reliably. Two main issues:
// a. TYPE and UNIT are associated with a "metric family", which is different from the resulting metric name (e.g. histograms).
// b. You have to allocate additional entries to pair TYPE and UNIT with the metric families they refer to (nit).
//
// This problem is exacerbated by the PROM-39 feature.
//
// Current metadata handling is semi-broken here, as (a) is expensive and currently not fully accurate;
// see: https://github.com/prometheus/prometheus/blob/dbf5d01a62249eddcd202303069f6cf7dd3c4a73/scrape/scrape.go#L1916
//
// To iterate, we keep it "knowingly" broken behind the feature flag.
// TODO(bwplotka): Remove this once we fix the problematic case, e.g.:
// - introduce a more accurate isSeriesPartOfFamily shared helper, or even a parser method that tells when a new metric family starts.
func todoDetectFamilySwitch(typeAndUnitEnabled bool, expected labels.Labels, brokenTypeInherited model.MetricType) labels.Labels {
	if typeAndUnitEnabled && brokenTypeInherited != model.MetricTypeUnknown {
		// Hack for now: emulate the parser's knowingly-broken behavior by
		// stamping the inherited (wrong) type from the previous family onto
		// the expected label set.
		b := labels.NewBuilder(expected)
		b.Set("__type__", string(brokenTypeInherited))
		return b.Labels()
	}
	return expected
}
func TestPromParse(t *testing.T) {
input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
@ -55,153 +95,299 @@ some:aggregate:rate5m{a_b="c"} 1
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 33 123123
# TYPE some_counter_total counter
# HELP some_counter_total Help after type.
some_counter_total 12
# HELP nohelp3
_metric_starting_with_underscore 1
testmetric{_label_starting_with_underscore="foo"} 1
testmetric{label="\"bar\""} 1
testmetric{le="10"} 1`
testmetric{le="10"} 1
# HELP type_and_unit_test1 Type specified in metadata overrides.
# TYPE type_and_unit_test1 gauge
type_and_unit_test1{__type__="counter"} 123
# HELP type_and_unit_test2 Type specified in label.
type_and_unit_test2{__type__="counter"} 123`
input += "\n# HELP metric foo\x00bar"
input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
exp := []parsedEntry{
{
m: "go_gc_duration_seconds",
help: "A summary of the GC invocation durations.",
}, {
m: "go_gc_duration_seconds",
typ: model.MetricTypeSummary,
}, {
m: `go_gc_duration_seconds{quantile="0"}`,
v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
}, {
m: `go_gc_duration_seconds{quantile="0.25",}`,
v: 7.424100000000001e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
}, {
m: `go_gc_duration_seconds{quantile="0.5",a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
}, {
m: `go_gc_duration_seconds{quantile="0.8", a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.8", "a", "b"),
}, {
m: `go_gc_duration_seconds{ quantile="0.9", a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.9", "a", "b"),
}, {
m: "prometheus_http_request_duration_seconds",
help: "Histogram of latencies for HTTP requests.",
}, {
m: "prometheus_http_request_duration_seconds",
typ: model.MetricTypeHistogram,
}, {
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="1"}`,
v: 423,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "1.0"),
}, {
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="2"}`,
v: 1423,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "2.0"),
}, {
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"}`,
v: 1423,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "+Inf"),
}, {
m: `prometheus_http_request_duration_seconds_sum{handler="/"}`,
v: 2000,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_sum", "handler", "/"),
}, {
m: `prometheus_http_request_duration_seconds_count{handler="/"}`,
v: 1423,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_count", "handler", "/"),
}, {
comment: "# Hrandom comment starting with prefix of HELP",
}, {
comment: "#",
}, {
m: `wind_speed{A="2",c="3"}`,
v: 12345,
lset: labels.FromStrings("A", "2", "__name__", "wind_speed", "c", "3"),
}, {
comment: "# comment with escaped \\n newline",
}, {
comment: "# comment with escaped \\ escape character",
}, {
m: "nohelp1",
help: "",
}, {
m: "nohelp2",
help: "",
}, {
m: `go_gc_duration_seconds{ quantile="1.0", a="b" }`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, {
m: `go_gc_duration_seconds { quantile="1.0", a="b" }`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, {
m: `go_gc_duration_seconds { quantile= "1.0", a= "b", }`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, {
m: `go_gc_duration_seconds { quantile = "1.0", a = "b" }`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, {
// NOTE: Unlike OpenMetrics, PromParser allows spaces between label terms. This appears to be unintended and should probably be fixed.
m: `go_gc_duration_seconds { quantile = "2.0" a = "b" }`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "2.0", "a", "b"),
}, {
m: `go_gc_duration_seconds_count`,
v: 99,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds_count"),
}, {
m: `some:aggregate:rate5m{a_b="c"}`,
v: 1,
lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"),
}, {
m: "go_goroutines",
help: "Number of goroutines that currently exist.",
}, {
m: "go_goroutines",
typ: model.MetricTypeGauge,
}, {
m: `go_goroutines`,
v: 33,
t: int64p(123123),
lset: labels.FromStrings("__name__", "go_goroutines"),
}, {
m: "_metric_starting_with_underscore",
v: 1,
lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"),
}, {
m: "testmetric{_label_starting_with_underscore=\"foo\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"),
}, {
m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
}, {
m: `testmetric{le="10"}`,
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "le", "10"),
}, {
m: "metric",
help: "foo\x00bar",
}, {
m: "null_byte_metric{a=\"abc\x00\"}",
v: 1,
lset: labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"),
},
}
for _, typeAndUnitEnabled := range []bool{false, true} {
t.Run(fmt.Sprintf("type-and-unit=%v", typeAndUnitEnabled), func(t *testing.T) {
exp := []parsedEntry{
{
m: "go_gc_duration_seconds",
help: "A summary of the GC invocation durations.",
},
{
m: "go_gc_duration_seconds",
typ: model.MetricTypeSummary,
},
{
m: `go_gc_duration_seconds{quantile="0"}`,
v: 4.9351e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.0"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
),
},
{
m: `go_gc_duration_seconds{quantile="0.25",}`,
v: 7.424100000000001e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.25"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
),
},
{
m: `go_gc_duration_seconds{quantile="0.5",a="b"}`,
v: 8.3835e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.5", "a", "b"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
),
},
{
m: `go_gc_duration_seconds{quantile="0.8", a="b"}`,
v: 8.3835e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.8", "a", "b"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.8", "a", "b"),
),
},
{
m: `go_gc_duration_seconds{ quantile="0.9", a="b"}`,
v: 8.3835e-05,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.9", "a", "b"),
labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.9", "a", "b"),
),
},
{
m: "prometheus_http_request_duration_seconds",
help: "Histogram of latencies for HTTP requests.",
},
{
m: "prometheus_http_request_duration_seconds",
typ: model.MetricTypeHistogram,
},
{
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="1"}`,
v: 423,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "__type__", string(model.MetricTypeHistogram), "handler", "/", "le", "1.0"),
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "1.0"),
),
},
{
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="2"}`,
v: 1423,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "__type__", string(model.MetricTypeHistogram), "handler", "/", "le", "2.0"),
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "2.0"),
),
},
{
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"}`,
v: 1423,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "__type__", string(model.MetricTypeHistogram), "handler", "/", "le", "+Inf"),
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "+Inf"),
),
},
{
m: `prometheus_http_request_duration_seconds_sum{handler="/"}`,
v: 2000,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_sum", "__type__", string(model.MetricTypeHistogram), "handler", "/"),
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_sum", "handler", "/"),
),
},
{
m: `prometheus_http_request_duration_seconds_count{handler="/"}`,
v: 1423,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_count", "__type__", string(model.MetricTypeHistogram), "handler", "/"),
labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_count", "handler", "/"),
),
},
{
comment: "# Hrandom comment starting with prefix of HELP",
},
{
comment: "#",
},
{
m: `wind_speed{A="2",c="3"}`,
v: 12345,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
// NOTE(bwplotka): This is knowingly broken, inheriting old type when TYPE was not specified on a new metric.
// This was broken forever on a case for a broken exposition. Don't fix for now (expensive).
labels.FromStrings("A", "2", "__name__", "wind_speed", "__type__", string(model.MetricTypeHistogram), "c", "3"),
labels.FromStrings("A", "2", "__name__", "wind_speed", "c", "3"),
),
},
{
comment: "# comment with escaped \\n newline",
},
{
comment: "# comment with escaped \\ escape character",
},
{
m: "nohelp1",
help: "",
},
{
m: "nohelp2",
help: "",
},
{
m: `go_gc_duration_seconds{ quantile="1.0", a="b" }`,
v: 8.3835e-05,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), model.MetricTypeHistogram),
},
{
m: `go_gc_duration_seconds { quantile="1.0", a="b" }`,
v: 8.3835e-05,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), model.MetricTypeHistogram),
},
{
m: `go_gc_duration_seconds { quantile= "1.0", a= "b", }`,
v: 8.3835e-05,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), model.MetricTypeHistogram),
},
{
m: `go_gc_duration_seconds { quantile = "1.0", a = "b" }`,
v: 8.3835e-05,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), model.MetricTypeHistogram),
},
{
// NOTE: Unlike OpenMetrics, PromParser allows spaces between label terms. This appears to be unintended and should probably be fixed.
m: `go_gc_duration_seconds { quantile = "2.0" a = "b" }`,
v: 8.3835e-05,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "2.0", "a", "b"), model.MetricTypeHistogram),
},
{
m: `go_gc_duration_seconds_count`,
v: 99,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds_count"), model.MetricTypeHistogram),
},
{
m: `some:aggregate:rate5m{a_b="c"}`,
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"), model.MetricTypeHistogram),
},
{
m: "go_goroutines",
help: "Number of goroutines that currently exist.",
},
{
m: "go_goroutines",
typ: model.MetricTypeGauge,
},
{
m: `go_goroutines`,
v: 33,
t: int64p(123123),
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "go_goroutines", "__type__", string(model.MetricTypeGauge)),
labels.FromStrings("__name__", "go_goroutines"),
),
},
{
m: "some_counter_total",
typ: model.MetricTypeCounter,
},
{
m: "some_counter_total",
help: "Help after type.",
},
{
m: `some_counter_total`,
v: 12,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "some_counter_total", "__type__", string(model.MetricTypeCounter)),
labels.FromStrings("__name__", "some_counter_total"),
),
},
{
m: "nohelp3",
help: "",
},
{
m: "_metric_starting_with_underscore",
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "_metric_starting_with_underscore"), model.MetricTypeCounter),
},
{
m: "testmetric{_label_starting_with_underscore=\"foo\"}",
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"), model.MetricTypeCounter),
},
{
m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "testmetric", "label", `"bar"`), model.MetricTypeCounter),
},
{
m: `testmetric{le="10"}`,
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "testmetric", "le", "10"), model.MetricTypeCounter),
},
{
m: "type_and_unit_test1",
help: "Type specified in metadata overrides.",
},
{
m: "type_and_unit_test1",
typ: model.MetricTypeGauge,
},
{
m: "type_and_unit_test1{__type__=\"counter\"}",
v: 123,
lset: typeAndUnitLabels(
typeAndUnitEnabled,
labels.FromStrings("__name__", "type_and_unit_test1", "__type__", string(model.MetricTypeGauge)),
labels.FromStrings("__name__", "type_and_unit_test1", "__type__", string(model.MetricTypeCounter)),
),
},
{
m: "type_and_unit_test2",
help: "Type specified in label.",
},
{
m: "type_and_unit_test2{__type__=\"counter\"}",
v: 123,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "type_and_unit_test2", "__type__", string(model.MetricTypeCounter)), model.MetricTypeGauge),
},
{
m: "metric",
help: "foo\x00bar",
},
{
m: "null_byte_metric{a=\"abc\x00\"}",
v: 1,
lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"), model.MetricTypeGauge),
},
}
p := NewPromParser([]byte(input), labels.NewSymbolTable())
got := testParse(t, p)
requireEntries(t, exp, got)
p := NewPromParser([]byte(input), labels.NewSymbolTable(), typeAndUnitEnabled)
got := testParse(t, p)
requireEntries(t, exp, got)
})
}
}
func TestUTF8PromParse(t *testing.T) {
@ -274,7 +460,7 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"),
},
}
p := NewPromParser([]byte(input), labels.NewSymbolTable())
p := NewPromParser([]byte(input), labels.NewSymbolTable(), false)
got := testParse(t, p)
requireEntries(t, exp, got)
}
@ -355,7 +541,7 @@ func TestPromParseErrors(t *testing.T) {
}
for i, c := range cases {
p := NewPromParser([]byte(c.input), labels.NewSymbolTable())
p := NewPromParser([]byte(c.input), labels.NewSymbolTable(), false)
var err error
for err == nil {
_, err = p.Next()
@ -408,7 +594,7 @@ func TestPromNullByteHandling(t *testing.T) {
}
for i, c := range cases {
p := NewPromParser([]byte(c.input), labels.NewSymbolTable())
p := NewPromParser([]byte(c.input), labels.NewSymbolTable(), false)
var err error
for err == nil {
_, err = p.Next()

View File

@ -31,6 +31,7 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
"github.com/prometheus/prometheus/schema"
)
// floatFormatBufPool is exclusively used in formatOpenMetricsFloat.
@ -72,23 +73,25 @@ type ProtobufParser struct {
exemplarReturned bool
// state is marked by the entry we are processing. EntryInvalid implies
// that we have to decode the next MetricFamily.
// that we have to decode the next MetricDescriptor.
state Entry
// Whether to also parse a classic histogram that is also present as a
// native histogram.
parseClassicHistograms bool
parseClassicHistograms bool
enableTypeAndUnitLabels bool
}
// NewProtobufParser returns a parser for the payload in the byte slice.
func NewProtobufParser(b []byte, parseClassicHistograms bool, st *labels.SymbolTable) Parser {
func NewProtobufParser(b []byte, parseClassicHistograms, enableTypeAndUnitLabels bool, st *labels.SymbolTable) Parser {
return &ProtobufParser{
dec: dto.NewMetricStreamingDecoder(b),
entryBytes: &bytes.Buffer{},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16), // TODO(bwplotka): Try base builder.
state: EntryInvalid,
parseClassicHistograms: parseClassicHistograms,
state: EntryInvalid,
parseClassicHistograms: parseClassicHistograms,
enableTypeAndUnitLabels: enableTypeAndUnitLabels,
}
}
@ -551,10 +554,27 @@ func (p *ProtobufParser) Next() (Entry, error) {
// * p.fieldsDone depending on p.fieldPos.
func (p *ProtobufParser) onSeriesOrHistogramUpdate() error {
p.builder.Reset()
p.builder.Add(labels.MetricName, p.getMagicName())
if err := p.dec.Label(&p.builder); err != nil {
return err
if p.enableTypeAndUnitLabels {
_, typ := p.Type()
m := schema.Metadata{
Name: p.getMagicName(),
Type: typ,
Unit: p.dec.GetUnit(),
}
m.AddToLabels(&p.builder)
if err := p.dec.Label(schema.IgnoreOverriddenMetadataLabelsScratchBuilder{
Overwrite: m,
ScratchBuilder: &p.builder,
}); err != nil {
return err
}
} else {
p.builder.Add(labels.MetricName, p.getMagicName())
if err := p.dec.Label(&p.builder); err != nil {
return err
}
}
if needed, name, value := p.getMagicLabel(); needed {

View File

@ -832,8 +832,8 @@ func TestProtobufParse(t *testing.T) {
expected []parsedEntry
}{
{
name: "ignore classic buckets of native histograms",
parser: NewProtobufParser(inputBuf.Bytes(), false, labels.NewSymbolTable()),
name: "parseClassicHistograms=false/enableTypeAndUnitLabels=false",
parser: NewProtobufParser(inputBuf.Bytes(), false, false, labels.NewSymbolTable()),
expected: []parsedEntry{
{
m: "go_build_info",
@ -844,7 +844,7 @@ func TestProtobufParse(t *testing.T) {
typ: model.MetricTypeGauge,
},
{
m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)",
m: "go_build_info\xffchecksum\xff\xffpath\xffgithub.com/prometheus/client_golang\xffversion\xff(devel)",
v: 1,
lset: labels.FromStrings(
"__name__", "go_build_info",
@ -1467,8 +1467,8 @@ func TestProtobufParse(t *testing.T) {
},
},
{
name: "parse classic and native buckets",
parser: NewProtobufParser(inputBuf.Bytes(), true, labels.NewSymbolTable()),
name: "parseClassicHistograms=false/enableTypeAndUnitLabels=true",
parser: NewProtobufParser(inputBuf.Bytes(), false, true, labels.NewSymbolTable()),
expected: []parsedEntry{
{
m: "go_build_info",
@ -1479,7 +1479,679 @@ func TestProtobufParse(t *testing.T) {
typ: model.MetricTypeGauge,
},
{
m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)",
m: "go_build_info\xff__type__\xffgauge\xffchecksum\xff\xffpath\xffgithub.com/prometheus/client_golang\xffversion\xff(devel)",
v: 1,
lset: labels.FromStrings(
"__name__", "go_build_info",
"__type__", string(model.MetricTypeGauge),
"checksum", "",
"path", "github.com/prometheus/client_golang",
"version", "(devel)",
),
},
{
m: "go_memstats_alloc_bytes_total",
help: "Total number of bytes allocated, even if freed.",
},
{
m: "go_memstats_alloc_bytes_total",
unit: "bytes",
},
{
m: "go_memstats_alloc_bytes_total",
typ: model.MetricTypeCounter,
},
{
m: "go_memstats_alloc_bytes_total\xff__type__\xffcounter\xff__unit__\xffbytes",
v: 1.546544e+06,
lset: labels.FromStrings(
"__name__", "go_memstats_alloc_bytes_total",
"__type__", string(model.MetricTypeCounter),
"__unit__", "bytes",
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233},
},
},
{
m: "something_untyped",
help: "Just to test the untyped type.",
},
{
m: "something_untyped",
typ: model.MetricTypeUnknown,
},
{
m: "something_untyped",
t: int64p(1234567),
v: 42,
lset: labels.FromStrings(
"__name__", "something_untyped",
),
},
{
m: "test_histogram",
help: "Test histogram with many buckets removed to keep it manageable in size.",
},
{
m: "test_histogram",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram\xff__type__\xffhistogram",
t: int64p(1234568),
shs: &histogram.Histogram{
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_histogram",
"__type__", string(model.MetricTypeHistogram),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{
m: "test_gauge_histogram",
help: "Like test_histogram but as gauge histogram.",
},
{
m: "test_gauge_histogram",
typ: model.MetricTypeGaugeHistogram,
},
{
m: "test_gauge_histogram\xff__type__\xffgaugehistogram",
t: int64p(1234568),
shs: &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_gauge_histogram",
"__type__", string(model.MetricTypeGaugeHistogram),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{
m: "test_float_histogram",
help: "Test float histogram with many buckets removed to keep it manageable in size.",
},
{
m: "test_float_histogram",
typ: model.MetricTypeHistogram,
},
{
m: "test_float_histogram\xff__type__\xffhistogram",
t: int64p(1234568),
fhs: &histogram.FloatHistogram{
Count: 175.0,
ZeroCount: 2.0,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0},
NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0},
},
lset: labels.FromStrings(
"__name__", "test_float_histogram",
"__type__", string(model.MetricTypeHistogram),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{
m: "test_gauge_float_histogram",
help: "Like test_float_histogram but as gauge histogram.",
},
{
m: "test_gauge_float_histogram",
typ: model.MetricTypeGaugeHistogram,
},
{
m: "test_gauge_float_histogram\xff__type__\xffgaugehistogram",
t: int64p(1234568),
fhs: &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 175.0,
ZeroCount: 2.0,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0},
NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0},
},
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram",
"__type__", string(model.MetricTypeGaugeHistogram),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{
m: "test_histogram2",
help: "Similar histogram as before but now without sparse buckets.",
},
{
m: "test_histogram2",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram2_count\xff__type__\xffhistogram",
v: 175,
lset: labels.FromStrings(
"__name__", "test_histogram2_count",
"__type__", string(model.MetricTypeHistogram),
),
},
{
m: "test_histogram2_sum\xff__type__\xffhistogram",
v: 0.000828,
lset: labels.FromStrings(
"__name__", "test_histogram2_sum",
"__type__", string(model.MetricTypeHistogram),
),
},
{
m: "test_histogram2_bucket\xff__type__\xffhistogram\xffle\xff-0.00048",
v: 2,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "-0.00048",
),
},
{
m: "test_histogram2_bucket\xff__type__\xffhistogram\xffle\xff-0.00038",
v: 4,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "-0.00038",
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146},
},
},
{
m: "test_histogram2_bucket\xff__type__\xffhistogram\xffle\xff1.0",
v: 16,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "1.0",
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false},
},
},
{
m: "test_histogram2_bucket\xff__type__\xffhistogram\xffle\xff+Inf",
v: 175,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "+Inf",
),
},
{
m: "test_histogram3",
help: "Similar histogram as before but now with integer buckets.",
},
{
m: "test_histogram3",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram3_count\xff__type__\xffhistogram",
v: 6,
lset: labels.FromStrings(
"__name__", "test_histogram3_count",
"__type__", string(model.MetricTypeHistogram),
),
},
{
m: "test_histogram3_sum\xff__type__\xffhistogram",
v: 50,
lset: labels.FromStrings(
"__name__", "test_histogram3_sum",
"__type__", string(model.MetricTypeHistogram),
),
},
{
m: "test_histogram3_bucket\xff__type__\xffhistogram\xffle\xff-20.0",
v: 2,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "-20.0",
),
},
{
m: "test_histogram3_bucket\xff__type__\xffhistogram\xffle\xff20.0",
v: 4,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "20.0",
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146},
},
},
{
m: "test_histogram3_bucket\xff__type__\xffhistogram\xffle\xff30.0",
v: 6,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "30.0",
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: false},
},
},
{
m: "test_histogram3_bucket\xff__type__\xffhistogram\xffle\xff+Inf",
v: 6,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"__type__", string(model.MetricTypeHistogram),
"le", "+Inf",
),
},
{
m: "test_histogram_family",
help: "Test histogram metric family with two very simple histograms.",
},
{
m: "test_histogram_family",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram_family\xff__type__\xffhistogram\xfffoo\xffbar",
shs: &histogram.Histogram{
CounterResetHint: histogram.UnknownCounterReset,
Count: 5,
Sum: 12.1,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: 8, Length: 2},
},
NegativeSpans: []histogram.Span{},
PositiveBuckets: []int64{2, 1},
},
lset: labels.FromStrings(
"__name__", "test_histogram_family",
"__type__", string(model.MetricTypeHistogram),
"foo", "bar",
),
},
{
m: "test_histogram_family\xff__type__\xffhistogram\xfffoo\xffbaz",
shs: &histogram.Histogram{
CounterResetHint: histogram.UnknownCounterReset,
Count: 6,
Sum: 13.1,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: 8, Length: 2},
},
NegativeSpans: []histogram.Span{},
PositiveBuckets: []int64{1, 4},
},
lset: labels.FromStrings(
"__name__", "test_histogram_family",
"__type__", string(model.MetricTypeHistogram),
"foo", "baz",
),
},
{
m: "test_float_histogram_with_zerothreshold_zero",
help: "Test float histogram with a zero threshold of zero.",
},
{
m: "test_float_histogram_with_zerothreshold_zero",
typ: model.MetricTypeHistogram,
},
{
m: "test_float_histogram_with_zerothreshold_zero\xff__type__\xffhistogram",
fhs: &histogram.FloatHistogram{
Count: 5.0,
Sum: 12.1,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: 8, Length: 2},
},
PositiveBuckets: []float64{2.0, 3.0},
NegativeSpans: []histogram.Span{},
},
lset: labels.FromStrings(
"__name__", "test_float_histogram_with_zerothreshold_zero",
"__type__", string(model.MetricTypeHistogram),
),
},
{
m: "rpc_durations_seconds",
help: "RPC latency distributions.",
},
{
m: "rpc_durations_seconds",
typ: model.MetricTypeSummary,
},
{
m: "rpc_durations_seconds_count\xff__type__\xffsummary\xffservice\xffexponential",
v: 262,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds_count",
"__type__", string(model.MetricTypeSummary),
"service", "exponential",
),
},
{
m: "rpc_durations_seconds_sum\xff__type__\xffsummary\xffservice\xffexponential",
v: 0.00025551262820703587,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds_sum",
"__type__", string(model.MetricTypeSummary),
"service", "exponential",
),
},
{
m: "rpc_durations_seconds\xff__type__\xffsummary\xffquantile\xff0.5\xffservice\xffexponential",
v: 6.442786329648548e-07,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds",
"__type__", string(model.MetricTypeSummary),
"quantile", "0.5",
"service", "exponential",
),
},
{
m: "rpc_durations_seconds\xff__type__\xffsummary\xffquantile\xff0.9\xffservice\xffexponential",
v: 1.9435742936658396e-06,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds",
"__type__", string(model.MetricTypeSummary),
"quantile", "0.9",
"service", "exponential",
),
},
{
m: "rpc_durations_seconds\xff__type__\xffsummary\xffquantile\xff0.99\xffservice\xffexponential",
v: 4.0471608667037015e-06,
lset: labels.FromStrings(
"__type__", string(model.MetricTypeSummary),
"__name__", "rpc_durations_seconds",
"quantile", "0.99",
"service", "exponential",
),
},
{
m: "without_quantiles",
help: "A summary without quantiles.",
},
{
m: "without_quantiles",
typ: model.MetricTypeSummary,
},
{
m: "without_quantiles_count\xff__type__\xffsummary",
v: 42,
lset: labels.FromStrings(
"__name__", "without_quantiles_count",
"__type__", string(model.MetricTypeSummary),
),
},
{
m: "without_quantiles_sum\xff__type__\xffsummary",
v: 1.234,
lset: labels.FromStrings(
"__name__", "without_quantiles_sum",
"__type__", string(model.MetricTypeSummary),
),
},
{
m: "empty_histogram",
help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.",
},
{
m: "empty_histogram",
typ: model.MetricTypeHistogram,
},
{
m: "empty_histogram\xff__type__\xffhistogram",
shs: &histogram.Histogram{
CounterResetHint: histogram.UnknownCounterReset,
PositiveSpans: []histogram.Span{},
NegativeSpans: []histogram.Span{},
},
lset: labels.FromStrings(
"__name__", "empty_histogram",
"__type__", string(model.MetricTypeHistogram),
),
},
{
m: "test_counter_with_createdtimestamp",
help: "A counter with a created timestamp.",
},
{
m: "test_counter_with_createdtimestamp",
typ: model.MetricTypeCounter,
},
{
m: "test_counter_with_createdtimestamp\xff__type__\xffcounter",
v: 42,
ct: 1625851153146,
lset: labels.FromStrings(
"__name__", "test_counter_with_createdtimestamp",
"__type__", string(model.MetricTypeCounter),
),
},
{
m: "test_summary_with_createdtimestamp",
help: "A summary with a created timestamp.",
},
{
m: "test_summary_with_createdtimestamp",
typ: model.MetricTypeSummary,
},
{
m: "test_summary_with_createdtimestamp_count\xff__type__\xffsummary",
v: 42,
ct: 1625851153146,
lset: labels.FromStrings(
"__name__", "test_summary_with_createdtimestamp_count",
"__type__", string(model.MetricTypeSummary),
),
},
{
m: "test_summary_with_createdtimestamp_sum\xff__type__\xffsummary",
v: 1.234,
ct: 1625851153146,
lset: labels.FromStrings(
"__name__", "test_summary_with_createdtimestamp_sum",
"__type__", string(model.MetricTypeSummary),
),
},
{
m: "test_histogram_with_createdtimestamp",
help: "A histogram with a created timestamp.",
},
{
m: "test_histogram_with_createdtimestamp",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram_with_createdtimestamp\xff__type__\xffhistogram",
ct: 1625851153146,
shs: &histogram.Histogram{
CounterResetHint: histogram.UnknownCounterReset,
PositiveSpans: []histogram.Span{},
NegativeSpans: []histogram.Span{},
},
lset: labels.FromStrings(
"__name__", "test_histogram_with_createdtimestamp",
"__type__", string(model.MetricTypeHistogram),
),
},
{
m: "test_gaugehistogram_with_createdtimestamp",
help: "A gauge histogram with a created timestamp.",
},
{
m: "test_gaugehistogram_with_createdtimestamp",
typ: model.MetricTypeGaugeHistogram,
},
{
m: "test_gaugehistogram_with_createdtimestamp\xff__type__\xffgaugehistogram",
ct: 1625851153146,
shs: &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
PositiveSpans: []histogram.Span{},
NegativeSpans: []histogram.Span{},
},
lset: labels.FromStrings(
"__name__", "test_gaugehistogram_with_createdtimestamp",
"__type__", string(model.MetricTypeGaugeHistogram),
),
},
{
m: "test_histogram_with_native_histogram_exemplars",
help: "A histogram with native histogram exemplars.",
},
{
m: "test_histogram_with_native_histogram_exemplars",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram_with_native_histogram_exemplars\xff__type__\xffhistogram",
t: int64p(1234568),
shs: &histogram.Histogram{
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_histogram_with_native_histogram_exemplars",
"__type__", string(model.MetricTypeHistogram),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
{Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156},
},
},
{
m: "test_histogram_with_native_histogram_exemplars2",
help: "Another histogram with native histogram exemplars.",
},
{
m: "test_histogram_with_native_histogram_exemplars2",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram_with_native_histogram_exemplars2\xff__type__\xffhistogram",
t: int64p(1234568),
shs: &histogram.Histogram{
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_histogram_with_native_histogram_exemplars2",
"__type__", string(model.MetricTypeHistogram),
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
},
},
{
name: "parseClassicHistograms=true/enableTypeAndUnitLabels=false",
parser: NewProtobufParser(inputBuf.Bytes(), true, false, labels.NewSymbolTable()),
expected: []parsedEntry{
{
m: "go_build_info",
help: "Build information about the main Go module.",
},
{
m: "go_build_info",
typ: model.MetricTypeGauge,
},
{
m: "go_build_info\xffchecksum\xff\xffpath\xffgithub.com/prometheus/client_golang\xffversion\xff(devel)",
v: 1,
lset: labels.FromStrings(
"__name__", "go_build_info",

View File

@ -23,8 +23,6 @@ import (
proto "github.com/gogo/protobuf/proto"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
)
type MetricStreamingDecoder struct {
@ -153,12 +151,16 @@ func (m *MetricStreamingDecoder) GetLabel() {
panic("don't use GetLabel, use Label instead")
}
type scratchBuilder interface {
Add(name, value string)
}
// Label parses labels into labels scratch builder. Metric name is missing
// given the protobuf metric model and has to be deduced from the metric family name.
// TODO: The method name intentionally hide MetricStreamingDecoder.Metric.Label
// field to avoid direct use (it's not parsed). In future generator will generate
// structs tailored for streaming decoding.
func (m *MetricStreamingDecoder) Label(b *labels.ScratchBuilder) error {
func (m *MetricStreamingDecoder) Label(b scratchBuilder) error {
for _, l := range m.labels {
if err := parseLabel(m.mData[l.start:l.end], b); err != nil {
return err
@ -169,7 +171,7 @@ func (m *MetricStreamingDecoder) Label(b *labels.ScratchBuilder) error {
// parseLabel is essentially LabelPair.Unmarshal but directly adding into scratch builder
// and reusing strings.
func parseLabel(dAtA []byte, b *labels.ScratchBuilder) error {
func parseLabel(dAtA []byte, b scratchBuilder) error {
var name, value string
l := len(dAtA)
iNdEx := 0

View File

@ -44,6 +44,7 @@ import (
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/prometheus/schema"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/util/annotations"
@ -1202,7 +1203,7 @@ func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annota
mb.buckets = append(mb.buckets, Bucket{upperBound, sample.F})
}
for _, sample := range enh.nativeHistogramSamples {
for idx, sample := range enh.nativeHistogramSamples {
// We have to reconstruct the exact same signature as above for
// a classic histogram, just ignoring any le label.
enh.lblBuf = sample.Metric.Bytes(enh.lblBuf)
@ -1212,6 +1213,7 @@ func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annota
// labels. Do not evaluate anything.
annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), arg.PositionRange()))
delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf))
enh.nativeHistogramSamples[idx].H = nil
continue
}
}
@ -1376,7 +1378,7 @@ func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Label
return mat, warnings
}
func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) {
func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, params *fParams) (Matrix, annotations.Annotations) {
// Keep a copy of the original point slice so that it can be returned to the pool.
origMatrix := slices.Clone(inputMatrix)
defer func() {
@ -1386,7 +1388,7 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
}
}()
var warnings annotations.Annotations
var annos annotations.Annotations
enh := &EvalNodeHelper{enableDelayedNameRemoval: ev.enableDelayedNameRemoval}
tempNumSamples := ev.currentSamples
@ -1416,46 +1418,43 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
}
groups := make([]groupedAggregation, groupCount)
var k int64
var ratio float64
var seriess map[uint64]Series
switch aggExpr.Op {
case parser.TOPK, parser.BOTTOMK, parser.LIMITK:
if !convertibleToInt64(param) {
ev.errorf("Scalar value %v overflows int64", param)
// Return early if all k values are less than one.
if params.Max() < 1 {
return nil, annos
}
k = int64(param)
if k > int64(len(inputMatrix)) {
k = int64(len(inputMatrix))
}
if k < 1 {
return nil, warnings
}
seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash.
seriess = make(map[uint64]Series, len(inputMatrix))
case parser.LIMIT_RATIO:
if math.IsNaN(param) {
ev.errorf("Ratio value %v is NaN", param)
// Return early if all r values are zero.
if params.Max() == 0 && params.Min() == 0 {
return nil, annos
}
switch {
case param == 0:
return nil, warnings
case param < -1.0:
ratio = -1.0
warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange()))
case param > 1.0:
ratio = 1.0
warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange()))
default:
ratio = param
if params.Max() > 1.0 {
annos.Add(annotations.NewInvalidRatioWarning(params.Max(), 1.0, aggExpr.Param.PositionRange()))
}
seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash.
if params.Min() < -1.0 {
annos.Add(annotations.NewInvalidRatioWarning(params.Min(), -1.0, aggExpr.Param.PositionRange()))
}
seriess = make(map[uint64]Series, len(inputMatrix))
case parser.QUANTILE:
if math.IsNaN(param) || param < 0 || param > 1 {
warnings.Add(annotations.NewInvalidQuantileWarning(param, aggExpr.Param.PositionRange()))
if params.HasAnyNaN() {
annos.Add(annotations.NewInvalidQuantileWarning(math.NaN(), aggExpr.Param.PositionRange()))
}
if params.Max() > 1 {
annos.Add(annotations.NewInvalidQuantileWarning(params.Max(), aggExpr.Param.PositionRange()))
}
if params.Min() < 0 {
annos.Add(annotations.NewInvalidQuantileWarning(params.Min(), aggExpr.Param.PositionRange()))
}
}
for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval {
fParam := params.Next()
if err := contextDone(ctx, "expression evaluation"); err != nil {
ev.error(err)
}
@ -1467,17 +1466,17 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
var ws annotations.Annotations
switch aggExpr.Op {
case parser.TOPK, parser.BOTTOMK, parser.LIMITK, parser.LIMIT_RATIO:
result, ws = ev.aggregationK(aggExpr, k, ratio, inputMatrix, seriesToResult, groups, enh, seriess)
result, ws = ev.aggregationK(aggExpr, fParam, inputMatrix, seriesToResult, groups, enh, seriess)
// If this could be an instant query, shortcut so as not to change sort order.
if ev.endTimestamp == ev.startTimestamp {
warnings.Merge(ws)
return result, warnings
if ev.startTimestamp == ev.endTimestamp {
annos.Merge(ws)
return result, annos
}
default:
ws = ev.aggregation(aggExpr, param, inputMatrix, result, seriesToResult, groups, enh)
ws = ev.aggregation(aggExpr, fParam, inputMatrix, result, seriesToResult, groups, enh)
}
warnings.Merge(ws)
annos.Merge(ws)
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env))
@ -1502,7 +1501,7 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
}
result = result[:dst]
}
return result, warnings
return result, annos
}
// evalSeries generates a Matrix between ev.startTimestamp and ev.endTimestamp (inclusive), each point spaced ev.interval apart, from series given offset.
@ -1680,18 +1679,14 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
var warnings annotations.Annotations
originalNumSamples := ev.currentSamples
// param is the number k for topk/bottomk, or q for quantile.
var fParam float64
if param != nil {
val, ws := ev.eval(ctx, param)
warnings.Merge(ws)
fParam = val.(Matrix)[0].Floats[0].F
}
fp, ws := newFParams(ctx, ev, param)
warnings.Merge(ws)
// Now fetch the data to be aggregated.
val, ws := ev.eval(ctx, e.Expr)
warnings.Merge(ws)
inputMatrix := val.(Matrix)
result, ws := ev.rangeEvalAgg(ctx, e, sortedGrouping, inputMatrix, fParam)
result, ws := ev.rangeEvalAgg(ctx, e, sortedGrouping, inputMatrix, fp)
warnings.Merge(ws)
ev.currentSamples = originalNumSamples + result.TotalSamples()
ev.samplesStats.UpdatePeak(ev.currentSamples)
@ -1827,7 +1822,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
it.Reset(chkIter)
metric := selVS.Series[i].Labels()
if !ev.enableDelayedNameRemoval && dropName {
metric = metric.DropMetricName()
metric = metric.DropReserved(schema.IsMetadataLabel)
}
ss := Series{
Metric: metric,
@ -1966,7 +1961,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
if e.Op == parser.SUB {
for i := range mat {
if !ev.enableDelayedNameRemoval {
mat[i].Metric = mat[i].Metric.DropMetricName()
mat[i].Metric = mat[i].Metric.DropReserved(schema.IsMetadataLabel)
}
mat[i].DropName = true
for j := range mat[i].Floats {
@ -2715,7 +2710,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
}
metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh)
if !ev.enableDelayedNameRemoval && returnBool {
metric = metric.DropMetricName()
metric = metric.DropReserved(schema.IsMetadataLabel)
}
insertedSigs, exists := matchedSigs[sig]
if matching.Card == parser.CardOneToOne {
@ -2782,8 +2777,9 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V
}
str := string(enh.lblResultBuf)
if shouldDropMetricName(op) {
enh.lb.Del(labels.MetricName)
if changesMetricSchema(op) {
// Setting empty Metadata causes the deletion of those if they exists.
schema.Metadata{}.SetToLabels(enh.lb)
}
if matching.Card == parser.CardOneToOne {
@ -2842,9 +2838,9 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
if keep {
lhsSample.F = float
lhsSample.H = histogram
if shouldDropMetricName(op) || returnBool {
if changesMetricSchema(op) || returnBool {
if !ev.enableDelayedNameRemoval {
lhsSample.Metric = lhsSample.Metric.DropMetricName()
lhsSample.Metric = lhsSample.Metric.DropReserved(schema.IsMetadataLabel)
}
lhsSample.DropName = true
}
@ -3268,7 +3264,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// seriesToResult maps inputMatrix indexes to groups indexes.
// For an instant query, returns a Matrix in descending order for topk or ascending for bottomk, or without any order for limitk / limit_ratio.
// For a range query, aggregates output in the seriess map.
func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int64, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) {
func (ev *evaluator) aggregationK(e *parser.AggregateExpr, fParam float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) {
op := e.Op
var s Sample
var annos annotations.Annotations
@ -3277,6 +3273,14 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int64, r float64, i
for i := range groups {
groups[i].seen = false
}
// advanceRemainingSeries discards any values at the current timestamp `ts`
// for the remaining input series. In range queries, if these values are not
// consumed now, they will no longer be accessible in the next evaluation step.
advanceRemainingSeries := func(ts int64, startIdx int) {
for i := startIdx; i < len(inputMatrix); i++ {
_, _, _ = ev.nextValues(ts, &inputMatrix[i])
}
}
seriesLoop:
for si := range inputMatrix {
@ -3286,6 +3290,42 @@ seriesLoop:
}
s = Sample{Metric: inputMatrix[si].Metric, F: f, H: h, DropName: inputMatrix[si].DropName}
var k int64
var r float64
switch op {
case parser.TOPK, parser.BOTTOMK, parser.LIMITK:
if !convertibleToInt64(fParam) {
ev.errorf("Scalar value %v overflows int64", fParam)
}
k = int64(fParam)
if k > int64(len(inputMatrix)) {
k = int64(len(inputMatrix))
}
if k < 1 {
if enh.Ts != ev.endTimestamp {
advanceRemainingSeries(enh.Ts, si+1)
}
return nil, annos
}
case parser.LIMIT_RATIO:
if math.IsNaN(fParam) {
ev.errorf("Ratio value %v is NaN", fParam)
}
switch {
case fParam == 0:
if enh.Ts != ev.endTimestamp {
advanceRemainingSeries(enh.Ts, si+1)
}
return nil, annos
case fParam < -1.0:
r = -1.0
case fParam > 1.0:
r = 1.0
default:
r = fParam
}
}
group := &groups[seriesToResult[si]]
// Initialize this group if it's the first time we've seen it.
if !group.seen {
@ -3376,6 +3416,10 @@ seriesLoop:
group.groupAggrComplete = true
groupsRemaining--
if groupsRemaining == 0 {
// Process other values in the series before breaking the loop in case of range query.
if enh.Ts != ev.endTimestamp {
advanceRemainingSeries(enh.Ts, si+1)
}
break seriesLoop
}
}
@ -3502,7 +3546,7 @@ func (ev *evaluator) cleanupMetricLabels(v parser.Value) {
mat := v.(Matrix)
for i := range mat {
if mat[i].DropName {
mat[i].Metric = mat[i].Metric.DropMetricName()
mat[i].Metric = mat[i].Metric.DropReserved(schema.IsMetadataLabel)
}
}
if mat.ContainsSameLabelset() {
@ -3512,7 +3556,7 @@ func (ev *evaluator) cleanupMetricLabels(v parser.Value) {
vec := v.(Vector)
for i := range vec {
if vec[i].DropName {
vec[i].Metric = vec[i].Metric.DropMetricName()
vec[i].Metric = vec[i].Metric.DropReserved(schema.IsMetadataLabel)
}
}
if vec.ContainsSameLabelset() {
@ -3614,9 +3658,9 @@ func btos(b bool) float64 {
return 0
}
// shouldDropMetricName returns whether the metric name should be dropped in the
// result of the op operation.
func shouldDropMetricName(op parser.ItemType) bool {
// changesSchema returns true whether the op operation changes the semantic meaning or
// schema of the metric.
func changesMetricSchema(op parser.ItemType) bool {
switch op {
case parser.ADD, parser.SUB, parser.DIV, parser.MUL, parser.POW, parser.MOD, parser.ATAN2:
return true

View File

@ -31,6 +31,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/prometheus/schema"
"github.com/prometheus/prometheus/util/annotations"
)
@ -577,7 +578,7 @@ func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, ann
continue
}
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
Metric: el.Metric,
@ -612,7 +613,6 @@ func funcClampMin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper
// === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) ===
func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector)
// round returns a number rounded to toNearest.
// Ties are solved by rounding up.
toNearest := float64(1)
@ -621,23 +621,9 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
}
// Invert as it seems to cause fewer floating point accuracy issues.
toNearestInverse := 1.0 / toNearest
for _, el := range vec {
if el.H != nil {
// Process only float samples.
continue
}
f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{
Metric: el.Metric,
F: f,
DropName: true,
})
}
return enh.Out, nil
return simpleFloatFunc(vals, enh, func(f float64) float64 {
return math.Floor(f*toNearestInverse+0.5) / toNearestInverse
}), nil
}
// === Scalar(node parser.ValueTypeVector) Scalar ===
@ -823,8 +809,8 @@ func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
}), annos
}
// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
// compareOverTime is a helper used by funcMaxOverTime and funcMinOverTime.
func compareOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, compareFn func(float64, float64) bool) (Vector, annotations.Annotations) {
samples := vals[0].(Matrix)[0]
var annos annotations.Annotations
if len(samples.Floats) == 0 {
@ -837,7 +823,7 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
return aggrOverTime(vals, enh, func(s Series) float64 {
maxVal := s.Floats[0].F
for _, f := range s.Floats {
if f.F > maxVal || math.IsNaN(maxVal) {
if compareFn(f.F, maxVal) {
maxVal = f.F
}
}
@ -845,26 +831,18 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
}), annos
}
// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool {
return (cur > maxVal) || math.IsNaN(maxVal)
})
}
// === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
samples := vals[0].(Matrix)[0]
var annos annotations.Annotations
if len(samples.Floats) == 0 {
return enh.Out, nil
}
if len(samples.Histograms) > 0 {
metricName := samples.Metric.Get(labels.MetricName)
annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
}
return aggrOverTime(vals, enh, func(s Series) float64 {
minVal := s.Floats[0].F
for _, f := range s.Floats {
if f.F < minVal || math.IsNaN(minVal) {
minVal = f.F
}
}
return minVal
}), annos
return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool {
return (cur < maxVal) || math.IsNaN(maxVal)
})
}
// === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
@ -997,11 +975,11 @@ func funcPresentOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNod
}), nil
}
func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
func simpleFloatFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
for _, el := range vals[0].(Vector) {
if el.H == nil { // Process only float samples.
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
Metric: el.Metric,
@ -1015,114 +993,114 @@ func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float6
// === abs(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAbs(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Abs), nil
return simpleFloatFunc(vals, enh, math.Abs), nil
}
// === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcCeil(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Ceil), nil
return simpleFloatFunc(vals, enh, math.Ceil), nil
}
// === floor(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcFloor(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Floor), nil
return simpleFloatFunc(vals, enh, math.Floor), nil
}
// === exp(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcExp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Exp), nil
return simpleFloatFunc(vals, enh, math.Exp), nil
}
// === sqrt(Vector VectorNode) (Vector, Annotations) ===
func funcSqrt(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Sqrt), nil
return simpleFloatFunc(vals, enh, math.Sqrt), nil
}
// === ln(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcLn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Log), nil
return simpleFloatFunc(vals, enh, math.Log), nil
}
// === log2(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcLog2(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Log2), nil
return simpleFloatFunc(vals, enh, math.Log2), nil
}
// === log10(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcLog10(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Log10), nil
return simpleFloatFunc(vals, enh, math.Log10), nil
}
// === sin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcSin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Sin), nil
return simpleFloatFunc(vals, enh, math.Sin), nil
}
// === cos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcCos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Cos), nil
return simpleFloatFunc(vals, enh, math.Cos), nil
}
// === tan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcTan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Tan), nil
return simpleFloatFunc(vals, enh, math.Tan), nil
}
// === asin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAsin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Asin), nil
return simpleFloatFunc(vals, enh, math.Asin), nil
}
// === acos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAcos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Acos), nil
return simpleFloatFunc(vals, enh, math.Acos), nil
}
// === atan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAtan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Atan), nil
return simpleFloatFunc(vals, enh, math.Atan), nil
}
// === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcSinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Sinh), nil
return simpleFloatFunc(vals, enh, math.Sinh), nil
}
// === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcCosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Cosh), nil
return simpleFloatFunc(vals, enh, math.Cosh), nil
}
// === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcTanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Tanh), nil
return simpleFloatFunc(vals, enh, math.Tanh), nil
}
// === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAsinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Asinh), nil
return simpleFloatFunc(vals, enh, math.Asinh), nil
}
// === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAcosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Acosh), nil
return simpleFloatFunc(vals, enh, math.Acosh), nil
}
// === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAtanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Atanh), nil
return simpleFloatFunc(vals, enh, math.Atanh), nil
}
// === rad(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcRad(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, func(v float64) float64 {
return simpleFloatFunc(vals, enh, func(v float64) float64 {
return v * math.Pi / 180
}), nil
}
// === deg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcDeg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, func(v float64) float64 {
return simpleFloatFunc(vals, enh, func(v float64) float64 {
return v * 180 / math.Pi
}), nil
}
@ -1134,7 +1112,7 @@ func funcPi(_ []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector,
// === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcSgn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, func(v float64) float64 {
return simpleFloatFunc(vals, enh, func(v float64) float64 {
switch {
case v < 0:
return -1
@ -1151,7 +1129,7 @@ func funcTimestamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelpe
vec := vals[0].(Vector)
for _, el := range vec {
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
Metric: el.Metric,
@ -1271,79 +1249,48 @@ func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNo
return append(enh.Out, Sample{F: slope*duration + intercept}), nil
}
// simpleHistogramFunc applies f to every native-histogram sample in the input
// vector and appends one float sample per histogram to enh.Out. Float-only
// samples (el.H == nil) are silently skipped. The metric name (and other
// reserved metadata labels) is dropped eagerly unless delayed name removal is
// enabled, in which case DropName marks the sample for later removal.
func simpleHistogramFunc(vals []parser.Value, enh *EvalNodeHelper, f func(h *histogram.FloatHistogram) float64) Vector {
	for _, el := range vals[0].(Vector) {
		if el.H != nil { // Process only histogram samples.
			if !enh.enableDelayedNameRemoval {
				el.Metric = el.Metric.DropMetricName()
			}
			enh.Out = append(enh.Out, Sample{
				Metric:   el.Metric,
				F:        f(el.H),
				DropName: true, // Name is conceptually dropped even when removal is delayed.
			})
		}
	}
	return enh.Out
}
// === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramCount(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
inVec := vals[0].(Vector)
for _, sample := range inVec {
// Skip non-histogram samples.
if sample.H == nil {
continue
}
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{
Metric: sample.Metric,
F: sample.H.Count,
DropName: true,
})
}
return enh.Out, nil
return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 {
return h.Count
}), nil
}
// === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramSum(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
inVec := vals[0].(Vector)
for _, sample := range inVec {
// Skip non-histogram samples.
if sample.H == nil {
continue
}
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{
Metric: sample.Metric,
F: sample.H.Sum,
DropName: true,
})
}
return enh.Out, nil
return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 {
return h.Sum
}), nil
}
// === histogram_avg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramAvg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
inVec := vals[0].(Vector)
for _, sample := range inVec {
// Skip non-histogram samples.
if sample.H == nil {
continue
}
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{
Metric: sample.Metric,
F: sample.H.Sum / sample.H.Count,
DropName: true,
})
}
return enh.Out, nil
return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 {
return h.Sum / h.Count
}), nil
}
func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) {
vec := vals[0].(Vector)
for _, sample := range vec {
// Skip non-histogram samples.
if sample.H == nil {
continue
}
mean := sample.H.Sum / sample.H.Count
return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 {
mean := h.Sum / h.Count
var variance, cVariance float64
it := sample.H.AllBucketIterator()
it := h.AllBucketIterator()
for it.Next() {
bucket := it.At()
if bucket.Count == 0 {
@ -1351,7 +1298,7 @@ func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResul
}
var val float64
switch {
case sample.H.UsesCustomBuckets():
case h.UsesCustomBuckets():
// Use arithmetic mean in case of custom buckets.
val = (bucket.Upper + bucket.Lower) / 2.0
case bucket.Lower <= 0 && bucket.Upper >= 0:
@ -1368,20 +1315,12 @@ func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResul
variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
}
variance += cVariance
variance /= sample.H.Count
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
}
variance /= h.Count
if varianceToResult != nil {
variance = varianceToResult(variance)
}
enh.Out = append(enh.Out, Sample{
Metric: sample.Metric,
F: variance,
DropName: true,
})
}
return enh.Out, nil
return variance
}), nil
}
// === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) ===
@ -1404,8 +1343,12 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev
// Deal with the native histograms.
for _, sample := range enh.nativeHistogramSamples {
if sample.H == nil {
// Native histogram conflicts with classic histogram at the same timestamp, ignore.
continue
}
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
sample.Metric = sample.Metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
Metric: sample.Metric,
@ -1420,7 +1363,7 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev
continue
}
if !enh.enableDelayedNameRemoval {
mb.metric = mb.metric.DropMetricName()
mb.metric = mb.metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
@ -1446,8 +1389,12 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
// Deal with the native histograms.
for _, sample := range enh.nativeHistogramSamples {
if sample.H == nil {
// Native histogram conflicts with classic histogram at the same timestamp, ignore.
continue
}
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
sample.Metric = sample.Metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
Metric: sample.Metric,
@ -1465,7 +1412,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
}
if !enh.enableDelayedNameRemoval {
mb.metric = mb.metric.DropMetricName()
mb.metric = mb.metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
@ -1683,7 +1630,7 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo
}
t := time.Unix(int64(el.F), 0).UTC()
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel)
}
enh.Out = append(enh.Out, Sample{
Metric: el.Metric,

View File

@ -61,7 +61,7 @@ const (
var symbolTable = labels.NewSymbolTable()
func fuzzParseMetricWithContentType(in []byte, contentType string) int {
p, warning := textparse.New(in, contentType, "", false, false, symbolTable)
p, warning := textparse.New(in, contentType, "", false, false, false, symbolTable)
if p == nil || warning != nil {
// An invalid content type is being passed, which should not happen
// in this context.

View File

@ -452,7 +452,7 @@ positive_duration_expr : duration_expr
offset_expr: expr OFFSET duration_expr
{
if numLit, ok := $3.(*NumberLiteral); ok {
yylex.(*parser).addOffset($1, time.Duration(numLit.Val*1000)*time.Millisecond)
yylex.(*parser).addOffset($1, time.Duration(math.Round(numLit.Val*float64(time.Second))))
$$ = $1
break
}
@ -506,7 +506,7 @@ matrix_selector : expr LEFT_BRACKET positive_duration_expr RIGHT_BRACKET
var rangeNl time.Duration
if numLit, ok := $3.(*NumberLiteral); ok {
rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond
rangeNl = time.Duration(math.Round(numLit.Val*float64(time.Second)))
}
rangeExpr, _ := $3.(*DurationExpr)
$$ = &MatrixSelector{
@ -523,11 +523,11 @@ subquery_expr : expr LEFT_BRACKET positive_duration_expr COLON positive_durati
var rangeNl time.Duration
var stepNl time.Duration
if numLit, ok := $3.(*NumberLiteral); ok {
rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond
rangeNl = time.Duration(math.Round(numLit.Val*float64(time.Second)))
}
rangeExpr, _ := $3.(*DurationExpr)
if numLit, ok := $5.(*NumberLiteral); ok {
stepNl = time.Duration(numLit.Val*1000)*time.Millisecond
stepNl = time.Duration(math.Round(numLit.Val*float64(time.Second)))
}
stepExpr, _ := $5.(*DurationExpr)
$$ = &SubqueryExpr{
@ -543,7 +543,7 @@ subquery_expr : expr LEFT_BRACKET positive_duration_expr COLON positive_durati
{
var rangeNl time.Duration
if numLit, ok := $3.(*NumberLiteral); ok {
rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond
rangeNl = time.Duration(math.Round(numLit.Val*float64(time.Second)))
}
rangeExpr, _ := $3.(*DurationExpr)
$$ = &SubqueryExpr{

View File

@ -1372,7 +1372,7 @@ yydefault:
yyDollar = yyS[yypt-3 : yypt+1]
{
if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok {
yylex.(*parser).addOffset(yyDollar[1].node, time.Duration(numLit.Val*1000)*time.Millisecond)
yylex.(*parser).addOffset(yyDollar[1].node, time.Duration(math.Round(numLit.Val*float64(time.Second))))
yyVAL.node = yyDollar[1].node
break
}
@ -1423,7 +1423,7 @@ yydefault:
var rangeNl time.Duration
if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok {
rangeNl = time.Duration(numLit.Val*1000) * time.Millisecond
rangeNl = time.Duration(math.Round(numLit.Val * float64(time.Second)))
}
rangeExpr, _ := yyDollar[3].node.(*DurationExpr)
yyVAL.node = &MatrixSelector{
@ -1439,11 +1439,11 @@ yydefault:
var rangeNl time.Duration
var stepNl time.Duration
if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok {
rangeNl = time.Duration(numLit.Val*1000) * time.Millisecond
rangeNl = time.Duration(math.Round(numLit.Val * float64(time.Second)))
}
rangeExpr, _ := yyDollar[3].node.(*DurationExpr)
if numLit, ok := yyDollar[5].node.(*NumberLiteral); ok {
stepNl = time.Duration(numLit.Val*1000) * time.Millisecond
stepNl = time.Duration(math.Round(numLit.Val * float64(time.Second)))
}
stepExpr, _ := yyDollar[5].node.(*DurationExpr)
yyVAL.node = &SubqueryExpr{
@ -1460,7 +1460,7 @@ yydefault:
{
var rangeNl time.Duration
if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok {
rangeNl = time.Duration(numLit.Val*1000) * time.Millisecond
rangeNl = time.Duration(math.Round(numLit.Val * float64(time.Second)))
}
rangeExpr, _ := yyDollar[3].node.(*DurationExpr)
yyVAL.node = &SubqueryExpr{

View File

@ -2008,6 +2008,57 @@ var testExpr = []struct {
errMsg: `unexpected "}" in label matching, expected string`,
},
// Test matrix selector.
{
input: "test[1000ms]",
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "test",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 4,
},
},
Range: 1000 * time.Millisecond,
EndPos: 12,
},
},
{
input: "test[1001ms]",
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "test",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 4,
},
},
Range: 1001 * time.Millisecond,
EndPos: 12,
},
},
{
input: "test[1002ms]",
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "test",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 4,
},
},
Range: 1002 * time.Millisecond,
EndPos: 12,
},
},
{
input: "test[5s]",
expected: &MatrixSelector{

View File

@ -274,7 +274,7 @@ load 5m
http_requests{job="app-server", instance="1", group="canary"} 0+80x10
http_requests_histogram{job="app-server", instance="2", group="canary"} {{schema:0 sum:10 count:10}}x11
http_requests_histogram{job="api-server", instance="3", group="production"} {{schema:0 sum:20 count:20}}x11
foo 3+0x10
foo 1+1x9 3
eval_ordered instant at 50m topk(3, http_requests)
http_requests{group="canary", instance="1", job="app-server"} 800
@ -340,6 +340,13 @@ eval_ordered instant at 50m topk(scalar(foo), http_requests)
http_requests{group="canary", instance="0", job="app-server"} 700
http_requests{group="production", instance="1", job="app-server"} 600
# Bug #15971.
eval range from 0m to 50m step 5m count(topk(scalar(foo), http_requests))
{} 1 2 3 4 5 6 7 8 9 9 3
eval range from 0m to 50m step 5m count(bottomk(scalar(foo), http_requests))
{} 1 2 3 4 5 6 7 8 9 9 3
# Tests for histogram: should ignore histograms.
eval_info instant at 50m topk(100, http_requests_histogram)
#empty
@ -447,7 +454,7 @@ load 10s
data{test="uneven samples",point="b"} 1
data{test="uneven samples",point="c"} 4
data_histogram{test="histogram sample", point="c"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}
foo .8
foo 0 1 0 1 0 1 0.8
eval instant at 1m quantile without(point)(0.8, data)
{test="two samples"} 0.8
@ -475,11 +482,18 @@ eval instant at 1m quantile without(point)((scalar(foo)), data)
{test="three samples"} 1.6
{test="uneven samples"} 2.8
eval_warn instant at 1m quantile without(point)(NaN, data)
eval instant at 1m quantile without(point)(NaN, data)
expect warn msg: PromQL warning: quantile value should be between 0 and 1, got NaN
{test="two samples"} NaN
{test="three samples"} NaN
{test="uneven samples"} NaN
# Bug #15971.
eval range from 0m to 1m step 10s quantile without(point) (scalar(foo), data)
{test="two samples"} 0 1 0 1 0 1 0.8
{test="three samples"} 0 2 0 2 0 2 1.6
{test="uneven samples"} 0 4 0 4 0 4 2.8
# Tests for group.
clear

View File

@ -1019,6 +1019,47 @@ eval instant at 1m sum_over_time(metric[2m])
eval instant at 1m avg_over_time(metric[2m])
{} 0.5
# Test per-series aggregation on dense samples.
clear
load 1ms
metric 1+0x4000
eval instant at 4s sum_over_time(metric[1000ms])
{} 1000
eval instant at 4s sum_over_time(metric[1001ms])
{} 1001
eval instant at 4s sum_over_time(metric[1002ms])
{} 1002
eval instant at 4s sum_over_time(metric[1003ms])
{} 1003
eval instant at 4s sum_over_time(metric[2000ms])
{} 2000
eval instant at 4s sum_over_time(metric[2001ms])
{} 2001
eval instant at 4s sum_over_time(metric[2002ms])
{} 2002
eval instant at 4s sum_over_time(metric[2003ms])
{} 2003
eval instant at 4s sum_over_time(metric[3000ms])
{} 3000
eval instant at 4s sum_over_time(metric[3001ms])
{} 3001
eval instant at 4s sum_over_time(metric[3002ms])
{} 3002
eval instant at 4s sum_over_time(metric[3003ms])
{} 3003
# Tests for stddev_over_time and stdvar_over_time.
clear
load 10s

View File

@ -584,3 +584,25 @@ eval instant at 10m histogram_count(increase(histogram_with_reset[15m]))
eval instant at 10m histogram_sum(increase(histogram_with_reset[15m]))
{} 91.5
clear
# Test histogram_quantile and histogram_fraction with conflicting classic and native histograms.
load 1m
series{host="a"} {{schema:0 sum:5 count:4 buckets:[9 2 1]}}
series{host="a", le="0.1"} 2
series{host="a", le="1"} 3
series{host="a", le="10"} 5
series{host="a", le="100"} 6
series{host="a", le="1000"} 8
series{host="a", le="+Inf"} 9
eval instant at 0 histogram_quantile(0.8, series)
expect no_info
expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "series"
# Should return no results.
eval instant at 0 histogram_fraction(-Inf, 1, series)
expect no_info
expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "series"
# Should return no results.

View File

@ -11,6 +11,8 @@ load 5m
http_requests{job="api-server", instance="3", group="canary"} 0+60x10
http_requests{job="api-server", instance="histogram_1", group="canary"} {{schema:0 sum:10 count:10}}x11
http_requests{job="api-server", instance="histogram_2", group="canary"} {{schema:0 sum:20 count:20}}x11
foo 1+1x10
bar 0 1 0 -1 0 1 0 -1 0 1 0
eval instant at 50m count(limitk by (group) (0, http_requests))
# empty
@ -69,6 +71,10 @@ eval instant at 50m count(limitk(1000, http_requests{instance=~"histogram_[0-9]"
eval range from 0 to 50m step 5m count(limitk(1000, http_requests{instance=~"histogram_[0-9]"}))
{} 2+0x10
# Bug #15971.
eval range from 0m to 50m step 5m count(limitk(scalar(foo), http_requests))
{} 1 2 3 4 5 6 7 8 8 8 8
# limit_ratio
eval range from 0 to 50m step 5m count(limit_ratio(0.0, http_requests))
# empty
@ -105,11 +111,13 @@ eval range from 0 to 50m step 5m count(limit_ratio(-1.0, http_requests) and http
{} 8+0x10
# Capped to 1.0 -> all samples.
eval_warn range from 0 to 50m step 5m count(limit_ratio(1.1, http_requests) and http_requests)
eval range from 0 to 50m step 5m count(limit_ratio(1.1, http_requests) and http_requests)
expect warn msg: PromQL warning: ratio value should be between -1 and 1, got 1.1, capping to 1
{} 8+0x10
# Capped to -1.0 -> all samples.
eval_warn range from 0 to 50m step 5m count(limit_ratio(-1.1, http_requests) and http_requests)
eval range from 0 to 50m step 5m count(limit_ratio(-1.1, http_requests) and http_requests)
expect warn msg: PromQL warning: ratio value should be between -1 and 1, got -1.1, capping to -1
{} 8+0x10
# Verify that limit_ratio(value) and limit_ratio(1.0-value) return the "complement" of each other.
@ -137,12 +145,12 @@ eval range from 0 to 50m step 5m count(limit_ratio(0.8, http_requests) or limit_
eval range from 0 to 50m step 5m count(limit_ratio(0.8, http_requests) and limit_ratio(-0.2, http_requests))
# empty
# Complement below for [some_ratio, 1.0 - some_ratio], some_ratio derived from time(),
# Complement below for [some_ratio, - (1.0 - some_ratio)], some_ratio derived from time(),
# using a small prime number to avoid rounded ratio values, and a small set of them.
eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) or limit_ratio(1.0 - (time() % 17/17), http_requests))
eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) or limit_ratio( - (1.0 - (time() % 17/17)), http_requests))
{} 8+0x10
eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) and limit_ratio(1.0 - (time() % 17/17), http_requests))
eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) and limit_ratio( - (1.0 - (time() % 17/17)), http_requests))
# empty
# Poor man's normality check: ok (loaded samples follow a nice linearity over labels and time).
@ -156,3 +164,7 @@ eval instant at 50m limit_ratio(1, http_requests{instance="histogram_1"})
eval range from 0 to 50m step 5m limit_ratio(1, http_requests{instance="histogram_1"})
{__name__="http_requests", group="canary", instance="histogram_1", job="api-server"} {{count:10 sum:10}}x10
# Bug #15971.
eval range from 0m to 50m step 5m count(limit_ratio(scalar(bar), http_requests))
{} _ 8 _ 8 _ 8 _ 8 _ 8 _

View File

@ -0,0 +1,280 @@
# Test PROM-39 type and unit labels with operators.
# A. Healthy case
# NOTE: __unit__="request" is not a best-practice unit, but keeping it to test the unit handling.
load 5m
http_requests_total{__type__="counter", __unit__="request", job="api-server", instance="0", group="production"} 0+10x10
http_requests_total{__type__="counter", __unit__="request", job="api-server", instance="1", group="production"} 0+20x10
http_requests_total{__type__="counter", __unit__="request", job="api-server", instance="0", group="canary"} 0+30x10
http_requests_total{__type__="counter", __unit__="request", job="api-server", instance="1", group="canary"} 0+40x10
http_requests_total{__type__="counter", __unit__="request", job="app-server", instance="0", group="production"} 0+50x10
http_requests_total{__type__="counter", __unit__="request", job="app-server", instance="1", group="production"} 0+60x10
http_requests_total{__type__="counter", __unit__="request", job="app-server", instance="0", group="canary"} 0+70x10
http_requests_total{__type__="counter", __unit__="request", job="app-server", instance="1", group="canary"} 0+80x10
eval instant at 50m SUM(http_requests_total) BY (job)
{job="api-server"} 1000
{job="app-server"} 2600
eval instant at 50m SUM(http_requests_total{__type__="counter", __unit__="request"}) BY (job)
{job="api-server"} 1000
{job="app-server"} 2600
eval instant at 50m SUM({__type__="counter"}) BY (job)
{job="api-server"} 1000
{job="app-server"} 2600
eval instant at 50m SUM({__unit__="request"}) BY (job)
{job="api-server"} 1000
{job="app-server"} 2600
eval instant at 50m SUM({__type__="counter", __unit__="request"}) BY (job)
{job="api-server"} 1000
{job="app-server"} 2600
eval instant at 50m SUM(http_requests_total) BY (job) - COUNT(http_requests_total) BY (job)
{job="api-server"} 996
{job="app-server"} 2596
eval instant at 50m -http_requests_total{job="api-server",instance="0",group="production"}
{job="api-server",instance="0",group="production"} -100
eval instant at 50m +http_requests_total{job="api-server",instance="0",group="production"}
http_requests_total{__type__="counter", __unit__="request", job="api-server",instance="0",group="production"} 100
eval instant at 50m -10^3 * - SUM(http_requests_total) BY (job) ^ -1
{job="api-server"} 1
{job="app-server"} 0.38461538461538464
eval instant at 50m SUM(http_requests_total) BY (job) / 0
{job="api-server"} +Inf
{job="app-server"} +Inf
eval instant at 50m http_requests_total{group="canary", instance="0", job="api-server"} / 0
{group="canary", instance="0", job="api-server"} +Inf
eval instant at 50m 0 * http_requests_total{group="canary", instance="0", job="api-server"} % 0
{group="canary", instance="0", job="api-server"} NaN
eval instant at 50m http_requests_total{job="api-server", group="canary"}
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="0", job="api-server"} 300
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="api-server"} 400
eval instant at 50m rate(http_requests_total[25m]) * 25 * 60
{group="canary", instance="0", job="api-server"} 150
{group="canary", instance="0", job="app-server"} 350
{group="canary", instance="1", job="api-server"} 200
{group="canary", instance="1", job="app-server"} 400
{group="production", instance="0", job="api-server"} 50
{group="production", instance="0", job="app-server"} 249.99999999999997
{group="production", instance="1", job="api-server"} 100
{group="production", instance="1", job="app-server"} 300
eval instant at 50m http_requests_total{group="canary"} and http_requests_total{instance="0"}
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="0", job="api-server"} 300
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="0", job="app-server"} 700
eval instant at 50m (http_requests_total{group="canary"} + 1) and http_requests_total{instance="0"}
{group="canary", instance="0", job="api-server"} 301
{group="canary", instance="0", job="app-server"} 701
eval instant at 50m http_requests_total{group="canary"} or http_requests_total{group="production"}
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="0", job="api-server"} 300
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="0", job="app-server"} 700
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="api-server"} 400
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="app-server"} 800
http_requests_total{__type__="counter", __unit__="request", group="production", instance="0", job="api-server"} 100
http_requests_total{__type__="counter", __unit__="request", group="production", instance="0", job="app-server"} 500
http_requests_total{__type__="counter", __unit__="request", group="production", instance="1", job="api-server"} 200
http_requests_total{__type__="counter", __unit__="request", group="production", instance="1", job="app-server"} 600
# On overlap the rhs samples must be dropped.
eval instant at 50m (http_requests_total{group="canary"} + 1) or http_requests_total{instance="1"}
{group="canary", instance="0", job="api-server"} 301
{group="canary", instance="0", job="app-server"} 701
{group="canary", instance="1", job="api-server"} 401
{group="canary", instance="1", job="app-server"} 801
http_requests_total{__type__="counter", __unit__="request", group="production", instance="1", job="api-server"} 200
http_requests_total{__type__="counter", __unit__="request", group="production", instance="1", job="app-server"} 600
eval instant at 50m http_requests_total{group="canary"} unless http_requests_total{instance="0"}
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="api-server"} 400
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="app-server"} 800
eval instant at 50m http_requests_total{group="canary"} unless on(job) http_requests_total{instance="0"}
eval instant at 50m http_requests_total{group="canary"} unless on(job, instance) http_requests_total{instance="0"}
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="api-server"} 400
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="app-server"} 800
eval instant at 50m http_requests_total{group="canary"} / on(instance,job) http_requests_total{group="production"}
{instance="0", job="api-server"} 3
{instance="0", job="app-server"} 1.4
{instance="1", job="api-server"} 2
{instance="1", job="app-server"} 1.3333333333333333
eval instant at 50m http_requests_total{group="canary"} unless ignoring(group, instance) http_requests_total{instance="0"}
eval instant at 50m http_requests_total{group="canary"} unless ignoring(group) http_requests_total{instance="0"}
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="api-server"} 400
http_requests_total{__type__="counter", __unit__="request", group="canary", instance="1", job="app-server"} 800
eval instant at 50m http_requests_total{group="canary"} / ignoring(group) http_requests_total{group="production"}
{instance="0", job="api-server"} 3
{instance="0", job="app-server"} 1.4
{instance="1", job="api-server"} 2
{instance="1", job="app-server"} 1.3333333333333333
# Comparisons.
eval instant at 50m SUM(http_requests_total) BY (job) > 1000
{job="app-server"} 2600
eval instant at 50m SUM(http_requests_total) BY (job) == bool SUM(http_requests_total) BY (job)
{job="api-server"} 1
{job="app-server"} 1
eval instant at 50m SUM(http_requests_total) BY (job) != bool SUM(http_requests_total) BY (job)
{job="api-server"} 0
{job="app-server"} 0
eval instant at 50m http_requests_total{job="api-server", instance="0", group="production"} == bool 100
{job="api-server", instance="0", group="production"} 1
clear
# B. Inconsistent type and unit cases for unique series.
# NOTE: __unit__="request" is not a best-practice unit, but keeping it to test the unit handling.
load 5m
http_requests_total{__type__="counter", __unit__="request", job="api-server", instance="0", group="production"} 0+10x10
http_requests_total{__type__="gauge", __unit__="request", job="api-server", instance="1", group="production"} 0+20x10
http_requests_total{__type__="gauge", __unit__="not-request", job="api-server", instance="0", group="canary"} 0+30x10
http_requests_total{__type__="counter", __unit__="not-request", job="api-server", instance="1", group="canary"} 0+40x10
http_requests_total{__type__="counter", __unit__="request", job="app-server", instance="0", group="production"} 0+50x10
http_requests_total{__type__="counter", __unit__="request", job="app-server", instance="1", group="production"} 0+60x10
http_requests_total{__type__="counter", __unit__="", job="app-server", instance="0", group="canary"} 0+70x10
http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10
eval instant at 50m SUM(http_requests_total) BY (job)
{job="api-server"} 1000
{job="app-server"} 2600
eval instant at 50m SUM(http_requests_total{__type__="counter", __unit__="request"}) BY (job)
{job="api-server"} 100
{job="app-server"} 1100
eval instant at 50m SUM({__type__="counter"}) BY (job)
{job="api-server"} 500
{job="app-server"} 1800
eval instant at 50m SUM({__unit__="request"}) BY (job)
{job="api-server"} 300
{job="app-server"} 1100
eval instant at 50m SUM({__type__="counter", __unit__="request"}) BY (job)
{job="api-server"} 100
{job="app-server"} 1100
eval instant at 50m SUM(http_requests_total) BY (job) - COUNT(http_requests_total) BY (job)
{job="api-server"} 996
{job="app-server"} 2596
eval instant at 50m -http_requests_total{job="api-server",instance="0",group="production"}
{job="api-server",instance="0",group="production"} -100
eval instant at 50m +http_requests_total{job="api-server",instance="0",group="production"}
http_requests_total{__type__="counter", __unit__="request", job="api-server",instance="0",group="production"} 100
eval instant at 50m -10^3 * - SUM(http_requests_total) BY (job) ^ -1
{job="api-server"} 1
{job="app-server"} 0.38461538461538464
eval instant at 50m SUM(http_requests_total) BY (job) / 0
{job="api-server"} +Inf
{job="app-server"} +Inf
eval instant at 50m http_requests_total{group="canary", instance="0", job="api-server"} / 0
{group="canary", instance="0", job="api-server"} +Inf
eval instant at 50m 0 * http_requests_total{group="canary", instance="0", job="api-server"} % 0
{group="canary", instance="0", job="api-server"} NaN
eval instant at 50m http_requests_total{job="api-server", group="canary"}
http_requests_total{__type__="gauge", __unit__="not-request", group="canary", instance="0", job="api-server"} 300
http_requests_total{__type__="counter", __unit__="not-request", group="canary", instance="1", job="api-server"} 400
eval instant at 50m http_requests_total{__type__="counter", job="api-server", group="canary"}
http_requests_total{__type__="counter", __unit__="not-request", group="canary", instance="1", job="api-server"} 400
eval instant at 50m rate(http_requests_total[25m]) * 25 * 60
{group="canary", instance="0", job="api-server"} 150
{group="canary", instance="0", job="app-server"} 350
{group="canary", instance="1", job="api-server"} 200
{group="canary", instance="1", job="app-server"} 400
{group="production", instance="0", job="api-server"} 50
{group="production", instance="0", job="app-server"} 249.99999999999997
{group="production", instance="1", job="api-server"} 100
{group="production", instance="1", job="app-server"} 300
eval instant at 50m http_requests_total{group="canary"} and http_requests_total{instance="0"}
http_requests_total{__type__="gauge", __unit__="not-request", group="canary", instance="0", job="api-server"} 300
http_requests_total{__type__="counter", __unit__="", group="canary", instance="0", job="app-server"} 700
eval instant at 50m (http_requests_total{group="canary"} + 1) and http_requests_total{instance="0"}
{group="canary", instance="0", job="api-server"} 301
{group="canary", instance="0", job="app-server"} 701
eval instant at 50m http_requests_total{group="canary"} or http_requests_total{group="production"}
http_requests_total{__type__="gauge", __unit__="not-request", group="canary", instance="0", job="api-server"} 300
http_requests_total{__type__="counter", __unit__="", group="canary", instance="0", job="app-server"} 700
http_requests_total{__type__="counter", __unit__="not-request", group="canary", instance="1", job="api-server"} 400
http_requests_total{group="canary", instance="1", job="app-server"} 800
http_requests_total{__type__="counter", __unit__="request", group="production", instance="0", job="api-server"} 100
http_requests_total{__type__="counter", __unit__="request", group="production", instance="0", job="app-server"} 500
http_requests_total{__type__="gauge", __unit__="request", group="production", instance="1", job="api-server"} 200
http_requests_total{__type__="counter", __unit__="request", group="production", instance="1", job="app-server"} 600
# On overlap the rhs samples must be dropped.
eval instant at 50m (http_requests_total{group="canary"} + 1) or http_requests_total{instance="1"}
{group="canary", instance="0", job="api-server"} 301
{group="canary", instance="0", job="app-server"} 701
{group="canary", instance="1", job="api-server"} 401
{group="canary", instance="1", job="app-server"} 801
http_requests_total{__type__="gauge", __unit__="request", group="production", instance="1", job="api-server"} 200
http_requests_total{__type__="counter", __unit__="request", group="production", instance="1", job="app-server"} 600
eval instant at 50m http_requests_total{group="canary"} unless http_requests_total{instance="0"}
http_requests_total{__type__="counter", __unit__="not-request", group="canary", instance="1", job="api-server"} 400
http_requests_total{group="canary", instance="1", job="app-server"} 800
eval instant at 50m http_requests_total{group="canary"} unless on(job) http_requests_total{instance="0"}
eval instant at 50m http_requests_total{group="canary"} unless on(job, instance) http_requests_total{instance="0"}
http_requests_total{__type__="counter", __unit__="not-request", group="canary", instance="1", job="api-server"} 400
http_requests_total{group="canary", instance="1", job="app-server"} 800
eval instant at 50m http_requests_total{group="canary"} / on(instance,job) http_requests_total{group="production"}
{instance="0", job="api-server"} 3
{instance="0", job="app-server"} 1.4
{instance="1", job="api-server"} 2
{instance="1", job="app-server"} 1.3333333333333333
eval instant at 50m http_requests_total{group="canary"} unless ignoring(group) http_requests_total{instance="0"}
http_requests_total{__type__="counter", __unit__="not-request", group="canary", instance="1", job="api-server"} 400
http_requests_total{group="canary", instance="1", job="app-server"} 800
eval instant at 50m http_requests_total{group="canary"} / ignoring(group) http_requests_total{group="production"}
# Comparisons.
eval instant at 50m SUM(http_requests_total) BY (job) > 1000
{job="app-server"} 2600
eval instant at 50m SUM(http_requests_total) BY (job) == bool SUM(http_requests_total) BY (job)
{job="api-server"} 1
{job="app-server"} 1
eval instant at 50m SUM(http_requests_total) BY (job) != bool SUM(http_requests_total) BY (job)
{job="api-server"} 0
{job="app-server"} 0
eval instant at 50m http_requests_total{job="api-server", instance="0", group="production"} == bool 100
{job="api-server", instance="0", group="production"} 1

View File

@ -14,6 +14,7 @@
package promql
import (
"context"
"encoding/json"
"errors"
"fmt"
@ -533,3 +534,68 @@ func (ssi *storageSeriesIterator) Next() chunkenc.ValueType {
func (ssi *storageSeriesIterator) Err() error {
return nil
}
// fParams holds one scalar parameter of a PromQL function call, either as a
// constant value (for number-literal expressions) or as a per-step series,
// together with precomputed min/max/NaN information about all of its values.
type fParams struct {
	series     Series  // per-step parameter values when not constant
	constValue float64 // the constant value, valid only when isConstant is true
	isConstant bool    // true when the parameter expression was a *parser.NumberLiteral
	minValue   float64 // minimum over all parameter values (see newFParams)
	maxValue   float64 // maximum over all parameter values (see newFParams)
	hasAnyNaN  bool    // true if any parameter value is NaN
}
// newFParams evaluates the expression and returns an fParams object,
// which holds the parameter values (constant or series) along with min, max,
// and NaN info. A nil expr yields a zero-valued fParams.
func newFParams(ctx context.Context, ev *evaluator, expr parser.Expr) (*fParams, annotations.Annotations) {
	if expr == nil {
		return &fParams{}, nil
	}
	_, isConst := expr.(*parser.NumberLiteral)
	res, annos := ev.eval(ctx, expr)
	mat, ok := res.(Matrix)
	if !ok || len(mat) == 0 {
		return &fParams{}, annos
	}
	fp := &fParams{
		series:     mat[0],
		isConstant: isConst,
		minValue:   math.MaxFloat64,
		maxValue:   -math.MaxFloat64,
	}
	if isConst {
		// A number literal has a single value; min, max and the NaN flag all
		// derive from it directly.
		c := fp.series.Floats[0].F
		fp.constValue = c
		fp.minValue, fp.maxValue = c, c
		fp.hasAnyNaN = math.IsNaN(c)
		return fp, annos
	}
	for _, pt := range fp.series.Floats {
		fp.minValue = math.Min(fp.minValue, pt.F)
		fp.maxValue = math.Max(fp.maxValue, pt.F)
		if math.IsNaN(pt.F) {
			fp.hasAnyNaN = true
		}
	}
	return fp, annos
}
// Max returns the largest parameter value observed (or the constant itself).
func (fp *fParams) Max() float64 {
	return fp.maxValue
}

// Min returns the smallest parameter value observed (or the constant itself).
func (fp *fParams) Min() float64 {
	return fp.minValue
}

// HasAnyNaN reports whether any parameter value is NaN.
func (fp *fParams) HasAnyNaN() bool {
	return fp.hasAnyNaN
}
// Next returns the next value from the series or the constant value, and
// advances the series if applicable. Once a non-constant series is exhausted
// it keeps returning 0.
func (fp *fParams) Next() float64 {
	switch {
	case fp.isConstant:
		return fp.constValue
	case len(fp.series.Floats) == 0:
		return 0
	}
	v := fp.series.Floats[0].F
	fp.series.Floats = fp.series.Floats[1:]
	return v
}

View File

@ -20,6 +20,7 @@ import (
"math"
"os"
"path"
"slices"
"sort"
"strconv"
"sync"
@ -1008,11 +1009,8 @@ func TestMetricsUpdate(t *testing.T) {
var metrics int
for _, m := range ms {
s := m.GetName()
for _, n := range metricNames {
if s == n {
metrics += len(m.Metric)
break
}
if slices.Contains(metricNames, s) {
metrics += len(m.Metric)
}
}
return metrics

157
schema/labels.go Normal file
View File

@ -0,0 +1,157 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
)
const (
	// Special label names and selectors for schema.Metadata fields.
	// They are currently private to ensure __name__, __type__ and __unit__ are
	// used together and remain extensible in Prometheus. See
	// NewMetadataFromLabels and the Metadata methods for the interactions with
	// the labels package structs.
	metricName = "__name__"
	metricType = "__type__"
	metricUnit = "__unit__"
)

// IsMetadataLabel returns true if the given label name is a special
// schema Metadata label (__name__, __type__ or __unit__).
func IsMetadataLabel(name string) bool {
	switch name {
	case metricName, metricType, metricUnit:
		return true
	default:
		return false
	}
}
// Metadata represents the core metric schema/metadata elements that:
// * are describing and identifying the metric schema/shape (e.g. name, type and unit).
// * are contributing to the general metric/series identity.
// * with the type-and-unit feature, are stored as Prometheus labels.
//
// Historically, similar information was encoded in the labels.MetricName (suffixes)
// and in the separate metadata.Metadata structures. However, with the
// type-and-unit-label feature (PROM-39), this information can now be stored directly
// in the special schema metadata labels, which offers better reliability (e.g. atomicity),
// compatibility and, in many cases, efficiency.
//
// NOTE: Metadata in the current form is generally similar (yet different) to:
//   - The MetricFamily definition in OpenMetrics (https://prometheus.io/docs/specs/om/open_metrics_spec/#metricfamily).
//     However, there is a small and important distinction around the metric name semantics
//     for the "classic" representation of complex metrics like histograms. The
//     Metadata.Name follows the __name__ semantics. See Name for details.
//   - Original metadata.Metadata entries. However, not all fields in that metadata
//     are "identifiable", notably the help field, plus metadata does not contain Name.
type Metadata struct {
	// Name represents the final metric name for a Prometheus series.
	// NOTE(bwplotka): Prometheus scrape formats (e.g. OpenMetrics) define
	// the "metric family name". The Metadata.Name (so the __name__ label) is not
	// always the same as the MetricFamily.Name e.g.:
	// * OpenMetrics metric family name on scrape: "acme_http_router_request_seconds"
	// * Resulting Prometheus metric name: "acme_http_router_request_seconds_sum"
	//
	// Empty string means a nameless metric (e.g. result of a PromQL function).
	Name string

	// Type represents the metric type. The empty value ("") is equivalent to
	// model.UnknownMetricType.
	Type model.MetricType

	// Unit represents the metric unit. Empty string means a unitless metric (e.g.
	// result of a PromQL function).
	//
	// NOTE: Currently the unit value is not strictly defined other than OpenMetrics
	// recommendations: https://prometheus.io/docs/specs/om/open_metrics_spec/#units-and-base-units
	// TODO(bwplotka): Consider a stricter validation and rules e.g. lowercase only or UCUM standard.
	// Read more in https://github.com/prometheus/proposals/blob/main/proposals/2024-09-25_metadata-labels.md#more-strict-unit-and-type-value-definition
	Unit string
}
// NewMetadataFromLabels returns the schema metadata extracted from the given
// labels. A missing __type__ label maps to model.MetricTypeUnknown.
func NewMetadataFromLabels(ls labels.Labels) Metadata {
	m := Metadata{
		Name: ls.Get(metricName),
		Type: model.MetricTypeUnknown,
		Unit: ls.Get(metricUnit),
	}
	if t := ls.Get(metricType); t != "" {
		m.Type = model.MetricType(t)
	}
	return m
}
// IsTypeEmpty returns true if the metric type is empty (not set).
// The unknown type is treated as semantically empty.
func (m Metadata) IsTypeEmpty() bool {
	switch m.Type {
	case "", model.MetricTypeUnknown:
		return true
	}
	return false
}
// IsEmptyFor returns true if the Metadata field represented by the given
// labelName is empty (not set). If labelName is not representing any Metadata
// field, IsEmptyFor returns true.
func (m Metadata) IsEmptyFor(labelName string) bool {
	switch labelName {
	case metricName:
		return m.Name == ""
	case metricUnit:
		return m.Unit == ""
	case metricType:
		return m.IsTypeEmpty()
	}
	return true
}
// AddToLabels adds metric schema metadata as labels into the labels.ScratchBuilder.
// Empty Metadata fields are skipped (not added). Labels are added in the fixed
// order __name__, __type__, __unit__, same as before any later Sort by the caller.
func (m Metadata) AddToLabels(b *labels.ScratchBuilder) {
	typ := ""
	if !m.IsTypeEmpty() {
		typ = string(m.Type)
	}
	for _, l := range []labels.Label{
		{Name: metricName, Value: m.Name},
		{Name: metricType, Value: typ},
		{Name: metricUnit, Value: m.Unit},
	} {
		if l.Value != "" {
			b.Add(l.Name, l.Value)
		}
	}
}
// SetToLabels injects metric schema metadata as labels into the labels.Builder.
// It follows the labels.Builder.Set semantics, so empty Metadata fields will
// remove the corresponding existing labels if they were previously set.
func (m Metadata) SetToLabels(b *labels.Builder) {
	b.Set(metricName, m.Name)

	typ := string(m.Type)
	if m.Type == model.MetricTypeUnknown {
		// Unknown equals empty semantically, so remove the label on unknown too,
		// as promised in the method comment above.
		typ = ""
	}
	b.Set(metricType, typ)

	b.Set(metricUnit, m.Unit)
}
// IgnoreOverriddenMetadataLabelsScratchBuilder is a wrapper over labels scratch builder
// that ignores label additions that would collide with non-empty Overwrite Metadata fields.
// This lets higher-priority metadata (Overwrite) win over metadata labels coming from
// an original label set, while all non-metadata labels pass through unchanged.
type IgnoreOverriddenMetadataLabelsScratchBuilder struct {
	*labels.ScratchBuilder

	// Overwrite holds the Metadata whose non-empty fields take precedence;
	// Add calls for the corresponding metadata label names are dropped.
	Overwrite Metadata
}
// Add records a name/value pair, unless it would collide with a non-empty
// Overwrite Metadata field. Note if you Add the same name twice you will get
// a duplicate label, which is invalid.
func (b IgnoreOverriddenMetadataLabelsScratchBuilder) Add(name, value string) {
	if b.Overwrite.IsEmptyFor(name) {
		b.ScratchBuilder.Add(name, value)
	}
}

153
schema/labels_test.go Normal file
View File

@ -0,0 +1,153 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"fmt"
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/util/testutil"
)
// TestMetadata exercises the round trip between labels and Metadata:
// it builds the expected label set and Metadata for every combination of
// empty name/type/unit, then checks NewMetadataFromLabels, the emptiness
// predicates, and both label-building paths (ScratchBuilder and Builder)
// against those expectations.
func TestMetadata(t *testing.T) {
	testMeta := Metadata{
		Name: "metric_total",
		Type: model.MetricTypeCounter,
		Unit: "seconds",
	}
	for _, tcase := range []struct {
		emptyName, emptyType, emptyUnit bool
	}{
		{},
		{emptyName: true},
		{emptyType: true},
		{emptyUnit: true},
		{emptyName: true, emptyType: true, emptyUnit: true},
	} {
		var (
			expectedMeta   Metadata
			expectedLabels labels.Labels
		)
		{
			// Setup expectations. The "foo" label ensures metadata labels
			// coexist with ordinary labels.
			lb := labels.NewScratchBuilder(0)
			lb.Add("foo", "bar")
			if !tcase.emptyName {
				lb.Add(metricName, testMeta.Name)
				expectedMeta.Name = testMeta.Name
			}
			if !tcase.emptyType {
				lb.Add(metricType, string(testMeta.Type))
				expectedMeta.Type = testMeta.Type
			} else {
				// An absent __type__ label is expected to decode as unknown.
				expectedMeta.Type = model.MetricTypeUnknown
			}
			if !tcase.emptyUnit {
				lb.Add(metricUnit, testMeta.Unit)
				expectedMeta.Unit = testMeta.Unit
			}
			lb.Sort()
			expectedLabels = lb.Labels()
		}
		t.Run(fmt.Sprintf("meta=%#v", expectedMeta), func(t *testing.T) {
			{
				// From labels to Metadata.
				got := NewMetadataFromLabels(expectedLabels)
				require.Equal(t, expectedMeta, got)
			}
			{
				// Empty methods.
				require.Equal(t, tcase.emptyName, expectedMeta.IsEmptyFor(metricName))
				require.Equal(t, tcase.emptyType, expectedMeta.IsEmptyFor(metricType))
				require.Equal(t, tcase.emptyType, expectedMeta.IsTypeEmpty())
				require.Equal(t, tcase.emptyUnit, expectedMeta.IsEmptyFor(metricUnit))
			}
			{
				// From Metadata to labels for various builders.
				slb := labels.NewScratchBuilder(0)
				slb.Add("foo", "bar")
				expectedMeta.AddToLabels(&slb)
				slb.Sort()
				testutil.RequireEqual(t, expectedLabels, slb.Labels())

				lb := labels.NewBuilder(labels.FromStrings("foo", "bar"))
				expectedMeta.SetToLabels(lb)
				testutil.RequireEqual(t, expectedLabels, lb.Labels())
			}
		})
	}
}
// TestIgnoreOverriddenMetadataLabelsScratchBuilder verifies the precedence rules
// for metadata labels.
//
// PROM-39 specifies that metadata labels should be sourced primarily from the
// metadata structures. However, the original labels should be preserved IF the
// metadata structure does not set or support certain information.
// Test those cases with common label interactions.
func TestIgnoreOverriddenMetadataLabelsScratchBuilder(t *testing.T) {
	incomingLabels := labels.FromStrings(metricName, "different_name", metricType, string(model.MetricTypeSummary), metricUnit, "MB", "foo", "bar")
	for _, tc := range []struct {
		highPrioMeta   Metadata
		expectedLabels labels.Labels
	}{
		{
			expectedLabels: incomingLabels,
		},
		{
			highPrioMeta: Metadata{
				Name: "metric_total",
				Type: model.MetricTypeCounter,
				Unit: "seconds",
			},
			expectedLabels: labels.FromStrings(metricName, "metric_total", metricType, string(model.MetricTypeCounter), metricUnit, "seconds", "foo", "bar"),
		},
		{
			highPrioMeta: Metadata{
				Name: "metric_total",
				Type: model.MetricTypeCounter,
			},
			expectedLabels: labels.FromStrings(metricName, "metric_total", metricType, string(model.MetricTypeCounter), metricUnit, "MB", "foo", "bar"),
		},
		{
			highPrioMeta: Metadata{
				Type: model.MetricTypeCounter,
				Unit: "seconds",
			},
			expectedLabels: labels.FromStrings(metricName, "different_name", metricType, string(model.MetricTypeCounter), metricUnit, "seconds", "foo", "bar"),
		},
		{
			highPrioMeta: Metadata{
				Name: "metric_total",
				Type: model.MetricTypeUnknown,
				Unit: "seconds",
			},
			expectedLabels: labels.FromStrings(metricName, "metric_total", metricType, string(model.MetricTypeSummary), metricUnit, "seconds", "foo", "bar"),
		},
	} {
		t.Run(fmt.Sprintf("meta=%#v", tc.highPrioMeta), func(t *testing.T) {
			sb := labels.NewScratchBuilder(0)
			// High-priority metadata goes in first...
			tc.highPrioMeta.AddToLabels(&sb)
			// ...then the incoming labels, filtered through the wrapper so they
			// cannot collide with the non-empty metadata fields already added.
			wrapped := IgnoreOverriddenMetadataLabelsScratchBuilder{ScratchBuilder: &sb, Overwrite: tc.highPrioMeta}
			incomingLabels.Range(func(l labels.Label) {
				wrapped.Add(l.Name, l.Value)
			})
			sb.Sort()
			require.Equal(t, tc.expectedLabels, sb.Labels())
		})
	}
}

View File

@ -87,6 +87,9 @@ type Options struct {
// Option to enable the ingestion of native histograms.
EnableNativeHistogramsIngestion bool
// EnableTypeAndUnitLabels
EnableTypeAndUnitLabels bool
// Optional HTTP client options to use when scraping.
HTTPClientOptions []config_util.HTTPClientOption

View File

@ -209,6 +209,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
opts.convertClassicHistToNHCB,
options.EnableNativeHistogramsIngestion,
options.EnableCreatedTimestampZeroIngestion,
options.EnableTypeAndUnitLabels,
options.ExtraMetrics,
options.AppendMetadata,
opts.target,
@ -932,6 +933,7 @@ type scrapeLoop struct {
// Feature flagged options.
enableNativeHistogramIngestion bool
enableCTZeroIngestion bool
enableTypeAndUnitLabels bool
appender func(ctx context.Context) storage.Appender
symbolTable *labels.SymbolTable
@ -1239,6 +1241,7 @@ func newScrapeLoop(ctx context.Context,
convertClassicHistToNHCB bool,
enableNativeHistogramIngestion bool,
enableCTZeroIngestion bool,
enableTypeAndUnitLabels bool,
reportExtraMetrics bool,
appendMetadataToWAL bool,
target *Target,
@ -1296,6 +1299,7 @@ func newScrapeLoop(ctx context.Context,
convertClassicHistToNHCB: convertClassicHistToNHCB,
enableNativeHistogramIngestion: enableNativeHistogramIngestion,
enableCTZeroIngestion: enableCTZeroIngestion,
enableTypeAndUnitLabels: enableTypeAndUnitLabels,
reportExtraMetrics: reportExtraMetrics,
appendMetadataToWAL: appendMetadataToWAL,
metrics: metrics,
@ -1622,7 +1626,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
return
}
p, err := textparse.New(b, contentType, sl.fallbackScrapeProtocol, sl.alwaysScrapeClassicHist, sl.enableCTZeroIngestion, sl.symbolTable)
p, err := textparse.New(b, contentType, sl.fallbackScrapeProtocol, sl.alwaysScrapeClassicHist, sl.enableCTZeroIngestion, sl.enableTypeAndUnitLabels, sl.symbolTable)
if p == nil {
sl.l.Error(
"Failed to determine correct type of scrape target.",

View File

@ -982,6 +982,7 @@ func newBasicScrapeLoopWithFallback(t testing.TB, ctx context.Context, scraper s
false,
false,
false,
false,
true,
nil,
false,
@ -1130,6 +1131,7 @@ func TestScrapeLoopRun(t *testing.T) {
false,
false,
false,
false,
nil,
false,
scrapeMetrics,
@ -1278,6 +1280,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
false,
false,
false,
false,
nil,
false,
scrapeMetrics,
@ -2005,7 +2008,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
fakeRef := storage.SeriesRef(1)
expValue := float64(1)
metric := []byte(`metric{n="1"} 1`)
p, warning := textparse.New(metric, "text/plain", "", false, false, labels.NewSymbolTable())
p, warning := textparse.New(metric, "text/plain", "", false, false, false, labels.NewSymbolTable())
require.NotNil(t, p)
require.NoError(t, warning)

View File

@ -381,8 +381,8 @@ func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) {
}
func TestOTLPWriteHandler(t *testing.T) {
exportRequest := generateOTLPWriteRequest()
timestamp := time.Now()
exportRequest := generateOTLPWriteRequest(timestamp)
for _, testCase := range []struct {
name string
otlpCfg config.OTLPConfig
@ -516,15 +516,13 @@ func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg c
return appendable
}
func generateOTLPWriteRequest() pmetricotlp.ExportRequest {
func generateOTLPWriteRequest(timestamp time.Time) pmetricotlp.ExportRequest {
d := pmetric.NewMetrics()
// Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram
// with resource attributes: service.name="test-service", service.instance.id="test-instance", host.name="test-host"
// with metric attribute: foo.bar="baz"
timestamp := time.Now()
resourceMetric := d.ResourceMetrics().AppendEmpty()
resourceMetric.Resource().Attributes().PutStr("service.name", "test-service")
resourceMetric.Resource().Attributes().PutStr("service.instance.id", "test-instance")

View File

@ -599,10 +599,8 @@ func Intersect(its ...Postings) Postings {
if len(its) == 1 {
return its[0]
}
for _, p := range its {
if p == EmptyPostings() {
return EmptyPostings()
}
if slices.Contains(its, EmptyPostings()) {
return EmptyPostings()
}
return newIntersectPostings(its...)

View File

@ -23,11 +23,11 @@ var corsHeaders = map[string]string{
"Access-Control-Allow-Headers": "Accept, Authorization, Content-Type, Origin",
"Access-Control-Allow-Methods": "GET, POST, OPTIONS",
"Access-Control-Expose-Headers": "Date",
"Vary": "Origin",
}
// SetCORS enables cross-site script calls.
// SetCORS enables cross-origin script calls.
func SetCORS(w http.ResponseWriter, o *regexp.Regexp, r *http.Request) {
w.Header().Add("Vary", "Origin")
origin := r.Header.Get("Origin")
if origin == "" {
return

View File

@ -48,8 +48,10 @@ func TestCORSHandler(t *testing.T) {
resp, err := client.Do(req)
require.NoError(t, err, "client get failed with unexpected error")
AccessControlAllowOrigin := resp.Header.Get("Access-Control-Allow-Origin")
Vary := resp.Header.Get("Vary")
require.Equal(t, "Origin", Vary)
AccessControlAllowOrigin := resp.Header.Get("Access-Control-Allow-Origin")
require.Equal(t, dummyOrigin, AccessControlAllowOrigin, "expected Access-Control-Allow-Origin header")
// OPTIONS with bad origin
@ -62,4 +64,20 @@ func TestCORSHandler(t *testing.T) {
AccessControlAllowOrigin = resp.Header.Get("Access-Control-Allow-Origin")
require.Empty(t, AccessControlAllowOrigin, "Access-Control-Allow-Origin header should not exist but it was set")
Vary = resp.Header.Get("Vary")
require.Equal(t, "Origin", Vary)
// OPTIONS with no origin
req, err = http.NewRequest(http.MethodOptions, server.URL+"/any_path", nil)
require.NoError(t, err)
resp, err = client.Do(req)
require.NoError(t, err)
Vary = resp.Header.Get("Vary")
require.Equal(t, "Origin", Vary)
AccessControlAllowOrigin = resp.Header.Get("Access-Control-Allow-Origin")
require.Empty(t, AccessControlAllowOrigin)
}

View File

@ -208,7 +208,7 @@ Loop:
}
if l.Name == labels.MetricName {
nameSeen = true
if l.Value == lastMetricName && // We already have the name in the current MetricFamily, and we ignore nameless metrics.
if l.Value == lastMetricName && // We already have the name in the current MetricDescriptor, and we ignore nameless metrics.
lastWasHistogram == isHistogram && // The sample type matches (float vs histogram).
// If it was a histogram, the histogram type (counter vs gauge) also matches.
(!isHistogram || lastHistogramWasGauge == (s.H.CounterResetHint == histogram.GaugeType)) {
@ -220,7 +220,7 @@ Loop:
// an invalid exposition. But since the consumer of this is Prometheus, and Prometheus can
// parse it fine, we allow it and bend the rules to make federation possible in those cases.
// Need to start a new MetricFamily. Ship off the old one (if any) before
// Need to start a new MetricDescriptor. Ship off the old one (if any) before
// creating the new one.
if protMetricFam != nil {
if err := enc.Encode(protMetricFam); err != nil {
@ -309,7 +309,7 @@ Loop:
lastWasHistogram = isHistogram
protMetricFam.Metric = append(protMetricFam.Metric, protMetric)
}
// Still have to ship off the last MetricFamily, if any.
// Still have to ship off the last MetricDescriptor, if any.
if protMetricFam != nil {
if err := enc.Encode(protMetricFam); err != nil {
federationErrors.Inc()

View File

@ -392,7 +392,7 @@ func TestFederationWithNativeHistograms(t *testing.T) {
require.Equal(t, http.StatusOK, res.Code)
body, err := io.ReadAll(res.Body)
require.NoError(t, err)
p := textparse.NewProtobufParser(body, false, labels.NewSymbolTable())
p := textparse.NewProtobufParser(body, false, false, labels.NewSymbolTable())
var actVec promql.Vector
metricFamilies := 0
l := labels.Labels{}

View File

@ -1,7 +1,7 @@
{
"name": "@prometheus-io/mantine-ui",
"private": true,
"version": "0.303.0",
"version": "0.304.0",
"type": "module",
"scripts": {
"start": "vite",
@ -28,7 +28,7 @@
"@microsoft/fetch-event-source": "^2.0.1",
"@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "0.303.0",
"@prometheus-io/codemirror-promql": "0.304.0",
"@reduxjs/toolkit": "^2.7.0",
"@tabler/icons-react": "^3.31.0",
"@tanstack/react-query": "^5.74.7",

View File

@ -224,6 +224,7 @@ function App() {
leftSection={p.icon}
rightSection={<IconChevronDown style={navIconStyle} />}
px={navLinkXPadding}
onClick={(e) => e.preventDefault()}
>
Status <IconChevronRight style={navIconStyle} /> {p.title}
</Button>
@ -236,14 +237,9 @@ function App() {
element={
<Menu.Target>
<Button
component={NavLink}
to="/"
className={classes.link}
leftSection={<IconServer style={navIconStyle} />}
rightSection={<IconChevronDown style={navIconStyle} />}
onClick={(e) => {
e.preventDefault();
}}
px={navLinkXPadding}
>
Status
@ -339,8 +335,12 @@ function App() {
>
<Group gap={10} wrap="nowrap">
<img src={PrometheusLogo} height={30} />
<Text hiddenFrom="sm" fz={20}>Prometheus</Text>
<Text visibleFrom="md" fz={20}>Prometheus</Text>
<Text hiddenFrom="sm" fz={20}>
Prometheus
</Text>
<Text visibleFrom="md" fz={20}>
Prometheus
</Text>
<Text fz={20}>{agentMode && "Agent"}</Text>
</Group>
</Link>

View File

@ -37,7 +37,7 @@ type RecordingRule = {
export type Rule = AlertingRule | RecordingRule;
interface RuleGroup {
export interface RuleGroup {
name: string;
file: string;
interval: string;

View File

@ -217,12 +217,7 @@ export default function AlertsPage() {
const renderedPageItems = useMemo(
() =>
currentPageGroups.map((g) => (
<Card
shadow="xs"
withBorder
p="md"
key={`${g.file}-${g.name}`}
>
<Card shadow="xs" withBorder p="md" key={`${g.file}-${g.name}`}>
<Group mb="sm" justify="space-between">
<Group align="baseline">
<Text fz="xl" fw={600} c="var(--mantine-primary-color-filled)">
@ -460,15 +455,13 @@ export default function AlertsPage() {
</Alert>
)
)}
<Stack>
<Pagination
total={totalPageCount}
value={effectiveActivePage}
onChange={setActivePage}
hideWithOnePage
/>
{renderedPageItems}
</Stack>
<Pagination
total={totalPageCount}
value={effectiveActivePage}
onChange={setActivePage}
hideWithOnePage
/>
{renderedPageItems}
</Stack>
);
}

View File

@ -1,6 +1,7 @@
import {
Accordion,
Alert,
Anchor,
Badge,
Card,
Group,
@ -8,9 +9,9 @@ import {
rem,
Stack,
Text,
TextInput,
Tooltip,
} from "@mantine/core";
// import { useQuery } from "react-query";
import {
humanizeDurationRelative,
humanizeDuration,
@ -23,17 +24,55 @@ import {
IconInfoCircle,
IconRefresh,
IconRepeat,
IconSearch,
IconTimeline,
} from "@tabler/icons-react";
import { useSuspenseAPIQuery } from "../api/api";
import { RulesResult } from "../api/responseTypes/rules";
import { Rule, RuleGroup, RulesResult } from "../api/responseTypes/rules";
import badgeClasses from "../Badge.module.css";
import RuleDefinition from "../components/RuleDefinition";
import { badgeIconStyle } from "../styles";
import { NumberParam, useQueryParam, withDefault } from "use-query-params";
import { badgeIconStyle, inputIconStyle } from "../styles";
import {
ArrayParam,
NumberParam,
StringParam,
useQueryParam,
withDefault,
} from "use-query-params";
import { useSettings } from "../state/settingsSlice";
import { useEffect } from "react";
import { useEffect, useMemo } from "react";
import CustomInfiniteScroll from "../components/CustomInfiniteScroll";
import { useDebouncedValue, useLocalStorage } from "@mantine/hooks";
import { KVSearch } from "@nexucis/kvsearch";
import { StateMultiSelect } from "../components/StateMultiSelect";
const kvSearch = new KVSearch<Rule>({
shouldSort: true,
indexedKeys: ["name", "labels", ["labels", /.*/]],
});
type RulesPageData = {
groups: (RuleGroup & { prefilterRulesCount: number })[];
};
const buildRulesPageData = (
data: RulesResult,
search: string,
healthFilter: (string | null)[]
): RulesPageData => {
const groups = data.groups.map((group) => ({
...group,
prefilterRulesCount: group.rules.length,
rules: (search === ""
? group.rules
: kvSearch.filter(search, group.rules).map((value) => value.original)
).filter(
(r) => healthFilter.length === 0 || healthFilter.includes(r.health)
),
}));
return { groups };
};
const healthBadgeClass = (state: string) => {
switch (state) {
@ -48,18 +87,53 @@ const healthBadgeClass = (state: string) => {
}
};
// Should be defined as a constant here instead of inline as a value
// to avoid unnecessary re-renders. Otherwise the empty array has
// a different reference on each render and causes subsequent memoized
// computations to re-run as long as no health filter is selected.
const emptyHealthFilter: string[] = [];
export default function RulesPage() {
const { data } = useSuspenseAPIQuery<RulesResult>({ path: `/rules` });
const { ruleGroupsPerPage } = useSettings();
// Define URL query params.
const [healthFilter, setHealthFilter] = useQueryParam(
"health",
withDefault(ArrayParam, emptyHealthFilter)
);
const [searchFilter, setSearchFilter] = useQueryParam(
"search",
withDefault(StringParam, "")
);
const [debouncedSearch] = useDebouncedValue<string>(searchFilter.trim(), 250);
const [showEmptyGroups, setShowEmptyGroups] = useLocalStorage<boolean>({
key: "alertsPage.showEmptyGroups",
defaultValue: false,
});
const { ruleGroupsPerPage } = useSettings();
const [activePage, setActivePage] = useQueryParam(
"page",
withDefault(NumberParam, 1)
);
// If we were e.g. on page 10 and the number of total pages decreases to 5 (due
// changing the max number of items per page), go to the largest possible page.
const totalPageCount = Math.ceil(data.data.groups.length / ruleGroupsPerPage);
// Update the page data whenever the fetched data or filters change.
const rulesPageData = useMemo(
() => buildRulesPageData(data.data, debouncedSearch, healthFilter),
[data, healthFilter, debouncedSearch]
);
const shownGroups = useMemo(
() =>
showEmptyGroups
? rulesPageData.groups
: rulesPageData.groups.filter((g) => g.rules.length > 0),
[rulesPageData.groups, showEmptyGroups]
);
// If we were e.g. on page 10 and the number of total pages decreases to 5 (due to filtering
// or changing the max number of items per page), go to the largest possible page.
const totalPageCount = Math.ceil(shownGroups.length / ruleGroupsPerPage);
const effectiveActivePage = Math.max(1, Math.min(activePage, totalPageCount));
useEffect(() => {
@ -68,79 +142,91 @@ export default function RulesPage() {
}
}, [effectiveActivePage, activePage, setActivePage]);
return (
<Stack mt="xs">
{data.data.groups.length === 0 && (
<Alert title="No rule groups" icon={<IconInfoCircle />}>
No rule groups configured.
</Alert>
)}
<Pagination
total={totalPageCount}
value={effectiveActivePage}
onChange={setActivePage}
hideWithOnePage
/>
{data.data.groups
.slice(
(effectiveActivePage - 1) * ruleGroupsPerPage,
effectiveActivePage * ruleGroupsPerPage
)
.map((g) => (
<Card
shadow="xs"
withBorder
p="md"
mb="md"
key={`${g.file}-${g.name}`}
>
<Group mb="sm" justify="space-between">
<Group align="baseline">
<Text fz="xl" fw={600} c="var(--mantine-primary-color-filled)">
{g.name}
</Text>
<Text fz="sm" c="gray.6">
{g.file}
</Text>
</Group>
<Group>
<Tooltip label="Last group evaluation" withArrow>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconRefresh style={badgeIconStyle} />}
>
last run {humanizeDurationRelative(g.lastEvaluation, now())}
</Badge>
</Tooltip>
<Tooltip label="Duration of last group evaluation" withArrow>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconHourglass style={badgeIconStyle} />}
>
took {humanizeDuration(parseFloat(g.evaluationTime) * 1000)}
</Badge>
</Tooltip>
<Tooltip label="Group evaluation interval" withArrow>
<Badge
variant="transparent"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconRepeat style={badgeIconStyle} />}
>
every {humanizeDuration(parseFloat(g.interval) * 1000)}{" "}
</Badge>
</Tooltip>
</Group>
const currentPageGroups = useMemo(
() =>
shownGroups.slice(
(effectiveActivePage - 1) * ruleGroupsPerPage,
effectiveActivePage * ruleGroupsPerPage
),
[shownGroups, effectiveActivePage, ruleGroupsPerPage]
);
// We memoize the actual rendering of the page items to avoid re-rendering
// them on every state change. This is especially important when the user
// types into the search box, as the search filter changes on every keystroke,
// even before debouncing takes place (extracting the filters and results list
// into separate components would be an alternative to this, but it's kinda
// convenient to have in the same file IMO).
const renderedPageItems = useMemo(
() =>
currentPageGroups.map((g) => (
<Card shadow="xs" withBorder p="md" key={`${g.file}-${g.name}`}>
<Group mb="sm" justify="space-between">
<Group align="baseline">
<Text fz="xl" fw={600} c="var(--mantine-primary-color-filled)">
{g.name}
</Text>
<Text fz="sm" c="gray.6">
{g.file}
</Text>
</Group>
{g.rules.length === 0 && (
<Alert title="No rules" icon={<IconInfoCircle />}>
No rules in rule group.
</Alert>
)}
<Group>
<Tooltip label="Last group evaluation" withArrow>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconRefresh style={badgeIconStyle} />}
>
last run {humanizeDurationRelative(g.lastEvaluation, now())}
</Badge>
</Tooltip>
<Tooltip label="Duration of last group evaluation" withArrow>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconHourglass style={badgeIconStyle} />}
>
took {humanizeDuration(parseFloat(g.evaluationTime) * 1000)}
</Badge>
</Tooltip>
<Tooltip label="Group evaluation interval" withArrow>
<Badge
variant="transparent"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconRepeat style={badgeIconStyle} />}
>
every {humanizeDuration(parseFloat(g.interval) * 1000)}{" "}
</Badge>
</Tooltip>
</Group>
</Group>
{g.prefilterRulesCount === 0 ? (
<Alert title="No rules" icon={<IconInfoCircle />}>
No rules in this group.
<Anchor
ml="md"
fz="1em"
onClick={() => setShowEmptyGroups(false)}
>
Hide empty groups
</Anchor>
</Alert>
) : g.rules.length === 0 ? (
<Alert title="No matching rules" icon={<IconInfoCircle />}>
No rules in this group match your filter criteria (omitted{" "}
{g.prefilterRulesCount} filtered rules).
<Anchor
ml="md"
fz="1em"
onClick={() => setShowEmptyGroups(false)}
>
Hide empty groups
</Anchor>
</Alert>
) : (
<CustomInfiniteScroll
allItems={g.rules}
child={({ items }) => (
@ -248,8 +334,64 @@ export default function RulesPage() {
</Accordion>
)}
/>
</Card>
))}
)}
</Card>
)),
[currentPageGroups, setShowEmptyGroups]
);
return (
<Stack mt="xs">
<Group>
<StateMultiSelect
options={["ok", "unknown", "err"]}
optionClass={(o) =>
o === "ok"
? badgeClasses.healthOk
: o === "unknown"
? badgeClasses.healthWarn
: badgeClasses.healthErr
}
placeholder="Filter by rule health"
values={(healthFilter?.filter((v) => v !== null) as string[]) || []}
onChange={(values) => setHealthFilter(values)}
/>
<TextInput
flex={1}
leftSection={<IconSearch style={inputIconStyle} />}
placeholder="Filter by rule name or labels"
value={searchFilter || ""}
onChange={(event) =>
setSearchFilter(event.currentTarget.value || null)
}
></TextInput>
</Group>
{rulesPageData.groups.length === 0 ? (
<Alert title="No rules found" icon={<IconInfoCircle />}>
No rules found.
</Alert>
) : (
!showEmptyGroups &&
rulesPageData.groups.length !== shownGroups.length && (
<Alert
title="Hiding groups with no matching rules"
icon={<IconInfoCircle />}
>
Hiding {rulesPageData.groups.length - shownGroups.length} empty
groups due to filters or no rules.
<Anchor ml="md" fz="1em" onClick={() => setShowEmptyGroups(true)}>
Show empty groups
</Anchor>
</Alert>
)
)}
<Pagination
total={totalPageCount}
value={effectiveActivePage}
onChange={setActivePage}
hideWithOnePage
/>
{renderedPageItems}
</Stack>
);
}

View File

@ -39,6 +39,9 @@ type ScrapePool = {
targets: TargetLabels[];
active: number;
total: number;
// Can be different from "total" if the "keep_dropped_targets" setting is used
// to limit the number of dropped targets for which the server keeps details.
serverTotal: number;
};
type ScrapePools = {
@ -62,11 +65,11 @@ const droppedTargetKVSearch = new KVSearch<DroppedTarget>({
const buildPoolsData = (
poolNames: string[],
activeTargets: Target[],
droppedTargets: DroppedTarget[],
targetsData: TargetsResult,
search: string,
stateFilter: (string | null)[]
): ScrapePools => {
const { activeTargets, droppedTargets, droppedTargetCounts } = targetsData;
const pools: ScrapePools = {};
for (const pn of poolNames) {
@ -74,6 +77,7 @@ const buildPoolsData = (
targets: [],
active: 0,
total: 0,
serverTotal: droppedTargetCounts[pn] || 0,
};
}
@ -88,6 +92,7 @@ const buildPoolsData = (
pool.active++;
pool.total++;
pool.serverTotal++;
}
const filteredActiveTargets =
@ -160,9 +165,7 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
// Based on the selected pool (if any), load the list of targets.
const {
data: {
data: { activeTargets, droppedTargets },
},
data: { data: targetsData },
} = useSuspenseAPIQuery<TargetsResult>({
path: `/targets`,
params: {
@ -180,19 +183,11 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
() =>
buildPoolsData(
selectedPool ? [selectedPool] : poolNames,
activeTargets,
droppedTargets,
targetsData,
debouncedSearch,
stateFilter
),
[
selectedPool,
poolNames,
activeTargets,
droppedTargets,
debouncedSearch,
stateFilter,
]
[selectedPool, poolNames, targetsData, debouncedSearch, stateFilter]
);
const allPoolNames = Object.keys(allPools);
const shownPoolNames = showEmptyPools
@ -250,22 +245,23 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
<Text>{poolName}</Text>
<Group gap="xs">
<Text c="gray.6">
{pool.active} / {pool.total}
{pool.active} / {pool.serverTotal}
</Text>
<RingProgress
size={25}
thickness={5}
sections={
pool.total === 0
pool.serverTotal === 0
? []
: [
{
value: (pool.active / pool.total) * 100,
value: (pool.active / pool.serverTotal) * 100,
color: "green.4",
},
{
value:
((pool.total - pool.active) / pool.total) *
((pool.serverTotal - pool.active) /
pool.serverTotal) *
100,
color: "blue.6",
},
@ -276,6 +272,19 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
</Group>
</Accordion.Control>
<Accordion.Panel>
{pool.total !== pool.serverTotal && (
<Alert
title="Only showing partial dropped targets"
icon={<IconInfoCircle />}
color="yellow"
mb="sm"
>
{pool.serverTotal - pool.total} further dropped targets are
not shown here because the server only kept details on a
maximum of {pool.total - pool.active} dropped targets (
<code>keep_dropped_targets</code> configuration setting).
</Alert>
)}
{pool.total === 0 ? (
<Alert title="No targets" icon={<IconInfoCircle />}>
No targets in this scrape pool.

View File

@ -19,7 +19,7 @@ import {
} from "@tabler/icons-react";
import { useSuspenseAPIQuery } from "../../api/api";
import { Target, TargetsResult } from "../../api/responseTypes/targets";
import React, { FC, useMemo } from "react";
import React, { FC, memo, useMemo } from "react";
import {
humanizeDurationRelative,
humanizeDuration,
@ -37,7 +37,6 @@ import CustomInfiniteScroll from "../../components/CustomInfiniteScroll";
import badgeClasses from "../../Badge.module.css";
import panelClasses from "../../Panel.module.css";
import TargetLabels from "./TargetLabels";
import { useDebouncedValue } from "@mantine/hooks";
import { targetPoolDisplayLimit } from "./TargetsPage";
import { badgeIconStyle } from "../../styles";
@ -145,278 +144,280 @@ type ScrapePoolListProp = {
searchFilter: string;
};
const ScrapePoolList: FC<ScrapePoolListProp> = ({
poolNames,
selectedPool,
healthFilter,
searchFilter,
}) => {
// Based on the selected pool (if any), load the list of targets.
const {
data: {
data: { activeTargets },
},
} = useSuspenseAPIQuery<TargetsResult>({
path: `/targets`,
params: {
state: "active",
scrapePool: selectedPool === null ? "" : selectedPool,
},
});
const ScrapePoolList: FC<ScrapePoolListProp> = memo(
({ poolNames, selectedPool, healthFilter, searchFilter }) => {
// Based on the selected pool (if any), load the list of targets.
const {
data: {
data: { activeTargets },
},
} = useSuspenseAPIQuery<TargetsResult>({
path: `/targets`,
params: {
state: "active",
scrapePool: selectedPool === null ? "" : selectedPool,
},
});
const dispatch = useAppDispatch();
const [showEmptyPools, setShowEmptyPools] = useLocalStorage<boolean>({
key: "targetsPage.showEmptyPools",
defaultValue: false,
});
const dispatch = useAppDispatch();
const [showEmptyPools, setShowEmptyPools] = useLocalStorage<boolean>({
key: "targetsPage.showEmptyPools",
defaultValue: false,
});
const { collapsedPools, showLimitAlert } = useAppSelector(
(state) => state.targetsPage
);
const { collapsedPools, showLimitAlert } = useAppSelector(
(state) => state.targetsPage
);
const [debouncedSearch] = useDebouncedValue<string>(searchFilter.trim(), 250);
const allPools = useMemo(
() =>
buildPoolsData(
selectedPool ? [selectedPool] : poolNames,
activeTargets,
searchFilter,
healthFilter
),
[selectedPool, poolNames, activeTargets, searchFilter, healthFilter]
);
const allPools = useMemo(
() =>
buildPoolsData(
selectedPool ? [selectedPool] : poolNames,
activeTargets,
debouncedSearch,
healthFilter
),
[selectedPool, poolNames, activeTargets, debouncedSearch, healthFilter]
);
const allPoolNames = Object.keys(allPools);
const shownPoolNames = showEmptyPools
? allPoolNames
: allPoolNames.filter((pn) => allPools[pn].targets.length !== 0);
const allPoolNames = Object.keys(allPools);
const shownPoolNames = showEmptyPools
? allPoolNames
: allPoolNames.filter((pn) => allPools[pn].targets.length !== 0);
return (
<Stack>
{allPoolNames.length === 0 ? (
<Alert title="No scrape pools found" icon={<IconInfoCircle />}>
No scrape pools found.
</Alert>
) : (
!showEmptyPools &&
allPoolNames.length !== shownPoolNames.length && (
<Alert
title="Hiding pools with no matching targets"
icon={<IconInfoCircle />}
>
Hiding {allPoolNames.length - shownPoolNames.length} empty pools due
to filters or no targets.
<Anchor ml="md" fz="1em" onClick={() => setShowEmptyPools(true)}>
Show empty pools
</Anchor>
return (
<Stack>
{allPoolNames.length === 0 ? (
<Alert title="No scrape pools found" icon={<IconInfoCircle />}>
No scrape pools found.
</Alert>
)
)}
{showLimitAlert && (
<Alert
title="Found many pools, showing only one"
icon={<IconInfoCircle />}
withCloseButton
onClose={() => dispatch(setShowLimitAlert(false))}
>
There are more than {targetPoolDisplayLimit} scrape pools. Showing
only the first one. Use the dropdown to select a different pool.
</Alert>
)}
<Accordion
multiple
variant="separated"
value={allPoolNames.filter((p) => !collapsedPools.includes(p))}
onChange={(value) =>
dispatch(
setCollapsedPools(allPoolNames.filter((p) => !value.includes(p)))
)
}
>
{shownPoolNames.map((poolName) => {
const pool = allPools[poolName];
return (
<Accordion.Item
key={poolName}
value={poolName}
className={poolPanelHealthClass(pool)}
) : (
!showEmptyPools &&
allPoolNames.length !== shownPoolNames.length && (
<Alert
title="Hiding pools with no matching targets"
icon={<IconInfoCircle />}
>
<Accordion.Control>
<Group wrap="nowrap" justify="space-between" mr="lg">
<Text>{poolName}</Text>
<Group gap="xs">
<Text c="gray.6">
{pool.upCount} / {pool.count} up
</Text>
<RingProgress
size={25}
thickness={5}
sections={
pool.count === 0
? []
: [
{
value: (pool.upCount / pool.count) * 100,
color: "green.4",
},
{
value: (pool.unknownCount / pool.count) * 100,
color: "gray.4",
},
{
value: (pool.downCount / pool.count) * 100,
color: "red.5",
},
]
}
/>
Hiding {allPoolNames.length - shownPoolNames.length} empty pools
due to filters or no targets.
<Anchor ml="md" fz="1em" onClick={() => setShowEmptyPools(true)}>
Show empty pools
</Anchor>
</Alert>
)
)}
{showLimitAlert && (
<Alert
title="Found many pools, showing only one"
icon={<IconInfoCircle />}
withCloseButton
onClose={() => dispatch(setShowLimitAlert(false))}
>
There are more than {targetPoolDisplayLimit} scrape pools. Showing
only the first one. Use the dropdown to select a different pool.
</Alert>
)}
<Accordion
multiple
variant="separated"
value={allPoolNames.filter((p) => !collapsedPools.includes(p))}
onChange={(value) =>
dispatch(
setCollapsedPools(allPoolNames.filter((p) => !value.includes(p)))
)
}
>
{shownPoolNames.map((poolName) => {
const pool = allPools[poolName];
return (
<Accordion.Item
key={poolName}
value={poolName}
className={poolPanelHealthClass(pool)}
>
<Accordion.Control>
<Group wrap="nowrap" justify="space-between" mr="lg">
<Text>{poolName}</Text>
<Group gap="xs">
<Text c="gray.6">
{pool.upCount} / {pool.count} up
</Text>
<RingProgress
size={25}
thickness={5}
sections={
pool.count === 0
? []
: [
{
value: (pool.upCount / pool.count) * 100,
color: "green.4",
},
{
value: (pool.unknownCount / pool.count) * 100,
color: "gray.4",
},
{
value: (pool.downCount / pool.count) * 100,
color: "red.5",
},
]
}
/>
</Group>
</Group>
</Group>
</Accordion.Control>
<Accordion.Panel>
{pool.count === 0 ? (
<Alert title="No targets" icon={<IconInfoCircle />}>
No active targets in this scrape pool.
<Anchor
ml="md"
fz="1em"
onClick={() => setShowEmptyPools(false)}
</Accordion.Control>
<Accordion.Panel>
{pool.count === 0 ? (
<Alert title="No targets" icon={<IconInfoCircle />}>
No active targets in this scrape pool.
<Anchor
ml="md"
fz="1em"
onClick={() => setShowEmptyPools(false)}
>
Hide empty pools
</Anchor>
</Alert>
) : pool.targets.length === 0 ? (
<Alert
title="No matching targets"
icon={<IconInfoCircle />}
>
Hide empty pools
</Anchor>
</Alert>
) : pool.targets.length === 0 ? (
<Alert title="No matching targets" icon={<IconInfoCircle />}>
No targets in this pool match your filter criteria (omitted{" "}
{pool.count} filtered targets).
<Anchor
ml="md"
fz="1em"
onClick={() => setShowEmptyPools(false)}
>
Hide empty pools
</Anchor>
</Alert>
) : (
<CustomInfiniteScroll
allItems={pool.targets}
child={({ items }) => (
<Table>
<Table.Thead>
<Table.Tr>
<Table.Th w="25%">Endpoint</Table.Th>
<Table.Th>Labels</Table.Th>
<Table.Th w={230}>Last scrape</Table.Th>
<Table.Th w={100}>State</Table.Th>
</Table.Tr>
</Table.Thead>
<Table.Tbody>
{items.map((target, i) => (
// TODO: Find a stable and definitely unique key.
<React.Fragment key={i}>
<Table.Tr
style={{
borderBottom: target.lastError
? "none"
: undefined,
}}
>
<Table.Td valign="top">
<EndpointLink
endpoint={target.scrapeUrl}
globalUrl={target.globalUrl}
/>
</Table.Td>
No targets in this pool match your filter criteria
(omitted {pool.count} filtered targets).
<Anchor
ml="md"
fz="1em"
onClick={() => setShowEmptyPools(false)}
>
Hide empty pools
</Anchor>
</Alert>
) : (
<CustomInfiniteScroll
allItems={pool.targets}
child={({ items }) => (
<Table>
<Table.Thead>
<Table.Tr>
<Table.Th w="25%">Endpoint</Table.Th>
<Table.Th>Labels</Table.Th>
<Table.Th w={230}>Last scrape</Table.Th>
<Table.Th w={100}>State</Table.Th>
</Table.Tr>
</Table.Thead>
<Table.Tbody>
{items.map((target, i) => (
// TODO: Find a stable and definitely unique key.
<React.Fragment key={i}>
<Table.Tr
style={{
borderBottom: target.lastError
? "none"
: undefined,
}}
>
<Table.Td valign="top">
<EndpointLink
endpoint={target.scrapeUrl}
globalUrl={target.globalUrl}
/>
</Table.Td>
<Table.Td valign="top">
<TargetLabels
labels={target.labels}
discoveredLabels={target.discoveredLabels}
/>
</Table.Td>
<Table.Td valign="top">
<Group gap="xs" wrap="wrap">
<Tooltip
label="Last target scrape"
withArrow
>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{
label: { textTransform: "none" },
}}
leftSection={
<IconRefresh style={badgeIconStyle} />
}
<Table.Td valign="top">
<TargetLabels
labels={target.labels}
discoveredLabels={target.discoveredLabels}
/>
</Table.Td>
<Table.Td valign="top">
<Group gap="xs" wrap="wrap">
<Tooltip
label="Last target scrape"
withArrow
>
{humanizeDurationRelative(
target.lastScrape,
now()
)}
</Badge>
</Tooltip>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{
label: { textTransform: "none" },
}}
leftSection={
<IconRefresh
style={badgeIconStyle}
/>
}
>
{humanizeDurationRelative(
target.lastScrape,
now()
)}
</Badge>
</Tooltip>
<Tooltip
label="Duration of last target scrape"
withArrow
>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{
label: { textTransform: "none" },
}}
leftSection={
<IconHourglass
style={badgeIconStyle}
/>
}
<Tooltip
label="Duration of last target scrape"
withArrow
>
{humanizeDuration(
target.lastScrapeDuration * 1000
)}
</Badge>
</Tooltip>
</Group>
</Table.Td>
<Table.Td valign="top">
<Badge
className={healthBadgeClass(target.health)}
>
{target.health}
</Badge>
</Table.Td>
</Table.Tr>
{target.lastError && (
<Table.Tr>
<Table.Td colSpan={5} valign="top">
<Alert
color="red"
mb="sm"
icon={<IconAlertTriangle />}
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{
label: { textTransform: "none" },
}}
leftSection={
<IconHourglass
style={badgeIconStyle}
/>
}
>
{humanizeDuration(
target.lastScrapeDuration * 1000
)}
</Badge>
</Tooltip>
</Group>
</Table.Td>
<Table.Td valign="top">
<Badge
className={healthBadgeClass(
target.health
)}
>
<strong>Error scraping target:</strong>{" "}
{target.lastError}
</Alert>
{target.health}
</Badge>
</Table.Td>
</Table.Tr>
)}
</React.Fragment>
))}
</Table.Tbody>
</Table>
)}
/>
)}
</Accordion.Panel>
</Accordion.Item>
);
})}
</Accordion>
</Stack>
);
};
{target.lastError && (
<Table.Tr>
<Table.Td colSpan={5} valign="top">
<Alert
color="red"
mb="sm"
icon={<IconAlertTriangle />}
>
<strong>Error scraping target:</strong>{" "}
{target.lastError}
</Alert>
</Table.Td>
</Table.Tr>
)}
</React.Fragment>
))}
</Table.Tbody>
</Table>
)}
/>
)}
</Accordion.Panel>
</Accordion.Item>
);
})}
</Accordion>
</Stack>
);
}
);
export default ScrapePoolList;

View File

@ -30,9 +30,16 @@ import ScrapePoolList from "./ScrapePoolsList";
import { useSuspenseAPIQuery } from "../../api/api";
import { ScrapePoolsResult } from "../../api/responseTypes/scrapePools";
import { expandIconStyle, inputIconStyle } from "../../styles";
import { useDebouncedValue } from "@mantine/hooks";
export const targetPoolDisplayLimit = 20;
// Should be defined as a constant here instead of inline as a value
// to avoid unnecessary re-renders. Otherwise the empty array has
// a different reference on each render and causes subsequent memoized
// computations to re-run as long as no state filter is selected.
const emptyHealthFilter: string[] = [];
export default function TargetsPage() {
// Load the list of all available scrape pools.
const {
@ -48,12 +55,13 @@ export default function TargetsPage() {
const [scrapePool, setScrapePool] = useQueryParam("pool", StringParam);
const [healthFilter, setHealthFilter] = useQueryParam(
"health",
withDefault(ArrayParam, [])
withDefault(ArrayParam, emptyHealthFilter)
);
const [searchFilter, setSearchFilter] = useQueryParam(
"search",
withDefault(StringParam, "")
);
const [debouncedSearch] = useDebouncedValue<string>(searchFilter.trim(), 250);
const { collapsedPools, showLimitAlert } = useAppSelector(
(state) => state.targetsPage
@ -147,7 +155,7 @@ export default function TargetsPage() {
poolNames={scrapePools}
selectedPool={(limited && scrapePools[0]) || scrapePool || null}
healthFilter={healthFilter as string[]}
searchFilter={searchFilter}
searchFilter={debouncedSearch}
/>
</Suspense>
</ErrorBoundary>

View File

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
"version": "0.303.0",
"version": "0.304.0",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
@ -29,7 +29,7 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
"@prometheus-io/lezer-promql": "0.303.0",
"@prometheus-io/lezer-promql": "0.304.0",
"lru-cache": "^11.1.0"
},
"devDependencies": {

View File

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
"version": "0.303.0",
"version": "0.304.0",
"description": "lezer-based PromQL grammar",
"main": "dist/index.cjs",
"type": "module",

View File

@ -24,7 +24,7 @@
},
"mantine-ui": {
"name": "@prometheus-io/mantine-ui",
"version": "0.303.0",
"version": "0.304.0",
"dependencies": {
"@codemirror/autocomplete": "^6.18.6",
"@codemirror/language": "^6.11.0",
@ -42,7 +42,7 @@
"@microsoft/fetch-event-source": "^2.0.1",
"@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "0.303.0",
"@prometheus-io/codemirror-promql": "0.304.0",
"@reduxjs/toolkit": "^2.7.0",
"@tabler/icons-react": "^3.31.0",
"@tanstack/react-query": "^5.74.7",
@ -189,10 +189,10 @@
},
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
"version": "0.303.0",
"version": "0.304.0",
"license": "Apache-2.0",
"dependencies": {
"@prometheus-io/lezer-promql": "0.303.0",
"@prometheus-io/lezer-promql": "0.304.0",
"lru-cache": "^11.1.0"
},
"devDependencies": {
@ -222,7 +222,7 @@
},
"module/lezer-promql": {
"name": "@prometheus-io/lezer-promql",
"version": "0.303.0",
"version": "0.304.0",
"license": "Apache-2.0",
"devDependencies": {
"@lezer/generator": "^1.7.3",

View File

@ -1,7 +1,7 @@
{
"name": "prometheus-io",
"description": "Monorepo for the Prometheus UI",
"version": "0.303.0",
"version": "0.304.0",
"private": true,
"scripts": {
"build": "bash build_ui.sh --all",