Merge branch 'prometheus:main' into fix/functions.mdAndStorage.md

This commit is contained in:
ADITYA TIWARI 2025-09-27 04:38:45 +05:30 committed by GitHub
commit ecdf459e9f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
72 changed files with 3858 additions and 1302 deletions

View File

@ -226,24 +226,24 @@ jobs:
- name: Install snmp_exporter/generator dependencies - name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter' if: github.repository == 'prometheus/snmp_exporter'
- name: Get golangci-lint version
id: golangci-lint-version
run: echo "version=$(make print-golangci-lint-version)" >> $GITHUB_OUTPUT
- name: Lint with stringlabels - name: Lint with stringlabels
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
with: with:
args: --verbose --build-tags=stringlabels args: --verbose --build-tags=stringlabels
# Make sure to sync this with Makefile.common and scripts/golangci-lint.yml. version: ${{ steps.golangci-lint-version.outputs.version }}
version: v2.2.1
- name: Lint with slicelabels - name: Lint with slicelabels
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
with: with:
args: --verbose --build-tags=slicelabels args: --verbose --build-tags=slicelabels
# Make sure to sync this with Makefile.common and scripts/golangci-lint.yml. version: ${{ steps.golangci-lint-version.outputs.version }}
version: v2.2.1
- name: Lint with dedupelabels - name: Lint with dedupelabels
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
with: with:
args: --verbose --build-tags=dedupelabels args: --verbose --build-tags=dedupelabels
# Make sure to sync this with Makefile.common and scripts/golangci-lint.yml. version: ${{ steps.golangci-lint-version.outputs.version }}
version: v2.2.1
fuzzing: fuzzing:
uses: ./.github/workflows/fuzzing.yml uses: ./.github/workflows/fuzzing.yml
if: github.event_name == 'pull_request' if: github.event_name == 'pull_request'

View File

@ -74,6 +74,9 @@ linters:
- linters: - linters:
- godot - godot
source: "^// ===" source: "^// ==="
- linters:
- staticcheck
text: 'v1\.(Endpoints|EndpointSubset|EndpointPort|EndpointAddress) is deprecated: This API is deprecated in v1.33+'
warn-unused: true warn-unused: true
settings: settings:
depguard: depguard:

View File

@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT := SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT := GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?= GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v2.2.1 GOLANGCI_LINT_VERSION ?= v2.4.0
GOLANGCI_FMT_OPTS ?= GOLANGCI_FMT_OPTS ?=
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different. # windows isn't included here because of the path separator being different.
@ -266,6 +266,10 @@ $(GOLANGCI_LINT):
| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
endif endif
.PHONY: common-print-golangci-lint-version
common-print-golangci-lint-version:
@echo $(GOLANGCI_LINT_VERSION)
.PHONY: precheck .PHONY: precheck
precheck:: precheck::

View File

@ -16,7 +16,8 @@ Please see [the v2.55 RELEASE.md](https://github.com/prometheus/prometheus/blob/
| v3.4 | 2025-04-29 | Jan-Otto Kröpke (Github: @jkroepke)| | v3.4 | 2025-04-29 | Jan-Otto Kröpke (Github: @jkroepke)|
| v3.5 LTS | 2025-06-03 | Bryan Boreham (GitHub: @bboreham) | | v3.5 LTS | 2025-06-03 | Bryan Boreham (GitHub: @bboreham) |
| v3.6 | 2025-08-01 | Ayoub Mrini (Github: @machine424) | | v3.6 | 2025-08-01 | Ayoub Mrini (Github: @machine424) |
| v3.7 | 2025-09-15 | **volunteer welcome** | | v3.7 | 2025-09-25 | Arthur Sens and George Krajcsovits (Github: @ArthurSens and @krajorama)|
| v3.8 | 2025-11-06 | **volunteer welcome** |
If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.

View File

@ -275,6 +275,9 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
case "promql-delayed-name-removal": case "promql-delayed-name-removal":
c.promqlEnableDelayedNameRemoval = true c.promqlEnableDelayedNameRemoval = true
logger.Info("Experimental PromQL delayed name removal enabled.") logger.Info("Experimental PromQL delayed name removal enabled.")
case "promql-extended-range-selectors":
parser.EnableExtendedRangeSelectors = true
logger.Info("Experimental PromQL extended range selectors enabled.")
case "": case "":
continue continue
case "old-ui": case "old-ui":
@ -561,7 +564,7 @@ func main() {
a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io, promql-extended-range-selectors. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList) Default("").StringsVar(&cfg.featureList)
a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)

View File

@ -32,6 +32,7 @@ import (
) )
// Endpoints discovers new endpoint targets. // Endpoints discovers new endpoint targets.
// Deprecated: The Endpoints API is deprecated starting in K8s v1.33+. Use EndpointSlice.
type Endpoints struct { type Endpoints struct {
logger *slog.Logger logger *slog.Logger
@ -47,11 +48,11 @@ type Endpoints struct {
endpointsStore cache.Store endpointsStore cache.Store
serviceStore cache.Store serviceStore cache.Store
queue *workqueue.Type queue *workqueue.Typed[string]
} }
// NewEndpoints returns a new endpoints discovery. // NewEndpoints returns a new endpoints discovery.
// Endpoints API is deprecated in k8s v1.33+, but we should still support it. // Deprecated: The Endpoints API is deprecated starting in K8s v1.33+. Use NewEndpointSlice.
func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node, namespace cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints { func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node, namespace cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints {
if l == nil { if l == nil {
l = promslog.NewNopLogger() l = promslog.NewNopLogger()
@ -79,7 +80,9 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node,
withNodeMetadata: node != nil, withNodeMetadata: node != nil,
namespaceInf: namespace, namespaceInf: namespace,
withNamespaceMetadata: namespace != nil, withNamespaceMetadata: namespace != nil,
queue: workqueue.NewNamed(RoleEndpoint.String()), queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
Name: RoleEndpoint.String(),
}),
} }
_, err := e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ _, err := e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -272,12 +275,11 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
} }
func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
keyObj, quit := e.queue.Get() key, quit := e.queue.Get()
if quit { if quit {
return false return false
} }
defer e.queue.Done(keyObj) defer e.queue.Done(key)
key := keyObj.(string)
namespace, name, err := cache.SplitMetaNamespaceKey(key) namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil { if err != nil {

View File

@ -50,7 +50,7 @@ type EndpointSlice struct {
endpointSliceStore cache.Store endpointSliceStore cache.Store
serviceStore cache.Store serviceStore cache.Store
queue *workqueue.Type queue *workqueue.Typed[string]
} }
// NewEndpointSlice returns a new endpointslice discovery. // NewEndpointSlice returns a new endpointslice discovery.
@ -79,7 +79,9 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
withNodeMetadata: node != nil, withNodeMetadata: node != nil,
namespaceInf: namespace, namespaceInf: namespace,
withNamespaceMetadata: namespace != nil, withNamespaceMetadata: namespace != nil,
queue: workqueue.NewNamed(RoleEndpointSlice.String()), queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
Name: RoleEndpointSlice.String(),
}),
} }
_, err := e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ _, err := e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -236,12 +238,11 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group)
} }
func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
keyObj, quit := e.queue.Get() key, quit := e.queue.Get()
if quit { if quit {
return false return false
} }
defer e.queue.Done(keyObj) defer e.queue.Done(key)
key := keyObj.(string)
namespace, name, err := cache.SplitMetaNamespaceKey(key) namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil { if err != nil {

View File

@ -35,7 +35,7 @@ type Ingress struct {
logger *slog.Logger logger *slog.Logger
informer cache.SharedIndexInformer informer cache.SharedIndexInformer
store cache.Store store cache.Store
queue *workqueue.Type queue *workqueue.Typed[string]
namespaceInf cache.SharedInformer namespaceInf cache.SharedInformer
withNamespaceMetadata bool withNamespaceMetadata bool
} }
@ -50,7 +50,9 @@ func NewIngress(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
logger: l, logger: l,
informer: inf, informer: inf,
store: inf.GetStore(), store: inf.GetStore(),
queue: workqueue.NewNamed(RoleIngress.String()), queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
Name: RoleIngress.String(),
}),
namespaceInf: namespace, namespaceInf: namespace,
withNamespaceMetadata: namespace != nil, withNamespaceMetadata: namespace != nil,
} }
@ -137,12 +139,11 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
} }
func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
keyObj, quit := i.queue.Get() key, quit := i.queue.Get()
if quit { if quit {
return false return false
} }
defer i.queue.Done(keyObj) defer i.queue.Done(key)
key := keyObj.(string)
namespace, name, err := cache.SplitMetaNamespaceKey(key) namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil { if err != nil {

View File

@ -387,12 +387,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
var informer cache.SharedIndexInformer var informer cache.SharedIndexInformer
e := d.client.DiscoveryV1().EndpointSlices(namespace) e := d.client.DiscoveryV1().EndpointSlices(namespace)
elw := &cache.ListWatch{ elw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.endpointslice.field options.FieldSelector = d.selectors.endpointslice.field
options.LabelSelector = d.selectors.endpointslice.label options.LabelSelector = d.selectors.endpointslice.label
return e.List(ctx, options) return e.List(ctx, options)
}, },
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.endpointslice.field options.FieldSelector = d.selectors.endpointslice.field
options.LabelSelector = d.selectors.endpointslice.label options.LabelSelector = d.selectors.endpointslice.label
return e.Watch(ctx, options) return e.Watch(ctx, options)
@ -402,12 +402,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
s := d.client.CoreV1().Services(namespace) s := d.client.CoreV1().Services(namespace)
slw := &cache.ListWatch{ slw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.service.field options.FieldSelector = d.selectors.service.field
options.LabelSelector = d.selectors.service.label options.LabelSelector = d.selectors.service.label
return s.List(ctx, options) return s.List(ctx, options)
}, },
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.service.field options.FieldSelector = d.selectors.service.field
options.LabelSelector = d.selectors.service.label options.LabelSelector = d.selectors.service.label
return s.Watch(ctx, options) return s.Watch(ctx, options)
@ -415,12 +415,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
} }
p := d.client.CoreV1().Pods(namespace) p := d.client.CoreV1().Pods(namespace)
plw := &cache.ListWatch{ plw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.pod.field options.FieldSelector = d.selectors.pod.field
options.LabelSelector = d.selectors.pod.label options.LabelSelector = d.selectors.pod.label
return p.List(ctx, options) return p.List(ctx, options)
}, },
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.pod.field options.FieldSelector = d.selectors.pod.field
options.LabelSelector = d.selectors.pod.label options.LabelSelector = d.selectors.pod.label
return p.Watch(ctx, options) return p.Watch(ctx, options)
@ -454,12 +454,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
for _, namespace := range namespaces { for _, namespace := range namespaces {
e := d.client.CoreV1().Endpoints(namespace) e := d.client.CoreV1().Endpoints(namespace)
elw := &cache.ListWatch{ elw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.endpoints.field options.FieldSelector = d.selectors.endpoints.field
options.LabelSelector = d.selectors.endpoints.label options.LabelSelector = d.selectors.endpoints.label
return e.List(ctx, options) return e.List(ctx, options)
}, },
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.endpoints.field options.FieldSelector = d.selectors.endpoints.field
options.LabelSelector = d.selectors.endpoints.label options.LabelSelector = d.selectors.endpoints.label
return e.Watch(ctx, options) return e.Watch(ctx, options)
@ -467,12 +467,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
} }
s := d.client.CoreV1().Services(namespace) s := d.client.CoreV1().Services(namespace)
slw := &cache.ListWatch{ slw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.service.field options.FieldSelector = d.selectors.service.field
options.LabelSelector = d.selectors.service.label options.LabelSelector = d.selectors.service.label
return s.List(ctx, options) return s.List(ctx, options)
}, },
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.service.field options.FieldSelector = d.selectors.service.field
options.LabelSelector = d.selectors.service.label options.LabelSelector = d.selectors.service.label
return s.Watch(ctx, options) return s.Watch(ctx, options)
@ -480,12 +480,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
} }
p := d.client.CoreV1().Pods(namespace) p := d.client.CoreV1().Pods(namespace)
plw := &cache.ListWatch{ plw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.pod.field options.FieldSelector = d.selectors.pod.field
options.LabelSelector = d.selectors.pod.label options.LabelSelector = d.selectors.pod.label
return p.List(ctx, options) return p.List(ctx, options)
}, },
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.pod.field options.FieldSelector = d.selectors.pod.field
options.LabelSelector = d.selectors.pod.label options.LabelSelector = d.selectors.pod.label
return p.Watch(ctx, options) return p.Watch(ctx, options)
@ -531,12 +531,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
for _, namespace := range namespaces { for _, namespace := range namespaces {
p := d.client.CoreV1().Pods(namespace) p := d.client.CoreV1().Pods(namespace)
plw := &cache.ListWatch{ plw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.pod.field options.FieldSelector = d.selectors.pod.field
options.LabelSelector = d.selectors.pod.label options.LabelSelector = d.selectors.pod.label
return p.List(ctx, options) return p.List(ctx, options)
}, },
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.pod.field options.FieldSelector = d.selectors.pod.field
options.LabelSelector = d.selectors.pod.label options.LabelSelector = d.selectors.pod.label
return p.Watch(ctx, options) return p.Watch(ctx, options)
@ -562,12 +562,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
for _, namespace := range namespaces { for _, namespace := range namespaces {
s := d.client.CoreV1().Services(namespace) s := d.client.CoreV1().Services(namespace)
slw := &cache.ListWatch{ slw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.service.field options.FieldSelector = d.selectors.service.field
options.LabelSelector = d.selectors.service.label options.LabelSelector = d.selectors.service.label
return s.List(ctx, options) return s.List(ctx, options)
}, },
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.service.field options.FieldSelector = d.selectors.service.field
options.LabelSelector = d.selectors.service.label options.LabelSelector = d.selectors.service.label
return s.Watch(ctx, options) return s.Watch(ctx, options)
@ -592,12 +592,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
for _, namespace := range namespaces { for _, namespace := range namespaces {
i := d.client.NetworkingV1().Ingresses(namespace) i := d.client.NetworkingV1().Ingresses(namespace)
ilw := &cache.ListWatch{ ilw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.ingress.field options.FieldSelector = d.selectors.ingress.field
options.LabelSelector = d.selectors.ingress.label options.LabelSelector = d.selectors.ingress.label
return i.List(ctx, options) return i.List(ctx, options)
}, },
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.ingress.field options.FieldSelector = d.selectors.ingress.field
options.LabelSelector = d.selectors.ingress.label options.LabelSelector = d.selectors.ingress.label
return i.Watch(ctx, options) return i.Watch(ctx, options)
@ -666,14 +666,14 @@ func retryOnError(ctx context.Context, interval time.Duration, f func() error) (
} }
} }
func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer { func (d *Discovery) newNodeInformer(_ context.Context) cache.SharedInformer {
nlw := &cache.ListWatch{ nlw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.node.field options.FieldSelector = d.selectors.node.field
options.LabelSelector = d.selectors.node.label options.LabelSelector = d.selectors.node.label
return d.client.CoreV1().Nodes().List(ctx, options) return d.client.CoreV1().Nodes().List(ctx, options)
}, },
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.node.field options.FieldSelector = d.selectors.node.field
options.LabelSelector = d.selectors.node.label options.LabelSelector = d.selectors.node.label
return d.client.CoreV1().Nodes().Watch(ctx, options) return d.client.CoreV1().Nodes().Watch(ctx, options)
@ -682,13 +682,13 @@ func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer {
return d.mustNewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled) return d.mustNewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled)
} }
func (d *Discovery) newNamespaceInformer(ctx context.Context) cache.SharedInformer { func (d *Discovery) newNamespaceInformer(_ context.Context) cache.SharedInformer {
// We don't filter on NamespaceDiscovery. // We don't filter on NamespaceDiscovery.
nlw := &cache.ListWatch{ nlw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
return d.client.CoreV1().Namespaces().List(ctx, options) return d.client.CoreV1().Namespaces().List(ctx, options)
}, },
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
return d.client.CoreV1().Namespaces().Watch(ctx, options) return d.client.CoreV1().Namespaces().Watch(ctx, options)
}, },
} }
@ -832,16 +832,16 @@ func (d *Discovery) newIndexedIngressesInformer(ilw *cache.ListWatch) cache.Shar
return d.mustNewSharedIndexInformer(ilw, &networkv1.Ingress{}, resyncDisabled, indexers) return d.mustNewSharedIndexInformer(ilw, &networkv1.Ingress{}, resyncDisabled, indexers)
} }
func (d *Discovery) informerWatchErrorHandler(r *cache.Reflector, err error) { func (d *Discovery) informerWatchErrorHandler(ctx context.Context, r *cache.Reflector, err error) {
d.metrics.failuresCount.Inc() d.metrics.failuresCount.Inc()
cache.DefaultWatchErrorHandler(r, err) cache.DefaultWatchErrorHandler(ctx, r, err)
} }
func (d *Discovery) mustNewSharedInformer(lw cache.ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) cache.SharedInformer { func (d *Discovery) mustNewSharedInformer(lw cache.ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) cache.SharedInformer {
informer := cache.NewSharedInformer(lw, exampleObject, defaultEventHandlerResyncPeriod) informer := cache.NewSharedInformer(lw, exampleObject, defaultEventHandlerResyncPeriod)
// Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand. // Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand.
// Such a scenario would suggest an incorrect use of the API, thus the panic. // Such a scenario would suggest an incorrect use of the API, thus the panic.
if err := informer.SetWatchErrorHandler(d.informerWatchErrorHandler); err != nil { if err := informer.SetWatchErrorHandlerWithContext(d.informerWatchErrorHandler); err != nil {
panic(err) panic(err)
} }
return informer return informer
@ -851,7 +851,7 @@ func (d *Discovery) mustNewSharedIndexInformer(lw cache.ListerWatcher, exampleOb
informer := cache.NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, indexers) informer := cache.NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, indexers)
// Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand. // Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand.
// Such a scenario would suggest an incorrect use of the API, thus the panic. // Such a scenario would suggest an incorrect use of the API, thus the panic.
if err := informer.SetWatchErrorHandler(d.informerWatchErrorHandler); err != nil { if err := informer.SetWatchErrorHandlerWithContext(d.informerWatchErrorHandler); err != nil {
panic(err) panic(err)
} }
return informer return informer

View File

@ -41,7 +41,7 @@ type Node struct {
logger *slog.Logger logger *slog.Logger
informer cache.SharedInformer informer cache.SharedInformer
store cache.Store store cache.Store
queue *workqueue.Type queue *workqueue.Typed[string]
} }
// NewNode returns a new node discovery. // NewNode returns a new node discovery.
@ -58,7 +58,9 @@ func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.Co
logger: l, logger: l,
informer: inf, informer: inf,
store: inf.GetStore(), store: inf.GetStore(),
queue: workqueue.NewNamed(RoleNode.String()), queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
Name: RoleNode.String(),
}),
} }
_, err := n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ _, err := n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -111,12 +113,11 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
} }
func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
keyObj, quit := n.queue.Get() key, quit := n.queue.Get()
if quit { if quit {
return false return false
} }
defer n.queue.Done(keyObj) defer n.queue.Done(key)
key := keyObj.(string)
_, name, err := cache.SplitMetaNamespaceKey(key) _, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil { if err != nil {

View File

@ -47,7 +47,7 @@ type Pod struct {
withNamespaceMetadata bool withNamespaceMetadata bool
store cache.Store store cache.Store
logger *slog.Logger logger *slog.Logger
queue *workqueue.Type queue *workqueue.Typed[string]
} }
// NewPod creates a new pod discovery. // NewPod creates a new pod discovery.
@ -68,7 +68,9 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cac
withNamespaceMetadata: namespace != nil, withNamespaceMetadata: namespace != nil,
store: pods.GetStore(), store: pods.GetStore(),
logger: l, logger: l,
queue: workqueue.NewNamed(RolePod.String()), queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
Name: RolePod.String(),
}),
} }
_, err := p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ _, err := p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o any) { AddFunc: func(o any) {
@ -166,12 +168,11 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
} }
func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
keyObj, quit := p.queue.Get() key, quit := p.queue.Get()
if quit { if quit {
return false return false
} }
defer p.queue.Done(keyObj) defer p.queue.Done(key)
key := keyObj.(string)
namespace, name, err := cache.SplitMetaNamespaceKey(key) namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil { if err != nil {

View File

@ -36,7 +36,7 @@ type Service struct {
logger *slog.Logger logger *slog.Logger
informer cache.SharedIndexInformer informer cache.SharedIndexInformer
store cache.Store store cache.Store
queue *workqueue.Type queue *workqueue.Typed[string]
namespaceInf cache.SharedInformer namespaceInf cache.SharedInformer
withNamespaceMetadata bool withNamespaceMetadata bool
} }
@ -55,7 +55,9 @@ func NewService(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
logger: l, logger: l,
informer: inf, informer: inf,
store: inf.GetStore(), store: inf.GetStore(),
queue: workqueue.NewNamed(RoleService.String()), queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
Name: RoleService.String(),
}),
namespaceInf: namespace, namespaceInf: namespace,
withNamespaceMetadata: namespace != nil, withNamespaceMetadata: namespace != nil,
} }
@ -142,12 +144,11 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
} }
func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
keyObj, quit := s.queue.Get() key, quit := s.queue.Get()
if quit { if quit {
return false return false
} }
defer s.queue.Done(keyObj) defer s.queue.Done(key)
key := keyObj.(string)
namespace, name, err := cache.SplitMetaNamespaceKey(key) namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil { if err != nil {

View File

@ -42,8 +42,8 @@ var (
configTypesMu sync.Mutex configTypesMu sync.Mutex
configTypes = make(map[reflect.Type]reflect.Type) configTypes = make(map[reflect.Type]reflect.Type)
emptyStructType = reflect.TypeOf(struct{}{}) emptyStructType = reflect.TypeFor[struct{}]()
configsType = reflect.TypeOf(Configs{}) configsType = reflect.TypeFor[Configs]()
) )
// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling. // RegisterConfig registers the given Config type for YAML marshaling and unmarshaling.
@ -54,7 +54,7 @@ func RegisterConfig(config Config) {
func init() { func init() {
// N.B.: static_configs is the only Config type implemented by default. // N.B.: static_configs is the only Config type implemented by default.
// All other types are registered at init by their implementing packages. // All other types are registered at init by their implementing packages.
elemTyp := reflect.TypeOf(&targetgroup.Group{}) elemTyp := reflect.TypeFor[*targetgroup.Group]()
registerConfig(staticConfigsKey, elemTyp, StaticConfig{}) registerConfig(staticConfigsKey, elemTyp, StaticConfig{})
} }

View File

@ -58,7 +58,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | | <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io, promql-extended-range-selectors. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. | | | <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. | |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` | | <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |

View File

@ -302,3 +302,42 @@ memory in response to misleading cache growth.
This is currently implemented using direct I/O. This is currently implemented using direct I/O.
For more details, see the [proposal](https://github.com/prometheus/proposals/pull/45). For more details, see the [proposal](https://github.com/prometheus/proposals/pull/45).
## Extended Range Selectors
`--enable-feature=promql-extended-range-selectors`
Enables experimental `anchored` and `smoothed` modifiers for PromQL range and instant selectors. These modifiers provide more control over how range boundaries are handled in functions like `rate` and `increase`, especially with missing or irregular data.
Native Histograms are not yet supported by the extended range selectors.
### `anchored`
Uses the most recent sample (within the lookback delta) at the beginning of the range, or alternatively the first sample within the range if there is no sample within the lookback delta. The last sample within the range is also used at the end of the range. No extrapolation or interpolation is applied, so this is useful to get the direct difference between sample values.
Anchored range selectors work with: `resets`, `changes`, `rate`, `increase`, and `delta`.
Example query:
`increase(http_requests_total[5m] anchored)`
**Note**: When using the `anchored` modifier with the `increase` function, the results returned are integers.
### `smoothed`
In range selectors, linearly interpolates values at the range boundaries, using the sample values before and after the boundaries for an improved estimation that is robust against irregular scrapes and missing samples. However, it requires a sample after the evaluation interval to work properly, see note below.
For instant selectors, values are linearly interpolated at the evaluation timestamp using the samples immediately before and after that point.
Smoothed range selectors work with: `rate`, `increase`, and `delta`.
Example query:
`rate(http_requests_total[step()] smoothed)`
> **Note for alerting and recording rules:**
> The `smoothed` modifier requires samples after the evaluation interval, so using it directly in alerting or recording rules will typically *under-estimate* the result, as future samples are not available at evaluation time.
> To use `smoothed` safely in rules, you **must** apply a `query_offset` to the rule group (see [documentation](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule_group)) to ensure the calculation window is fully in the past and all needed samples are available.
> For critical alerting, set the offset to at least one scrape interval; for less critical or more resilient use cases, consider a larger offset (multiple scrape intervals) to tolerate missed scrapes.
For more details, see the [design doc](https://github.com/prometheus/proposals/blob/main/proposals/2025-04-04_extended-range-selectors-semantics.md).
**Note**: Extended Range Selectors are not supported for subqueries.

View File

@ -79,7 +79,7 @@ navigating to its metrics endpoint:
Let us explore data that Prometheus has collected about itself. To Let us explore data that Prometheus has collected about itself. To
use Prometheus's built-in expression browser, navigate to use Prometheus's built-in expression browser, navigate to
http://localhost:9090/graph and choose the "Table" view within the "Graph" tab. http://localhost:9090/query and choose the "Graph" tab.
As you can gather from [localhost:9090/metrics](http://localhost:9090/metrics), As you can gather from [localhost:9090/metrics](http://localhost:9090/metrics),
one metric that Prometheus exports about itself is named one metric that Prometheus exports about itself is named
@ -113,7 +113,7 @@ For more about the expression language, see the
## Using the graphing interface ## Using the graphing interface
To graph expressions, navigate to http://localhost:9090/graph and use the "Graph" To graph expressions, navigate to http://localhost:9090/query and use the "Graph"
tab. tab.
For example, enter the following expression to graph the per-second rate of chunks For example, enter the following expression to graph the per-second rate of chunks

View File

@ -348,7 +348,9 @@ You can URL-encode these parameters directly in the request body by using the `P
or dynamic number of series selectors that may breach server-side URL character limits. or dynamic number of series selectors that may breach server-side URL character limits.
The `data` section of the query result consists of a list of objects that The `data` section of the query result consists of a list of objects that
contain the label name/value pairs which identify each series. contain the label name/value pairs which identify each series. Note that the
`start` and `end` times are approximate and the result may contain label values
for series which have no samples in the given interval.
The following example returns all series that match either of the selectors The following example returns all series that match either of the selectors
`up` or `process_start_time_seconds{job="prometheus"}`: `up` or `process_start_time_seconds{job="prometheus"}`:
@ -397,8 +399,9 @@ URL query parameters:
series from which to read the label names. Optional. series from which to read the label names. Optional.
- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled. - `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.
The `data` section of the JSON response is a list of string label names. Note
The `data` section of the JSON response is a list of string label names. that the `start` and `end` times are approximate and the result may contain
label names for series which have no samples in the given interval.
Here is an example. Here is an example.
@ -451,7 +454,10 @@ URL query parameters:
series from which to read the label values. Optional. series from which to read the label values. Optional.
- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled. - `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.
The `data` section of the JSON response is a list of string label values. The `data` section of the JSON response is a list of string label values. Note
that the `start` and `end` times are approximate and the result may contain
label values for series which have no samples in the given interval.
This example queries for all label values for the `http_status_code` label: This example queries for all label values for the `http_status_code` label:

View File

@ -113,8 +113,8 @@ require (
google.golang.org/protobuf v1.36.8 // indirect google.golang.org/protobuf v1.36.8 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apimachinery v0.32.3 // indirect k8s.io/apimachinery v0.33.5 // indirect
k8s.io/client-go v0.32.3 // indirect k8s.io/client-go v0.33.5 // indirect
k8s.io/klog/v2 v2.130.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect

View File

@ -140,16 +140,14 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@ -160,8 +158,8 @@ github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3
github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E= github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk= github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg= github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg=
@ -471,21 +469,23 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= k8s.io/api v0.33.5 h1:YR+uhYj05jdRpcksv8kjSliW+v9hwXxn6Cv10aR8Juw=
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= k8s.io/api v0.33.5/go.mod h1:2gzShdwXKT5yPGiqrTrn/U/nLZ7ZyT4WuAj3XGDVgVs=
k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= k8s.io/apimachinery v0.33.5 h1:NiT64hln4TQXeYR18/ES39OrNsjGz8NguxsBgp+6QIo=
k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/apimachinery v0.33.5/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= k8s.io/client-go v0.33.5 h1:I8BdmQGxInpkMEnJvV6iG7dqzP3JRlpZZlib3OMFc3o=
k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= k8s.io/client-go v0.33.5/go.mod h1:W8PQP4MxbM4ypgagVE65mUUqK1/ByQkSALF9tzuQ6u0=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

65
go.mod
View File

@ -27,7 +27,7 @@ require (
github.com/envoyproxy/go-control-plane/envoy v1.32.4 github.com/envoyproxy/go-control-plane/envoy v1.32.4
github.com/envoyproxy/protoc-gen-validate v1.2.1 github.com/envoyproxy/protoc-gen-validate v1.2.1
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
github.com/fsnotify/fsnotify v1.8.0 github.com/fsnotify/fsnotify v1.9.0
github.com/go-openapi/strfmt v0.23.0 github.com/go-openapi/strfmt v0.23.0
github.com/go-zookeeper/zk v1.0.4 github.com/go-zookeeper/zk v1.0.4
github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf v1.3.2
@ -93,36 +93,13 @@ require (
google.golang.org/grpc v1.73.0 google.golang.org/grpc v1.73.0
google.golang.org/protobuf v1.36.8 google.golang.org/protobuf v1.36.8
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.32.3 k8s.io/api v0.33.5
k8s.io/apimachinery v0.32.3 k8s.io/apimachinery v0.33.5
k8s.io/client-go v0.32.3 k8s.io/client-go v0.33.5
k8s.io/klog v1.0.0 k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.130.1 k8s.io/klog/v2 v2.130.1
) )
require (
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/moby/sys/atomicwriter v0.1.0 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
go.opentelemetry.io/collector/featuregate v1.35.0 // indirect
go.opentelemetry.io/collector/internal/telemetry v0.129.0 // indirect
go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect
go.opentelemetry.io/otel/log v0.12.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)
require ( require (
cloud.google.com/go/auth v0.16.2 // indirect cloud.google.com/go/auth v0.16.2 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
@ -132,8 +109,19 @@ require (
github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect github.com/armon/go-metrics v0.4.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect github.com/containerd/log v0.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@ -156,15 +144,14 @@ require (
github.com/go-openapi/validate v0.24.0 // indirect github.com/go-openapi/validate v0.24.0 // indirect
github.com/go-resty/resty/v2 v2.16.5 // indirect github.com/go-resty/resty/v2 v2.16.5 // indirect
github.com/go-viper/mapstructure/v2 v2.3.0 // indirect github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-querystring v1.1.0 // indirect github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/googleapis/gax-go/v2 v2.14.2 // indirect github.com/googleapis/gax-go/v2 v2.14.2 // indirect
github.com/gorilla/websocket v1.5.0 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect
@ -174,6 +161,7 @@ require (
github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/serf v0.10.1 // indirect github.com/hashicorp/serf v0.10.1 // indirect
github.com/josharian/intern v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect
@ -193,6 +181,7 @@ require (
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/atomicwriter v0.1.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
@ -209,6 +198,7 @@ require (
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/otlptranslator v0.0.2 github.com/prometheus/otlptranslator v0.0.2
github.com/prometheus/procfs v0.16.1 // indirect github.com/prometheus/procfs v0.16.1 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect github.com/stretchr/objx v0.5.2 // indirect
github.com/x448/float16 v0.8.4 // indirect github.com/x448/float16 v0.8.4 // indirect
@ -217,7 +207,11 @@ require (
go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/collector/confmap v1.35.0 // indirect go.opentelemetry.io/collector/confmap v1.35.0 // indirect
go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 // indirect go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 // indirect
go.opentelemetry.io/collector/featuregate v1.35.0 // indirect
go.opentelemetry.io/collector/internal/telemetry v0.129.0 // indirect
go.opentelemetry.io/collector/pipeline v0.129.0 // indirect go.opentelemetry.io/collector/pipeline v0.129.0 // indirect
go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect
go.opentelemetry.io/otel/log v0.12.2 // indirect
go.opentelemetry.io/proto/otlp v1.6.0 // indirect go.opentelemetry.io/proto/otlp v1.6.0 // indirect
go.uber.org/zap v1.27.0 // indirect go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.41.0 // indirect golang.org/x/crypto v0.41.0 // indirect
@ -231,11 +225,13 @@ require (
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gotest.tools/v3 v3.0.3 // indirect gotest.tools/v3 v3.0.3 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect
) )
@ -247,6 +243,3 @@ exclude (
github.com/grpc-ecosystem/grpc-gateway v1.14.7 github.com/grpc-ecosystem/grpc-gateway v1.14.7
google.golang.org/api v0.30.0 google.golang.org/api v0.30.0
) )
// Pin until https://github.com/fsnotify/fsnotify/issues/656 is resolved.
replace github.com/fsnotify/fsnotify v1.8.0 => github.com/fsnotify/fsnotify v1.7.0

41
go.sum
View File

@ -141,8 +141,8 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@ -198,10 +198,10 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@ -212,8 +212,6 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18= github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18=
github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
@ -226,8 +224,8 @@ github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3
github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E= github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk= github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
@ -716,23 +714,26 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= k8s.io/api v0.33.5 h1:YR+uhYj05jdRpcksv8kjSliW+v9hwXxn6Cv10aR8Juw=
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= k8s.io/api v0.33.5/go.mod h1:2gzShdwXKT5yPGiqrTrn/U/nLZ7ZyT4WuAj3XGDVgVs=
k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= k8s.io/apimachinery v0.33.5 h1:NiT64hln4TQXeYR18/ES39OrNsjGz8NguxsBgp+6QIo=
k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/apimachinery v0.33.5/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= k8s.io/client-go v0.33.5 h1:I8BdmQGxInpkMEnJvV6iG7dqzP3JRlpZZlib3OMFc3o=
k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= k8s.io/client-go v0.33.5/go.mod h1:W8PQP4MxbM4ypgagVE65mUUqK1/ByQkSALF9tzuQ6u0=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

View File

@ -798,23 +798,24 @@ func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] {
// create false positives here. // create false positives here.
func (h *FloatHistogram) Validate() error { func (h *FloatHistogram) Validate() error {
var nCount, pCount float64 var nCount, pCount float64
if h.UsesCustomBuckets() { switch {
case IsCustomBucketsSchema(h.Schema):
if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("custom buckets: %w", err) return fmt.Errorf("custom buckets: %w", err)
} }
if h.ZeroCount != 0 { if h.ZeroCount != 0 {
return errors.New("custom buckets: must have zero count of 0") return ErrHistogramCustomBucketsZeroCount
} }
if h.ZeroThreshold != 0 { if h.ZeroThreshold != 0 {
return errors.New("custom buckets: must have zero threshold of 0") return ErrHistogramCustomBucketsZeroThresh
} }
if len(h.NegativeSpans) > 0 { if len(h.NegativeSpans) > 0 {
return errors.New("custom buckets: must not have negative spans") return ErrHistogramCustomBucketsNegSpans
} }
if len(h.NegativeBuckets) > 0 { if len(h.NegativeBuckets) > 0 {
return errors.New("custom buckets: must not have negative buckets") return ErrHistogramCustomBucketsNegBuckets
} }
} else { case IsExponentialSchema(h.Schema):
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("positive side: %w", err) return fmt.Errorf("positive side: %w", err)
} }
@ -826,8 +827,10 @@ func (h *FloatHistogram) Validate() error {
return fmt.Errorf("negative side: %w", err) return fmt.Errorf("negative side: %w", err)
} }
if h.CustomValues != nil { if h.CustomValues != nil {
return errors.New("histogram with exponential schema must not have custom bounds") return ErrHistogramExpSchemaCustomBounds
} }
default:
return InvalidSchemaError(h.Schema)
} }
err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false) err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
if err != nil { if err != nil {

View File

@ -22,7 +22,9 @@ import (
const ( const (
ExponentialSchemaMax int32 = 8 ExponentialSchemaMax int32 = 8
ExponentialSchemaMaxReserved int32 = 52
ExponentialSchemaMin int32 = -4 ExponentialSchemaMin int32 = -4
ExponentialSchemaMinReserved int32 = -9
CustomBucketsSchema int32 = -53 CustomBucketsSchema int32 = -53
) )
@ -37,8 +39,23 @@ var (
ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite") ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite")
ErrHistogramsIncompatibleSchema = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas") ErrHistogramsIncompatibleSchema = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas")
ErrHistogramsIncompatibleBounds = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds") ErrHistogramsIncompatibleBounds = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds")
ErrHistogramCustomBucketsZeroCount = errors.New("custom buckets: must have zero count of 0")
ErrHistogramCustomBucketsZeroThresh = errors.New("custom buckets: must have zero threshold of 0")
ErrHistogramCustomBucketsNegSpans = errors.New("custom buckets: must not have negative spans")
ErrHistogramCustomBucketsNegBuckets = errors.New("custom buckets: must not have negative buckets")
ErrHistogramExpSchemaCustomBounds = errors.New("histogram with exponential schema must not have custom bounds")
ErrHistogramsInvalidSchema = fmt.Errorf("histogram has an invalid schema, which must be between %d and %d for exponential buckets, or %d for custom buckets", ExponentialSchemaMin, ExponentialSchemaMax, CustomBucketsSchema)
ErrHistogramsUnknownSchema = fmt.Errorf("histogram has an unknown schema, which must be between %d and %d for exponential buckets, or %d for custom buckets", ExponentialSchemaMinReserved, ExponentialSchemaMaxReserved, CustomBucketsSchema)
) )
func InvalidSchemaError(s int32) error {
return fmt.Errorf("%w, got schema %d", ErrHistogramsInvalidSchema, s)
}
func UnknownSchemaError(s int32) error {
return fmt.Errorf("%w, got schema %d", ErrHistogramsUnknownSchema, s)
}
func IsCustomBucketsSchema(s int32) bool { func IsCustomBucketsSchema(s int32) bool {
return s == CustomBucketsSchema return s == CustomBucketsSchema
} }
@ -47,6 +64,20 @@ func IsExponentialSchema(s int32) bool {
return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax
} }
func IsExponentialSchemaReserved(s int32) bool {
return s >= ExponentialSchemaMinReserved && s <= ExponentialSchemaMaxReserved
}
func IsValidSchema(s int32) bool {
return IsCustomBucketsSchema(s) || IsExponentialSchema(s)
}
// IsKnownSchema returns bool if we known and accept the schema, but need to
// reduce resolution to the nearest supported schema.
func IsKnownSchema(s int32) bool {
return IsCustomBucketsSchema(s) || IsExponentialSchemaReserved(s)
}
// BucketCount is a type constraint for the count in a bucket, which can be // BucketCount is a type constraint for the count in a bucket, which can be
// float64 (for type FloatHistogram) or uint64 (for type Histogram). // float64 (for type FloatHistogram) or uint64 (for type Histogram).
type BucketCount interface { type BucketCount interface {

View File

@ -14,7 +14,6 @@
package histogram package histogram
import ( import (
"errors"
"fmt" "fmt"
"math" "math"
"slices" "slices"
@ -425,23 +424,24 @@ func resize[T any](items []T, n int) []T {
// the total h.Count). // the total h.Count).
func (h *Histogram) Validate() error { func (h *Histogram) Validate() error {
var nCount, pCount uint64 var nCount, pCount uint64
if h.UsesCustomBuckets() { switch {
case IsCustomBucketsSchema(h.Schema):
if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("custom buckets: %w", err) return fmt.Errorf("custom buckets: %w", err)
} }
if h.ZeroCount != 0 { if h.ZeroCount != 0 {
return errors.New("custom buckets: must have zero count of 0") return ErrHistogramCustomBucketsZeroCount
} }
if h.ZeroThreshold != 0 { if h.ZeroThreshold != 0 {
return errors.New("custom buckets: must have zero threshold of 0") return ErrHistogramCustomBucketsZeroThresh
} }
if len(h.NegativeSpans) > 0 { if len(h.NegativeSpans) > 0 {
return errors.New("custom buckets: must not have negative spans") return ErrHistogramCustomBucketsNegSpans
} }
if len(h.NegativeBuckets) > 0 { if len(h.NegativeBuckets) > 0 {
return errors.New("custom buckets: must not have negative buckets") return ErrHistogramCustomBucketsNegBuckets
} }
} else { case IsExponentialSchema(h.Schema):
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("positive side: %w", err) return fmt.Errorf("positive side: %w", err)
} }
@ -453,8 +453,10 @@ func (h *Histogram) Validate() error {
return fmt.Errorf("negative side: %w", err) return fmt.Errorf("negative side: %w", err)
} }
if h.CustomValues != nil { if h.CustomValues != nil {
return errors.New("histogram with exponential schema must not have custom bounds") return ErrHistogramExpSchemaCustomBounds
} }
default:
return InvalidSchemaError(h.Schema)
} }
err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true) err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
if err != nil { if err != nil {

View File

@ -1565,6 +1565,18 @@ func TestHistogramValidation(t *testing.T) {
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8}, CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8},
}, },
}, },
"schema too high": {
h: &Histogram{
Schema: 10,
},
errMsg: `histogram has an invalid schema, which must be between -4 and 8 for exponential buckets, or -53 for custom buckets, got schema 10`,
},
"schema too low": {
h: &Histogram{
Schema: -10,
},
errMsg: `histogram has an invalid schema, which must be between -4 and 8 for exponential buckets, or -53 for custom buckets, got schema -10`,
},
} }
for testName, tc := range tests { for testName, tc := range tests {

View File

@ -117,6 +117,18 @@ func rangeQueryCases() []benchCase {
expr: "rate(sparse[1m])", expr: "rate(sparse[1m])",
steps: 10000, steps: 10000,
}, },
// Smoothed rate.
{
expr: "rate(a_X[1m] smoothed)",
},
{
expr: "rate(a_X[1m] smoothed)",
steps: 10000,
},
{
expr: "rate(sparse[1m] smoothed)",
steps: 10000,
},
// Holt-Winters and long ranges. // Holt-Winters and long ranges.
{ {
expr: "double_exponential_smoothing(a_X[1d], 0.3, 0.3)", expr: "double_exponential_smoothing(a_X[1d], 0.3, 0.3)",
@ -266,6 +278,10 @@ func rangeQueryCases() []benchCase {
} }
func BenchmarkRangeQuery(b *testing.B) { func BenchmarkRangeQuery(b *testing.B) {
parser.EnableExtendedRangeSelectors = true
b.Cleanup(func() {
parser.EnableExtendedRangeSelectors = false
})
stor := teststorage.New(b) stor := teststorage.New(b)
stor.DisableCompactions() // Don't want auto-compaction disrupting timings. stor.DisableCompactions() // Don't want auto-compaction disrupting timings.
defer stor.Close() defer stor.Close()

View File

@ -21,6 +21,7 @@ import (
"fmt" "fmt"
"io" "io"
"log/slog" "log/slog"
"maps"
"math" "math"
"reflect" "reflect"
"runtime" "runtime"
@ -124,6 +125,8 @@ var _ QueryLogger = (*logging.JSONFileLogger)(nil)
// QueryLogger is an interface that can be used to log all the queries logged // QueryLogger is an interface that can be used to log all the queries logged
// by the engine. // by the engine.
// logging.JSONFileLogger implements this interface, downstream users may use
// different implementations.
type QueryLogger interface { type QueryLogger interface {
slog.Handler slog.Handler
io.Closer io.Closer
@ -926,14 +929,28 @@ func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path
// because wo want to exclude samples that are precisely the // because wo want to exclude samples that are precisely the
// lookback delta before the eval time. // lookback delta before the eval time.
start -= durationMilliseconds(s.LookbackDelta) - 1 start -= durationMilliseconds(s.LookbackDelta) - 1
if n.Smoothed {
end += durationMilliseconds(s.LookbackDelta)
}
} else { } else {
// For all matrix queries we want to ensure that we have // For matrix queries, adjust the start and end times to ensure the
// (end-start) + range selected this way we have `range` data // correct range of data is selected. For "anchored" selectors, extend
// before the start time. We subtract one from the range to // the start time backwards by the lookback delta plus the evaluation
// exclude samples positioned directly at the lower boundary of // range. For "smoothed" selectors, extend both the start and end times
// the range. // by the lookback delta, and also extend the start time by the
// evaluation range to cover the smoothing window. For standard range
// queries, extend the start time backwards by the range (minus one
// millisecond) to exclude samples exactly at the lower boundary.
switch {
case n.Anchored:
start -= durationMilliseconds(s.LookbackDelta+evalRange) - 1
case n.Smoothed:
start -= durationMilliseconds(s.LookbackDelta+evalRange) - 1
end += durationMilliseconds(s.LookbackDelta)
default:
start -= durationMilliseconds(evalRange) - 1 start -= durationMilliseconds(evalRange) - 1
} }
}
offsetMilliseconds := durationMilliseconds(n.OriginalOffset) offsetMilliseconds := durationMilliseconds(n.OriginalOffset)
start -= offsetMilliseconds start -= offsetMilliseconds
@ -979,7 +996,6 @@ func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s
evalRange = 0 evalRange = 0
hints.By, hints.Grouping = extractGroupsFromPath(path) hints.By, hints.Grouping = extractGroupsFromPath(path)
n.UnexpandedSeriesSet = querier.Select(ctx, false, hints, n.LabelMatchers...) n.UnexpandedSeriesSet = querier.Select(ctx, false, hints, n.LabelMatchers...)
case *parser.MatrixSelector: case *parser.MatrixSelector:
evalRange = n.Range evalRange = n.Range
} }
@ -1524,6 +1540,76 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
return result, annos return result, annos
} }
// smoothSeries is a helper function that smooths the series by interpolating the values
// based on values before and after the timestamp.
func (ev *evaluator) smoothSeries(series []storage.Series, offset time.Duration) Matrix {
dur := ev.endTimestamp - ev.startTimestamp
it := storage.NewBuffer(dur + 2*durationMilliseconds(ev.lookbackDelta))
offMS := offset.Milliseconds()
start := ev.startTimestamp - offMS
end := ev.endTimestamp - offMS
step := ev.interval
lb := durationMilliseconds(ev.lookbackDelta)
var chkIter chunkenc.Iterator
mat := make(Matrix, 0, len(series))
for _, s := range series {
ss := Series{Metric: s.Labels()}
chkIter = s.Iterator(chkIter)
it.Reset(chkIter)
var floats []FPoint
var hists []HPoint
for ts := start; ts <= end; ts += step {
matrixStart := ts - lb
matrixEnd := ts + lb
floats, hists = ev.matrixIterSlice(it, matrixStart, matrixEnd, floats, hists)
if len(floats) == 0 && len(hists) == 0 {
continue
}
if len(hists) > 0 {
// TODO: support native histograms.
ev.errorf("smoothed and anchored modifiers do not work with native histograms")
}
// Binary search for the first index with T >= ts.
i := sort.Search(len(floats), func(i int) bool { return floats[i].T >= ts })
switch {
case i < len(floats) && floats[i].T == ts:
// Exact match.
ss.Floats = append(ss.Floats, floats[i])
case i > 0 && i < len(floats):
// Interpolate between prev and next.
// TODO: detect if the sample is a counter, based on __type__ or metadata.
prev, next := floats[i-1], floats[i]
val := interpolate(prev, next, ts, false, false)
ss.Floats = append(ss.Floats, FPoint{F: val, T: ts})
case i > 0:
// No next point yet; carry forward previous value.
prev := floats[i-1]
ss.Floats = append(ss.Floats, FPoint{F: prev.F, T: ts})
default:
// i == 0 and floats[0].T > ts: there is no previous data yet; skip.
}
}
mat = append(mat, ss)
}
return mat
}
// evalSeries generates a Matrix between ev.startTimestamp and ev.endTimestamp (inclusive), each point spaced ev.interval apart, from series given offset. // evalSeries generates a Matrix between ev.startTimestamp and ev.endTimestamp (inclusive), each point spaced ev.interval apart, from series given offset.
// For every storage.Series iterator in series, the method iterates in ev.interval sized steps from ev.startTimestamp until and including ev.endTimestamp, // For every storage.Series iterator in series, the method iterates in ev.interval sized steps from ev.startTimestamp until and including ev.endTimestamp,
// collecting every corresponding sample (obtained via ev.vectorSelectorSingle) into a Series. // collecting every corresponding sample (obtained via ev.vectorSelectorSingle) into a Series.
@ -1784,6 +1870,17 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
sel := arg.(*parser.MatrixSelector) sel := arg.(*parser.MatrixSelector)
selVS := sel.VectorSelector.(*parser.VectorSelector) selVS := sel.VectorSelector.(*parser.VectorSelector)
switch {
case selVS.Anchored:
if _, ok := AnchoredSafeFunctions[e.Func.Name]; !ok {
ev.errorf("anchored modifier can only be used with: %s - not with %s", strings.Join(slices.Sorted(maps.Keys(AnchoredSafeFunctions)), ", "), e.Func.Name)
}
case selVS.Smoothed:
if _, ok := SmoothedSafeFunctions[e.Func.Name]; !ok {
ev.errorf("smoothed modifier can only be used with: %s - not with %s", strings.Join(slices.Sorted(maps.Keys(SmoothedSafeFunctions)), ", "), e.Func.Name)
}
}
ws, err := checkAndExpandSeriesSet(ctx, sel) ws, err := checkAndExpandSeriesSet(ctx, sel)
warnings.Merge(ws) warnings.Merge(ws)
if err != nil { if err != nil {
@ -1792,7 +1889,17 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
mat := make(Matrix, 0, len(selVS.Series)) // Output matrix. mat := make(Matrix, 0, len(selVS.Series)) // Output matrix.
offset := durationMilliseconds(selVS.Offset) offset := durationMilliseconds(selVS.Offset)
selRange := durationMilliseconds(sel.Range) selRange := durationMilliseconds(sel.Range)
stepRange := min(selRange, ev.interval)
var stepRange int64
switch {
case selVS.Anchored:
stepRange = min(selRange+durationMilliseconds(ev.lookbackDelta), ev.interval)
case selVS.Smoothed:
stepRange = min(selRange+durationMilliseconds(2*ev.lookbackDelta), ev.interval)
default:
stepRange = min(selRange, ev.interval)
}
// Reuse objects across steps to save memory allocations. // Reuse objects across steps to save memory allocations.
var floats []FPoint var floats []FPoint
var histograms []HPoint var histograms []HPoint
@ -1800,7 +1907,18 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
inMatrix := make(Matrix, 1) inMatrix := make(Matrix, 1)
enh := &EvalNodeHelper{Out: make(Vector, 0, 1), enableDelayedNameRemoval: ev.enableDelayedNameRemoval} enh := &EvalNodeHelper{Out: make(Vector, 0, 1), enableDelayedNameRemoval: ev.enableDelayedNameRemoval}
// Process all the calls for one time series at a time. // Process all the calls for one time series at a time.
it := storage.NewBuffer(selRange) // For anchored and smoothed selectors, we need to iterate over a
// larger range than the query range to account for the lookback delta.
// For standard range queries, we iterate over the query range.
bufferRange := selRange
switch {
case selVS.Anchored:
bufferRange += durationMilliseconds(ev.lookbackDelta)
case selVS.Smoothed:
bufferRange += durationMilliseconds(2 * ev.lookbackDelta)
}
it := storage.NewBuffer(bufferRange)
var chkIter chunkenc.Iterator var chkIter chunkenc.Iterator
// The last_over_time and first_over_time functions act like // The last_over_time and first_over_time functions act like
@ -1849,11 +1967,24 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
if ts == ev.startTimestamp || selVS.Timestamp == nil { if ts == ev.startTimestamp || selVS.Timestamp == nil {
maxt := ts - offset maxt := ts - offset
mint := maxt - selRange mint := maxt - selRange
switch {
case selVS.Anchored:
mint -= durationMilliseconds(ev.lookbackDelta)
case selVS.Smoothed:
mint -= durationMilliseconds(ev.lookbackDelta)
maxt += durationMilliseconds(ev.lookbackDelta)
}
floats, histograms = ev.matrixIterSlice(it, mint, maxt, floats, histograms) floats, histograms = ev.matrixIterSlice(it, mint, maxt, floats, histograms)
} }
if len(floats)+len(histograms) == 0 { if len(floats)+len(histograms) == 0 {
continue continue
} }
if selVS.Anchored || selVS.Smoothed {
if len(histograms) > 0 {
// TODO: support native histograms.
ev.errorf("smoothed and anchored modifiers do not work with native histograms")
}
}
inMatrix[0].Floats = floats inMatrix[0].Floats = floats
inMatrix[0].Histograms = histograms inMatrix[0].Histograms = histograms
enh.Ts = ts enh.Ts = ts
@ -2052,6 +2183,10 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
if err != nil { if err != nil {
ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
} }
if e.Smoothed {
mat := ev.smoothSeries(e.Series, e.Offset)
return mat, ws
}
mat := ev.evalSeries(ctx, e.Series, e.Offset, false) mat := ev.evalSeries(ctx, e.Series, e.Offset, false)
return mat, ws return mat, ws
@ -2348,10 +2483,23 @@ func (ev *evaluator) matrixSelector(ctx context.Context, node *parser.MatrixSele
offset = durationMilliseconds(vs.Offset) offset = durationMilliseconds(vs.Offset)
maxt = ev.startTimestamp - offset maxt = ev.startTimestamp - offset
mint = maxt - durationMilliseconds(node.Range) mint = maxt - durationMilliseconds(node.Range)
// matrixMint keeps the original mint for smoothed and anchored selectors.
matrixMint = mint
// matrixMaxt keeps the original maxt for smoothed and anchored selectors.
matrixMaxt = maxt
matrix = make(Matrix, 0, len(vs.Series)) matrix = make(Matrix, 0, len(vs.Series))
bufferRange = durationMilliseconds(node.Range)
it = storage.NewBuffer(durationMilliseconds(node.Range))
) )
switch {
case vs.Anchored:
bufferRange += durationMilliseconds(ev.lookbackDelta)
mint -= durationMilliseconds(ev.lookbackDelta)
case vs.Smoothed:
bufferRange += 2 * durationMilliseconds(ev.lookbackDelta)
mint -= durationMilliseconds(ev.lookbackDelta)
maxt += durationMilliseconds(ev.lookbackDelta)
}
it := storage.NewBuffer(bufferRange)
ws, err := checkAndExpandSeriesSet(ctx, node) ws, err := checkAndExpandSeriesSet(ctx, node)
if err != nil { if err != nil {
ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
@ -2370,6 +2518,18 @@ func (ev *evaluator) matrixSelector(ctx context.Context, node *parser.MatrixSele
} }
ss.Floats, ss.Histograms = ev.matrixIterSlice(it, mint, maxt, nil, nil) ss.Floats, ss.Histograms = ev.matrixIterSlice(it, mint, maxt, nil, nil)
switch {
case vs.Anchored:
if ss.Histograms != nil {
ev.errorf("anchored modifier is not supported with histograms")
}
ss.Floats = extendFloats(ss.Floats, matrixMint, matrixMaxt, false)
case vs.Smoothed:
if ss.Histograms != nil {
ev.errorf("anchored modifier is not supported with histograms")
}
ss.Floats = extendFloats(ss.Floats, matrixMint, matrixMaxt, true)
}
totalSize := int64(len(ss.Floats)) + int64(totalHPointSize(ss.Histograms)) totalSize := int64(len(ss.Floats)) + int64(totalHPointSize(ss.Histograms))
ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, totalSize) ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, totalSize)
@ -4035,3 +4195,39 @@ func (ev *evaluator) gatherVector(ts int64, input Matrix, output Vector, bufHelp
return output, bufHelpers return output, bufHelpers
} }
// extendFloats extends the floats to cover the full range [mint, maxt].
// This function is used with matrix selectors that are smoothed or anchored:
// the boundary values are taken from the nearest sample (anchored) or
// linearly interpolated (smoothed). It returns an empty result when no
// sample lies after mint.
func extendFloats(floats []FPoint, mint, maxt int64, smoothed bool) []FPoint {
	// Guard against an empty series (e.g. no float samples in the window);
	// the index arithmetic below would otherwise panic.
	if len(floats) == 0 {
		return floats
	}
	lastSampleIndex := len(floats) - 1
	// Index of the last sample at or before mint, or 0 if none precede it.
	firstSampleIndex := max(0, sort.Search(lastSampleIndex, func(i int) bool { return floats[i].T > mint })-1)
	if smoothed {
		// For smoothing, also consider the first sample at or after maxt.
		lastSampleIndex = sort.Search(lastSampleIndex, func(i int) bool { return floats[i].T >= maxt })
	}
	if floats[lastSampleIndex].T <= mint {
		return []FPoint{}
	}
	// TODO: detect if the sample is a counter, based on __type__ or metadata.
	left := pickOrInterpolateLeft(floats, firstSampleIndex, mint, smoothed, false)
	right := pickOrInterpolateRight(floats, lastSampleIndex, maxt, smoothed, false)
	// Filter out samples at boundaries or outside the range.
	if floats[firstSampleIndex].T <= mint {
		firstSampleIndex++
	}
	if floats[lastSampleIndex].T >= maxt {
		lastSampleIndex--
	}
	// Two synthetic boundary samples plus the inner samples.
	out := make([]FPoint, 0, lastSampleIndex-firstSampleIndex+3)
	out = append(out, FPoint{T: mint, F: left})
	out = append(out, floats[firstSampleIndex:lastSampleIndex+1]...)
	out = append(out, FPoint{T: maxt, F: right})
	return out
}

View File

@ -1513,6 +1513,160 @@ load 10s
} }
} }
// TestExtendedRangeSelectors exercises the experimental anchored and smoothed
// range-selector modifiers on instant queries.
func TestExtendedRangeSelectors(t *testing.T) {
	parser.EnableExtendedRangeSelectors = true
	t.Cleanup(func() {
		parser.EnableExtendedRangeSelectors = false
	})

	storage := promqltest.LoadedStorage(t, `
load 10s
	metric 1+1x10
	withreset 1+1x4 1+1x5
	notregular 0 5 100 2 8
`)
	t.Cleanup(func() { storage.Close() })

	cases := []struct {
		query    string
		t        time.Time
		expected promql.Matrix
	}{
		{
			query: "metric[10s] smoothed",
			t:     time.Unix(10, 0),
			expected: promql.Matrix{
				promql.Series{
					Floats: []promql.FPoint{{F: 1, T: 0}, {F: 2, T: 10000}},
					Metric: labels.FromStrings("__name__", "metric"),
				},
			},
		},
		{
			query: "metric[10s] smoothed",
			t:     time.Unix(15, 0),
			expected: promql.Matrix{
				promql.Series{
					Floats: []promql.FPoint{{F: 1.5, T: 5000}, {F: 2, T: 10000}, {F: 2.5, T: 15000}},
					Metric: labels.FromStrings("__name__", "metric"),
				},
			},
		},
		{
			query: "metric[10s] smoothed",
			t:     time.Unix(5, 0),
			expected: promql.Matrix{
				promql.Series{
					Floats: []promql.FPoint{{F: 1, T: -5000}, {F: 1, T: 0}, {F: 1.5, T: 5000}},
					Metric: labels.FromStrings("__name__", "metric"),
				},
			},
		},
		{
			query: "metric[10s] smoothed",
			t:     time.Unix(105, 0),
			expected: promql.Matrix{
				promql.Series{
					Floats: []promql.FPoint{{F: 10.5, T: 95000}, {F: 11, T: 100000}, {F: 11, T: 105000}},
					Metric: labels.FromStrings("__name__", "metric"),
				},
			},
		},
		{
			query: "withreset[10s] smoothed",
			t:     time.Unix(45, 0),
			expected: promql.Matrix{
				promql.Series{
					Floats: []promql.FPoint{{F: 4.5, T: 35000}, {F: 5, T: 40000}, {F: 3, T: 45000}},
					Metric: labels.FromStrings("__name__", "withreset"),
				},
			},
		},
		{
			query: "metric[10s] anchored",
			t:     time.Unix(10, 0),
			expected: promql.Matrix{
				promql.Series{
					Floats: []promql.FPoint{{F: 1, T: 0}, {F: 2, T: 10000}},
					Metric: labels.FromStrings("__name__", "metric"),
				},
			},
		},
		{
			query: "metric[10s] anchored",
			t:     time.Unix(15, 0),
			expected: promql.Matrix{
				promql.Series{
					Floats: []promql.FPoint{{F: 1, T: 5000}, {F: 2, T: 10000}, {F: 2, T: 15000}},
					Metric: labels.FromStrings("__name__", "metric"),
				},
			},
		},
		{
			query: "metric[10s] anchored",
			t:     time.Unix(5, 0),
			expected: promql.Matrix{
				promql.Series{
					Floats: []promql.FPoint{{F: 1, T: -5000}, {F: 1, T: 0}, {F: 1, T: 5000}},
					Metric: labels.FromStrings("__name__", "metric"),
				},
			},
		},
		{
			query: "metric[10s] anchored",
			t:     time.Unix(105, 0),
			expected: promql.Matrix{
				promql.Series{
					Floats: []promql.FPoint{{F: 10, T: 95000}, {F: 11, T: 100000}, {F: 11, T: 105000}},
					Metric: labels.FromStrings("__name__", "metric"),
				},
			},
		},
		{
			query: "withreset[10s] anchored",
			t:     time.Unix(45, 0),
			expected: promql.Matrix{
				promql.Series{
					Floats: []promql.FPoint{{F: 4, T: 35000}, {F: 5, T: 40000}, {F: 5, T: 45000}},
					Metric: labels.FromStrings("__name__", "withreset"),
				},
			},
		},
		{
			query: "notregular[20s] smoothed",
			t:     time.Unix(30, 0),
			expected: promql.Matrix{
				promql.Series{
					Floats: []promql.FPoint{{F: 5, T: 10000}, {F: 100, T: 20000}, {F: 2, T: 30000}},
					Metric: labels.FromStrings("__name__", "notregular"),
				},
			},
		},
		{
			query: "notregular[20s] anchored",
			t:     time.Unix(30, 0),
			expected: promql.Matrix{
				promql.Series{
					Floats: []promql.FPoint{{F: 5, T: 10000}, {F: 100, T: 20000}, {F: 2, T: 30000}},
					Metric: labels.FromStrings("__name__", "notregular"),
				},
			},
		},
	}

	// Note: each subtest builds its own engine; the previous top-level engine
	// was a dead store, and the loop variable no longer shadows the slice.
	for _, tc := range cases {
		t.Run(tc.query, func(t *testing.T) {
			engine := promqltest.NewTestEngine(t, false, 0, 100)
			qry, err := engine.NewInstantQuery(context.Background(), storage, nil, tc.query, tc.t)
			require.NoError(t, err)
			defer qry.Close()

			res := qry.Exec(context.Background())
			require.NoError(t, res.Err)
			require.Equal(t, tc.expected, res.Value)
		})
	}
}
func TestAtModifier(t *testing.T) { func TestAtModifier(t *testing.T) {
engine := newTestEngine(t) engine := newTestEngine(t)
storage := promqltest.LoadedStorage(t, ` storage := promqltest.LoadedStorage(t, `
@ -3195,89 +3349,6 @@ func TestEngine_Close(t *testing.T) {
}) })
} }
// TestInstantQueryWithRangeVectorSelector checks instant queries whose
// top-level expression is a bare matrix selector, covering float samples,
// native-histogram samples, empty results, and stale markers.
func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
	engine := newTestEngine(t)

	baseT := timestamp.Time(0)
	storage := promqltest.LoadedStorage(t, `
		load 1m
			some_metric{env="1"} 0+1x4
			some_metric{env="2"} 0+2x4
			some_metric{env="3"} {{count:0}}+{{count:1}}x4
			some_metric_with_stale_marker 0 1 stale 3
	`)
	t.Cleanup(func() { require.NoError(t, storage.Close()) })

	testCases := map[string]struct {
		expr     string
		expected promql.Matrix
		ts       time.Time
	}{
		"matches series with points in range": {
			expr: "some_metric[2m]",
			ts:   baseT.Add(2 * time.Minute),
			expected: promql.Matrix{
				{
					Metric: labels.FromStrings("__name__", "some_metric", "env", "1"),
					Floats: []promql.FPoint{
						{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1},
						{T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 2},
					},
				},
				{
					Metric: labels.FromStrings("__name__", "some_metric", "env", "2"),
					Floats: []promql.FPoint{
						{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 2},
						{T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 4},
					},
				},
				{
					Metric: labels.FromStrings("__name__", "some_metric", "env", "3"),
					Histograms: []promql.HPoint{
						{T: timestamp.FromTime(baseT.Add(time.Minute)), H: &histogram.FloatHistogram{Count: 1, CounterResetHint: histogram.NotCounterReset}},
						{T: timestamp.FromTime(baseT.Add(2 * time.Minute)), H: &histogram.FloatHistogram{Count: 2, CounterResetHint: histogram.NotCounterReset}},
					},
				},
			},
		},
		"matches no series": {
			expr:     "some_nonexistent_metric[1m]",
			ts:       baseT,
			expected: promql.Matrix{},
		},
		"no samples in range": {
			expr:     "some_metric[1m]",
			ts:       baseT.Add(20 * time.Minute),
			expected: promql.Matrix{},
		},
		"metric with stale marker": {
			// The stale sample at 2m must be dropped from the result.
			expr: "some_metric_with_stale_marker[3m]",
			ts:   baseT.Add(3 * time.Minute),
			expected: promql.Matrix{
				{
					Metric: labels.FromStrings("__name__", "some_metric_with_stale_marker"),
					Floats: []promql.FPoint{
						{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1},
						{T: timestamp.FromTime(baseT.Add(3 * time.Minute)), F: 3},
					},
				},
			},
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			q, err := engine.NewInstantQuery(context.Background(), storage, nil, testCase.expr, testCase.ts)
			require.NoError(t, err)
			defer q.Close()

			res := q.Exec(context.Background())
			require.NoError(t, res.Err)
			testutil.RequireEqual(t, testCase.expected, res.Value)
		})
	}
}
func TestQueryLookbackDelta(t *testing.T) { func TestQueryLookbackDelta(t *testing.T) {
var ( var (
load = `load 5m load = `load 5m

View File

@ -65,13 +65,127 @@ func funcTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (
}}, nil }}, nil
} }
// pickOrInterpolateLeft determines the value at the left boundary of the
// range. When smoothed is set and the sample at index first lies strictly
// before the range start, the value is linearly interpolated between that
// sample and its successor; otherwise the sample's own value is used.
func pickOrInterpolateLeft(floats []FPoint, first int, rangeStart int64, smoothed, isCounter bool) float64 {
	p := floats[first]
	if !smoothed || p.T >= rangeStart {
		return p.F
	}
	return interpolate(p, floats[first+1], rangeStart, isCounter, true)
}
// pickOrInterpolateRight determines the value at the right boundary of the
// range. When smoothed is set and the sample at index last lies strictly
// after the range end (and has a predecessor), the value is linearly
// interpolated between the two; otherwise the sample's own value is used.
func pickOrInterpolateRight(floats []FPoint, last int, rangeEnd int64, smoothed, isCounter bool) float64 {
	if !smoothed || last == 0 || floats[last].T <= rangeEnd {
		return floats[last].F
	}
	return interpolate(floats[last-1], floats[last], rangeEnd, isCounter, false)
}
// interpolate linearly interpolates between the two points p1 and p2 at
// timestamp t. When isCounter is true and a counter reset is detected
// (the second value is below the first):
//   - at the left edge, the first value is treated as 0;
//   - at the right edge, the first value is added onto the second.
func interpolate(p1, p2 FPoint, t int64, isCounter, leftEdge bool) float64 {
	v1, v2 := p1.F, p2.F
	if isCounter && v2 < v1 {
		// Counter reset between the two points.
		if leftEdge {
			v1 = 0
		} else {
			v2 += v1
		}
	}
	return v1 + (v2-v1)*float64(t-p1.T)/float64(p2.T-p1.T)
}
// correctForCounterResets sums the value lost at each counter reset across
// the sequence left, points..., right. It is only used by the extendedRate
// path (smoothed or anchored rates).
func correctForCounterResets(left, right float64, points []FPoint) float64 {
	var total float64
	last := left
	for _, pt := range points {
		if pt.F < last {
			// A drop means the counter reset; re-add what was lost.
			total += last
		}
		last = pt.F
	}
	if right < last {
		total += last
	}
	return total
}
// extendedRate is a utility function for anchored/smoothed rate/increase/delta.
// It calculates the rate (allowing for counter resets if isCounter is true),
// anchoring or interpolating the boundary samples as needed, and returns
// the result as either per-second (if isRate is true) or overall.
func extendedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) {
	var (
		ms         = args[0].(*parser.MatrixSelector)
		vs         = ms.VectorSelector.(*parser.VectorSelector)
		samples    = vals[0]
		f          = samples.Floats
		rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
		rangeEnd   = enh.Ts - durationMilliseconds(vs.Offset)
		annos      annotations.Annotations
		smoothed   = vs.Smoothed
	)
	// Guard against an empty float series (e.g. histogram-only input);
	// the index arithmetic below would otherwise panic.
	if len(f) == 0 {
		return enh.Out, annos
	}
	lastSampleIndex := len(f) - 1
	// Index of the last sample at or before the range start, or 0 if none.
	firstSampleIndex := max(0, sort.Search(lastSampleIndex, func(i int) bool { return f[i].T > rangeStart })-1)
	if smoothed {
		// For smoothing, also consider the first sample at or after the range end.
		lastSampleIndex = sort.Search(lastSampleIndex, func(i int) bool { return f[i].T >= rangeEnd })
	}
	// No sample after the range start: nothing to compute.
	if f[lastSampleIndex].T <= rangeStart {
		return enh.Out, annos
	}
	left := pickOrInterpolateLeft(f, firstSampleIndex, rangeStart, smoothed, isCounter)
	right := pickOrInterpolateRight(f, lastSampleIndex, rangeEnd, smoothed, isCounter)
	resultFloat := right - left
	if isCounter {
		// We only need to consider samples exactly within the range
		// for counter resets correction, as pickOrInterpolateLeft and
		// pickOrInterpolateRight already handle the resets at boundaries.
		if f[firstSampleIndex].T <= rangeStart {
			firstSampleIndex++
		}
		if f[lastSampleIndex].T >= rangeEnd {
			lastSampleIndex--
		}
		resultFloat += correctForCounterResets(left, right, f[firstSampleIndex:lastSampleIndex+1])
	}
	if isRate {
		resultFloat /= ms.Range.Seconds()
	}
	return append(enh.Out, Sample{F: resultFloat}), annos
}
// extrapolatedRate is a utility function for rate/increase/delta. // extrapolatedRate is a utility function for rate/increase/delta.
// It calculates the rate (allowing for counter resets if isCounter is true), // It calculates the rate (allowing for counter resets if isCounter is true),
// extrapolates if the first/last sample is close to the boundary, and returns // extrapolates if the first/last sample is close to the boundary, and returns
// the result as either per-second (if isRate is true) or overall. // the result as either per-second (if isRate is true) or overall.
//
// Note: If the vector selector is smoothed or anchored, it will use the
// extendedRate function instead.
func extrapolatedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) { func extrapolatedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) {
ms := args[0].(*parser.MatrixSelector) ms := args[0].(*parser.MatrixSelector)
vs := ms.VectorSelector.(*parser.VectorSelector) vs := ms.VectorSelector.(*parser.VectorSelector)
if vs.Anchored || vs.Smoothed {
return extendedRate(vals, args, enh, isCounter, isRate)
}
var ( var (
samples = vals[0] samples = vals[0]
rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
@ -1548,8 +1662,21 @@ func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expression
return enh.Out, annos return enh.Out, annos
} }
// pickFirstSampleIndex returns the index of the last sample before
// or at the range start, or 0 if none exist before the range start.
// If the vector selector is not anchored, it always returns 0.
func pickFirstSampleIndex(floats []FPoint, args parser.Expressions, enh *EvalNodeHelper) int {
	ms := args[0].(*parser.MatrixSelector)
	vs := ms.VectorSelector.(*parser.VectorSelector)
	if !vs.Anchored {
		return 0
	}
	rangeStart := enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
	idx := sort.Search(len(floats)-1, func(i int) bool { return floats[i].T > rangeStart }) - 1
	if idx < 0 {
		return 0
	}
	return idx
}
// === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === // === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcResets(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
floats := matrixVal[0].Floats floats := matrixVal[0].Floats
histograms := matrixVal[0].Histograms histograms := matrixVal[0].Histograms
resets := 0 resets := 0
@ -1558,7 +1685,8 @@ func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNod
} }
var prevSample, curSample Sample var prevSample, curSample Sample
for iFloat, iHistogram := 0, 0; iFloat < len(floats) || iHistogram < len(histograms); { firstSampleIndex := pickFirstSampleIndex(floats, args, enh)
for iFloat, iHistogram := firstSampleIndex, 0; iFloat < len(floats) || iHistogram < len(histograms); {
switch { switch {
// Process a float sample if no histogram sample remains or its timestamp is earlier. // Process a float sample if no histogram sample remains or its timestamp is earlier.
// Process a histogram sample if no float sample remains or its timestamp is earlier. // Process a histogram sample if no float sample remains or its timestamp is earlier.
@ -1571,7 +1699,7 @@ func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNod
iHistogram++ iHistogram++
} }
// Skip the comparison for the first sample, just initialize prevSample. // Skip the comparison for the first sample, just initialize prevSample.
if iFloat+iHistogram == 1 { if iFloat+iHistogram == 1+firstSampleIndex {
prevSample = curSample prevSample = curSample
continue continue
} }
@ -1594,7 +1722,7 @@ func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNod
} }
// === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === // === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcChanges(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcChanges(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
floats := matrixVal[0].Floats floats := matrixVal[0].Floats
histograms := matrixVal[0].Histograms histograms := matrixVal[0].Histograms
changes := 0 changes := 0
@ -1603,7 +1731,8 @@ func funcChanges(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNo
} }
var prevSample, curSample Sample var prevSample, curSample Sample
for iFloat, iHistogram := 0, 0; iFloat < len(floats) || iHistogram < len(histograms); { firstSampleIndex := pickFirstSampleIndex(floats, args, enh)
for iFloat, iHistogram := firstSampleIndex, 0; iFloat < len(floats) || iHistogram < len(histograms); {
switch { switch {
// Process a float sample if no histogram sample remains or its timestamp is earlier. // Process a float sample if no histogram sample remains or its timestamp is earlier.
// Process a histogram sample if no float sample remains or its timestamp is earlier. // Process a histogram sample if no float sample remains or its timestamp is earlier.
@ -1616,7 +1745,7 @@ func funcChanges(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNo
iHistogram++ iHistogram++
} }
// Skip the comparison for the first sample, just initialize prevSample. // Skip the comparison for the first sample, just initialize prevSample.
if iFloat+iHistogram == 1 { if iFloat+iHistogram == 1+firstSampleIndex {
prevSample = curSample prevSample = curSample
continue continue
} }
@ -1920,6 +2049,26 @@ var AtModifierUnsafeFunctions = map[string]struct{}{
"timestamp": {}, "timestamp": {},
} }
// AnchoredSafeFunctions are the functions that can be used with the anchored
// modifier. The anchored modifier returns matrices with samples outside of
// the boundaries, so not every function can be used with it.
var AnchoredSafeFunctions = map[string]struct{}{
	"resets":   {},
	"changes":  {},
	"rate":     {},
	"increase": {},
	"delta":    {},
}
// SmoothedSafeFunctions are the functions that can be used with the smoothed
// modifier. The smoothed modifier returns matrices with samples outside of
// the boundaries, so not every function can be used with it.
var SmoothedSafeFunctions = map[string]struct{}{
	"rate":     {},
	"increase": {},
	"delta":    {},
}
type vectorByValueHeap Vector type vectorByValueHeap Vector
func (s vectorByValueHeap) Len() int { func (s vectorByValueHeap) Len() int {

View File

@ -79,3 +79,24 @@ func TestKahanSumInc(t *testing.T) {
}) })
} }
} }
// TestInterpolate covers plain linear interpolation as well as the
// counter-reset handling at the right edge.
func TestInterpolate(t *testing.T) {
	cases := []struct {
		p1, p2    FPoint
		ts        int64
		isCounter bool
		want      float64
	}{
		{FPoint{T: 1, F: 100}, FPoint{T: 2, F: 200}, 1, false, 100},
		{FPoint{T: 0, F: 100}, FPoint{T: 2, F: 200}, 1, false, 150},
		{FPoint{T: 0, F: 200}, FPoint{T: 2, F: 100}, 1, false, 150},
		{FPoint{T: 0, F: 200}, FPoint{T: 2, F: 0}, 1, true, 200},
		{FPoint{T: 0, F: 200}, FPoint{T: 2, F: 100}, 1, true, 250},
		{FPoint{T: 0, F: 500}, FPoint{T: 2, F: 100}, 1, true, 550},
		{FPoint{T: 0, F: 500}, FPoint{T: 10, F: 0}, 1, true, 500},
	}
	for _, c := range cases {
		require.Equal(t, c.want, interpolate(c.p1, c.p2, c.ts, c.isCounter, false))
	}
}

View File

@ -226,6 +226,11 @@ type VectorSelector struct {
// This is the case when VectorSelector is used to represent the info function's second argument. // This is the case when VectorSelector is used to represent the info function's second argument.
BypassEmptyMatcherCheck bool BypassEmptyMatcherCheck bool
// Anchored is true when the VectorSelector is anchored.
Anchored bool
// Smoothed is true when the VectorSelector is smoothed.
Smoothed bool
PosRange posrange.PositionRange PosRange posrange.PositionRange
} }

View File

@ -141,6 +141,8 @@ GROUP_LEFT
GROUP_RIGHT GROUP_RIGHT
IGNORING IGNORING
OFFSET OFFSET
SMOOTHED
ANCHORED
ON ON
WITHOUT WITHOUT
%token keywordsEnd %token keywordsEnd
@ -187,7 +189,7 @@ START_METRIC_SELECTOR
%type <int> int %type <int> int
%type <uint> uint %type <uint> uint
%type <float> number series_value signed_number signed_or_unsigned_number %type <float> number series_value signed_number signed_or_unsigned_number
%type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector duration_expr paren_duration_expr positive_duration_expr offset_duration_expr %type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr anchored_expr smoothed_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector duration_expr paren_duration_expr positive_duration_expr offset_duration_expr
%start start %start start
@ -230,6 +232,8 @@ expr :
| matrix_selector | matrix_selector
| number_duration_literal | number_duration_literal
| offset_expr | offset_expr
| anchored_expr
| smoothed_expr
| paren_expr | paren_expr
| string_literal | string_literal
| subquery_expr | subquery_expr
@ -464,6 +468,20 @@ offset_expr: expr OFFSET offset_duration_expr
{ yylex.(*parser).unexpected("offset", "number, duration, or step()"); $$ = $1 } { yylex.(*parser).unexpected("offset", "number, duration, or step()"); $$ = $1 }
; ;
/*
* Anchored and smoothed modifiers
*/
anchored_expr: expr ANCHORED
{
yylex.(*parser).setAnchored($1)
}
smoothed_expr: expr SMOOTHED
{
yylex.(*parser).setSmoothed($1)
}
/* /*
* @ modifiers. * @ modifiers.
*/ */

File diff suppressed because it is too large Load Diff

View File

@ -129,6 +129,8 @@ var key = map[string]ItemType{
// Keywords. // Keywords.
"offset": OFFSET, "offset": OFFSET,
"smoothed": SMOOTHED,
"anchored": ANCHORED,
"by": BY, "by": BY,
"without": WITHOUT, "without": WITHOUT,
"on": ON, "on": ON,

View File

@ -42,6 +42,9 @@ var parserPool = sync.Pool{
// ExperimentalDurationExpr is a flag to enable experimental duration expression parsing. // ExperimentalDurationExpr is a flag to enable experimental duration expression parsing.
var ExperimentalDurationExpr bool var ExperimentalDurationExpr bool
// EnableExtendedRangeSelectors is a flag to enable experimental extended range selectors.
var EnableExtendedRangeSelectors bool
type Parser interface { type Parser interface {
ParseExpr() (Expr, error) ParseExpr() (Expr, error)
Close() Close()
@ -1021,6 +1024,52 @@ func (p *parser) addOffsetExpr(e Node, expr *DurationExpr) {
*endPosp = p.lastClosing *endPosp = p.lastClosing
} }
// setAnchored marks the vector selector underlying e as anchored. It records
// a parse error when the feature flag is off, when the node kind does not
// support the modifier, or when smoothed was already applied.
func (p *parser) setAnchored(e Node) {
	if !EnableExtendedRangeSelectors {
		p.addParseErrf(e.PositionRange(), "anchored modifier is experimental and not enabled")
		return
	}
	var vs *VectorSelector
	switch s := e.(type) {
	case *VectorSelector:
		vs = s
	case *MatrixSelector:
		vs = s.VectorSelector.(*VectorSelector)
	case *SubqueryExpr:
		p.addParseErrf(e.PositionRange(), "anchored modifier is not supported for subqueries")
		return
	default:
		p.addParseErrf(e.PositionRange(), "anchored modifier not implemented")
		return
	}
	vs.Anchored = true
	if vs.Smoothed {
		p.addParseErrf(e.PositionRange(), "anchored and smoothed modifiers cannot be used together")
	}
}
// setSmoothed marks the vector selector underlying e as smoothed. It records
// a parse error when the feature flag is off, when the node kind does not
// support the modifier, or when anchored was already applied.
func (p *parser) setSmoothed(e Node) {
	if !EnableExtendedRangeSelectors {
		p.addParseErrf(e.PositionRange(), "smoothed modifier is experimental and not enabled")
		return
	}
	var vs *VectorSelector
	switch s := e.(type) {
	case *VectorSelector:
		vs = s
	case *MatrixSelector:
		vs = s.VectorSelector.(*VectorSelector)
	case *SubqueryExpr:
		p.addParseErrf(e.PositionRange(), "smoothed modifier is not supported for subqueries")
		return
	default:
		p.addParseErrf(e.PositionRange(), "smoothed modifier not implemented")
		return
	}
	vs.Smoothed = true
	if vs.Anchored {
		p.addParseErrf(e.PositionRange(), "anchored and smoothed modifiers cannot be used together")
	}
}
// setTimestamp is used to set the timestamp from the @ modifier in the generated parser. // setTimestamp is used to set the timestamp from the @ modifier in the generated parser.
func (p *parser) setTimestamp(e Node, ts float64) { func (p *parser) setTimestamp(e Node, ts float64) {
if math.IsInf(ts, -1) || math.IsInf(ts, 1) || math.IsNaN(ts) || if math.IsInf(ts, -1) || math.IsInf(ts, 1) || math.IsNaN(ts) ||

View File

@ -263,11 +263,18 @@ func (node *MatrixSelector) String() string {
vecSelector.Timestamp = nil vecSelector.Timestamp = nil
vecSelector.StartOrEnd = 0 vecSelector.StartOrEnd = 0
extendedAttribute := ""
switch {
case vecSelector.Anchored:
extendedAttribute = " anchored"
case vecSelector.Smoothed:
extendedAttribute = " smoothed"
}
rangeStr := model.Duration(node.Range).String() rangeStr := model.Duration(node.Range).String()
if node.RangeExpr != nil { if node.RangeExpr != nil {
rangeStr = node.RangeExpr.String() rangeStr = node.RangeExpr.String()
} }
str := fmt.Sprintf("%s[%s]%s%s", vecSelector.String(), rangeStr, at, offset) str := fmt.Sprintf("%s[%s]%s%s%s", vecSelector.String(), rangeStr, extendedAttribute, at, offset)
vecSelector.OriginalOffset, vecSelector.OriginalOffsetExpr, vecSelector.Timestamp, vecSelector.StartOrEnd = offsetVal, offsetExprVal, atVal, preproc vecSelector.OriginalOffset, vecSelector.OriginalOffsetExpr, vecSelector.Timestamp, vecSelector.StartOrEnd = offsetVal, offsetExprVal, atVal, preproc
@ -380,6 +387,12 @@ func (node *VectorSelector) String() string {
b.WriteString(" @ end()") b.WriteString(" @ end()")
} }
switch { switch {
case node.Anchored:
b.WriteString(" anchored")
case node.Smoothed:
b.WriteString(" smoothed")
}
switch {
case node.OriginalOffsetExpr != nil: case node.OriginalOffsetExpr != nil:
b.WriteString(" offset ") b.WriteString(" offset ")
node.OriginalOffsetExpr.writeTo(b) node.OriginalOffsetExpr.writeTo(b)

View File

@ -48,6 +48,11 @@ func TestConcurrentRangeQueries(t *testing.T) {
} }
// Enable experimental functions testing // Enable experimental functions testing
parser.EnableExperimentalFunctions = true parser.EnableExperimentalFunctions = true
parser.EnableExtendedRangeSelectors = true
t.Cleanup(func() {
parser.EnableExperimentalFunctions = false
parser.EnableExtendedRangeSelectors = false
})
engine := promqltest.NewTestEngineWithOpts(t, opts) engine := promqltest.NewTestEngineWithOpts(t, opts)
const interval = 10000 // 10s interval. const interval = 10000 // 10s interval.

View File

@ -106,8 +106,44 @@ eval range from <start> to <end> step <step> <query>
* `<start>` and `<end>` specify the time range of the range query, and use the same syntax as `<time>` * `<start>` and `<end>` specify the time range of the range query, and use the same syntax as `<time>`
* `<step>` is the step of the range query, and uses the same syntax as `<time>` (eg. `30s`) * `<step>` is the step of the range query, and uses the same syntax as `<time>` (eg. `30s`)
* `<expect>`(optional) specifies expected annotations, errors, or result ordering. * `<expect>`(optional) specifies expected annotations, errors, or result ordering.
* `<expect range vector>` (optional) specifies the expected range vector timestamps for an instant query
* `<expect string> "<string>"` (optional) for matching a string literal
* `<series>` and `<points>` specify the expected values, and follow the same syntax as for `load` above * `<series>` and `<points>` specify the expected values, and follow the same syntax as for `load` above
### `expect string`
This can be used to specify that a string literal is the expected result.
Note that this is only supported on instant queries.
For example:
```
eval instant at 50m ("Foo")
expect string "Foo"
```
The expected string value must be within quotes. Double or back quotes are supported.
### `expect range vector`
This can be used to specify the expected timestamps on a range vector resulting from an instant query.
```
expect range vector <start> to <end> step <step>
```
For example:
```
load 10s
some_metric{env="a"} 1+1x5
some_metric{env="b"} 2+2x5
eval instant at 1m some_metric[1m]
expect range vector from 10s to 1m step 10s
some_metric{env="a"} 2 3 4 5 6
some_metric{env="b"} 4 6 8 10 12
```
### `expect` Syntax ### `expect` Syntax
``` ```

View File

@ -53,11 +53,14 @@ var (
patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn|info))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn|info))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`)
patExpect = regexp.MustCompile(`^expect\s+(ordered|fail|warn|no_warn|info|no_info)(?:\s+(regex|msg):(.+))?$`) patExpect = regexp.MustCompile(`^expect\s+(ordered|fail|warn|no_warn|info|no_info)(?:\s+(regex|msg):(.+))?$`)
patMatchAny = regexp.MustCompile(`^.*$`) patMatchAny = regexp.MustCompile(`^.*$`)
patExpectRange = regexp.MustCompile(`^` + rangeVectorPrefix + `\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+)$`)
) )
const ( const (
defaultEpsilon = 0.000001 // Relative error allowed for sample values. defaultEpsilon = 0.000001 // Relative error allowed for sample values.
DefaultMaxSamplesPerQuery = 10000 DefaultMaxSamplesPerQuery = 10000
rangeVectorPrefix = "expect range vector"
expectStringPrefix = "expect string"
) )
type TBRun interface { type TBRun interface {
@ -120,9 +123,11 @@ func RunBuiltinTestsWithStorage(t TBRun, engine promql.QueryEngine, newStorage f
t.Cleanup(func() { t.Cleanup(func() {
parser.EnableExperimentalFunctions = false parser.EnableExperimentalFunctions = false
parser.ExperimentalDurationExpr = false parser.ExperimentalDurationExpr = false
parser.EnableExtendedRangeSelectors = false
}) })
parser.EnableExperimentalFunctions = true parser.EnableExperimentalFunctions = true
parser.ExperimentalDurationExpr = true parser.ExperimentalDurationExpr = true
parser.EnableExtendedRangeSelectors = true
files, err := fs.Glob(testsFs, "*/*.test") files, err := fs.Glob(testsFs, "*/*.test")
require.NoError(t, err) require.NoError(t, err)
@ -314,7 +319,58 @@ func validateExpectedCmds(cmd *evalCmd) error {
return nil return nil
} }
func (*test) parseEval(lines []string, i int) (int, *evalCmd, error) { // Given an expected range vector definition, parse the line and return the start & end times and the step duration.
// ie parse a line such as "expect range vector from 10s to 1m step 10s".
// The from and to are parsed as durations and their values added to epoch(0) to form a time.Time.
// The step is parsed as a duration and returned as a time.Duration.
func (t *test) parseExpectRangeVector(line string) (*time.Time, *time.Time, *time.Duration, error) {
parts := patExpectRange.FindStringSubmatch(line)
if len(parts) != 4 {
return nil, nil, nil, fmt.Errorf("invalid range vector definition %q", line)
}
from := parts[1]
to := parts[2]
step := parts[3]
parsedFrom, parsedTo, parsedStep, err := t.parseDurations(from, to, step)
if err != nil {
return nil, nil, nil, err
}
start := testStartTime.Add(time.Duration(*parsedFrom))
end := testStartTime.Add(time.Duration(*parsedTo))
stepDuration := time.Duration(*parsedStep)
return &start, &end, &stepDuration, nil
}
// parseDurations parses the from, to and step strings into model.Durations,
// and rejects definitions whose end precedes their start.
func (*test) parseDurations(from, to, step string) (*model.Duration, *model.Duration, *model.Duration, error) {
	fromDur, err := model.ParseDuration(from)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("invalid start timestamp definition %q: %w", from, err)
	}
	toDur, err := model.ParseDuration(to)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("invalid end timestamp definition %q: %w", to, err)
	}
	if toDur < fromDur {
		return nil, nil, nil, fmt.Errorf("invalid test definition, end timestamp (%s) is before start timestamp (%s)", to, from)
	}
	stepDur, err := model.ParseDuration(step)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("invalid step definition %q: %w", step, err)
	}
	return &fromDur, &toDur, &stepDur, nil
}
func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
instantParts := patEvalInstant.FindStringSubmatch(lines[i]) instantParts := patEvalInstant.FindStringSubmatch(lines[i])
rangeParts := patEvalRange.FindStringSubmatch(lines[i]) rangeParts := patEvalRange.FindStringSubmatch(lines[i])
@ -355,10 +411,11 @@ func (*test) parseEval(lines []string, i int) (int, *evalCmd, error) {
} }
var cmd *evalCmd var cmd *evalCmd
var offset model.Duration
if isInstant { if isInstant {
at := instantParts[2] at := instantParts[2]
offset, err := model.ParseDuration(at) offset, err = model.ParseDuration(at)
if err != nil { if err != nil {
return i, nil, formatErr("invalid timestamp definition %q: %s", at, err) return i, nil, formatErr("invalid timestamp definition %q: %s", at, err)
} }
@ -369,26 +426,12 @@ func (*test) parseEval(lines []string, i int) (int, *evalCmd, error) {
to := rangeParts[3] to := rangeParts[3]
step := rangeParts[4] step := rangeParts[4]
parsedFrom, err := model.ParseDuration(from) parsedFrom, parsedTo, parsedStep, err := t.parseDurations(from, to, step)
if err != nil { if err != nil {
return i, nil, formatErr("invalid start timestamp definition %q: %s", from, err) return i, nil, formatErr(err.Error())
} }
parsedTo, err := model.ParseDuration(to) cmd = newRangeEvalCmd(expr, testStartTime.Add(time.Duration(*parsedFrom)), testStartTime.Add(time.Duration(*parsedTo)), time.Duration(*parsedStep), i+1)
if err != nil {
return i, nil, formatErr("invalid end timestamp definition %q: %s", to, err)
}
if parsedTo < parsedFrom {
return i, nil, formatErr("invalid test definition, end timestamp (%s) is before start timestamp (%s)", to, from)
}
parsedStep, err := model.ParseDuration(step)
if err != nil {
return i, nil, formatErr("invalid step definition %q: %s", step, err)
}
cmd = newRangeEvalCmd(expr, testStartTime.Add(time.Duration(parsedFrom)), testStartTime.Add(time.Duration(parsedTo)), time.Duration(parsedStep), i+1)
} }
switch mod { switch mod {
@ -404,6 +447,8 @@ func (*test) parseEval(lines []string, i int) (int, *evalCmd, error) {
cmd.info = true cmd.info = true
} }
var expectRangeVector bool
for j := 1; i+1 < len(lines); j++ { for j := 1; i+1 < len(lines); j++ {
i++ i++
defLine := lines[i] defLine := lines[i]
@ -426,6 +471,32 @@ func (*test) parseEval(lines []string, i int) (int, *evalCmd, error) {
break break
} }
if strings.HasPrefix(defLine, rangeVectorPrefix) {
start, end, step, err := t.parseExpectRangeVector(defLine)
if err != nil {
return i, nil, formatErr("%w", err)
}
expectRangeVector = true
cmd.start = *start
cmd.end = *end
cmd.step = *step
cmd.eval = *end
cmd.excludeFromRangeQuery = true
continue
}
if strings.HasPrefix(defLine, expectStringPrefix) {
expectString, err := parseAsStringLiteral(defLine)
if err != nil {
return i, nil, formatErr("%w", err)
}
cmd.expectedString = expectString
cmd.excludeFromRangeQuery = true
continue
}
// This would still allow a metric named 'expect' if it is written as 'expect{}'. // This would still allow a metric named 'expect' if it is written as 'expect{}'.
if strings.Split(defLine, " ")[0] == "expect" { if strings.Split(defLine, " ")[0] == "expect" {
annoType, expectedAnno, err := parseExpect(defLine) annoType, expectedAnno, err := parseExpect(defLine)
@ -450,15 +521,35 @@ func (*test) parseEval(lines []string, i int) (int, *evalCmd, error) {
return i, nil, err return i, nil, err
} }
// Currently, we are not expecting any matrices. // Only allow a range vector for an instant query where we have defined the expected range vector timestamps.
if len(vals) > 1 && isInstant { if len(vals) > 1 && isInstant && !expectRangeVector {
return i, nil, formatErr("expecting multiple values in instant evaluation not allowed") return i, nil, formatErr("expecting multiple values in instant evaluation not allowed. consider using 'expect range vector' directive to enable a range vector result for an instant query")
} }
cmd.expectMetric(j, metric, vals...) cmd.expectMetric(j, metric, vals...)
} }
return i, cmd, nil return i, cmd, nil
} }
// parseAsStringLiteral returns the expected string from an expect string expression.
// It is valid for the line to match the expect string prefix exactly, and an empty string is returned.
func parseAsStringLiteral(line string) (string, error) {
if line == expectStringPrefix {
return "", errors.New("expected string literal not valid - a quoted string literal is required")
}
str := strings.TrimPrefix(line, expectStringPrefix+" ")
if len(str) == 0 {
return "", errors.New("expected string literal not valid - a quoted string literal is required")
}
str, err := strconv.Unquote(str)
if err != nil {
return "", errors.New("expected string literal not valid - check that the string is correctly quoted")
}
return str, nil
}
// getLines returns trimmed lines after removing the comments. // getLines returns trimmed lines after removing the comments.
func getLines(input string) []string { func getLines(input string) []string {
lines := strings.Split(input, "\n") lines := strings.Split(input, "\n")
@ -692,6 +783,7 @@ type evalCmd struct {
end time.Time end time.Time
step time.Duration step time.Duration
line int line int
eval time.Time
isRange bool // if false, instant query isRange bool // if false, instant query
fail, warn, ordered, info bool fail, warn, ordered, info bool
@ -703,6 +795,12 @@ type evalCmd struct {
metrics map[uint64]labels.Labels metrics map[uint64]labels.Labels
expectScalar bool expectScalar bool
expected map[uint64]entry expected map[uint64]entry
// we expect a string literal - is set instead of expected
expectedString string
// if true and this is an instant query then we will not test this in a range query scenario
excludeFromRangeQuery bool
} }
func (ev *evalCmd) isOrdered() bool { func (ev *evalCmd) isOrdered() bool {
@ -772,6 +870,7 @@ func newInstantEvalCmd(expr string, start time.Time, line int) *evalCmd {
return &evalCmd{ return &evalCmd{
expr: expr, expr: expr,
start: start, start: start,
eval: start,
line: line, line: line,
metrics: map[uint64]labels.Labels{}, metrics: map[uint64]labels.Labels{},
@ -1016,7 +1115,10 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
if !almost.Equal(exp0.Value, val.V, defaultEpsilon) { if !almost.Equal(exp0.Value, val.V, defaultEpsilon) {
return fmt.Errorf("expected scalar %v but got %v", exp0.Value, val.V) return fmt.Errorf("expected scalar %v but got %v", exp0.Value, val.V)
} }
case promql.String:
if ev.expectedString != val.V {
return fmt.Errorf("expected string \"%v\" but got \"%v\"", ev.expectedString, val.V)
}
default: default:
panic(fmt.Errorf("promql.Test.compareResult: unexpected result type %T", result)) panic(fmt.Errorf("promql.Test.compareResult: unexpected result type %T", result))
} }
@ -1354,11 +1456,12 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
} }
func (t *test) execInstantEval(cmd *evalCmd, engine promql.QueryEngine) error { func (t *test) execInstantEval(cmd *evalCmd, engine promql.QueryEngine) error {
queries, err := atModifierTestCases(cmd.expr, cmd.start) queries, err := atModifierTestCases(cmd.expr, cmd.eval)
if err != nil { if err != nil {
return err return err
} }
queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...) queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.eval}}, queries...)
for _, iq := range queries { for _, iq := range queries {
if err := t.runInstantQuery(iq, cmd, engine); err != nil { if err := t.runInstantQuery(iq, cmd, engine); err != nil {
return err return err
@ -1395,6 +1498,12 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err) return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
} }
// this query has been explicitly excluded from range query testing // this query has been explicitly excluded from range query testing
// ie it could be that the query result is not an instant vector or scalar
if cmd.excludeFromRangeQuery {
return nil
}
// Check query returns same result in range mode, // Check query returns same result in range mode,
// by checking against the middle step. // by checking against the middle step.
q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute) q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)

View File

@ -948,6 +948,144 @@ eval instant at 0m http_requests
`, `,
expectedError: `error in eval http_requests (line 12): invalid expect lines, multiple expect fail lines are not allowed`, expectedError: `error in eval http_requests (line 12): invalid expect lines, multiple expect fail lines are not allowed`,
}, },
"instant query with string literal": {
input: `
eval instant at 50m ("Foo")
expect string "Foo"
`,
},
"instant query with string literal with leading space": {
input: `
eval instant at 50m (" Foo")
expect string " Foo"
`,
},
"instant query with string literal with trailing space": {
input: `
eval instant at 50m ("Foo ")
expect string "Foo "
`,
},
"instant query with string literal as space": {
input: `
eval instant at 50m (" ")
expect string " "
`,
},
"instant query with string literal with empty string": {
input: `
eval instant at 50m ("")
expect string
`,
expectedError: `error in eval ("") (line 3): expected string literal not valid - a quoted string literal is required`,
},
"instant query with string literal with correctly quoted empty string": {
input: `
eval instant at 50m ("")
expect string ""
`,
},
"instant query with string literal - not quoted": {
input: `
eval instant at 50m ("Foo")
expect string Foo
`,
expectedError: `error in eval ("Foo") (line 3): expected string literal not valid - check that the string is correctly quoted`,
},
"instant query with empty string literal": {
input: `
eval instant at 50m ("Foo")
expect string ""
`,
expectedError: `error in eval ("Foo") (line 2): expected string "" but got "Foo"`,
},
"instant query with error string literal": {
input: `
eval instant at 50m ("Foo")
expect string "Bar"
`,
expectedError: `error in eval ("Foo") (line 2): expected string "Bar" but got "Foo"`,
},
"instant query with range result - result does not have a series that is expected": {
input: `
load 10s
some_metric{env="a"} 1+1x5
eval instant at 1m some_metric[1m]
expect range vector from 10s to 1m step 10s
some_metric{env="a"} 2 3 4 5 6
some_metric{env="b"} 4 6 8 10 12
`,
expectedError: `error in eval some_metric[1m] (line 5): expected metric {__name__="some_metric", env="b"} not found`,
},
"instant query with range result - result has a series which is not expected": {
input: `
load 10s
some_metric{env="a"} 1+1x5
some_metric{env="b"} 1+1x5
eval instant at 1m some_metric[1m]
expect range vector from 10s to 1m step 10s
some_metric{env="a"} 2 3 4 5 6
`,
expectedError: `error in eval some_metric[1m] (line 6): unexpected metric {__name__="some_metric", env="b"} in result, has 5 float points [2 @[10000] 3 @[20000] 4 @[30000] 5 @[40000] 6 @[50000]] and 0 histogram points []`,
},
"instant query with range result - result has a value that is not expected": {
input: `
load 10s
some_metric{env="a"} 1+1x5
eval instant at 1m some_metric[1m]
expect range vector from 10s to 1m step 10s
some_metric{env="a"} 9 3 4 5 6
`,
expectedError: `error in eval some_metric[1m] (line 5): expected float value at index 0 (t=10000) for {__name__="some_metric", env="a"} to be 9, but got 2 (result has 5 float points [2 @[10000] 3 @[20000] 4 @[30000] 5 @[40000] 6 @[50000]] and 0 histogram points [])`,
},
"instant query with range result - invalid expect range vector directive": {
input: `
load 10s
some_metric{env="a"} 1+1x5
eval instant at 1m some_metric[1m]
expect range vector from 10s
some_metric{env="a"} 2 3 4 5 6
`,
expectedError: `error in eval some_metric[1m] (line 6): invalid range vector definition "expect range vector from 10s"`,
},
"instant query with range result - result matches expected value": {
input: `
load 1m
some_metric{env="1"} 0+1x4
some_metric{env="2"} 0+2x4
eval instant at 2m some_metric[2m]
expect range vector from 1m to 2m step 60s
some_metric{env="1"} 1 2
some_metric{env="2"} 2 4
`,
},
"instant query with range result - result has a is missing a sample": {
input: `
load 1m
some_metric_with_stale_marker 0 1 stale 3
eval instant at 3m some_metric_with_stale_marker[3m]
expect range vector from 1m to 3m step 60s
some_metric_with_stale_marker{} 1 2 3
`,
expectedError: `error in eval some_metric_with_stale_marker[3m] (line 5): expected 3 float points and 0 histogram points for {__name__="some_metric_with_stale_marker"}, but got 2 float points [1 @[60000] 3 @[180000]] and 0 histogram points []`,
},
"instant query with range result - result has a sample where none is expected": {
input: `
load 1m
some_metric_with_stale_marker 0 1 2 3
eval instant at 3m some_metric_with_stale_marker[3m]
expect range vector from 1m to 3m step 60s
some_metric_with_stale_marker{} 1 _ 3
`,
expectedError: `error in eval some_metric_with_stale_marker[3m] (line 5): expected 2 float points and 0 histogram points for {__name__="some_metric_with_stale_marker"}, but got 3 float points [1 @[60000] 2 @[120000] 3 @[180000]] and 0 histogram points []`,
},
} }
for name, testCase := range testCases { for name, testCase := range testCases {

View File

@ -0,0 +1,414 @@
# Reference from PROM-52: Complete dataset
load 15s
metric 1+1x4 9+1x4
eval instant at 5s increase(metric[1m])
eval instant at 20s increase(metric[1m])
{} 1.833333333
eval instant at 35s increase(metric[1m])
{} 2.833333333
eval instant at 50s increase(metric[1m])
{} 4
eval instant at 65s increase(metric[1m])
{} 4
eval instant at 80s increase(metric[1m])
{} 8
eval instant at 95s increase(metric[1m])
{} 8
eval instant at 110s increase(metric[1m])
{} 8
eval instant at 125s increase(metric[1m])
{} 4
eval instant at 5s increase(metric[1m] anchored)
{} 0
eval instant at 20s increase(metric[1m] anchored)
{} 1
eval instant at 35s increase(metric[1m] anchored)
{} 2
eval instant at 50s increase(metric[1m] anchored)
{} 3
eval instant at 65s increase(metric[1m] anchored)
{} 4
eval instant at 80s increase(metric[1m] anchored)
{} 7
eval instant at 95s increase(metric[1m] anchored)
{} 7
eval instant at 110s increase(metric[1m] anchored)
{} 7
eval instant at 125s increase(metric[1m] anchored)
{} 7
eval instant at 5s increase(metric[1m] smoothed)
{} 0.333333333
eval instant at 20s increase(metric[1m] smoothed)
{} 1.333333333
eval instant at 35s increase(metric[1m] smoothed)
{} 2.333333333
eval instant at 50s increase(metric[1m] smoothed)
{} 3.333333333
eval instant at 65s increase(metric[1m] smoothed)
{} 5
eval instant at 80s increase(metric[1m] smoothed)
{} 7
eval instant at 95s increase(metric[1m] smoothed)
{} 7
eval instant at 110s increase(metric[1m] smoothed)
{} 7
eval instant at 125s increase(metric[1m] smoothed)
{} 6
# Reference from PROM-52: Partial dataset
clear
load 15s
metric 1+1x2 _ _ 9+1x4
eval instant at 5s increase(metric[1m])
eval instant at 20s increase(metric[1m])
{} 1.833333333
eval instant at 35s increase(metric[1m])
{} 2.833333333
eval instant at 50s increase(metric[1m])
{} 3.166666666
eval instant at 65s increase(metric[1m])
{} 2.166666666
eval instant at 80s increase(metric[1m])
{} 8
eval instant at 95s increase(metric[1m])
{} 1.833333333
eval instant at 110s increase(metric[1m])
{} 2.833333333
eval instant at 125s increase(metric[1m])
{} 4
eval instant at 5s increase(metric[1m] anchored)
{} 0
eval instant at 20s increase(metric[1m] anchored)
{} 1
eval instant at 35s increase(metric[1m] anchored)
{} 2
eval instant at 50s increase(metric[1m] anchored)
{} 2
eval instant at 65s increase(metric[1m] anchored)
{} 2
eval instant at 80s increase(metric[1m] anchored)
{} 7
eval instant at 95s increase(metric[1m] anchored)
{} 7
eval instant at 110s increase(metric[1m] anchored)
{} 8
eval instant at 125s increase(metric[1m] anchored)
{} 9
eval instant at 5s increase(metric[1m] smoothed)
{} 0.333333333
eval instant at 20s increase(metric[1m] smoothed)
{} 1.333333333
eval instant at 35s increase(metric[1m] smoothed)
{} 2.666666666
eval instant at 50s increase(metric[1m] smoothed)
{} 4.666666666
eval instant at 65s increase(metric[1m] smoothed)
{} 6.333333333
eval instant at 80s increase(metric[1m] smoothed)
{} 7
eval instant at 95s increase(metric[1m] smoothed)
{} 6.666666666
eval instant at 110s increase(metric[1m] smoothed)
{} 5.666666666
eval instant at 125s increase(metric[1m] smoothed)
{} 4.666666666
# Test that interval is left-open.
clear
load 1m
metric 1 2 _ 4 5
eval instant at 2m increase(metric[1m] smoothed)
{} 1
eval instant at 2m increase(metric[1m] anchored)
# Basic test with counter resets
clear
load 1m
metric{id="1"} 1+1x4 1+1x4
metric{id="2"} 3 2+2x9
metric{id="3"} 5+3x2 3+3x6
eval instant at 1m30s increase(metric[1m])
eval instant at 1m30s increase(metric[1m] smoothed)
{id="1"} 1
{id="2"} 2
{id="3"} 3
eval instant at 1m30s increase(metric[1m] anchored)
{id="1"} 1
{id="2"} 2
{id="3"} 3
eval instant at 1m30s delta(metric[1m])
eval instant at 1m30s delta(metric[1m] anchored)
{id="1"} 1
{id="2"} -1
{id="3"} 3
eval instant at 3m0s delta(metric[1m] anchored)
{id="1"} 1
{id="2"} 2
{id="3"} -8
eval instant at 3m30s delta(metric[1m] anchored)
{id="1"} 1
{id="2"} 2
{id="3"} -8
eval instant at 6m increase(metric[5m])
{id="1"} 5
{id="2"} 10
{id="3"} 15
eval instant at 6m15s increase(metric[5m] smoothed)
{id="1"} 5
{id="2"} 10
{id="3"} 15
eval instant at 6m increase(metric[5m] smoothed)
{id="1"} 5
{id="2"} 10
{id="3"} 15
eval instant at 5m increase(metric[5m] anchored)
{id="1"} 5
{id="2"} 10
{id="3"} 15
eval instant at 15m increase(metric[5m] anchored)
clear
load 1m
metric{id="1"} 11 -1 100 0
metric{id="2"} 0 0 100 0 0 11 -1
eval instant at 5m30s delta(metric[5m] smoothed)
{id="1"} -5
{id="2"} 5
eval instant at 5m45s delta(metric[5m] smoothed)
{id="1"} -2
{id="2"} 2
clear
load 1m
metric{id="1"} 1+1x10
metric{id="2"} 1 1+1x10
metric{id="3"} 99-1x10
metric{id="4"} 99 99-1x10
eval instant at 5m changes(metric[5m])
{id="1"} 4
{id="2"} 4
{id="3"} 4
{id="4"} 4
eval instant at 5m30s changes(metric[5m])
{id="1"} 4
{id="2"} 4
{id="3"} 4
{id="4"} 4
eval instant at 5m0s changes(metric[5m] anchored)
{id="1"} 5
{id="2"} 4
{id="3"} 5
{id="4"} 4
eval instant at 6m changes(metric[5m] anchored)
{id="1"} 5
{id="2"} 5
{id="3"} 5
{id="4"} 5
eval instant at 5m30s changes(metric[5m] anchored)
{id="1"} 5
{id="2"} 4
{id="3"} 5
{id="4"} 4
eval instant at 5m30s resets(metric[5m])
{id="1"} 0
{id="2"} 0
{id="3"} 4
{id="4"} 4
eval instant at 5m30s resets(metric[5m] anchored)
{id="1"} 0
{id="2"} 0
{id="3"} 5
{id="4"} 4
clear
load 1m
metric{id="1"} 2 _ 1 _ _ _ _ _ 0
metric{id="2"} 99-1x10
eval instant at 2m changes(metric[1m])
{id="1"} 0
{id="2"} 0
eval instant at 3m changes(metric[1m])
{id="2"} 0
eval instant at 2m changes(metric[1m] anchored)
{id="1"} 1
{id="2"} 1
eval instant at 3m changes(metric[1m] anchored)
{id="1"} 1
{id="2"} 1
eval instant at 8m changes(metric[1m] anchored)
{id="1"} 0
{id="2"} 1
eval instant at 8m changes(metric[1m1ms] anchored)
{id="1"} 1
{id="2"} 2
eval instant at 2m resets(metric[1m])
{id="1"} 0
{id="2"} 0
eval instant at 3m resets(metric[1m])
{id="2"} 0
eval instant at 2m resets(metric[1m] anchored)
{id="1"} 1
{id="2"} 1
eval instant at 3m resets(metric[1m] anchored)
{id="1"} 1
{id="2"} 1
eval instant at 8m resets(metric[1m] anchored)
{id="1"} 0
{id="2"} 1
eval instant at 8m resets(metric[1m1ms] anchored)
{id="1"} 1
{id="2"} 2
clear
load 1m
metric 9 8 5 4
eval instant at 2m15s increase(metric[2m] smoothed)
{} 12
clear
eval instant at 1m deriv(foo[3m] smoothed)
expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with deriv
eval instant at 1m resets(foo[3m] smoothed)
expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with resets
eval instant at 1m changes(foo[3m] smoothed)
expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with changes
eval instant at 1m max_over_time(foo[3m] smoothed)
expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with max_over_time
eval instant at 1m predict_linear(foo[3m] smoothed, 4)
expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with predict_linear
eval instant at 1m deriv(foo[3m] anchored)
expect fail msg: anchored modifier can only be used with: changes, delta, increase, rate, resets - not with deriv
eval instant at 1m resets(foo[3m] anchored)
eval instant at 1m changes(foo[3m] anchored)
eval instant at 1m max_over_time(foo[3m] anchored)
expect fail msg: anchored modifier can only be used with: changes, delta, increase, rate, resets - not with max_over_time
eval instant at 1m predict_linear(foo[3m] anchored, 4)
expect fail msg: anchored modifier can only be used with: changes, delta, increase, rate, resets - not with predict_linear
clear
load 10s
metric 1+1x10
withreset 1+1x4 1+1x5
notregular 0 5 100 2 8
eval instant at 10s metric smoothed
metric 2
eval instant at 15s metric smoothed
metric 2.5
eval instant at 5s metric smoothed
metric 1.5
eval instant at 105s metric smoothed
metric 11
eval instant at 45s withreset smoothed
withreset 3
eval instant at 30s notregular smoothed
notregular 2

View File

@ -57,3 +57,18 @@ eval instant at 50m 0 / 0
eval instant at 50m 1 % 0 eval instant at 50m 1 % 0
NaN NaN
eval instant at 50m ("Foo")
expect string `Foo`
eval instant at 50m "Foo"
expect string "Foo"
eval instant at 50m " Foo "
expect string " Foo "
eval instant at 50m ("")
expect string ""
eval instant at 50m ""
expect string ""

View File

@ -1677,3 +1677,18 @@ eval instant at 1m histogram_count(histogram unless histogram_quantile(0.5, hist
eval instant at 1m histogram_quantile(0.5, histogram unless histogram_count(histogram) == 0) eval instant at 1m histogram_quantile(0.5, histogram unless histogram_count(histogram) == 0)
{} 3.1748021039363987 {} 3.1748021039363987
clear
# Regression test for:
# https://github.com/prometheus/prometheus/issues/14172
# https://github.com/prometheus/prometheus/issues/15177
load 1m
mixed_metric1 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}} 4 5 {{schema:0 sum:18 count:10 buckets:[3 4 3]}}
mixed_metric2 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}}
# The order of the float vs native histograms is preserved.
eval range from 0 to 8m step 1m mixed_metric1
mixed_metric1{} 1 2 3 {{count:4 sum:5 buckets:[1 2 1]}} {{count:6 sum:8 buckets:[1 4 1]}} 4 5 {{schema:0 sum:18 count:10 buckets:[3 4 3]}} {{schema:0 sum:18 count:10 buckets:[3 4 3]}}
eval range from 0 to 5m step 1m mixed_metric2
mixed_metric2 1 2 3 {{count:4 sum:5 buckets:[1 2 1]}} {{count:6 sum:8 buckets:[1 4 1]}} {{count:6 sum:8 buckets:[1 4 1]}}

View File

@ -71,3 +71,37 @@ eval range from 0 to 2m step 1m requests * 2
{job="1", __address__="bar"} 200 200 200 {job="1", __address__="bar"} 200 200 200
clear clear
load 10s
some_metric{env="a"} 1+1x5
some_metric{env="b"} 2+2x5
# Return a range vector - note the use of the expect range vector directive which defines expected range
eval instant at 1m some_metric[1m]
expect range vector from 10s to 1m step 10s
some_metric{env="a"} 2 3 4 5 6
some_metric{env="b"} 4 6 8 10 12
clear
load 1m
some_metric{env="1"} 0+1x4
some_metric{env="2"} 0+2x4
some_metric{env="3"} {{count:0}}+{{count:1}}x4
some_metric_with_stale_marker 0 1 stale 3
eval instant at 2m some_metric[2m]
expect range vector from 1m to 2m step 60s
some_metric{env="1"} 1 2
some_metric{env="2"} 2 4
some_metric{env="3"} {{count:1 counter_reset_hint:not_reset}} {{count:2 counter_reset_hint:not_reset}}
eval instant at 3m some_metric_with_stale_marker[3m]
expect range vector from 1m to 3m step 60s
some_metric_with_stale_marker{} 1 _ 3
eval instant at 1m some_nonexistent_metric[1m]
expect range vector from 10s to 1m step 10s
eval instant at 10m some_metric[1m]
expect range vector from 9m10s to 10m step 1m

View File

@ -414,12 +414,12 @@ type maxSchemaAppender struct {
func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if h != nil { if h != nil {
if histogram.IsExponentialSchema(h.Schema) && h.Schema > app.maxSchema { if histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > app.maxSchema {
h = h.ReduceResolution(app.maxSchema) h = h.ReduceResolution(app.maxSchema)
} }
} }
if fh != nil { if fh != nil {
if histogram.IsExponentialSchema(fh.Schema) && fh.Schema > app.maxSchema { if histogram.IsExponentialSchemaReserved(fh.Schema) && fh.Schema > app.maxSchema {
fh = fh.ReduceResolution(app.maxSchema) fh = fh.ReduceResolution(app.maxSchema)
} }
} }

View File

@ -34,8 +34,11 @@ jobs:
- name: Install snmp_exporter/generator dependencies - name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter' if: github.repository == 'prometheus/snmp_exporter'
- name: Get golangci-lint version
id: golangci-lint-version
run: echo "version=$(make print-golangci-lint-version)" >> $GITHUB_OUTPUT
- name: Lint - name: Lint
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
with: with:
args: --verbose args: --verbose
version: v2.2.1 version: ${{ steps.golangci-lint-version.outputs.version }}

View File

@ -388,6 +388,7 @@ type concreteSeriesIterator struct {
histogramsCur int histogramsCur int
curValType chunkenc.ValueType curValType chunkenc.ValueType
series *concreteSeries series *concreteSeries
err error
} }
func newConcreteSeriesIterator(series *concreteSeries) chunkenc.Iterator { func newConcreteSeriesIterator(series *concreteSeries) chunkenc.Iterator {
@ -404,10 +405,14 @@ func (c *concreteSeriesIterator) reset(series *concreteSeries) {
c.histogramsCur = -1 c.histogramsCur = -1
c.curValType = chunkenc.ValNone c.curValType = chunkenc.ValNone
c.series = series c.series = series
c.err = nil
} }
// Seek implements storage.SeriesIterator. // Seek implements storage.SeriesIterator.
func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
if c.err != nil {
return chunkenc.ValNone
}
if c.floatsCur == -1 { if c.floatsCur == -1 {
c.floatsCur = 0 c.floatsCur = 0
} }
@ -439,7 +444,7 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp { if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp {
c.curValType = chunkenc.ValFloat c.curValType = chunkenc.ValFloat
} else { } else {
c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur]) c.curValType = chunkenc.ValHistogram
} }
// When the timestamps do not overlap the cursor for the non-selected sample type has advanced too // When the timestamps do not overlap the cursor for the non-selected sample type has advanced too
// far; we decrement it back down here. // far; we decrement it back down here.
@ -453,11 +458,26 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
case c.floatsCur < len(c.series.floats): case c.floatsCur < len(c.series.floats):
c.curValType = chunkenc.ValFloat c.curValType = chunkenc.ValFloat
case c.histogramsCur < len(c.series.histograms): case c.histogramsCur < len(c.series.histograms):
c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur]) c.curValType = chunkenc.ValHistogram
}
if c.curValType == chunkenc.ValHistogram {
h := &c.series.histograms[c.histogramsCur]
c.curValType = getHistogramValType(h)
c.err = validateHistogramSchema(h)
}
if c.err != nil {
c.curValType = chunkenc.ValNone
} }
return c.curValType return c.curValType
} }
// validateHistogramSchema returns nil when the remote histogram carries a
// schema this code knows how to interpret, and an UnknownSchemaError
// otherwise.
func validateHistogramSchema(h *prompb.Histogram) error {
	if !histogram.IsKnownSchema(h.Schema) {
		return histogram.UnknownSchemaError(h.Schema)
	}
	return nil
}
func getHistogramValType(h *prompb.Histogram) chunkenc.ValueType { func getHistogramValType(h *prompb.Histogram) chunkenc.ValueType {
if h.IsFloatHistogram() { if h.IsFloatHistogram() {
return chunkenc.ValFloatHistogram return chunkenc.ValFloatHistogram
@ -480,14 +500,28 @@ func (c *concreteSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *hist
panic("iterator is not on an integer histogram sample") panic("iterator is not on an integer histogram sample")
} }
h := c.series.histograms[c.histogramsCur] h := c.series.histograms[c.histogramsCur]
return h.Timestamp, h.ToIntHistogram() mh := h.ToIntHistogram()
if mh.Schema > histogram.ExponentialSchemaMax && mh.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// sample is from a newer Prometheus version that supports higher
// resolution.
mh.ReduceResolution(histogram.ExponentialSchemaMax)
}
return h.Timestamp, mh
} }
// AtFloatHistogram implements chunkenc.Iterator. // AtFloatHistogram implements chunkenc.Iterator.
func (c *concreteSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { func (c *concreteSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
if c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram { if c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram {
fh := c.series.histograms[c.histogramsCur] fh := c.series.histograms[c.histogramsCur]
return fh.Timestamp, fh.ToFloatHistogram() // integer will be auto-converted. mfh := fh.ToFloatHistogram() // integer will be auto-converted.
if mfh.Schema > histogram.ExponentialSchemaMax && mfh.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// sample is from a newer Prometheus version that supports higher
// resolution.
mfh.ReduceResolution(histogram.ExponentialSchemaMax)
}
return fh.Timestamp, mfh
} }
panic("iterator is not on a histogram sample") panic("iterator is not on a histogram sample")
} }
@ -504,6 +538,9 @@ const noTS = int64(math.MaxInt64)
// Next implements chunkenc.Iterator. // Next implements chunkenc.Iterator.
func (c *concreteSeriesIterator) Next() chunkenc.ValueType { func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
if c.err != nil {
return chunkenc.ValNone
}
peekFloatTS := noTS peekFloatTS := noTS
if c.floatsCur+1 < len(c.series.floats) { if c.floatsCur+1 < len(c.series.floats) {
peekFloatTS = c.series.floats[c.floatsCur+1].Timestamp peekFloatTS = c.series.floats[c.floatsCur+1].Timestamp
@ -532,12 +569,21 @@ func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
c.histogramsCur++ c.histogramsCur++
c.curValType = chunkenc.ValFloat c.curValType = chunkenc.ValFloat
} }
if c.curValType == chunkenc.ValHistogram {
h := &c.series.histograms[c.histogramsCur]
c.curValType = getHistogramValType(h)
c.err = validateHistogramSchema(h)
}
if c.err != nil {
c.curValType = chunkenc.ValNone
}
return c.curValType return c.curValType
} }
// Err implements chunkenc.Iterator. // Err implements chunkenc.Iterator.
func (*concreteSeriesIterator) Err() error { func (c *concreteSeriesIterator) Err() error {
return nil return c.err
} }
// chunkedSeriesSet implements storage.SeriesSet. // chunkedSeriesSet implements storage.SeriesSet.

View File

@ -548,6 +548,79 @@ func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) {
require.Equal(t, chunkenc.ValNone, it.Seek(1)) require.Equal(t, chunkenc.ValNone, it.Seek(1))
} }
// TestConcreteSeriesIterator_InvalidHistogramSamples verifies that the
// concreteSeriesIterator stops (returns ValNone) and surfaces
// histogram.ErrHistogramsUnknownSchema via Err when it reaches a histogram
// sample whose schema is not a known schema.
func TestConcreteSeriesIterator_InvalidHistogramSamples(t *testing.T) {
	// Schemas -100 and 100 are expected to be rejected as unknown — the
	// assertions below rely on them producing ErrHistogramsUnknownSchema.
	for _, schema := range []int32{-100, 100} {
		t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
			h := prompb.FromIntHistogram(2, &testHistogram)
			h.Schema = schema
			fh := prompb.FromFloatHistogram(4, testHistogram.ToFloat(nil))
			fh.Schema = schema
			// Valid float samples at t=0 and t=3 interleaved with the invalid
			// histogram samples at t=2 (integer) and t=4 (float).
			series := &concreteSeries{
				labels: labels.FromStrings("foo", "bar"),
				floats: []prompb.Sample{
					{Value: 1, Timestamp: 0},
					{Value: 2, Timestamp: 3},
				},
				histograms: []prompb.Histogram{
					h,
					fh,
				},
			}
			// Next: the first (valid) float is returned, then the invalid
			// histogram at t=2 halts iteration with an error.
			it := series.Iterator(nil)
			require.Equal(t, chunkenc.ValFloat, it.Next())
			require.Equal(t, chunkenc.ValNone, it.Next())
			require.Error(t, it.Err())
			require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema)
			// Re-obtaining the iterator resets its error state; the same
			// sequence must reproduce the same failure.
			it = series.Iterator(it)
			require.Equal(t, chunkenc.ValFloat, it.Next())
			require.Equal(t, chunkenc.ValNone, it.Next())
			require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema)
			// Seek(1) lands on the invalid integer histogram at t=2.
			it = series.Iterator(it)
			require.Equal(t, chunkenc.ValNone, it.Seek(1))
			require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema)
			// Seek(3) finds the valid float; the following Next hits the
			// invalid float histogram at t=4.
			it = series.Iterator(it)
			require.Equal(t, chunkenc.ValFloat, it.Seek(3))
			require.Equal(t, chunkenc.ValNone, it.Next())
			require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema)
			// Seek(4) lands directly on the invalid float histogram.
			it = series.Iterator(it)
			require.Equal(t, chunkenc.ValNone, it.Seek(4))
			require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema)
		})
	}
}
func TestConcreteSeriesIterator_ReducesHighResolutionHistograms(t *testing.T) {
for _, schema := range []int32{9, 52} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
h := testHistogram.Copy()
h.Schema = schema
fh := h.ToFloat(nil)
series := &concreteSeries{
labels: labels.FromStrings("foo", "bar"),
histograms: []prompb.Histogram{
prompb.FromIntHistogram(1, h),
prompb.FromFloatHistogram(2, fh),
},
}
it := series.Iterator(nil)
require.Equal(t, chunkenc.ValHistogram, it.Next())
_, gotH := it.AtHistogram(nil)
require.Equal(t, histogram.ExponentialSchemaMax, gotH.Schema)
_, gotFH := it.AtFloatHistogram(nil)
require.Equal(t, histogram.ExponentialSchemaMax, gotFH.Schema)
require.Equal(t, chunkenc.ValFloatHistogram, it.Next())
_, gotFH = it.AtFloatHistogram(nil)
require.Equal(t, histogram.ExponentialSchemaMax, gotFH.Schema)
require.Equal(t, chunkenc.ValNone, it.Next())
require.NoError(t, it.Err())
})
}
}
func TestFromQueryResultWithDuplicates(t *testing.T) { func TestFromQueryResultWithDuplicates(t *testing.T) {
ts1 := prompb.TimeSeries{ ts1 := prompb.TimeSeries{
Labels: []prompb.Label{ Labels: []prompb.Label{

View File

@ -139,7 +139,7 @@ func (b *combinedAppender) appendFloatOrHistogram(ls labels.Labels, meta metadat
ref = 0 ref = 0
} }
updateRefs := !exists || series.ct != ct updateRefs := !exists || series.ct != ct
if updateRefs && ct != 0 && b.ingestCTZeroSample { if updateRefs && ct != 0 && ct < t && b.ingestCTZeroSample {
var newRef storage.SeriesRef var newRef storage.SeriesRef
if h != nil { if h != nil {
newRef, err = b.app.AppendHistogramCTZeroSample(ref, ls, t, ct, h, nil) newRef, err = b.app.AppendHistogramCTZeroSample(ref, ls, t, ct, h, nil)
@ -147,10 +147,14 @@ func (b *combinedAppender) appendFloatOrHistogram(ls labels.Labels, meta metadat
newRef, err = b.app.AppendCTZeroSample(ref, ls, t, ct) newRef, err = b.app.AppendCTZeroSample(ref, ls, t, ct)
} }
if err != nil { if err != nil {
if !errors.Is(err, storage.ErrOutOfOrderCT) { if !errors.Is(err, storage.ErrOutOfOrderCT) && !errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
// Even for the first sample OOO is a common scenario because // Even for the first sample OOO is a common scenario because
// we can't tell if a CT was already ingested in a previous request. // we can't tell if a CT was already ingested in a previous request.
// We ignore the error. // We ignore the error.
// ErrDuplicateSampleForTimestamp is also a common scenario because
// unknown start times in Opentelemetry are indicated by setting
// the start time to the same as the first sample time.
// https://opentelemetry.io/docs/specs/otel/metrics/data-model/#cumulative-streams-handling-unknown-start-time
b.logger.Warn("Error when appending CT from OTLP", "err", err, "series", ls.String(), "created_timestamp", ct, "timestamp", t, "sample_type", sampleType(h)) b.logger.Warn("Error when appending CT from OTLP", "err", err, "series", ls.String(), "created_timestamp", ct, "timestamp", t, "sample_type", sampleType(h))
} }
} else { } else {

View File

@ -14,6 +14,7 @@
package prometheusremotewrite package prometheusremotewrite
import ( import (
"bytes"
"context" "context"
"errors" "errors"
"math" "math"
@ -160,8 +161,10 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {
testCases := map[string]struct { testCases := map[string]struct {
appendFunc func(*testing.T, CombinedAppender) appendFunc func(*testing.T, CombinedAppender)
extraAppendFunc func(*testing.T, CombinedAppender)
expectedSamples []sample expectedSamples []sample
expectedExemplars []exemplar.QueryResult expectedExemplars []exemplar.QueryResult
expectedLogsForCT []string
}{ }{
"single float sample, zero CT": { "single float sample, zero CT": {
appendFunc: func(t *testing.T, app CombinedAppender) { appendFunc: func(t *testing.T, app CombinedAppender) {
@ -185,6 +188,10 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {
f: 42.0, f: 42.0,
}, },
}, },
expectedLogsForCT: []string{
"Error when appending CT from OTLP",
"out of bound",
},
}, },
"single float sample, normal CT": { "single float sample, normal CT": {
appendFunc: func(t *testing.T, app CombinedAppender) { appendFunc: func(t *testing.T, app CombinedAppender) {
@ -212,6 +219,24 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {
}, },
}, },
}, },
"two float samples in different messages, CT same time as first sample": {
appendFunc: func(t *testing.T, app CombinedAppender) {
require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), 42.0, nil))
},
extraAppendFunc: func(t *testing.T, app CombinedAppender) {
require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.Add(time.Second).UnixMilli(), 43.0, nil))
},
expectedSamples: []sample{
{
t: now.UnixMilli(),
f: 42.0,
},
{
t: now.Add(time.Second).UnixMilli(),
f: 43.0,
},
},
},
"single float sample, CT in the future of the sample": { "single float sample, CT in the future of the sample": {
appendFunc: func(t *testing.T, app CombinedAppender) { appendFunc: func(t *testing.T, app CombinedAppender) {
require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), 42.0, nil)) require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), 42.0, nil))
@ -245,6 +270,10 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {
h: tsdbutil.GenerateTestHistogram(42), h: tsdbutil.GenerateTestHistogram(42),
}, },
}, },
expectedLogsForCT: []string{
"Error when appending CT from OTLP",
"out of bound",
},
}, },
"single histogram sample, normal CT": { "single histogram sample, normal CT": {
appendFunc: func(t *testing.T, app CombinedAppender) { appendFunc: func(t *testing.T, app CombinedAppender) {
@ -273,6 +302,24 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {
}, },
}, },
}, },
"two histogram samples in different messages, CT same time as first sample": {
appendFunc: func(t *testing.T, app CombinedAppender) {
require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
},
extraAppendFunc: func(t *testing.T, app CombinedAppender) {
require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.Add(time.Second).UnixMilli(), tsdbutil.GenerateTestHistogram(43), nil))
},
expectedSamples: []sample{
{
t: now.UnixMilli(),
h: tsdbutil.GenerateTestHistogram(42),
},
{
t: now.Add(time.Second).UnixMilli(),
h: tsdbutil.GenerateTestHistogram(43),
},
},
},
"single histogram sample, CT in the future of the sample": { "single histogram sample, CT in the future of the sample": {
appendFunc: func(t *testing.T, app CombinedAppender) { appendFunc: func(t *testing.T, app CombinedAppender) {
require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil)) require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
@ -344,6 +391,11 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {
for name, tc := range testCases { for name, tc := range testCases {
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {
var expectedLogs []string
if ingestCTZeroSample {
expectedLogs = append(expectedLogs, tc.expectedLogsForCT...)
}
dir := t.TempDir() dir := t.TempDir()
opts := tsdb.DefaultOptions() opts := tsdb.DefaultOptions()
opts.EnableExemplarStorage = true opts.EnableExemplarStorage = true
@ -354,15 +406,32 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {
t.Cleanup(func() { db.Close() }) t.Cleanup(func() { db.Close() })
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
ctx := context.Background() ctx := context.Background()
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
cappMetrics := NewCombinedAppenderMetrics(reg)
app := db.Appender(ctx) app := db.Appender(ctx)
capp := NewCombinedAppender(app, promslog.NewNopLogger(), ingestCTZeroSample, NewCombinedAppenderMetrics(reg)) capp := NewCombinedAppender(app, logger, ingestCTZeroSample, cappMetrics)
tc.appendFunc(t, capp) tc.appendFunc(t, capp)
require.NoError(t, app.Commit()) require.NoError(t, app.Commit())
if tc.extraAppendFunc != nil {
app = db.Appender(ctx)
capp = NewCombinedAppender(app, logger, ingestCTZeroSample, cappMetrics)
tc.extraAppendFunc(t, capp)
require.NoError(t, app.Commit())
}
if len(expectedLogs) > 0 {
for _, expectedLog := range expectedLogs {
require.Contains(t, output.String(), expectedLog)
}
} else {
require.Empty(t, output.String(), "unexpected log output")
}
q, err := db.Querier(int64(math.MinInt64), int64(math.MaxInt64)) q, err := db.Querier(int64(math.MinInt64), int64(math.MaxInt64))
require.NoError(t, err) require.NoError(t, err)

View File

@ -86,16 +86,16 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont
func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, temporality pmetric.AggregationTemporality) (*histogram.Histogram, annotations.Annotations, error) { func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, temporality pmetric.AggregationTemporality) (*histogram.Histogram, annotations.Annotations, error) {
var annots annotations.Annotations var annots annotations.Annotations
scale := p.Scale() scale := p.Scale()
if scale < -4 { if scale < histogram.ExponentialSchemaMin {
return nil, annots, return nil, annots,
fmt.Errorf("cannot convert exponential to native histogram."+ fmt.Errorf("cannot convert exponential to native histogram."+
" Scale must be >= -4, was %d", scale) " Scale must be >= %d, was %d", histogram.ExponentialSchemaMin, scale)
} }
var scaleDown int32 var scaleDown int32
if scale > 8 { if scale > histogram.ExponentialSchemaMax {
scaleDown = scale - 8 scaleDown = scale - histogram.ExponentialSchemaMax
scale = 8 scale = histogram.ExponentialSchemaMax
} }
pSpans, pDeltas := convertBucketsLayout(p.Positive().BucketCounts().AsRaw(), p.Positive().Offset(), scaleDown, true) pSpans, pDeltas := convertBucketsLayout(p.Positive().BucketCounts().AsRaw(), p.Positive().Offset(), scaleDown, true)

View File

@ -117,6 +117,24 @@ func (*writeHandler) parseProtoMsg(contentType string) (config.RemoteWriteProtoM
return config.RemoteWriteProtoMsgV1, nil return config.RemoteWriteProtoMsgV1, nil
} }
// isHistogramValidationError checks if the error is a native histogram validation error.
func isHistogramValidationError(err error) bool {
// TODO: Consider adding single histogram error type instead of individual sentinel errors.
return errors.Is(err, histogram.ErrHistogramCountMismatch) ||
errors.Is(err, histogram.ErrHistogramCountNotBigEnough) ||
errors.Is(err, histogram.ErrHistogramNegativeBucketCount) ||
errors.Is(err, histogram.ErrHistogramSpanNegativeOffset) ||
errors.Is(err, histogram.ErrHistogramSpansBucketsMismatch) ||
errors.Is(err, histogram.ErrHistogramCustomBucketsMismatch) ||
errors.Is(err, histogram.ErrHistogramCustomBucketsInvalid) ||
errors.Is(err, histogram.ErrHistogramCustomBucketsInfinite) ||
errors.Is(err, histogram.ErrHistogramCustomBucketsZeroCount) ||
errors.Is(err, histogram.ErrHistogramCustomBucketsZeroThresh) ||
errors.Is(err, histogram.ErrHistogramCustomBucketsNegSpans) ||
errors.Is(err, histogram.ErrHistogramCustomBucketsNegBuckets) ||
errors.Is(err, histogram.ErrHistogramExpSchemaCustomBounds)
}
func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
contentType := r.Header.Get("Content-Type") contentType := r.Header.Get("Content-Type")
if contentType == "" { if contentType == "" {
@ -190,6 +208,9 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Indicated an out-of-order sample is a bad request to prevent retries. // Indicated an out-of-order sample is a bad request to prevent retries.
http.Error(w, err.Error(), http.StatusBadRequest) http.Error(w, err.Error(), http.StatusBadRequest)
return return
case isHistogramValidationError(err):
http.Error(w, err.Error(), http.StatusBadRequest)
return
default: default:
h.logger.Error("Error while remote writing the v1 request", "err", err.Error()) h.logger.Error("Error while remote writing the v1 request", "err", err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
@ -229,7 +250,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
samplesWithInvalidLabels := 0 samplesWithInvalidLabels := 0
samplesAppended := 0 samplesAppended := 0
app := &timeLimitAppender{ app := &remoteWriteAppender{
Appender: h.appendable.Appender(ctx), Appender: h.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
} }
@ -344,7 +365,7 @@ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Hist
// NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors. // NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors.
// Once we have 5xx type of error, we immediately stop and rollback all appends. // Once we have 5xx type of error, we immediately stop and rollback all appends.
func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ WriteResponseStats, errHTTPCode int, _ error) { func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ WriteResponseStats, errHTTPCode int, _ error) {
app := &timeLimitAppender{ app := &remoteWriteAppender{
Appender: h.appendable.Appender(ctx), Appender: h.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
} }
@ -474,6 +495,11 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
continue continue
} }
if isHistogramValidationError(err) {
h.logger.Error("Invalid histogram received", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp)
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
continue
}
return 0, http.StatusInternalServerError, err return 0, http.StatusInternalServerError, err
} }
@ -616,7 +642,7 @@ type rwExporter struct {
func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
otlpCfg := rw.config().OTLPConfig otlpCfg := rw.config().OTLPConfig
app := &timeLimitAppender{ app := &remoteWriteAppender{
Appender: rw.appendable.Appender(ctx), Appender: rw.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
} }
@ -719,13 +745,13 @@ func hasDelta(md pmetric.Metrics) bool {
return false return false
} }
type timeLimitAppender struct { type remoteWriteAppender struct {
storage.Appender storage.Appender
maxTime int64 maxTime int64
} }
func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { func (app *remoteWriteAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
if t > app.maxTime { if t > app.maxTime {
return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds) return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
} }
@ -737,11 +763,18 @@ func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels,
return ref, nil return ref, nil
} }
func (app *timeLimitAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { func (app *remoteWriteAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if t > app.maxTime { if t > app.maxTime {
return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds) return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
} }
if h != nil && histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > histogram.ExponentialSchemaMax {
h = h.ReduceResolution(histogram.ExponentialSchemaMax)
}
if fh != nil && histogram.IsExponentialSchemaReserved(fh.Schema) && fh.Schema > histogram.ExponentialSchemaMax {
fh = fh.ReduceResolution(histogram.ExponentialSchemaMax)
}
ref, err := app.Appender.AppendHistogram(ref, l, t, h, fh) ref, err := app.Appender.AppendHistogram(ref, l, t, h, fh)
if err != nil { if err != nil {
return 0, err return 0, err
@ -749,7 +782,7 @@ func (app *timeLimitAppender) AppendHistogram(ref storage.SeriesRef, l labels.La
return ref, nil return ref, nil
} }
func (app *timeLimitAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { func (app *remoteWriteAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
if e.Ts > app.maxTime { if e.Ts > app.maxTime {
return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds) return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
} }

View File

@ -806,6 +806,94 @@ func TestCommitErr_V1Message(t *testing.T) {
require.Equal(t, "commit error\n", string(body)) require.Equal(t, "commit error\n", string(body))
} }
// Regression test for https://github.com/prometheus/prometheus/issues/17206
func TestHistogramValidationErrorHandling(t *testing.T) {
testCases := []struct {
desc string
hist histogram.Histogram
expected string
}{
{
desc: "count mismatch",
hist: histogram.Histogram{
Schema: 2,
ZeroThreshold: 1e-128,
ZeroCount: 1,
Count: 10,
Sum: 20,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{2},
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
NegativeBuckets: []int64{3},
// Total: 1 (zero) + 2 (positive) + 3 (negative) = 6, but Count = 10
},
expected: "histogram's observation count should equal",
},
{
desc: "custom buckets zero count",
hist: histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 10,
Sum: 20,
ZeroCount: 1, // Invalid: custom buckets must have zero count of 0
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{10},
CustomValues: []float64{1.0},
},
expected: "custom buckets: must have zero count of 0",
},
}
for _, protoMsg := range []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} {
protoName := "V1"
if protoMsg == config.RemoteWriteProtoMsgV2 {
protoName = "V2"
}
for _, tc := range testCases {
testName := fmt.Sprintf("%s %s", protoName, tc.desc)
t.Run(testName, func(t *testing.T) {
dir := t.TempDir()
opts := tsdb.DefaultOptions()
opts.EnableNativeHistograms = true
db, err := tsdb.Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, db.Close()) })
handler := NewWriteHandler(promslog.NewNopLogger(), nil, db.Head(), []config.RemoteWriteProtoMsg{protoMsg}, false)
recorder := httptest.NewRecorder()
var buf []byte
if protoMsg == config.RemoteWriteProtoMsgV1 {
ts := []prompb.TimeSeries{{
Labels: []prompb.Label{{Name: "__name__", Value: "test"}},
Histograms: []prompb.Histogram{prompb.FromIntHistogram(1, &tc.hist)},
}}
buf, _, _, err = buildWriteRequest(nil, ts, nil, nil, nil, nil, "snappy")
} else {
st := writev2.NewSymbolTable()
ts := []writev2.TimeSeries{{
LabelsRefs: st.SymbolizeLabels(labels.FromStrings("__name__", "test"), nil),
Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &tc.hist)},
}}
buf, _, _, err = buildV2WriteRequest(promslog.NewNopLogger(), ts, st.Symbols(), nil, nil, nil, "snappy")
}
require.NoError(t, err)
req := httptest.NewRequest(http.MethodPost, "/api/v1/write", bytes.NewReader(buf))
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[protoMsg])
req.Header.Set("Content-Encoding", "snappy")
handler.ServeHTTP(recorder, req)
require.Equal(t, http.StatusBadRequest, recorder.Code)
require.Contains(t, recorder.Body.String(), tc.expected)
})
}
}
}
func TestCommitErr_V2Message(t *testing.T) { func TestCommitErr_V2Message(t *testing.T) {
payload, _, _, err := buildV2WriteRequest(promslog.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") payload, _, _, err := buildV2WriteRequest(promslog.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
require.NoError(t, err) require.NoError(t, err)
@ -1134,3 +1222,100 @@ func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, l labels.Labels
m.samples = append(m.samples, mockSample{l, ct, 0}) m.samples = append(m.samples, mockSample{l, ct, 0})
return storage.SeriesRef(hash), nil return storage.SeriesRef(hash), nil
} }
var (
highSchemaHistogram = &histogram.Histogram{
Schema: 10,
PositiveSpans: []histogram.Span{
{
Offset: -1,
Length: 2,
},
},
PositiveBuckets: []int64{1, 2},
NegativeSpans: []histogram.Span{
{
Offset: 0,
Length: 1,
},
},
NegativeBuckets: []int64{1},
}
reducedSchemaHistogram = &histogram.Histogram{
Schema: 8,
PositiveSpans: []histogram.Span{
{
Offset: 0,
Length: 1,
},
},
PositiveBuckets: []int64{4},
NegativeSpans: []histogram.Span{
{
Offset: 0,
Length: 1,
},
},
NegativeBuckets: []int64{1},
}
)
func TestHistogramsReduction(t *testing.T) {
for _, protoMsg := range []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} {
t.Run(string(protoMsg), func(t *testing.T) {
appendable := &mockAppendable{}
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{protoMsg}, false)
var (
err error
payload []byte
)
if protoMsg == config.RemoteWriteProtoMsgV1 {
payload, _, _, err = buildWriteRequest(nil, []prompb.TimeSeries{
{
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric1"}},
Histograms: []prompb.Histogram{prompb.FromIntHistogram(1, highSchemaHistogram)},
},
{
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric2"}},
Histograms: []prompb.Histogram{prompb.FromFloatHistogram(2, highSchemaHistogram.ToFloat(nil))},
},
}, nil, nil, nil, nil, "snappy")
} else {
payload, _, _, err = buildV2WriteRequest(promslog.NewNopLogger(), []writev2.TimeSeries{
{
LabelsRefs: []uint32{0, 1},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, highSchemaHistogram)},
},
{
LabelsRefs: []uint32{0, 2},
Histograms: []writev2.Histogram{writev2.FromFloatHistogram(2, highSchemaHistogram.ToFloat(nil))},
},
}, []string{"__name__", "test_metric1", "test_metric2"},
nil, nil, nil, "snappy")
}
require.NoError(t, err)
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[protoMsg])
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, http.StatusNoContent, resp.StatusCode)
require.Empty(t, body)
require.Len(t, appendable.histograms, 2)
require.Equal(t, int64(1), appendable.histograms[0].t)
require.Equal(t, reducedSchemaHistogram, appendable.histograms[0].h)
require.Equal(t, int64(2), appendable.histograms[1].t)
require.Equal(t, reducedSchemaHistogram.ToFloat(nil), appendable.histograms[1].fh)
})
}
}

View File

@ -437,7 +437,7 @@ func (db *DB) resetWALReplayResources() {
func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef) (err error) { func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef) (err error) {
var ( var (
syms = labels.NewSymbolTable() // One table for the whole WAL. syms = labels.NewSymbolTable() // One table for the whole WAL.
dec = record.NewDecoder(syms) dec = record.NewDecoder(syms, db.logger)
lastRef = chunks.HeadSeriesRef(db.nextRef.Load()) lastRef = chunks.HeadSeriesRef(db.nextRef.Load())
decoded = make(chan any, 10) decoded = make(chan any, 10)

View File

@ -211,7 +211,7 @@ func TestCommit(t *testing.T) {
// Read records from WAL and check for expected count of series, samples, and exemplars. // Read records from WAL and check for expected count of series, samples, and exemplars.
var ( var (
r = wlog.NewReader(sr) r = wlog.NewReader(sr)
dec = record.NewDecoder(labels.NewSymbolTable()) dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
walSeriesCount, walSamplesCount, walExemplarsCount, walHistogramCount, walFloatHistogramCount int walSeriesCount, walSamplesCount, walExemplarsCount, walHistogramCount, walFloatHistogramCount int
) )
@ -344,7 +344,7 @@ func TestRollback(t *testing.T) {
// Read records from WAL and check for expected count of series and samples. // Read records from WAL and check for expected count of series and samples.
var ( var (
r = wlog.NewReader(sr) r = wlog.NewReader(sr)
dec = record.NewDecoder(labels.NewSymbolTable()) dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
walSeriesCount, walSamplesCount, walHistogramCount, walFloatHistogramCount, walExemplarsCount int walSeriesCount, walSamplesCount, walHistogramCount, walFloatHistogramCount, walExemplarsCount int
) )
@ -892,7 +892,7 @@ func TestStorage_DuplicateExemplarsIgnored(t *testing.T) {
defer sr.Close() defer sr.Close()
r := wlog.NewReader(sr) r := wlog.NewReader(sr)
dec := record.NewDecoder(labels.NewSymbolTable()) dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
for r.Next() { for r.Next() {
rec := r.Record() rec := r.Record()
if dec.Type(rec) == record.Exemplars { if dec.Type(rec) == record.Exemplars {
@ -1332,7 +1332,7 @@ func readWALSamples(t *testing.T, walDir string) []*walSample {
}(sr) }(sr)
r := wlog.NewReader(sr) r := wlog.NewReader(sr)
dec := record.NewDecoder(labels.NewSymbolTable()) dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
var ( var (
samples []record.RefSample samples []record.RefSample

View File

@ -866,7 +866,7 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
} }
if fh == nil { if fh == nil {
it.atFloatHistogramCalled = true it.atFloatHistogramCalled = true
return it.t, &histogram.FloatHistogram{ fh = &histogram.FloatHistogram{
CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead), CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
Count: it.cnt.value, Count: it.cnt.value,
ZeroCount: it.zCnt.value, ZeroCount: it.zCnt.value,
@ -879,6 +879,14 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
NegativeBuckets: it.nBuckets, NegativeBuckets: it.nBuckets,
CustomValues: it.customValues, CustomValues: it.customValues,
} }
if fh.Schema > histogram.ExponentialSchemaMax && fh.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// chunk is from a newer Prometheus version that supports higher
// resolution.
fh = fh.Copy()
fh.ReduceResolution(histogram.ExponentialSchemaMax)
}
return it.t, fh
} }
fh.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead) fh.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead)
@ -903,6 +911,13 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
// Custom values are interned. The single copy is in this iterator. // Custom values are interned. The single copy is in this iterator.
fh.CustomValues = it.customValues fh.CustomValues = it.customValues
if fh.Schema > histogram.ExponentialSchemaMax && fh.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// chunk is from a newer Prometheus version that supports higher
// resolution.
fh.ReduceResolution(histogram.ExponentialSchemaMax)
}
return it.t, fh return it.t, fh
} }
@ -954,6 +969,12 @@ func (it *floatHistogramIterator) Next() ValueType {
it.err = err it.err = err
return ValNone return ValNone
} }
if !histogram.IsKnownSchema(schema) {
it.err = histogram.UnknownSchemaError(schema)
return ValNone
}
it.schema = schema it.schema = schema
it.zThreshold = zeroThreshold it.zThreshold = zeroThreshold
it.pSpans, it.nSpans = posSpans, negSpans it.pSpans, it.nSpans = posSpans, negSpans

View File

@ -14,6 +14,7 @@
package chunkenc package chunkenc
import ( import (
"fmt"
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -1462,3 +1463,62 @@ func TestFloatHistogramEmptyBucketsWithGaps(t *testing.T) {
require.Equal(t, ValNone, it.Next()) require.Equal(t, ValNone, it.Next())
require.NoError(t, it.Err()) require.NoError(t, it.Err())
} }
func TestFloatHistogramIteratorFailIfSchemaInValid(t *testing.T) {
for _, schema := range []int32{-101, 101} {
t.Run(fmt.Sprintf("schema %d", schema), func(t *testing.T) {
h := &histogram.FloatHistogram{
Schema: schema,
Count: 10,
Sum: 15.0,
ZeroThreshold: 1e-100,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 2, 3, 4},
}
c := NewFloatHistogramChunk()
app, err := c.Appender()
require.NoError(t, err)
_, _, _, err = app.AppendFloatHistogram(nil, 1, h, false)
require.NoError(t, err)
it := c.Iterator(nil)
require.Equal(t, ValNone, it.Next())
require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema)
})
}
}
func TestFloatHistogramIteratorReduceSchema(t *testing.T) {
for _, schema := range []int32{9, 52} {
t.Run(fmt.Sprintf("schema %d", schema), func(t *testing.T) {
h := &histogram.FloatHistogram{
Schema: schema,
Count: 10,
Sum: 15.0,
ZeroThreshold: 1e-100,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 2, 3, 4},
}
c := NewFloatHistogramChunk()
app, err := c.Appender()
require.NoError(t, err)
_, _, _, err = app.AppendFloatHistogram(nil, 1, h, false)
require.NoError(t, err)
it := c.Iterator(nil)
require.Equal(t, ValFloatHistogram, it.Next())
_, rh := it.AtFloatHistogram(nil)
require.Equal(t, histogram.ExponentialSchemaMax, rh.Schema)
})
}
}

View File

@ -921,7 +921,7 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
} }
if h == nil { if h == nil {
it.atHistogramCalled = true it.atHistogramCalled = true
return it.t, &histogram.Histogram{ h = &histogram.Histogram{
CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead), CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
Count: it.cnt, Count: it.cnt,
ZeroCount: it.zCnt, ZeroCount: it.zCnt,
@ -934,6 +934,14 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
NegativeBuckets: it.nBuckets, NegativeBuckets: it.nBuckets,
CustomValues: it.customValues, CustomValues: it.customValues,
} }
if h.Schema > histogram.ExponentialSchemaMax && h.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// chunk is from a newer Prometheus version that supports higher
// resolution.
h = h.Copy()
h.ReduceResolution(histogram.ExponentialSchemaMax)
}
return it.t, h
} }
h.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead) h.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead)
@ -958,6 +966,13 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
// Custom values are interned. The single copy is here in the iterator. // Custom values are interned. The single copy is here in the iterator.
h.CustomValues = it.customValues h.CustomValues = it.customValues
if h.Schema > histogram.ExponentialSchemaMax && h.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// chunk is from a newer Prometheus version that supports higher
// resolution.
h.ReduceResolution(histogram.ExponentialSchemaMax)
}
return it.t, h return it.t, h
} }
@ -967,7 +982,7 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
} }
if fh == nil { if fh == nil {
it.atFloatHistogramCalled = true it.atFloatHistogramCalled = true
return it.t, &histogram.FloatHistogram{ fh = &histogram.FloatHistogram{
CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead), CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
Count: float64(it.cnt), Count: float64(it.cnt),
ZeroCount: float64(it.zCnt), ZeroCount: float64(it.zCnt),
@ -980,6 +995,14 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
NegativeBuckets: it.nFloatBuckets, NegativeBuckets: it.nFloatBuckets,
CustomValues: it.customValues, CustomValues: it.customValues,
} }
if fh.Schema > histogram.ExponentialSchemaMax && fh.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// chunk is from a newer Prometheus version that supports higher
// resolution.
fh = fh.Copy()
fh.ReduceResolution(histogram.ExponentialSchemaMax)
}
return it.t, fh
} }
fh.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead) fh.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead)
@ -1012,6 +1035,13 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
// Custom values are interned. The single copy is here in the iterator. // Custom values are interned. The single copy is here in the iterator.
fh.CustomValues = it.customValues fh.CustomValues = it.customValues
if fh.Schema > histogram.ExponentialSchemaMax && fh.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// chunk is from a newer Prometheus version that supports higher
// resolution.
fh.ReduceResolution(histogram.ExponentialSchemaMax)
}
return it.t, fh return it.t, fh
} }
@ -1077,6 +1107,12 @@ func (it *histogramIterator) Next() ValueType {
it.err = err it.err = err
return ValNone return ValNone
} }
if !histogram.IsKnownSchema(schema) {
it.err = histogram.UnknownSchemaError(schema)
return ValNone
}
it.schema = schema it.schema = schema
it.zThreshold = zeroThreshold it.zThreshold = zeroThreshold
it.pSpans, it.nSpans = posSpans, negSpans it.pSpans, it.nSpans = posSpans, negSpans

View File

@ -14,6 +14,7 @@
package chunkenc package chunkenc
import ( import (
"fmt"
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -1818,3 +1819,65 @@ func TestIntHistogramEmptyBucketsWithGaps(t *testing.T) {
require.Equal(t, ValNone, it.Next()) require.Equal(t, ValNone, it.Next())
require.NoError(t, it.Err()) require.NoError(t, it.Err())
} }
// TestHistogramIteratorFailIfSchemaInValid verifies that iterating a chunk
// containing a histogram with a schema outside the known range surfaces
// ErrHistogramsUnknownSchema instead of returning a sample.
func TestHistogramIteratorFailIfSchemaInValid(t *testing.T) {
	for _, s := range []int32{-101, 101} {
		t.Run(fmt.Sprintf("schema %d", s), func(t *testing.T) {
			hist := &histogram.Histogram{
				Schema:        s,
				Count:         10,
				Sum:           15.0,
				ZeroThreshold: 1e-100,
				PositiveSpans: []histogram.Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 2, 3, 4},
			}

			// Appending itself succeeds; the schema is only validated
			// when the chunk is iterated.
			chk := NewHistogramChunk()
			app, err := chk.Appender()
			require.NoError(t, err)
			_, _, _, err = app.AppendHistogram(nil, 1, hist, false)
			require.NoError(t, err)

			it := chk.Iterator(nil)
			require.Equal(t, ValNone, it.Next())
			require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema)
		})
	}
}
// TestHistogramIteratorReduceSchema verifies that an integer histogram
// appended with a schema above ExponentialSchemaMax (but within the reserved
// range) is reduced to ExponentialSchemaMax when read back — both as an
// integer histogram and as a float histogram.
func TestHistogramIteratorReduceSchema(t *testing.T) {
	for _, s := range []int32{9, 52} {
		t.Run(fmt.Sprintf("schema %d", s), func(t *testing.T) {
			hist := &histogram.Histogram{
				Schema:        s,
				Count:         10,
				Sum:           15.0,
				ZeroThreshold: 1e-100,
				PositiveSpans: []histogram.Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 2, 3, 4},
			}

			chk := NewHistogramChunk()
			app, err := chk.Appender()
			require.NoError(t, err)
			_, _, _, err = app.AppendHistogram(nil, 1, hist, false)
			require.NoError(t, err)

			it := chk.Iterator(nil)
			require.Equal(t, ValHistogram, it.Next())

			// Both accessor paths must return the capped schema.
			_, gotH := it.AtHistogram(nil)
			require.Equal(t, histogram.ExponentialSchemaMax, gotH.Schema)
			_, gotFH := it.AtFloatHistogram(nil)
			require.Equal(t, histogram.ExponentialSchemaMax, gotFH.Schema)
		})
	}
}

View File

@ -300,21 +300,89 @@ func TestDataNotAvailableAfterRollback(t *testing.T) {
}() }()
app := db.Appender(context.Background()) app := db.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 0) _, err := app.Append(0, labels.FromStrings("type", "float"), 0, 0)
require.NoError(t, err)
_, err = app.AppendHistogram(
0, labels.FromStrings("type", "histogram"), 0,
&histogram.Histogram{Count: 42, Sum: math.NaN()}, nil,
)
require.NoError(t, err)
_, err = app.AppendHistogram(
0, labels.FromStrings("type", "floathistogram"), 0,
nil, &histogram.FloatHistogram{Count: 42, Sum: math.NaN()},
)
require.NoError(t, err) require.NoError(t, err)
err = app.Rollback() err = app.Rollback()
require.NoError(t, err) require.NoError(t, err)
for _, typ := range []string{"float", "histogram", "floathistogram"} {
querier, err := db.Querier(0, 1) querier, err := db.Querier(0, 1)
require.NoError(t, err) require.NoError(t, err)
defer querier.Close() seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "type", typ))
seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
require.Equal(t, map[string][]chunks.Sample{}, seriesSet) require.Equal(t, map[string][]chunks.Sample{}, seriesSet)
} }
sr, err := wlog.NewSegmentsReader(db.head.wal.Dir())
require.NoError(t, err)
defer func() {
require.NoError(t, sr.Close())
}()
// Read records from WAL and check for expected count of series and samples.
var (
r = wlog.NewReader(sr)
dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
walSeriesCount, walSamplesCount, walHistogramCount, walFloatHistogramCount, walExemplarsCount int
)
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
var series []record.RefSeries
series, err = dec.Series(rec, series)
require.NoError(t, err)
walSeriesCount += len(series)
case record.Samples:
var samples []record.RefSample
samples, err = dec.Samples(rec, samples)
require.NoError(t, err)
walSamplesCount += len(samples)
case record.Exemplars:
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
require.NoError(t, err)
walExemplarsCount += len(exemplars)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
var histograms []record.RefHistogramSample
histograms, err = dec.HistogramSamples(rec, histograms)
require.NoError(t, err)
walHistogramCount += len(histograms)
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
var floatHistograms []record.RefFloatHistogramSample
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
require.NoError(t, err)
walFloatHistogramCount += len(floatHistograms)
default:
}
}
// Check that only series get stored after calling Rollback.
require.Equal(t, 3, walSeriesCount, "series should have been written to WAL")
require.Equal(t, 0, walSamplesCount, "samples should not have been written to WAL")
require.Equal(t, 0, walExemplarsCount, "exemplars should not have been written to WAL")
require.Equal(t, 0, walHistogramCount, "histograms should not have been written to WAL")
require.Equal(t, 0, walFloatHistogramCount, "float histograms should not have been written to WAL")
}
func TestDBAppenderAddRef(t *testing.T) { func TestDBAppenderAddRef(t *testing.T) {
db := openTestDB(t, nil, nil) db := openTestDB(t, nil, nil)
defer func() { defer func() {
@ -4504,7 +4572,7 @@ func testOOOWALWrite(t *testing.T,
}() }()
var records []any var records []any
dec := record.NewDecoder(nil) dec := record.NewDecoder(nil, promslog.NewNopLogger())
for r.Next() { for r.Next() {
rec := r.Record() rec := r.Record()
switch typ := dec.Type(rec); typ { switch typ := dec.Type(rec); typ {
@ -4856,10 +4924,7 @@ func TestMetadataAssertInMemoryData(t *testing.T) {
} }
// TestMultipleEncodingsCommitOrder mainly serves to demonstrate when happens when committing a batch of samples for the // TestMultipleEncodingsCommitOrder mainly serves to demonstrate when happens when committing a batch of samples for the
// same series when there are multiple encodings. Commit() will process all float samples before histogram samples. This // same series when there are multiple encodings. With issue #15177 fixed, this now all works as expected.
// means that if histograms are appended before floats, the histograms could be marked as OOO when they are committed.
// While possible, this shouldn't happen very often - you need the same series to be ingested as both a float and a
// histogram in a single write request.
func TestMultipleEncodingsCommitOrder(t *testing.T) { func TestMultipleEncodingsCommitOrder(t *testing.T) {
opts := DefaultOptions() opts := DefaultOptions()
opts.OutOfOrderCapMax = 30 opts.OutOfOrderCapMax = 30
@ -4933,26 +4998,19 @@ func TestMultipleEncodingsCommitOrder(t *testing.T) {
s := addSample(app, int64(i), chunkenc.ValFloat) s := addSample(app, int64(i), chunkenc.ValFloat)
expSamples = append(expSamples, s) expSamples = append(expSamples, s)
} }
// These samples will be marked as OOO as their timestamps are less than the max timestamp for float samples in the
// same batch.
for i := 110; i < 120; i++ { for i := 110; i < 120; i++ {
s := addSample(app, int64(i), chunkenc.ValHistogram) s := addSample(app, int64(i), chunkenc.ValHistogram)
expSamples = append(expSamples, s) expSamples = append(expSamples, s)
} }
// These samples will be marked as OOO as their timestamps are less than the max timestamp for float samples in the
// same batch.
for i := 120; i < 130; i++ { for i := 120; i < 130; i++ {
s := addSample(app, int64(i), chunkenc.ValFloatHistogram) s := addSample(app, int64(i), chunkenc.ValFloatHistogram)
expSamples = append(expSamples, s) expSamples = append(expSamples, s)
} }
// These samples will be marked as in-order as their timestamps are greater than the max timestamp for float
// samples in the same batch.
for i := 140; i < 150; i++ { for i := 140; i < 150; i++ {
s := addSample(app, int64(i), chunkenc.ValFloatHistogram) s := addSample(app, int64(i), chunkenc.ValFloatHistogram)
expSamples = append(expSamples, s) expSamples = append(expSamples, s)
} }
// These samples will be marked as in-order, even though they're appended after the float histograms from ts 140-150 // These samples will be marked as out-of-order.
// because float samples are processed first and these samples are in-order wrt to the float samples in the batch.
for i := 130; i < 135; i++ { for i := 130; i < 135; i++ {
s := addSample(app, int64(i), chunkenc.ValFloat) s := addSample(app, int64(i), chunkenc.ValFloat)
expSamples = append(expSamples, s) expSamples = append(expSamples, s)
@ -4964,8 +5022,8 @@ func TestMultipleEncodingsCommitOrder(t *testing.T) {
return expSamples[i].T() < expSamples[j].T() return expSamples[i].T() < expSamples[j].T()
}) })
// oooCount = 20 because the histograms from 120 - 130 and float histograms from 120 - 130 are detected as OOO. // oooCount = 5 for the samples 130 to 134.
verifySamples(100, 150, expSamples, 20) verifySamples(100, 150, expSamples, 5)
// Append and commit some in-order histograms by themselves. // Append and commit some in-order histograms by themselves.
app = db.Appender(context.Background()) app = db.Appender(context.Background())
@ -4975,8 +5033,8 @@ func TestMultipleEncodingsCommitOrder(t *testing.T) {
} }
require.NoError(t, app.Commit()) require.NoError(t, app.Commit())
// oooCount remains at 20 as no new OOO samples have been added. // oooCount remains at 5.
verifySamples(100, 160, expSamples, 20) verifySamples(100, 160, expSamples, 5)
// Append and commit samples for all encoding types. This time all samples will be treated as OOO because samples // Append and commit samples for all encoding types. This time all samples will be treated as OOO because samples
// with newer timestamps have already been committed. // with newer timestamps have already been committed.
@ -5004,8 +5062,8 @@ func TestMultipleEncodingsCommitOrder(t *testing.T) {
return expSamples[i].T() < expSamples[j].T() return expSamples[i].T() < expSamples[j].T()
}) })
// oooCount = 50 as we've added 30 more OOO samples. // oooCount = 35 as we've added 30 more OOO samples.
verifySamples(50, 160, expSamples, 50) verifySamples(50, 160, expSamples, 35)
} }
// TODO(codesome): test more samples incoming once compaction has started. To verify new samples after the start // TODO(codesome): test more samples incoming once compaction has started. To verify new samples after the start
@ -7030,7 +7088,7 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) {
require.NoError(t, err) require.NoError(t, err)
sr, err := wlog.NewSegmentsReader(originalWblDir) sr, err := wlog.NewSegmentsReader(originalWblDir)
require.NoError(t, err) require.NoError(t, err)
dec := record.NewDecoder(labels.NewSymbolTable()) dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
r, markers, addedRecs := wlog.NewReader(sr), 0, 0 r, markers, addedRecs := wlog.NewReader(sr), 0, 0
for r.Next() { for r.Next() {
rec := r.Record() rec := r.Record()

View File

@ -86,7 +86,8 @@ type Head struct {
exemplarMetrics *ExemplarMetrics exemplarMetrics *ExemplarMetrics
exemplars ExemplarStorage exemplars ExemplarStorage
logger *slog.Logger logger *slog.Logger
appendPool zeropool.Pool[[]record.RefSample] refSeriesPool zeropool.Pool[[]record.RefSeries]
floatsPool zeropool.Pool[[]record.RefSample]
exemplarsPool zeropool.Pool[[]exemplarWithSeriesRef] exemplarsPool zeropool.Pool[[]exemplarWithSeriesRef]
histogramsPool zeropool.Pool[[]record.RefHistogramSample] histogramsPool zeropool.Pool[[]record.RefHistogramSample]
floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample]

View File

@ -164,13 +164,6 @@ func (h *Head) Appender(context.Context) storage.Appender {
func (h *Head) appender() *headAppender { func (h *Head) appender() *headAppender {
minValidTime := h.appendableMinValidTime() minValidTime := h.appendableMinValidTime()
appendID, cleanupAppendIDsBelow := h.iso.newAppendID(minValidTime) // Every appender gets an ID that is cleared upon commit/rollback. appendID, cleanupAppendIDsBelow := h.iso.newAppendID(minValidTime) // Every appender gets an ID that is cleared upon commit/rollback.
// Allocate the exemplars buffer only if exemplars are enabled.
var exemplarsBuf []exemplarWithSeriesRef
if h.opts.EnableExemplarStorage {
exemplarsBuf = h.getExemplarBuffer()
}
return &headAppender{ return &headAppender{
head: h, head: h,
minValidTime: minValidTime, minValidTime: minValidTime,
@ -178,12 +171,9 @@ func (h *Head) appender() *headAppender {
maxt: math.MinInt64, maxt: math.MinInt64,
headMaxt: h.MaxTime(), headMaxt: h.MaxTime(),
oooTimeWindow: h.opts.OutOfOrderTimeWindow.Load(), oooTimeWindow: h.opts.OutOfOrderTimeWindow.Load(),
samples: h.getAppendBuffer(), seriesRefs: h.getRefSeriesBuffer(),
sampleSeries: h.getSeriesBuffer(), series: h.getSeriesBuffer(),
exemplars: exemplarsBuf, typesInBatch: map[chunks.HeadSeriesRef]sampleType{},
histograms: h.getHistogramBuffer(),
floatHistograms: h.getFloatHistogramBuffer(),
metadata: h.getMetadataBuffer(),
appendID: appendID, appendID: appendID,
cleanupAppendIDsBelow: cleanupAppendIDsBelow, cleanupAppendIDsBelow: cleanupAppendIDsBelow,
} }
@ -213,16 +203,28 @@ func (h *Head) AppendableMinValidTime() (int64, bool) {
return h.appendableMinValidTime(), true return h.appendableMinValidTime(), true
} }
func (h *Head) getAppendBuffer() []record.RefSample { func (h *Head) getRefSeriesBuffer() []record.RefSeries {
b := h.appendPool.Get() b := h.refSeriesPool.Get()
if b == nil {
return make([]record.RefSeries, 0, 512)
}
return b
}
func (h *Head) putRefSeriesBuffer(b []record.RefSeries) {
h.refSeriesPool.Put(b[:0])
}
func (h *Head) getFloatBuffer() []record.RefSample {
b := h.floatsPool.Get()
if b == nil { if b == nil {
return make([]record.RefSample, 0, 512) return make([]record.RefSample, 0, 512)
} }
return b return b
} }
func (h *Head) putAppendBuffer(b []record.RefSample) { func (h *Head) putFloatBuffer(b []record.RefSample) {
h.appendPool.Put(b[:0]) h.floatsPool.Put(b[:0])
} }
func (h *Head) getExemplarBuffer() []exemplarWithSeriesRef { func (h *Head) getExemplarBuffer() []exemplarWithSeriesRef {
@ -312,6 +314,61 @@ type exemplarWithSeriesRef struct {
exemplar exemplar.Exemplar exemplar exemplar.Exemplar
} }
// sampleType describes sample types we need to distinguish for append batching.
// We need separate types for everything that goes into a different WAL record
// type or into a different chunk encoding.
type sampleType byte
const (
stNone sampleType = iota // To mark that the sample type does not matter.
stFloat // All simple floats (counters, gauges, untyped). Goes to `floats`.
stHistogram // Native integer histograms with a standard exponential schema. Goes to `histograms`.
stCustomBucketHistogram // Native integer histograms with custom bucket boundaries. Goes to `histograms`.
stFloatHistogram // Native float histograms. Goes to `floatHistograms`.
stCustomBucketFloatHistogram // Native float histograms with custom bucket boundaries. Goes to `floatHistograms`.
)
// appendBatch is used to partition all the appended data into batches that are
// "type clean", i.e. every series receives only samples of one type within the
// batch. Types in this regard are defined by the sampleType enum above.
// TODO(beorn7): The same concept could be extended to make sure every series in
// the batch has at most one metadata record. This is currently not implemented
// because it is unclear if it is needed at all. (Maybe we will remove metadata
// records altogether, see issue #15911.)
type appendBatch struct {
floats []record.RefSample // New float samples held by this appender.
floatSeries []*memSeries // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
histograms []record.RefHistogramSample // New histogram samples held by this appender.
histogramSeries []*memSeries // HistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
floatHistograms []record.RefFloatHistogramSample // New float histogram samples held by this appender.
floatHistogramSeries []*memSeries // FloatHistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
metadata []record.RefMetadata // New metadata held by this appender.
metadataSeries []*memSeries // Series corresponding to the metadata held by this appender.
exemplars []exemplarWithSeriesRef // New exemplars held by this appender.
}
// close returns all the slices to the pools in Head and nil's them.
func (b *appendBatch) close(h *Head) {
h.putFloatBuffer(b.floats)
b.floats = nil
h.putSeriesBuffer(b.floatSeries)
b.floatSeries = nil
h.putHistogramBuffer(b.histograms)
b.histograms = nil
h.putSeriesBuffer(b.histogramSeries)
b.histogramSeries = nil
h.putFloatHistogramBuffer(b.floatHistograms)
b.floatHistograms = nil
h.putSeriesBuffer(b.floatHistogramSeries)
b.floatHistogramSeries = nil
h.putMetadataBuffer(b.metadata)
b.metadata = nil
h.putSeriesBuffer(b.metadataSeries)
b.metadataSeries = nil
h.putExemplarBuffer(b.exemplars)
b.exemplars = nil
}
type headAppender struct { type headAppender struct {
head *Head head *Head
minValidTime int64 // No samples below this timestamp are allowed. minValidTime int64 // No samples below this timestamp are allowed.
@ -321,15 +378,9 @@ type headAppender struct {
seriesRefs []record.RefSeries // New series records held by this appender. seriesRefs []record.RefSeries // New series records held by this appender.
series []*memSeries // New series held by this appender (using corresponding slices indexes from seriesRefs) series []*memSeries // New series held by this appender (using corresponding slices indexes from seriesRefs)
samples []record.RefSample // New float samples held by this appender. batches []*appendBatch // Holds all the other data to append. (In regular cases, there should be only one of these.)
sampleSeries []*memSeries // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
histograms []record.RefHistogramSample // New histogram samples held by this appender. typesInBatch map[chunks.HeadSeriesRef]sampleType // Which (one) sample type each series holds in the most recent batch.
histogramSeries []*memSeries // HistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
floatHistograms []record.RefFloatHistogramSample // New float histogram samples held by this appender.
floatHistogramSeries []*memSeries // FloatHistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
metadata []record.RefMetadata // New metadata held by this appender.
metadataSeries []*memSeries // Series corresponding to the metadata held by this appender.
exemplars []exemplarWithSeriesRef // New exemplars held by this appender.
appendID, cleanupAppendIDsBelow uint64 appendID, cleanupAppendIDsBelow uint64
closed bool closed bool
@ -357,21 +408,27 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
} }
} }
s.Lock()
if value.IsStaleNaN(v) { if value.IsStaleNaN(v) {
// TODO(krajorama): reorganize Commit() to handle samples in append order // If we have added a sample before with this same appender, we
// not floats first and then histograms. Then we could do this conversion // can check the previously used type and turn a stale float
// in commit. This code should move into Commit(). // sample into a stale histogram sample or stale float histogram
switch { // sample as appropriate. This prevents an unnecessary creation
case s.lastHistogramValue != nil: // of a new batch. However, since other appenders might append
s.Unlock() // to the same series concurrently, this is not perfect but just
// an optimization for the more likely case.
switch a.typesInBatch[s.ref] {
case stHistogram, stCustomBucketHistogram:
return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil) return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil)
case s.lastFloatHistogramValue != nil: case stFloatHistogram, stCustomBucketFloatHistogram:
s.Unlock()
return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v}) return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v})
} }
// Note that a series reference not yet in the map will come out
// as stNone, but since we do not handle that case separately,
// we do not need to check for the difference between "unknown
// series" and "known series with stNone".
} }
s.Lock()
defer s.Unlock() defer s.Unlock()
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL. // to skip that sample from the WAL and write only in the WBL.
@ -403,12 +460,13 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
a.maxt = t a.maxt = t
} }
a.samples = append(a.samples, record.RefSample{ b := a.getCurrentBatch(stFloat, s.ref)
b.floats = append(b.floats, record.RefSample{
Ref: s.ref, Ref: s.ref,
T: t, T: t,
V: v, V: v,
}) })
a.sampleSeries = append(a.sampleSeries, s) b.floatSeries = append(b.floatSeries, s)
return storage.SeriesRef(s.ref), nil return storage.SeriesRef(s.ref), nil
} }
@ -448,8 +506,9 @@ func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Lab
if ct > a.maxt { if ct > a.maxt {
a.maxt = ct a.maxt = ct
} }
a.samples = append(a.samples, record.RefSample{Ref: s.ref, T: ct, V: 0.0}) b := a.getCurrentBatch(stFloat, s.ref)
a.sampleSeries = append(a.sampleSeries, s) b.floats = append(b.floats, record.RefSample{Ref: s.ref, T: ct, V: 0.0})
b.floatSeries = append(b.floatSeries, s)
return storage.SeriesRef(s.ref), nil return storage.SeriesRef(s.ref), nil
} }
@ -476,6 +535,65 @@ func (a *headAppender) getOrCreate(lset labels.Labels) (s *memSeries, created bo
return s, created, nil return s, created, nil
} }
// getCurrentBatch returns the current batch if it fits the provided sampleType
// for the provided series. Otherwise, it adds a new batch and returns it.
func (a *headAppender) getCurrentBatch(st sampleType, s chunks.HeadSeriesRef) *appendBatch {
h := a.head
newBatch := func() *appendBatch {
b := appendBatch{
floats: h.getFloatBuffer(),
floatSeries: h.getSeriesBuffer(),
histograms: h.getHistogramBuffer(),
histogramSeries: h.getSeriesBuffer(),
floatHistograms: h.getFloatHistogramBuffer(),
floatHistogramSeries: h.getSeriesBuffer(),
metadata: h.getMetadataBuffer(),
metadataSeries: h.getSeriesBuffer(),
}
// Allocate the exemplars buffer only if exemplars are enabled.
if h.opts.EnableExemplarStorage {
b.exemplars = h.getExemplarBuffer()
}
clear(a.typesInBatch)
if st != stNone {
a.typesInBatch[s] = st
}
a.batches = append(a.batches, &b)
return &b
}
// First batch ever. Create it.
if len(a.batches) == 0 {
return newBatch()
}
// TODO(beorn7): If we ever see that the a.typesInBatch map grows so
// large that it matters for total memory consumption, we could limit
// the batch size here, i.e. cut a new batch even without a type change.
// Something like:
// if len(a.typesInBatch > limit) {
// return newBatch()
// }
lastBatch := a.batches[len(a.batches)-1]
if st == stNone {
// Type doesn't matter, last batch will always do.
return lastBatch
}
prevST, ok := a.typesInBatch[s]
switch {
case !ok: // New series. Add it to map and return current batch.
a.typesInBatch[s] = st
return lastBatch
case prevST == st: // Old series, same type. Just return batch.
return lastBatch
}
// An old series got a new type. Start new batch.
return newBatch()
}
// appendable checks whether the given sample is valid for appending to the series. // appendable checks whether the given sample is valid for appending to the series.
// If the sample is valid and in-order, it returns false with no error. // If the sample is valid and in-order, it returns false with no error.
// If the sample belongs to the out-of-order chunk, it returns true with no error. // If the sample belongs to the out-of-order chunk, it returns true with no error.
@ -638,7 +756,8 @@ func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels,
return 0, err return 0, err
} }
a.exemplars = append(a.exemplars, exemplarWithSeriesRef{ref, e}) b := a.getCurrentBatch(stNone, chunks.HeadSeriesRef(ref))
b.exemplars = append(b.exemplars, exemplarWithSeriesRef{ref, e})
return storage.SeriesRef(s.ref), nil return storage.SeriesRef(s.ref), nil
} }
@ -667,11 +786,10 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
} }
} }
var created bool
s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil { if s == nil {
var err error var err error
s, created, err = a.getOrCreate(lset) s, _, err = a.getOrCreate(lset)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -680,14 +798,6 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
switch { switch {
case h != nil: case h != nil:
s.Lock() s.Lock()
// TODO(krajorama): reorganize Commit() to handle samples in append order
// not floats first and then histograms. Then we would not need to do this.
// This whole "if" should be removed.
if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil {
s.lastHistogramValue = &histogram.Histogram{}
}
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL. // to skip that sample from the WAL and write only in the WBL.
_, delta, err := s.appendableHistogram(t, h, a.headMaxt, a.minValidTime, a.oooTimeWindow) _, delta, err := s.appendableHistogram(t, h, a.headMaxt, a.minValidTime, a.oooTimeWindow)
@ -707,22 +817,19 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
} }
return 0, err return 0, err
} }
a.histograms = append(a.histograms, record.RefHistogramSample{ st := stHistogram
if h.UsesCustomBuckets() {
st = stCustomBucketHistogram
}
b := a.getCurrentBatch(st, s.ref)
b.histograms = append(b.histograms, record.RefHistogramSample{
Ref: s.ref, Ref: s.ref,
T: t, T: t,
H: h, H: h,
}) })
a.histogramSeries = append(a.histogramSeries, s) b.histogramSeries = append(b.histogramSeries, s)
case fh != nil: case fh != nil:
s.Lock() s.Lock()
// TODO(krajorama): reorganize Commit() to handle samples in append order
// not floats first and then histograms. Then we would not need to do this.
// This whole "if" should be removed.
if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil {
s.lastFloatHistogramValue = &histogram.FloatHistogram{}
}
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL. // to skip that sample from the WAL and write only in the WBL.
_, delta, err := s.appendableFloatHistogram(t, fh, a.headMaxt, a.minValidTime, a.oooTimeWindow) _, delta, err := s.appendableFloatHistogram(t, fh, a.headMaxt, a.minValidTime, a.oooTimeWindow)
@ -742,12 +849,17 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
} }
return 0, err return 0, err
} }
a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{ st := stFloatHistogram
if fh.UsesCustomBuckets() {
st = stCustomBucketFloatHistogram
}
b := a.getCurrentBatch(st, s.ref)
b.floatHistograms = append(b.floatHistograms, record.RefFloatHistogramSample{
Ref: s.ref, Ref: s.ref,
T: t, T: t,
FH: fh, FH: fh,
}) })
a.floatHistogramSeries = append(a.floatHistogramSeries, s) b.floatHistogramSeries = append(b.floatHistogramSeries, s)
} }
if t < a.mint { if t < a.mint {
@ -769,11 +881,10 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l
return 0, storage.ErrCTNewerThanSample return 0, storage.ErrCTNewerThanSample
} }
var created bool
s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil { if s == nil {
var err error var err error
s, created, err = a.getOrCreate(lset) s, _, err = a.getOrCreate(lset)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -784,16 +895,12 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l
zeroHistogram := &histogram.Histogram{ zeroHistogram := &histogram.Histogram{
// The CTZeroSample represents a counter reset by definition. // The CTZeroSample represents a counter reset by definition.
CounterResetHint: histogram.CounterReset, CounterResetHint: histogram.CounterReset,
// Replicate other fields to avoid needless chunk creation.
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
CustomValues: h.CustomValues,
} }
s.Lock() s.Lock()
// TODO(krajorama): reorganize Commit() to handle samples in append order
// not floats first and then histograms. Then we would not need to do this.
// This whole "if" should be removed.
if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil {
s.lastHistogramValue = zeroHistogram
}
// For CTZeroSamples OOO is not allowed. // For CTZeroSamples OOO is not allowed.
// We set it to true to make this implementation as close as possible to the float implementation. // We set it to true to make this implementation as close as possible to the float implementation.
isOOO, _, err := s.appendableHistogram(ct, zeroHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow) isOOO, _, err := s.appendableHistogram(ct, zeroHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow)
@ -815,26 +922,27 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l
s.pendingCommit = true s.pendingCommit = true
s.Unlock() s.Unlock()
a.histograms = append(a.histograms, record.RefHistogramSample{ st := stHistogram
if h.UsesCustomBuckets() {
st = stCustomBucketHistogram
}
b := a.getCurrentBatch(st, s.ref)
b.histograms = append(b.histograms, record.RefHistogramSample{
Ref: s.ref, Ref: s.ref,
T: ct, T: ct,
H: zeroHistogram, H: zeroHistogram,
}) })
a.histogramSeries = append(a.histogramSeries, s) b.histogramSeries = append(b.histogramSeries, s)
case fh != nil: case fh != nil:
zeroFloatHistogram := &histogram.FloatHistogram{ zeroFloatHistogram := &histogram.FloatHistogram{
// The CTZeroSample represents a counter reset by definition. // The CTZeroSample represents a counter reset by definition.
CounterResetHint: histogram.CounterReset, CounterResetHint: histogram.CounterReset,
// Replicate other fields to avoid needless chunk creation.
Schema: fh.Schema,
ZeroThreshold: fh.ZeroThreshold,
CustomValues: fh.CustomValues,
} }
s.Lock() s.Lock()
// TODO(krajorama): reorganize Commit() to handle samples in append order
// not floats first and then histograms. Then we would not need to do this.
// This whole "if" should be removed.
if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil {
s.lastFloatHistogramValue = zeroFloatHistogram
}
// We set it to true to make this implementation as close as possible to the float implementation. // We set it to true to make this implementation as close as possible to the float implementation.
isOOO, _, err := s.appendableFloatHistogram(ct, zeroFloatHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow) // OOO is not allowed for CTZeroSamples. isOOO, _, err := s.appendableFloatHistogram(ct, zeroFloatHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow) // OOO is not allowed for CTZeroSamples.
if err != nil { if err != nil {
@ -855,12 +963,17 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l
s.pendingCommit = true s.pendingCommit = true
s.Unlock() s.Unlock()
a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{ st := stFloatHistogram
if fh.UsesCustomBuckets() {
st = stCustomBucketFloatHistogram
}
b := a.getCurrentBatch(st, s.ref)
b.floatHistograms = append(b.floatHistograms, record.RefFloatHistogramSample{
Ref: s.ref, Ref: s.ref,
T: ct, T: ct,
FH: zeroFloatHistogram, FH: zeroFloatHistogram,
}) })
a.floatHistogramSeries = append(a.floatHistogramSeries, s) b.floatHistogramSeries = append(b.floatHistogramSeries, s)
} }
if ct > a.maxt { if ct > a.maxt {
@ -889,13 +1002,14 @@ func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels,
s.Unlock() s.Unlock()
if hasNewMetadata { if hasNewMetadata {
a.metadata = append(a.metadata, record.RefMetadata{ b := a.getCurrentBatch(stNone, s.ref)
b.metadata = append(b.metadata, record.RefMetadata{
Ref: s.ref, Ref: s.ref,
Type: record.GetMetricType(meta.Type), Type: record.GetMetricType(meta.Type),
Unit: meta.Unit, Unit: meta.Unit,
Help: meta.Help, Help: meta.Help,
}) })
a.metadataSeries = append(a.metadataSeries, s) b.metadataSeries = append(b.metadataSeries, s)
} }
return ref, nil return ref, nil
@ -932,25 +1046,26 @@ func (a *headAppender) log() error {
return fmt.Errorf("log series: %w", err) return fmt.Errorf("log series: %w", err)
} }
} }
if len(a.metadata) > 0 { for _, b := range a.batches {
rec = enc.Metadata(a.metadata, buf) if len(b.metadata) > 0 {
rec = enc.Metadata(b.metadata, buf)
buf = rec[:0] buf = rec[:0]
if err := a.head.wal.Log(rec); err != nil { if err := a.head.wal.Log(rec); err != nil {
return fmt.Errorf("log metadata: %w", err) return fmt.Errorf("log metadata: %w", err)
} }
} }
if len(a.samples) > 0 { if len(b.floats) > 0 {
rec = enc.Samples(a.samples, buf) rec = enc.Samples(b.floats, buf)
buf = rec[:0] buf = rec[:0]
if err := a.head.wal.Log(rec); err != nil { if err := a.head.wal.Log(rec); err != nil {
return fmt.Errorf("log samples: %w", err) return fmt.Errorf("log samples: %w", err)
} }
} }
if len(a.histograms) > 0 { if len(b.histograms) > 0 {
var customBucketsHistograms []record.RefHistogramSample var customBucketsHistograms []record.RefHistogramSample
rec, customBucketsHistograms = enc.HistogramSamples(a.histograms, buf) rec, customBucketsHistograms = enc.HistogramSamples(b.histograms, buf)
buf = rec[:0] buf = rec[:0]
if len(rec) > 0 { if len(rec) > 0 {
if err := a.head.wal.Log(rec); err != nil { if err := a.head.wal.Log(rec); err != nil {
@ -965,9 +1080,9 @@ func (a *headAppender) log() error {
} }
} }
} }
if len(a.floatHistograms) > 0 { if len(b.floatHistograms) > 0 {
var customBucketsFloatHistograms []record.RefFloatHistogramSample var customBucketsFloatHistograms []record.RefFloatHistogramSample
rec, customBucketsFloatHistograms = enc.FloatHistogramSamples(a.floatHistograms, buf) rec, customBucketsFloatHistograms = enc.FloatHistogramSamples(b.floatHistograms, buf)
buf = rec[:0] buf = rec[:0]
if len(rec) > 0 { if len(rec) > 0 {
if err := a.head.wal.Log(rec); err != nil { if err := a.head.wal.Log(rec); err != nil {
@ -986,14 +1101,15 @@ func (a *headAppender) log() error {
// otherwise it might happen that we send the exemplars in a remote write // otherwise it might happen that we send the exemplars in a remote write
// batch before the samples, which in turn means the exemplar is rejected // batch before the samples, which in turn means the exemplar is rejected
// for missing series, since series are created due to samples. // for missing series, since series are created due to samples.
if len(a.exemplars) > 0 { if len(b.exemplars) > 0 {
rec = enc.Exemplars(exemplarsForEncoding(a.exemplars), buf) rec = enc.Exemplars(exemplarsForEncoding(b.exemplars), buf)
buf = rec[:0] buf = rec[:0]
if err := a.head.wal.Log(rec); err != nil { if err := a.head.wal.Log(rec); err != nil {
return fmt.Errorf("log exemplars: %w", err) return fmt.Errorf("log exemplars: %w", err)
} }
} }
}
return nil return nil
} }
@ -1040,10 +1156,10 @@ type appenderCommitContext struct {
enc record.Encoder enc record.Encoder
} }
// commitExemplars adds all exemplars from headAppender to the head's exemplar storage. // commitExemplars adds all exemplars from the provided batch to the head's exemplar storage.
func (a *headAppender) commitExemplars() { func (a *headAppender) commitExemplars(b *appendBatch) {
// No errors logging to WAL, so pass the exemplars along to the in memory storage. // No errors logging to WAL, so pass the exemplars along to the in memory storage.
for _, e := range a.exemplars { for _, e := range b.exemplars {
s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref)) s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref))
if s == nil { if s == nil {
// This is very unlikely to happen, but we have seen it in the wild. // This is very unlikely to happen, but we have seen it in the wild.
@ -1147,9 +1263,9 @@ func handleAppendableError(err error, appended, oooRejected, oobRejected, tooOld
} }
} }
// commitSamples processes and commits the samples in the headAppender to the series. // commitFloats processes and commits the samples in the provided batch to the
// It handles both in-order and out-of-order samples, updating the appenderCommitContext // series. It handles both in-order and out-of-order samples, updating the
// with the results of the append operations. // appenderCommitContext with the results of the append operations.
// //
// The function iterates over the samples in the headAppender and attempts to append each sample // The function iterates over the samples in the headAppender and attempts to append each sample
// to its corresponding series. It handles various error cases such as out-of-order samples, // to its corresponding series. It handles various error cases such as out-of-order samples,
@ -1166,14 +1282,68 @@ func handleAppendableError(err error, appended, oooRejected, oobRejected, tooOld
// operations on the series after appending the samples. // operations on the series after appending the samples.
// //
// There are also specific functions to commit histograms and float histograms. // There are also specific functions to commit histograms and float histograms.
func (a *headAppender) commitSamples(acc *appenderCommitContext) { func (a *headAppender) commitFloats(b *appendBatch, acc *appenderCommitContext) {
var ok, chunkCreated bool var ok, chunkCreated bool
var series *memSeries var series *memSeries
for i, s := range a.samples { for i, s := range b.floats {
series = a.sampleSeries[i] series = b.floatSeries[i]
series.Lock() series.Lock()
if value.IsStaleNaN(s.V) {
// If a float staleness marker had been appended for a
// series that got a histogram or float histogram
// appended before via this same appender, it would not
// show up here because we had already converted it. We
// end up here for two reasons: (1) This is the very
// first sample for this series appended via this
// appender. (2) A float sample was appended to this
// series before via this same appender.
//
// In either case, we need to check the previous sample
// in the memSeries to append the appropriately typed
// staleness marker. This is obviously so in case (1).
// In case (2), we would usually expect a float sample
// as the previous sample, but there might be concurrent
// appends that have added a histogram sample in the
// meantime. (This will probably lead to OOO shenanigans
// anyway, but that's a different story.)
//
// If the last sample in the memSeries is indeed a
// float, we don't have to do anything special here and
// just go on with the normal commit for a float sample.
// However, if the last sample in the memSeries is a
// histogram or float histogram, we have to convert the
// staleness marker to a histogram (or float histogram,
// respectively), and just add it at the end of the
// histograms (or float histograms) in the same batch,
// to be committed later in commitHistograms (or
// commitFloatHistograms). The latter is fine because we
// know there is no other histogram (or float histogram)
// sample for this same series in this same batch
// (because any such sample would have triggered a new
// batch).
switch {
case series.lastHistogramValue != nil:
b.histograms = append(b.histograms, record.RefHistogramSample{
Ref: series.ref,
T: s.T,
H: &histogram.Histogram{Sum: s.V},
})
b.histogramSeries = append(b.histogramSeries, series)
series.Unlock()
continue
case series.lastFloatHistogramValue != nil:
b.floatHistograms = append(b.floatHistograms, record.RefFloatHistogramSample{
Ref: series.ref,
T: s.T,
FH: &histogram.FloatHistogram{Sum: s.V},
})
b.floatHistogramSeries = append(b.floatHistogramSeries, series)
series.Unlock()
continue
}
}
oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow) oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow)
if err != nil { if err != nil {
handleAppendableError(err, &acc.floatsAppended, &acc.floatOOORejected, &acc.floatOOBRejected, &acc.floatTooOldRejected) handleAppendableError(err, &acc.floatsAppended, &acc.floatOOORejected, &acc.floatOOBRejected, &acc.floatTooOldRejected)
@ -1261,15 +1431,24 @@ func (a *headAppender) commitSamples(acc *appenderCommitContext) {
} }
} }
// For details on the commitHistograms function, see the commitSamples docs. // For details on the commitHistograms function, see the commitFloats docs.
func (a *headAppender) commitHistograms(acc *appenderCommitContext) { func (a *headAppender) commitHistograms(b *appendBatch, acc *appenderCommitContext) {
var ok, chunkCreated bool var ok, chunkCreated bool
var series *memSeries var series *memSeries
for i, s := range a.histograms { for i, s := range b.histograms {
series = a.histogramSeries[i] series = b.histogramSeries[i]
series.Lock() series.Lock()
// At this point, we could encounter a histogram staleness
// marker that should better be a float staleness marker or a
// float histogram staleness marker. This can only happen with
// concurrent appenders appending to the same series _and_ doing
// so in a mixed-type scenario. This case is expected to be very
// rare, so we do not bother here to convert the staleness
// marker. The worst case is that we need to cut a new chunk
// just for the staleness marker.
oooSample, _, err := series.appendableHistogram(s.T, s.H, a.headMaxt, a.minValidTime, a.oooTimeWindow) oooSample, _, err := series.appendableHistogram(s.T, s.H, a.headMaxt, a.minValidTime, a.oooTimeWindow)
if err != nil { if err != nil {
handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected) handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected)
@ -1361,15 +1540,24 @@ func (a *headAppender) commitHistograms(acc *appenderCommitContext) {
} }
} }
// For details on the commitFloatHistograms function, see the commitSamples docs. // For details on the commitFloatHistograms function, see the commitFloats docs.
func (a *headAppender) commitFloatHistograms(acc *appenderCommitContext) { func (a *headAppender) commitFloatHistograms(b *appendBatch, acc *appenderCommitContext) {
var ok, chunkCreated bool var ok, chunkCreated bool
var series *memSeries var series *memSeries
for i, s := range a.floatHistograms { for i, s := range b.floatHistograms {
series = a.floatHistogramSeries[i] series = b.floatHistogramSeries[i]
series.Lock() series.Lock()
// At this point, we could encounter a float histogram staleness
// marker that should better be a float staleness marker or an
// integer histogram staleness marker. This can only happen with
// concurrent appenders appending to the same series _and_ doing
// so in a mixed-type scenario. This case is expected to be very
// rare, so we do not bother here to convert the staleness
// marker. The worst case is that we need to cut a new chunk
// just for the staleness marker.
oooSample, _, err := series.appendableFloatHistogram(s.T, s.FH, a.headMaxt, a.minValidTime, a.oooTimeWindow) oooSample, _, err := series.appendableFloatHistogram(s.T, s.FH, a.headMaxt, a.minValidTime, a.oooTimeWindow)
if err != nil { if err != nil {
handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected) handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected)
@ -1461,14 +1649,14 @@ func (a *headAppender) commitFloatHistograms(acc *appenderCommitContext) {
} }
} }
// commitMetadata commits the metadata for each series in the headAppender. // commitMetadata commits the metadata for each series in the provided batch.
// It iterates over the metadata slice and updates the corresponding series // It iterates over the metadata slice and updates the corresponding series
// with the new metadata information. The series is locked during the update // with the new metadata information. The series is locked during the update
// to ensure thread safety. // to ensure thread safety.
func (a *headAppender) commitMetadata() { func commitMetadata(b *appendBatch) {
var series *memSeries var series *memSeries
for i, m := range a.metadata { for i, m := range b.metadata {
series = a.metadataSeries[i] series = b.metadataSeries[i]
series.Lock() series.Lock()
series.meta = &metadata.Metadata{Type: record.ToMetricType(m.Type), Unit: m.Unit, Help: m.Help} series.meta = &metadata.Metadata{Type: record.ToMetricType(m.Type), Unit: m.Unit, Help: m.Help}
series.Unlock() series.Unlock()
@ -1489,75 +1677,82 @@ func (a *headAppender) Commit() (err error) {
if a.closed { if a.closed {
return ErrAppenderClosed return ErrAppenderClosed
} }
defer func() { a.closed = true }()
h := a.head
defer func() {
h.putRefSeriesBuffer(a.seriesRefs)
h.putSeriesBuffer(a.series)
a.closed = true
}()
if err := a.log(); err != nil { if err := a.log(); err != nil {
_ = a.Rollback() // Most likely the same error will happen again. _ = a.Rollback() // Most likely the same error will happen again.
return fmt.Errorf("write to WAL: %w", err) return fmt.Errorf("write to WAL: %w", err)
} }
if a.head.writeNotified != nil { if h.writeNotified != nil {
a.head.writeNotified.Notify() h.writeNotified.Notify()
} }
a.commitExemplars()
defer a.head.metrics.activeAppenders.Dec()
defer a.head.putAppendBuffer(a.samples)
defer a.head.putSeriesBuffer(a.sampleSeries)
defer a.head.putExemplarBuffer(a.exemplars)
defer a.head.putHistogramBuffer(a.histograms)
defer a.head.putFloatHistogramBuffer(a.floatHistograms)
defer a.head.putMetadataBuffer(a.metadata)
defer a.head.iso.closeAppend(a.appendID)
acc := &appenderCommitContext{ acc := &appenderCommitContext{
floatsAppended: len(a.samples),
histogramsAppended: len(a.histograms) + len(a.floatHistograms),
inOrderMint: math.MaxInt64, inOrderMint: math.MaxInt64,
inOrderMaxt: math.MinInt64, inOrderMaxt: math.MinInt64,
oooMinT: math.MaxInt64, oooMinT: math.MaxInt64,
oooMaxT: math.MinInt64, oooMaxT: math.MinInt64,
oooCapMax: a.head.opts.OutOfOrderCapMax.Load(), oooCapMax: h.opts.OutOfOrderCapMax.Load(),
appendChunkOpts: chunkOpts{ appendChunkOpts: chunkOpts{
chunkDiskMapper: a.head.chunkDiskMapper, chunkDiskMapper: h.chunkDiskMapper,
chunkRange: a.head.chunkRange.Load(), chunkRange: h.chunkRange.Load(),
samplesPerChunk: a.head.opts.SamplesPerChunk, samplesPerChunk: h.opts.SamplesPerChunk,
}, },
} }
for _, b := range a.batches {
acc.floatsAppended += len(b.floats)
acc.histogramsAppended += len(b.histograms) + len(b.floatHistograms)
a.commitExemplars(b)
defer b.close(h)
}
defer h.metrics.activeAppenders.Dec()
defer h.iso.closeAppend(a.appendID)
defer func() { defer func() {
for i := range acc.oooRecords { for i := range acc.oooRecords {
a.head.putBytesBuffer(acc.oooRecords[i][:0]) h.putBytesBuffer(acc.oooRecords[i][:0])
} }
}() }()
a.commitSamples(acc) for _, b := range a.batches {
a.commitHistograms(acc) // Do not change the order of these calls. The staleness marker
a.commitFloatHistograms(acc) // handling depends on it.
a.commitMetadata() a.commitFloats(b, acc)
a.commitHistograms(b, acc)
a.commitFloatHistograms(b, acc)
commitMetadata(b)
}
// Unmark all series as pending commit after all samples have been committed. // Unmark all series as pending commit after all samples have been committed.
a.unmarkCreatedSeriesAsPendingCommit() a.unmarkCreatedSeriesAsPendingCommit()
a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOORejected)) h.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOORejected))
a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histoOOORejected)) h.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histoOOORejected))
a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOBRejected)) h.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOBRejected))
a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatTooOldRejected)) h.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatTooOldRejected))
a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatsAppended)) h.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatsAppended))
a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histogramsAppended)) h.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histogramsAppended))
a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.oooFloatsAccepted)) h.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.oooFloatsAccepted))
a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.oooHistogramAccepted)) h.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.oooHistogramAccepted))
a.head.updateMinMaxTime(acc.inOrderMint, acc.inOrderMaxt) h.updateMinMaxTime(acc.inOrderMint, acc.inOrderMaxt)
a.head.updateMinOOOMaxOOOTime(acc.oooMinT, acc.oooMaxT) h.updateMinOOOMaxOOOTime(acc.oooMinT, acc.oooMaxT)
acc.collectOOORecords(a) acc.collectOOORecords(a)
if a.head.wbl != nil { if h.wbl != nil {
if err := a.head.wbl.Log(acc.oooRecords...); err != nil { if err := h.wbl.Log(acc.oooRecords...); err != nil {
// TODO(codesome): Currently WBL logging of ooo samples is best effort here since we cannot try logging // TODO(codesome): Currently WBL logging of ooo samples is best effort here since we cannot try logging
// until we have found what samples become OOO. We can try having a metric for this failure. // until we have found what samples become OOO. We can try having a metric for this failure.
// Returning the error here is not correct because we have already put the samples into the memory, // Returning the error here is not correct because we have already put the samples into the memory,
// hence the append/insert was a success. // hence the append/insert was a success.
a.head.logger.Error("Failed to log out of order samples into the WAL", "err", err) h.logger.Error("Failed to log out of order samples into the WAL", "err", err)
} }
} }
return nil return nil
@ -2007,37 +2202,43 @@ func (a *headAppender) Rollback() (err error) {
if a.closed { if a.closed {
return ErrAppenderClosed return ErrAppenderClosed
} }
defer func() { a.closed = true }() h := a.head
defer a.head.metrics.activeAppenders.Dec() defer func() {
defer a.head.iso.closeAppend(a.appendID) a.unmarkCreatedSeriesAsPendingCommit()
defer a.head.putSeriesBuffer(a.sampleSeries) h.iso.closeAppend(a.appendID)
defer a.unmarkCreatedSeriesAsPendingCommit() h.metrics.activeAppenders.Dec()
a.closed = true
h.putRefSeriesBuffer(a.seriesRefs)
h.putSeriesBuffer(a.series)
}()
var series *memSeries var series *memSeries
for i := range a.samples { fmt.Println("ROLLBACK")
series = a.sampleSeries[i] for _, b := range a.batches {
for i := range b.floats {
series = b.floatSeries[i]
series.Lock() series.Lock()
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
series.pendingCommit = false series.pendingCommit = false
series.Unlock() series.Unlock()
} }
for i := range a.histograms { for i := range b.histograms {
series = a.histogramSeries[i] series = b.histogramSeries[i]
series.Lock() series.Lock()
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
series.pendingCommit = false series.pendingCommit = false
series.Unlock() series.Unlock()
} }
a.head.putAppendBuffer(a.samples) for i := range b.floatHistograms {
a.head.putExemplarBuffer(a.exemplars) series = b.floatHistogramSeries[i]
a.head.putHistogramBuffer(a.histograms) series.Lock()
a.head.putFloatHistogramBuffer(a.floatHistograms) series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
a.head.putMetadataBuffer(a.metadata) series.pendingCommit = false
a.samples = nil series.Unlock()
a.exemplars = nil }
a.histograms = nil b.close(h)
a.metadata = nil }
a.batches = a.batches[:0]
// Series are created in the head memory regardless of rollback. Thus we have // Series are created in the head memory regardless of rollback. Thus we have
// to log them to the WAL in any case. // to log them to the WAL in any case.
return a.log() return a.log()

View File

@ -34,6 +34,7 @@ import (
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/atomic" "go.uber.org/atomic"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
@ -185,7 +186,7 @@ func readTestWAL(t testing.TB, dir string) (recs []any) {
require.NoError(t, sr.Close()) require.NoError(t, sr.Close())
}() }()
dec := record.NewDecoder(labels.NewSymbolTable()) dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
r := wlog.NewReader(sr) r := wlog.NewReader(sr)
for r.Next() { for r.Next() {
@ -5336,8 +5337,6 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
samples []chunks.Sample samples []chunks.Sample
expChunks int expChunks int
err error err error
// If this is empty, samples above will be taken instead of this.
addToExp []chunks.Sample
}{ }{
// Histograms that end up in the expected samples are copied here so that we // Histograms that end up in the expected samples are copied here so that we
// can independently set the CounterResetHint later. // can independently set the CounterResetHint later.
@ -5377,43 +5376,29 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
samples: []chunks.Sample{sample{t: 100, fh: floatHists[4].Copy()}}, samples: []chunks.Sample{sample{t: 100, fh: floatHists[4].Copy()}},
err: storage.ErrOutOfOrderSample, err: storage.ErrOutOfOrderSample,
}, },
// The three next tests all failed before #15177 was fixed.
{ {
// Combination of histograms and float64 in the same commit. The behaviour is undefined, but we want to also
// verify how TSDB would behave. Here the histogram is appended at the end, hence will be considered as out of order.
samples: []chunks.Sample{ samples: []chunks.Sample{
sample{t: 400, f: 4}, sample{t: 400, f: 4},
sample{t: 500, h: hists[5]}, // This won't be committed. sample{t: 500, h: hists[5]},
sample{t: 600, f: 6}, sample{t: 600, f: 6},
}, },
addToExp: []chunks.Sample{ expChunks: 9, // Each of the three samples above creates a new chunk because the type changes.
sample{t: 400, f: 4},
sample{t: 600, f: 6},
},
expChunks: 7, // Only 1 new chunk for float64.
}, },
{ {
// Here the histogram is appended at the end, hence the first histogram is out of order.
samples: []chunks.Sample{ samples: []chunks.Sample{
sample{t: 700, h: hists[7]}, // Out of order w.r.t. the next float64 sample that is appended first. sample{t: 700, h: hists[7]},
sample{t: 800, f: 8}, sample{t: 800, f: 8},
sample{t: 900, h: hists[9]}, sample{t: 900, h: hists[9]},
}, },
addToExp: []chunks.Sample{ expChunks: 12, // Again each sample creates a new chunk.
sample{t: 800, f: 8},
sample{t: 900, h: hists[9].Copy()},
},
expChunks: 8, // float64 added to old chunk, only 1 new for histograms.
}, },
{ {
// Float histogram is appended at the end.
samples: []chunks.Sample{ samples: []chunks.Sample{
sample{t: 1000, fh: floatHists[7]}, // Out of order w.r.t. the next histogram. sample{t: 1000, fh: floatHists[7]},
sample{t: 1100, h: hists[9]}, sample{t: 1100, h: hists[9]},
}, },
addToExp: []chunks.Sample{ expChunks: 14, // Even changes between float and integer histogram create new chunks.
sample{t: 1100, h: hists[9].Copy()},
},
expChunks: 8,
}, },
} }
@ -5431,11 +5416,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
if a.err == nil { if a.err == nil {
require.NoError(t, app.Commit()) require.NoError(t, app.Commit())
if len(a.addToExp) > 0 {
expResult = append(expResult, a.addToExp...)
} else {
expResult = append(expResult, a.samples...) expResult = append(expResult, a.samples...)
}
checkExpChunks(a.expChunks) checkExpChunks(a.expChunks)
} else { } else {
require.NoError(t, app.Rollback()) require.NoError(t, app.Rollback())
@ -6751,7 +6732,27 @@ func TestHeadAppender_AppendFloatWithSameTimestampAsPreviousHistogram(t *testing
func TestHeadAppender_AppendCT(t *testing.T) { func TestHeadAppender_AppendCT(t *testing.T) {
testHistogram := tsdbutil.GenerateTestHistogram(1) testHistogram := tsdbutil.GenerateTestHistogram(1)
testHistogram.CounterResetHint = histogram.NotCounterReset
testFloatHistogram := tsdbutil.GenerateTestFloatHistogram(1) testFloatHistogram := tsdbutil.GenerateTestFloatHistogram(1)
testFloatHistogram.CounterResetHint = histogram.NotCounterReset
// TODO(beorn7): Once issue #15346 is fixed, the CounterResetHint of the
// following two zero histograms should be histogram.CounterReset.
testZeroHistogram := &histogram.Histogram{
Schema: testHistogram.Schema,
ZeroThreshold: testHistogram.ZeroThreshold,
PositiveSpans: testHistogram.PositiveSpans,
NegativeSpans: testHistogram.NegativeSpans,
PositiveBuckets: []int64{0, 0, 0, 0},
NegativeBuckets: []int64{0, 0, 0, 0},
}
testZeroFloatHistogram := &histogram.FloatHistogram{
Schema: testFloatHistogram.Schema,
ZeroThreshold: testFloatHistogram.ZeroThreshold,
PositiveSpans: testFloatHistogram.PositiveSpans,
NegativeSpans: testFloatHistogram.NegativeSpans,
PositiveBuckets: []float64{0, 0, 0, 0},
NegativeBuckets: []float64{0, 0, 0, 0},
}
type appendableSamples struct { type appendableSamples struct {
ts int64 ts int64
fSample float64 fSample float64
@ -6783,12 +6784,10 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 101, h: testHistogram, ct: 1}, {ts: 101, h: testHistogram, ct: 1},
}, },
expectedSamples: func() []chunks.Sample { expectedSamples: func() []chunks.Sample {
hNoCounterReset := *testHistogram
hNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{ return []chunks.Sample{
sample{t: 1, h: &histogram.Histogram{}}, sample{t: 1, h: testZeroHistogram},
sample{t: 100, h: testHistogram}, sample{t: 100, h: testHistogram},
sample{t: 101, h: &hNoCounterReset}, sample{t: 101, h: testHistogram},
} }
}(), }(),
}, },
@ -6799,12 +6798,10 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 101, fh: testFloatHistogram, ct: 1}, {ts: 101, fh: testFloatHistogram, ct: 1},
}, },
expectedSamples: func() []chunks.Sample { expectedSamples: func() []chunks.Sample {
fhNoCounterReset := *testFloatHistogram
fhNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{ return []chunks.Sample{
sample{t: 1, fh: &histogram.FloatHistogram{}}, sample{t: 1, fh: testZeroFloatHistogram},
sample{t: 100, fh: testFloatHistogram}, sample{t: 100, fh: testFloatHistogram},
sample{t: 101, fh: &fhNoCounterReset}, sample{t: 101, fh: testFloatHistogram},
} }
}(), }(),
}, },
@ -6827,12 +6824,10 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 101, h: testHistogram, ct: 1}, {ts: 101, h: testHistogram, ct: 1},
}, },
expectedSamples: func() []chunks.Sample { expectedSamples: func() []chunks.Sample {
hNoCounterReset := *testHistogram
hNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{ return []chunks.Sample{
sample{t: 1, h: &histogram.Histogram{}}, sample{t: 1, h: testZeroHistogram},
sample{t: 100, h: testHistogram}, sample{t: 100, h: testHistogram},
sample{t: 101, h: &hNoCounterReset}, sample{t: 101, h: testHistogram},
} }
}(), }(),
}, },
@ -6843,12 +6838,10 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 101, fh: testFloatHistogram, ct: 1}, {ts: 101, fh: testFloatHistogram, ct: 1},
}, },
expectedSamples: func() []chunks.Sample { expectedSamples: func() []chunks.Sample {
fhNoCounterReset := *testFloatHistogram
fhNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{ return []chunks.Sample{
sample{t: 1, fh: &histogram.FloatHistogram{}}, sample{t: 1, fh: testZeroFloatHistogram},
sample{t: 100, fh: testFloatHistogram}, sample{t: 100, fh: testFloatHistogram},
sample{t: 101, fh: &fhNoCounterReset}, sample{t: 101, fh: testFloatHistogram},
} }
}(), }(),
}, },
@ -6872,9 +6865,9 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 102, h: testHistogram, ct: 101}, {ts: 102, h: testHistogram, ct: 101},
}, },
expectedSamples: []chunks.Sample{ expectedSamples: []chunks.Sample{
sample{t: 1, h: &histogram.Histogram{}}, sample{t: 1, h: testZeroHistogram},
sample{t: 100, h: testHistogram}, sample{t: 100, h: testHistogram},
sample{t: 101, h: &histogram.Histogram{CounterResetHint: histogram.UnknownCounterReset}}, sample{t: 101, h: testZeroHistogram},
sample{t: 102, h: testHistogram}, sample{t: 102, h: testHistogram},
}, },
}, },
@ -6885,9 +6878,9 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 102, fh: testFloatHistogram, ct: 101}, {ts: 102, fh: testFloatHistogram, ct: 101},
}, },
expectedSamples: []chunks.Sample{ expectedSamples: []chunks.Sample{
sample{t: 1, fh: &histogram.FloatHistogram{}}, sample{t: 1, fh: testZeroFloatHistogram},
sample{t: 100, fh: testFloatHistogram}, sample{t: 100, fh: testFloatHistogram},
sample{t: 101, fh: &histogram.FloatHistogram{CounterResetHint: histogram.UnknownCounterReset}}, sample{t: 101, fh: testZeroFloatHistogram},
sample{t: 102, fh: testFloatHistogram}, sample{t: 102, fh: testFloatHistogram},
}, },
}, },
@ -6910,12 +6903,10 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 101, h: testHistogram, ct: 100}, {ts: 101, h: testHistogram, ct: 100},
}, },
expectedSamples: func() []chunks.Sample { expectedSamples: func() []chunks.Sample {
hNoCounterReset := *testHistogram
hNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{ return []chunks.Sample{
sample{t: 1, h: &histogram.Histogram{}}, sample{t: 1, h: testZeroHistogram},
sample{t: 100, h: testHistogram}, sample{t: 100, h: testHistogram},
sample{t: 101, h: &hNoCounterReset}, sample{t: 101, h: testHistogram},
} }
}(), }(),
}, },
@ -6926,12 +6917,10 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 101, fh: testFloatHistogram, ct: 100}, {ts: 101, fh: testFloatHistogram, ct: 100},
}, },
expectedSamples: func() []chunks.Sample { expectedSamples: func() []chunks.Sample {
fhNoCounterReset := *testFloatHistogram
fhNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{ return []chunks.Sample{
sample{t: 1, fh: &histogram.FloatHistogram{}}, sample{t: 1, fh: testZeroFloatHistogram},
sample{t: 100, fh: testFloatHistogram}, sample{t: 100, fh: testFloatHistogram},
sample{t: 101, fh: &fhNoCounterReset}, sample{t: 101, fh: testFloatHistogram},
} }
}(), }(),
}, },

View File

@ -155,7 +155,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
go func() { go func() {
defer close(decoded) defer close(decoded)
var err error var err error
dec := record.NewDecoder(syms) dec := record.NewDecoder(syms, h.logger)
for r.Next() { for r.Next() {
switch dec.Type(r.Record()) { switch dec.Type(r.Record()) {
case record.Series: case record.Series:
@ -767,7 +767,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
go func() { go func() {
defer close(decodedCh) defer close(decodedCh)
dec := record.NewDecoder(syms) dec := record.NewDecoder(syms, h.logger)
for r.Next() { for r.Next() {
var err error var err error
rec := r.Record() rec := r.Record()
@ -1572,7 +1572,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie
refSeries map[chunks.HeadSeriesRef]*memSeries refSeries map[chunks.HeadSeriesRef]*memSeries
exemplarBuf []record.RefExemplar exemplarBuf []record.RefExemplar
syms = labels.NewSymbolTable() // New table for the whole snapshot. syms = labels.NewSymbolTable() // New table for the whole snapshot.
dec = record.NewDecoder(syms) dec = record.NewDecoder(syms, h.logger)
) )
wg.Add(concurrency) wg.Add(concurrency)

View File

@ -18,6 +18,7 @@ package record
import ( import (
"errors" "errors"
"fmt" "fmt"
"log/slog"
"math" "math"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -202,10 +203,11 @@ type RefMmapMarker struct {
// Decoder decodes series, sample, metadata and tombstone records. // Decoder decodes series, sample, metadata and tombstone records.
type Decoder struct { type Decoder struct {
builder labels.ScratchBuilder builder labels.ScratchBuilder
logger *slog.Logger
} }
func NewDecoder(*labels.SymbolTable) Decoder { // FIXME remove t func NewDecoder(_ *labels.SymbolTable, logger *slog.Logger) Decoder { // FIXME remove t
return Decoder{builder: labels.NewScratchBuilder(0)} return Decoder{builder: labels.NewScratchBuilder(0), logger: logger}
} }
// Type returns the type of the record. // Type returns the type of the record.
@ -433,7 +435,7 @@ func (*Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMarke
return markers, nil return markers, nil
} }
func (*Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) { func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) {
dec := encoding.Decbuf{B: rec} dec := encoding.Decbuf{B: rec}
t := Type(dec.Byte()) t := Type(dec.Byte())
if t != HistogramSamples && t != CustomBucketsHistogramSamples { if t != HistogramSamples && t != CustomBucketsHistogramSamples {
@ -457,6 +459,18 @@ func (*Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([
} }
DecodeHistogram(&dec, rh.H) DecodeHistogram(&dec, rh.H)
if !histogram.IsKnownSchema(rh.H.Schema) {
d.logger.Warn("skipping histogram with unknown schema in WAL record", "schema", rh.H.Schema, "timestamp", rh.T)
continue
}
if rh.H.Schema > histogram.ExponentialSchemaMax && rh.H.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// record is from a newer Prometheus version that supports higher
// resolution.
rh.H.ReduceResolution(histogram.ExponentialSchemaMax)
}
histograms = append(histograms, rh) histograms = append(histograms, rh)
} }
@ -525,7 +539,7 @@ func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) {
} }
} }
func (*Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) { func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) {
dec := encoding.Decbuf{B: rec} dec := encoding.Decbuf{B: rec}
t := Type(dec.Byte()) t := Type(dec.Byte())
if t != FloatHistogramSamples && t != CustomBucketsFloatHistogramSamples { if t != FloatHistogramSamples && t != CustomBucketsFloatHistogramSamples {
@ -549,6 +563,18 @@ func (*Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogram
} }
DecodeFloatHistogram(&dec, rh.FH) DecodeFloatHistogram(&dec, rh.FH)
if !histogram.IsKnownSchema(rh.FH.Schema) {
d.logger.Warn("skipping histogram with unknown schema in WAL record", "schema", rh.FH.Schema, "timestamp", rh.T)
continue
}
if rh.FH.Schema > histogram.ExponentialSchemaMax && rh.FH.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// record is from a newer Prometheus version that supports higher
// resolution.
rh.FH.ReduceResolution(histogram.ExponentialSchemaMax)
}
histograms = append(histograms, rh) histograms = append(histograms, rh)
} }

View File

@ -15,11 +15,13 @@
package record package record
import ( import (
"bytes"
"fmt" "fmt"
"math/rand" "math/rand"
"testing" "testing"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
@ -32,7 +34,7 @@ import (
func TestRecord_EncodeDecode(t *testing.T) { func TestRecord_EncodeDecode(t *testing.T) {
var enc Encoder var enc Encoder
dec := NewDecoder(labels.NewSymbolTable()) dec := NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
series := []RefSeries{ series := []RefSeries{
{ {
@ -224,11 +226,151 @@ func TestRecord_EncodeDecode(t *testing.T) {
require.Equal(t, floatHistograms, decGaugeFloatHistograms) require.Equal(t, floatHistograms, decGaugeFloatHistograms)
} }
func TestRecord_DecodeInvalidHistogramSchema(t *testing.T) {
for _, schema := range []int32{-100, 100} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.HistogramSamples(histograms, nil)
decHistograms, err := dec.HistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Empty(t, decHistograms)
require.Contains(t, output.String(), "skipping histogram with unknown schema in WAL record")
})
}
}
func TestRecord_DecodeInvalidFloatHistogramSchema(t *testing.T) {
for _, schema := range []int32{-100, 100} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefFloatHistogramSample{
{
Ref: 56,
T: 1234,
FH: &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.FloatHistogramSamples(histograms, nil)
decHistograms, err := dec.FloatHistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Empty(t, decHistograms)
require.Contains(t, output.String(), "skipping histogram with unknown schema in WAL record")
})
}
}
func TestRecord_DecodeTooHighResolutionHistogramSchema(t *testing.T) {
for _, schema := range []int32{9, 52} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.HistogramSamples(histograms, nil)
decHistograms, err := dec.HistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Len(t, decHistograms, 1)
require.Equal(t, histogram.ExponentialSchemaMax, decHistograms[0].H.Schema)
})
}
}
func TestRecord_DecodeTooHighResolutionFloatHistogramSchema(t *testing.T) {
for _, schema := range []int32{9, 52} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefFloatHistogramSample{
{
Ref: 56,
T: 1234,
FH: &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.FloatHistogramSamples(histograms, nil)
decHistograms, err := dec.FloatHistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Len(t, decHistograms, 1)
require.Equal(t, histogram.ExponentialSchemaMax, decHistograms[0].FH.Schema)
})
}
}
// TestRecord_Corrupted ensures that corrupted records return the correct error. // TestRecord_Corrupted ensures that corrupted records return the correct error.
// Bugfix check for pull/521 and pull/523. // Bugfix check for pull/521 and pull/523.
func TestRecord_Corrupted(t *testing.T) { func TestRecord_Corrupted(t *testing.T) {
var enc Encoder var enc Encoder
dec := NewDecoder(labels.NewSymbolTable()) dec := NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
t.Run("Test corrupted series record", func(t *testing.T) { t.Run("Test corrupted series record", func(t *testing.T) {
series := []RefSeries{ series := []RefSeries{

View File

@ -156,7 +156,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He
exemplars []record.RefExemplar exemplars []record.RefExemplar
metadata []record.RefMetadata metadata []record.RefMetadata
st = labels.NewSymbolTable() // Needed for decoding; labels do not outlive this function. st = labels.NewSymbolTable() // Needed for decoding; labels do not outlive this function.
dec = record.NewDecoder(st) dec = record.NewDecoder(st, logger)
enc record.Encoder enc record.Encoder
buf []byte buf []byte
recs [][]byte recs [][]byte

View File

@ -311,7 +311,7 @@ func TestCheckpoint(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer sr.Close() defer sr.Close()
dec := record.NewDecoder(labels.NewSymbolTable()) dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
var series []record.RefSeries var series []record.RefSeries
var metadata []record.RefMetadata var metadata []record.RefMetadata
r := NewReader(sr) r := NewReader(sr)

View File

@ -494,7 +494,7 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error {
// Also used with readCheckpoint - implements segmentReadFn. // Also used with readCheckpoint - implements segmentReadFn.
func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
var ( var (
dec = record.NewDecoder(labels.NewSymbolTable()) // One table per WAL segment means it won't grow indefinitely. dec = record.NewDecoder(labels.NewSymbolTable(), w.logger) // One table per WAL segment means it won't grow indefinitely.
series []record.RefSeries series []record.RefSeries
samples []record.RefSample samples []record.RefSample
samplesToSend []record.RefSample samplesToSend []record.RefSample
@ -647,7 +647,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
// Used with readCheckpoint - implements segmentReadFn. // Used with readCheckpoint - implements segmentReadFn.
func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error { func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error {
var ( var (
dec = record.NewDecoder(labels.NewSymbolTable()) // Needed for decoding; labels do not outlive this function. dec = record.NewDecoder(labels.NewSymbolTable(), w.logger) // Needed for decoding; labels do not outlive this function.
series []record.RefSeries series []record.RefSeries
) )
for r.Next() && !isClosed(w.quit) { for r.Next() && !isClosed(w.quit) {

View File

@ -27,8 +27,8 @@ var _ slog.Handler = (*JSONFileLogger)(nil)
var _ io.Closer = (*JSONFileLogger)(nil) var _ io.Closer = (*JSONFileLogger)(nil)
// JSONFileLogger represents a logger that writes JSON to a file. It implements // JSONFileLogger represents a logger that writes JSON to a file.
// the slog.Handler interface, as well as the io.Closer interface. // It implements the promql.QueryLogger interface.
type JSONFileLogger struct { type JSONFileLogger struct {
handler slog.Handler handler slog.Handler
file *os.File file *os.File

View File

@ -190,8 +190,13 @@ Loop:
isHistogram := s.H != nil isHistogram := s.H != nil
formatType := format.FormatType() formatType := format.FormatType()
if isHistogram && if isHistogram &&
formatType != expfmt.TypeProtoDelim && formatType != expfmt.TypeProtoText && formatType != expfmt.TypeProtoCompact { !s.H.UsesCustomBuckets() &&
// Can't serve the native histogram. formatType != expfmt.TypeProtoDelim &&
formatType != expfmt.TypeProtoText &&
formatType != expfmt.TypeProtoCompact {
// Can't serve a native histogram with a non-protobuf format.
// (We can serve an NHCB, though, as it is converted to a
// classic histogram for federation.)
// TODO(codesome): Serve them when other protocols get the native histogram support. // TODO(codesome): Serve them when other protocols get the native histogram support.
continue continue
} }
@ -208,20 +213,30 @@ Loop:
} }
if l.Name == labels.MetricName { if l.Name == labels.MetricName {
nameSeen = true nameSeen = true
if l.Value == lastMetricName && // We already have the name in the current MetricDescriptor, and we ignore nameless metrics. // We already have the name in the current MetricDescriptor,
lastWasHistogram == isHistogram && // The sample type matches (float vs histogram). // and we ignore nameless metrics.
// If it was a histogram, the histogram type (counter vs gauge) also matches. if l.Value == lastMetricName &&
(!isHistogram || lastHistogramWasGauge == (s.H.CounterResetHint == histogram.GaugeType)) { // The sample type matches (float vs histogram).
lastWasHistogram == isHistogram &&
// If it was a histogram, the histogram type
// (counter vs gauge) also matches.
(!isHistogram ||
lastHistogramWasGauge == (s.H.CounterResetHint == histogram.GaugeType)) {
return nil return nil
} }
// Since we now check for the sample type and type of histogram above, we will end up // Since we now check for the sample type and
// creating multiple metric families for the same metric name. This would technically be // type of histogram above, we will end up
// an invalid exposition. But since the consumer of this is Prometheus, and Prometheus can // creating multiple metric families for the
// parse it fine, we allow it and bend the rules to make federation possible in those cases. // same metric name. This would technically be
// an invalid exposition. But since the consumer
// of this is Prometheus, and Prometheus can
// parse it fine, we allow it and bend the rules
// to make federation possible in those cases.
// Need to start a new MetricDescriptor. Ship off the old one (if any) before // Need to start a new MetricDescriptor. Ship
// creating the new one. // off the old one (if any) before creating the
// new one.
if protMetricFam != nil { if protMetricFam != nil {
if err := enc.Encode(protMetricFam); err != nil { if err := enc.Encode(protMetricFam); err != nil {
return err return err
@ -278,32 +293,10 @@ Loop:
} }
} else { } else {
lastHistogramWasGauge = s.H.CounterResetHint == histogram.GaugeType lastHistogramWasGauge = s.H.CounterResetHint == histogram.GaugeType
protMetric.Histogram = &dto.Histogram{ if s.H.UsesCustomBuckets() {
SampleCountFloat: proto.Float64(s.H.Count), protMetric.Histogram = makeClassicHistogram(s.H)
SampleSum: proto.Float64(s.H.Sum), } else {
Schema: proto.Int32(s.H.Schema), protMetric.Histogram = makeNativeHistogram(s.H)
ZeroThreshold: proto.Float64(s.H.ZeroThreshold),
ZeroCountFloat: proto.Float64(s.H.ZeroCount),
NegativeCount: s.H.NegativeBuckets,
PositiveCount: s.H.PositiveBuckets,
}
if len(s.H.PositiveSpans) > 0 {
protMetric.Histogram.PositiveSpan = make([]*dto.BucketSpan, len(s.H.PositiveSpans))
for i, sp := range s.H.PositiveSpans {
protMetric.Histogram.PositiveSpan[i] = &dto.BucketSpan{
Offset: proto.Int32(sp.Offset),
Length: proto.Uint32(sp.Length),
}
}
}
if len(s.H.NegativeSpans) > 0 {
protMetric.Histogram.NegativeSpan = make([]*dto.BucketSpan, len(s.H.NegativeSpans))
for i, sp := range s.H.NegativeSpans {
protMetric.Histogram.NegativeSpan[i] = &dto.BucketSpan{
Offset: proto.Int32(sp.Offset),
Length: proto.Uint32(sp.Length),
}
}
} }
} }
lastWasHistogram = isHistogram lastWasHistogram = isHistogram
@ -317,3 +310,68 @@ Loop:
} }
} }
} }
// makeNativeHistogram creates a dto.Histogram representing a native histogram.
// Use only for standard exponential schemas.
func makeNativeHistogram(h *histogram.FloatHistogram) *dto.Histogram {
result := &dto.Histogram{
SampleCountFloat: proto.Float64(h.Count),
SampleSum: proto.Float64(h.Sum),
Schema: proto.Int32(h.Schema),
ZeroThreshold: proto.Float64(h.ZeroThreshold),
ZeroCountFloat: proto.Float64(h.ZeroCount),
NegativeCount: h.NegativeBuckets,
PositiveCount: h.PositiveBuckets,
}
if len(h.PositiveSpans) > 0 {
result.PositiveSpan = make([]*dto.BucketSpan, len(h.PositiveSpans))
for i, sp := range h.PositiveSpans {
result.PositiveSpan[i] = &dto.BucketSpan{
Offset: proto.Int32(sp.Offset),
Length: proto.Uint32(sp.Length),
}
}
}
if len(h.NegativeSpans) > 0 {
result.NegativeSpan = make([]*dto.BucketSpan, len(h.NegativeSpans))
for i, sp := range h.NegativeSpans {
result.NegativeSpan[i] = &dto.BucketSpan{
Offset: proto.Int32(sp.Offset),
Length: proto.Uint32(sp.Length),
}
}
}
return result
}
// makeClassicHistogram creates a dto.Histogram representing a classic
// histogram. Use only for NHCB (schema -53).
func makeClassicHistogram(h *histogram.FloatHistogram) *dto.Histogram {
result := &dto.Histogram{
SampleCountFloat: proto.Float64(h.Count),
SampleSum: proto.Float64(h.Sum),
}
result.Bucket = make([]*dto.Bucket, len(h.CustomValues))
var (
cumulativeCount float64
bucketIter = h.PositiveBucketIterator()
bucketAvailable = bucketIter.Next()
)
for i, le := range h.CustomValues {
for bucketAvailable && int(bucketIter.At().Index) < i {
bucketAvailable = bucketIter.Next()
}
if bucketAvailable && int(bucketIter.At().Index) == i {
cumulativeCount += bucketIter.At().Count
}
result.Bucket[i] = &dto.Bucket{
UpperBound: proto.Float64(le),
CumulativeCountFloat: proto.Float64(cumulativeCount),
}
}
// Note that we do not add the +Inf bucket explicitly. In the protobuf
// exposition format, it is optional. For other exposition formats, the
// code converting the protobuf created here into the actual exposition
// payload will add the +Inf bucket.
return result
}

View File

@ -340,8 +340,19 @@ func TestFederationWithNativeHistograms(t *testing.T) {
}, },
NegativeBuckets: []int64{2, 2, -2, 0}, NegativeBuckets: []int64{2, 2, -2, 0},
} }
nhcb := &histogram.Histogram{
Count: 6,
Sum: 1.234,
Schema: -53,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
},
PositiveBuckets: []int64{3, -1, -1},
CustomValues: []float64{0.1, 0.2, 0.5, 1, 2},
}
app := db.Appender(context.Background()) app := db.Appender(context.Background())
for i := range 6 { for i := range 7 {
l := labels.FromStrings("__name__", "test_metric", "foo", strconv.Itoa(i)) l := labels.FromStrings("__name__", "test_metric", "foo", strconv.Itoa(i))
expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", strconv.Itoa(i)) expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", strconv.Itoa(i))
var err error var err error
@ -360,6 +371,56 @@ func TestFederationWithNativeHistograms(t *testing.T) {
H: histWithoutZeroBucket.ToFloat(nil), H: histWithoutZeroBucket.ToFloat(nil),
Metric: expL, Metric: expL,
}) })
case 6:
_, err = app.AppendHistogram(0, l, 100*60*1000, nhcb.Copy(), nil)
expL = labels.FromStrings("__name__", "test_metric_count", "instance", "", "foo", strconv.Itoa(i))
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 6,
Metric: expL,
})
expL = labels.FromStrings("__name__", "test_metric_sum", "instance", "", "foo", strconv.Itoa(i))
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 1.234,
Metric: expL,
})
expL = labels.FromStrings("__name__", "test_metric_bucket", "instance", "", "foo", strconv.Itoa(i), "le", "0.1")
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 3,
Metric: expL,
})
expL = labels.FromStrings("__name__", "test_metric_bucket", "instance", "", "foo", strconv.Itoa(i), "le", "0.2")
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 5,
Metric: expL,
})
expL = labels.FromStrings("__name__", "test_metric_bucket", "instance", "", "foo", strconv.Itoa(i), "le", "0.5")
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 5,
Metric: expL,
})
expL = labels.FromStrings("__name__", "test_metric_bucket", "instance", "", "foo", strconv.Itoa(i), "le", "1.0")
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 5,
Metric: expL,
})
expL = labels.FromStrings("__name__", "test_metric_bucket", "instance", "", "foo", strconv.Itoa(i), "le", "2.0")
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 6,
Metric: expL,
})
expL = labels.FromStrings("__name__", "test_metric_bucket", "instance", "", "foo", strconv.Itoa(i), "le", "+Inf")
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 6,
Metric: expL,
})
default: default:
hist.ZeroCount++ hist.ZeroCount++
hist.Count++ hist.Count++