Mirror of https://github.com/prometheus/prometheus.git, synced 2025-10-28 06:51:23 +01:00.

Commit ecdf459e9f: Merge branch 'prometheus:main' into fix/functions.mdAndStorage.md

.github/workflows/ci.yml (vendored): 12 lines changed
@@ -226,24 +226,24 @@ jobs:
       - name: Install snmp_exporter/generator dependencies
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
+      - name: Get golangci-lint version
+        id: golangci-lint-version
+        run: echo "version=$(make print-golangci-lint-version)" >> $GITHUB_OUTPUT
       - name: Lint with stringlabels
         uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
         with:
           args: --verbose --build-tags=stringlabels
-          # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v2.2.1
+          version: ${{ steps.golangci-lint-version.outputs.version }}
       - name: Lint with slicelabels
         uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
         with:
           args: --verbose --build-tags=slicelabels
-          # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v2.2.1
+          version: ${{ steps.golangci-lint-version.outputs.version }}
       - name: Lint with dedupelabels
         uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
         with:
           args: --verbose --build-tags=dedupelabels
-          # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v2.2.1
+          version: ${{ steps.golangci-lint-version.outputs.version }}
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'
.golangci.yml
@@ -74,6 +74,9 @@ linters:
     - linters:
         - godot
       source: "^// ==="
+    - linters:
+        - staticcheck
+      text: 'v1\.(Endpoints|EndpointSubset|EndpointPort|EndpointAddress) is deprecated: This API is deprecated in v1.33+'
   warn-unused: true
   settings:
     depguard:
Makefile.common
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v2.2.1
+GOLANGCI_LINT_VERSION ?= v2.4.0
 GOLANGCI_FMT_OPTS ?=
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
@@ -266,6 +266,10 @@ $(GOLANGCI_LINT):
 	| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
 endif

+.PHONY: common-print-golangci-lint-version
+common-print-golangci-lint-version:
+	@echo $(GOLANGCI_LINT_VERSION)
+
 .PHONY: precheck
 precheck::
RELEASE.md
@@ -16,7 +16,8 @@ Please see [the v2.55 RELEASE.md](https://github.com/prometheus/prometheus/blob/
 | v3.4 | 2025-04-29 | Jan-Otto Kröpke (Github: @jkroepke)|
 | v3.5 LTS | 2025-06-03 | Bryan Boreham (GitHub: @bboreham) |
 | v3.6 | 2025-08-01 | Ayoub Mrini (Github: @machine424) |
-| v3.7 | 2025-09-15 | **volunteer welcome** |
+| v3.7 | 2025-09-25 | Arthur Sens and George Krajcsovits (Github: @ArthurSens and @krajorama)|
+| v3.8 | 2025-11-06 | **volunteer welcome** |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
cmd/prometheus/main.go
@@ -275,6 +275,9 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
 		case "promql-delayed-name-removal":
 			c.promqlEnableDelayedNameRemoval = true
 			logger.Info("Experimental PromQL delayed name removal enabled.")
+		case "promql-extended-range-selectors":
+			parser.EnableExtendedRangeSelectors = true
+			logger.Info("Experimental PromQL extended range selectors enabled.")
 		case "":
 			continue
 		case "old-ui":
@@ -561,7 +564,7 @@ func main() {
 	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
 		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)

-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io, promql-extended-range-selectors. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)

 	a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)
discovery/kubernetes/endpoints.go
@@ -32,6 +32,7 @@ import (
 )

 // Endpoints discovers new endpoint targets.
+// Deprecated: The Endpoints API is deprecated starting in K8s v1.33+. Use EndpointSlice.
 type Endpoints struct {
 	logger *slog.Logger

@@ -47,11 +48,11 @@ type Endpoints struct {
 	endpointsStore cache.Store
 	serviceStore   cache.Store

-	queue *workqueue.Type
+	queue *workqueue.Typed[string]
 }

 // NewEndpoints returns a new endpoints discovery.
-// Endpoints API is deprecated in k8s v1.33+, but we should still support it.
+// Deprecated: The Endpoints API is deprecated starting in K8s v1.33+. Use NewEndpointSlice.
 func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node, namespace cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints {
 	if l == nil {
 		l = promslog.NewNopLogger()
@@ -79,7 +80,9 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node,
 		withNodeMetadata:      node != nil,
 		namespaceInf:          namespace,
 		withNamespaceMetadata: namespace != nil,
-		queue:                 workqueue.NewNamed(RoleEndpoint.String()),
+		queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
+			Name: RoleEndpoint.String(),
+		}),
 	}

 	_, err := e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -272,12 +275,11 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 }

 func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
-	keyObj, quit := e.queue.Get()
+	key, quit := e.queue.Get()
 	if quit {
 		return false
 	}
-	defer e.queue.Done(keyObj)
-	key := keyObj.(string)
+	defer e.queue.Done(key)

 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
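The hunk above swaps the untyped `workqueue.Type`, whose `Get` returns `interface{}` and forced the `key := keyObj.(string)` assertion, for the generic `workqueue.Typed[string]`. A minimal sketch of the typed API under the bumped client-go (v0.33); the queue name and key below are illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// With the typed queue, Get returns a string directly, so the old
	// keyObj.(string) type assertion disappears.
	q := workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
		Name: "endpoints-demo", // illustrative name
	})
	q.Add("default/my-endpoints") // namespace/name key, as produced by cache key functions

	key, quit := q.Get() // key is already a string
	if quit {
		return
	}
	defer q.Done(key)
	fmt.Println("processing", key)
}
```

The same swap is applied to the EndpointSlice, Ingress, Node, Pod, and Service discoverers in the hunks that follow.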
discovery/kubernetes/endpointslice.go
@@ -50,7 +50,7 @@ type EndpointSlice struct {
 	endpointSliceStore cache.Store
 	serviceStore       cache.Store

-	queue *workqueue.Type
+	queue *workqueue.Typed[string]
 }

 // NewEndpointSlice returns a new endpointslice discovery.
@@ -79,7 +79,9 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
 		withNodeMetadata:      node != nil,
 		namespaceInf:          namespace,
 		withNamespaceMetadata: namespace != nil,
-		queue:                 workqueue.NewNamed(RoleEndpointSlice.String()),
+		queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
+			Name: RoleEndpointSlice.String(),
+		}),
 	}

 	_, err := e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -236,12 +238,11 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group)
 }

 func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
-	keyObj, quit := e.queue.Get()
+	key, quit := e.queue.Get()
 	if quit {
 		return false
 	}
-	defer e.queue.Done(keyObj)
-	key := keyObj.(string)
+	defer e.queue.Done(key)

 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
discovery/kubernetes/ingress.go
@@ -35,7 +35,7 @@ type Ingress struct {
 	logger   *slog.Logger
 	informer cache.SharedIndexInformer
 	store    cache.Store
-	queue    *workqueue.Type
+	queue    *workqueue.Typed[string]
 	namespaceInf          cache.SharedInformer
 	withNamespaceMetadata bool
 }
@@ -47,10 +47,12 @@ func NewIngress(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
 	ingressDeleteCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleDelete)

 	s := &Ingress{
-		logger:   l,
-		informer: inf,
-		store:    inf.GetStore(),
-		queue:    workqueue.NewNamed(RoleIngress.String()),
+		logger:   l,
+		informer: inf,
+		store:    inf.GetStore(),
+		queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
+			Name: RoleIngress.String(),
+		}),
 		namespaceInf:          namespace,
 		withNamespaceMetadata: namespace != nil,
 	}
@@ -137,12 +139,11 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 }

 func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
-	keyObj, quit := i.queue.Get()
+	key, quit := i.queue.Get()
 	if quit {
 		return false
 	}
-	defer i.queue.Done(keyObj)
-	key := keyObj.(string)
+	defer i.queue.Done(key)

 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
discovery/kubernetes/kubernetes.go
@@ -387,12 +387,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			var informer cache.SharedIndexInformer
 			e := d.client.DiscoveryV1().EndpointSlices(namespace)
 			elw := &cache.ListWatch{
-				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
 					options.FieldSelector = d.selectors.endpointslice.field
 					options.LabelSelector = d.selectors.endpointslice.label
 					return e.List(ctx, options)
 				},
-				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
 					options.FieldSelector = d.selectors.endpointslice.field
 					options.LabelSelector = d.selectors.endpointslice.label
 					return e.Watch(ctx, options)
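These hunks move every `cache.ListWatch` from the deprecated `ListFunc`/`WatchFunc` fields to their context-aware counterparts, so the informer's context reaches the API calls instead of being captured from the enclosing scope. A minimal sketch of the new shape, assuming client-go v0.33; the function name and namespace parameter are illustrative:

```go
package kubernetesexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// podListWatch builds a ListWatch for pods using the context-aware fields.
func podListWatch(client kubernetes.Interface, namespace string) *cache.ListWatch {
	p := client.CoreV1().Pods(namespace)
	return &cache.ListWatch{
		ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
			return p.List(ctx, options) // ctx is supplied by the informer machinery
		},
		WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
			return p.Watch(ctx, options)
		},
	}
}
```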
@@ -402,12 +402,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {

 			s := d.client.CoreV1().Services(namespace)
 			slw := &cache.ListWatch{
-				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
 					options.FieldSelector = d.selectors.service.field
 					options.LabelSelector = d.selectors.service.label
 					return s.List(ctx, options)
 				},
-				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
 					options.FieldSelector = d.selectors.service.field
 					options.LabelSelector = d.selectors.service.label
 					return s.Watch(ctx, options)
@@ -415,12 +415,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			}
 			p := d.client.CoreV1().Pods(namespace)
 			plw := &cache.ListWatch{
-				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
 					options.FieldSelector = d.selectors.pod.field
 					options.LabelSelector = d.selectors.pod.label
 					return p.List(ctx, options)
 				},
-				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
 					options.FieldSelector = d.selectors.pod.field
 					options.LabelSelector = d.selectors.pod.label
 					return p.Watch(ctx, options)
@@ -454,12 +454,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		for _, namespace := range namespaces {
 			e := d.client.CoreV1().Endpoints(namespace)
 			elw := &cache.ListWatch{
-				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
 					options.FieldSelector = d.selectors.endpoints.field
 					options.LabelSelector = d.selectors.endpoints.label
 					return e.List(ctx, options)
 				},
-				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
 					options.FieldSelector = d.selectors.endpoints.field
 					options.LabelSelector = d.selectors.endpoints.label
 					return e.Watch(ctx, options)
@@ -467,12 +467,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			}
 			s := d.client.CoreV1().Services(namespace)
 			slw := &cache.ListWatch{
-				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
 					options.FieldSelector = d.selectors.service.field
 					options.LabelSelector = d.selectors.service.label
 					return s.List(ctx, options)
 				},
-				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
 					options.FieldSelector = d.selectors.service.field
 					options.LabelSelector = d.selectors.service.label
 					return s.Watch(ctx, options)
@@ -480,12 +480,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			}
 			p := d.client.CoreV1().Pods(namespace)
 			plw := &cache.ListWatch{
-				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
 					options.FieldSelector = d.selectors.pod.field
 					options.LabelSelector = d.selectors.pod.label
 					return p.List(ctx, options)
 				},
-				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
 					options.FieldSelector = d.selectors.pod.field
 					options.LabelSelector = d.selectors.pod.label
 					return p.Watch(ctx, options)
@@ -531,12 +531,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		for _, namespace := range namespaces {
 			p := d.client.CoreV1().Pods(namespace)
 			plw := &cache.ListWatch{
-				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
 					options.FieldSelector = d.selectors.pod.field
 					options.LabelSelector = d.selectors.pod.label
 					return p.List(ctx, options)
 				},
-				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
 					options.FieldSelector = d.selectors.pod.field
 					options.LabelSelector = d.selectors.pod.label
 					return p.Watch(ctx, options)
@@ -562,12 +562,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		for _, namespace := range namespaces {
 			s := d.client.CoreV1().Services(namespace)
 			slw := &cache.ListWatch{
-				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
 					options.FieldSelector = d.selectors.service.field
 					options.LabelSelector = d.selectors.service.label
 					return s.List(ctx, options)
 				},
-				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
 					options.FieldSelector = d.selectors.service.field
 					options.LabelSelector = d.selectors.service.label
 					return s.Watch(ctx, options)
@@ -592,12 +592,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		for _, namespace := range namespaces {
 			i := d.client.NetworkingV1().Ingresses(namespace)
 			ilw := &cache.ListWatch{
-				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
 					options.FieldSelector = d.selectors.ingress.field
 					options.LabelSelector = d.selectors.ingress.label
 					return i.List(ctx, options)
 				},
-				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
 					options.FieldSelector = d.selectors.ingress.field
 					options.LabelSelector = d.selectors.ingress.label
 					return i.Watch(ctx, options)
@@ -666,14 +666,14 @@ func retryOnError(ctx context.Context, interval time.Duration, f func() error) (
 	}
 }

-func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer {
+func (d *Discovery) newNodeInformer(_ context.Context) cache.SharedInformer {
 	nlw := &cache.ListWatch{
-		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+		ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
 			options.FieldSelector = d.selectors.node.field
 			options.LabelSelector = d.selectors.node.label
 			return d.client.CoreV1().Nodes().List(ctx, options)
 		},
-		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+		WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
 			options.FieldSelector = d.selectors.node.field
 			options.LabelSelector = d.selectors.node.label
 			return d.client.CoreV1().Nodes().Watch(ctx, options)
@@ -682,13 +682,13 @@ func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer {
 	return d.mustNewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled)
 }

-func (d *Discovery) newNamespaceInformer(ctx context.Context) cache.SharedInformer {
+func (d *Discovery) newNamespaceInformer(_ context.Context) cache.SharedInformer {
 	// We don't filter on NamespaceDiscovery.
 	nlw := &cache.ListWatch{
-		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+		ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
 			return d.client.CoreV1().Namespaces().List(ctx, options)
 		},
-		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+		WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
 			return d.client.CoreV1().Namespaces().Watch(ctx, options)
 		},
 	}
@@ -832,16 +832,16 @@ func (d *Discovery) newIndexedIngressesInformer(ilw *cache.ListWatch) cache.Shar
 	return d.mustNewSharedIndexInformer(ilw, &networkv1.Ingress{}, resyncDisabled, indexers)
 }

-func (d *Discovery) informerWatchErrorHandler(r *cache.Reflector, err error) {
+func (d *Discovery) informerWatchErrorHandler(ctx context.Context, r *cache.Reflector, err error) {
 	d.metrics.failuresCount.Inc()
-	cache.DefaultWatchErrorHandler(r, err)
+	cache.DefaultWatchErrorHandler(ctx, r, err)
 }

 func (d *Discovery) mustNewSharedInformer(lw cache.ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) cache.SharedInformer {
 	informer := cache.NewSharedInformer(lw, exampleObject, defaultEventHandlerResyncPeriod)
 	// Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand.
 	// Such a scenario would suggest an incorrect use of the API, thus the panic.
-	if err := informer.SetWatchErrorHandler(d.informerWatchErrorHandler); err != nil {
+	if err := informer.SetWatchErrorHandlerWithContext(d.informerWatchErrorHandler); err != nil {
 		panic(err)
 	}
 	return informer
@@ -851,7 +851,7 @@ func (d *Discovery) mustNewSharedIndexInformer(lw cache.ListerWatcher, exampleOb
 	informer := cache.NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, indexers)
 	// Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand.
 	// Such a scenario would suggest an incorrect use of the API, thus the panic.
-	if err := informer.SetWatchErrorHandler(d.informerWatchErrorHandler); err != nil {
+	if err := informer.SetWatchErrorHandlerWithContext(d.informerWatchErrorHandler); err != nil {
 		panic(err)
 	}
 	return informer
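Alongside the ListWatch change, the watch error handler gains a context parameter and is registered through `SetWatchErrorHandlerWithContext`. A short sketch of the same instrument-then-delegate pattern, assuming client-go v0.33's `cache.WatchErrorHandlerWithContext` type; the counter is illustrative:

```go
package kubernetesexample

import (
	"context"

	"github.com/prometheus/client_golang/prometheus"
	"k8s.io/client-go/tools/cache"
)

// failureCountingHandler increments a counter and then defers to the
// default client-go behavior, mirroring informerWatchErrorHandler above.
func failureCountingHandler(failures prometheus.Counter) cache.WatchErrorHandlerWithContext {
	return func(ctx context.Context, r *cache.Reflector, err error) {
		failures.Inc()
		cache.DefaultWatchErrorHandler(ctx, r, err)
	}
}
```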
discovery/kubernetes/node.go
@@ -41,7 +41,7 @@ type Node struct {
 	logger   *slog.Logger
 	informer cache.SharedInformer
 	store    cache.Store
-	queue    *workqueue.Type
+	queue    *workqueue.Typed[string]
 }

 // NewNode returns a new node discovery.
@@ -58,7 +58,9 @@ func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.Co
 		logger:   l,
 		informer: inf,
 		store:    inf.GetStore(),
-		queue:    workqueue.NewNamed(RoleNode.String()),
+		queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
+			Name: RoleNode.String(),
+		}),
 	}

 	_, err := n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -111,12 +113,11 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 }

 func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
-	keyObj, quit := n.queue.Get()
+	key, quit := n.queue.Get()
 	if quit {
 		return false
 	}
-	defer n.queue.Done(keyObj)
-	key := keyObj.(string)
+	defer n.queue.Done(key)

 	_, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
discovery/kubernetes/pod.go
@@ -47,7 +47,7 @@ type Pod struct {
 	withNamespaceMetadata bool
 	store                 cache.Store
 	logger                *slog.Logger
-	queue                 *workqueue.Type
+	queue                 *workqueue.Typed[string]
 }

 // NewPod creates a new pod discovery.
@@ -68,7 +68,9 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cac
 		withNamespaceMetadata: namespace != nil,
 		store:                 pods.GetStore(),
 		logger:                l,
-		queue:                 workqueue.NewNamed(RolePod.String()),
+		queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
+			Name: RolePod.String(),
+		}),
 	}
 	_, err := p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(o any) {
@@ -166,12 +168,11 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 }

 func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
-	keyObj, quit := p.queue.Get()
+	key, quit := p.queue.Get()
 	if quit {
 		return false
 	}
-	defer p.queue.Done(keyObj)
-	key := keyObj.(string)
+	defer p.queue.Done(key)

 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
discovery/kubernetes/service.go
@@ -36,7 +36,7 @@ type Service struct {
 	logger   *slog.Logger
 	informer cache.SharedIndexInformer
 	store    cache.Store
-	queue    *workqueue.Type
+	queue    *workqueue.Typed[string]
 	namespaceInf          cache.SharedInformer
 	withNamespaceMetadata bool
 }
@@ -52,10 +52,12 @@ func NewService(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
 	svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete)

 	s := &Service{
-		logger:   l,
-		informer: inf,
-		store:    inf.GetStore(),
-		queue:    workqueue.NewNamed(RoleService.String()),
+		logger:   l,
+		informer: inf,
+		store:    inf.GetStore(),
+		queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
+			Name: RoleService.String(),
+		}),
 		namespaceInf:          namespace,
 		withNamespaceMetadata: namespace != nil,
 	}
@@ -142,12 +144,11 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 }

 func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
-	keyObj, quit := s.queue.Get()
+	key, quit := s.queue.Get()
 	if quit {
 		return false
 	}
-	defer s.queue.Done(keyObj)
-	key := keyObj.(string)
+	defer s.queue.Done(key)

 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
discovery/registry.go
@@ -42,8 +42,8 @@ var (
 	configTypesMu sync.Mutex
 	configTypes   = make(map[reflect.Type]reflect.Type)

-	emptyStructType = reflect.TypeOf(struct{}{})
-	configsType     = reflect.TypeOf(Configs{})
+	emptyStructType = reflect.TypeFor[struct{}]()
+	configsType     = reflect.TypeFor[Configs]()
 )

 // RegisterConfig registers the given Config type for YAML marshaling and unmarshaling.
@@ -54,7 +54,7 @@ func RegisterConfig(config Config) {
 func init() {
 	// N.B.: static_configs is the only Config type implemented by default.
 	// All other types are registered at init by their implementing packages.
-	elemTyp := reflect.TypeOf(&targetgroup.Group{})
+	elemTyp := reflect.TypeFor[*targetgroup.Group]()
 	registerConfig(staticConfigsKey, elemTyp, StaticConfig{})
 }
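`reflect.TypeFor`, added in Go 1.22, derives a `reflect.Type` from a type argument instead of a throwaway value, which is what the registry change uses. A minimal sketch:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Old style: build a value just to inspect its type.
	old := reflect.TypeOf(struct{}{})
	// New style: name the type directly.
	now := reflect.TypeFor[struct{}]()
	fmt.Println(old == now) // true: both describe struct{}

	// Pointer types work the same way, mirroring the *targetgroup.Group change.
	fmt.Println(reflect.TypeFor[*int]()) // *int
}
```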
docs/command-line/prometheus.md
@@ -58,7 +58,7 @@ The Prometheus monitoring server
 | <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
 | <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
 | <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
-| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
+| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io, promql-extended-range-selectors. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
 | <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. | |
 | <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
 | <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |
docs/feature_flags.md
@@ -302,3 +302,42 @@ memory in response to misleading cache growth.
 This is currently implemented using direct I/O.

 For more details, see the [proposal](https://github.com/prometheus/proposals/pull/45).
+
+## Extended Range Selectors
+
+`--enable-feature=promql-extended-range-selectors`
+
+Enables experimental `anchored` and `smoothed` modifiers for PromQL range and instant selectors. These modifiers provide more control over how range boundaries are handled in functions like `rate` and `increase`, especially with missing or irregular data.
+
+Native Histograms are not yet supported by the extended range selectors.
+
+### `anchored`
+
+Uses the most recent sample (within the lookback delta) at the beginning of the range, or alternatively the first sample within the range if there is no sample within the lookback delta. The last sample within the range is also used at the end of the range. No extrapolation or interpolation is applied, so this is useful to get the direct difference between sample values.
+
+Anchored range selectors work with: `resets`, `changes`, `rate`, `increase`, and `delta`.
+
+Example query:
+`increase(http_requests_total[5m] anchored)`
+
+**Note**: When using the `anchored` modifier with the `increase` function, the results returned are integers.
+
+### `smoothed`
+
+In range selectors, linearly interpolates values at the range boundaries, using the sample values before and after the boundaries for an improved estimation that is robust against irregular scrapes and missing samples. However, it requires a sample after the evaluation interval to work properly; see the note below.
+
+For instant selectors, values are linearly interpolated at the evaluation timestamp using the samples immediately before and after that point.
+
+Smoothed range selectors work with: `rate`, `increase`, and `delta`.
+
+Example query:
+`rate(http_requests_total[step()] smoothed)`
+
+> **Note for alerting and recording rules:**
+> The `smoothed` modifier requires samples after the evaluation interval, so using it directly in alerting or recording rules will typically *under-estimate* the result, as future samples are not available at evaluation time.
+> To use `smoothed` safely in rules, you **must** apply a `query_offset` to the rule group (see [documentation](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule_group)) to ensure the calculation window is fully in the past and all needed samples are available.
+> For critical alerting, set the offset to at least one scrape interval; for less critical or more resilient use cases, consider a larger offset (multiple scrape intervals) to tolerate missed scrapes.
+
+For more details, see the [design doc](https://github.com/prometheus/proposals/blob/main/proposals/2025-04-04_extended-range-selectors-semantics.md).
+
+**Note**: Extended Range Selectors are not supported for subqueries.
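Once the flag is enabled, the new modifiers can be exercised through the regular HTTP query API. A hedged sketch, assuming a local server started with `--enable-feature=promql-extended-range-selectors`; the address and metric name are illustrative:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	params := url.Values{}
	// The anchored modifier from the section above; with increase(),
	// integer results are expected in this mode.
	params.Set("query", `increase(http_requests_total[5m] anchored)`)

	resp, err := http.Get("http://localhost:9090/api/v1/query?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // JSON instant-vector result
}
```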
docs/getting_started.md
@@ -79,7 +79,7 @@ navigating to its metrics endpoint:

 Let us explore data that Prometheus has collected about itself. To
 use Prometheus's built-in expression browser, navigate to
-http://localhost:9090/graph and choose the "Table" view within the "Graph" tab.
+http://localhost:9090/query and choose the "Graph" tab.

 As you can gather from [localhost:9090/metrics](http://localhost:9090/metrics),
 one metric that Prometheus exports about itself is named
@@ -113,7 +113,7 @@ For more about the expression language, see the

 ## Using the graphing interface

-To graph expressions, navigate to http://localhost:9090/graph and use the "Graph"
+To graph expressions, navigate to http://localhost:9090/query and use the "Graph"
 tab.

 For example, enter the following expression to graph the per-second rate of chunks
docs/querying/api.md
@@ -348,7 +348,9 @@ You can URL-encode these parameters directly in the request body by using the `P
 or dynamic number of series selectors that may breach server-side URL character limits.

 The `data` section of the query result consists of a list of objects that
-contain the label name/value pairs which identify each series.
+contain the label name/value pairs which identify each series. Note that the
+`start` and `end` times are approximate and the result may contain label values
+for series which have no samples in the given interval.

 The following example returns all series that match either of the selectors
 `up` or `process_start_time_seconds{job="prometheus"}`:
@@ -397,8 +399,9 @@ URL query parameters:
   series from which to read the label names. Optional.
 - `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.

-The `data` section of the JSON response is a list of string label names.
+The `data` section of the JSON response is a list of string label names. Note
+that the `start` and `end` times are approximate and the result may contain
+label names for series which have no samples in the given interval.

 Here is an example.

@@ -451,7 +454,10 @@ URL query parameters:
   series from which to read the label values. Optional.
 - `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.

-The `data` section of the JSON response is a list of string label values.
+The `data` section of the JSON response is a list of string label values. Note
+that the `start` and `end` times are approximate and the result may contain
+label values for series which have no samples in the given interval.

 This example queries for all label values for the `http_status_code` label:
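A hedged sketch of the POST form of the series endpoint described above, which avoids URL length limits for large selector lists; the server address, selectors, and time range are illustrative:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	resp, err := http.PostForm("http://localhost:9090/api/v1/series", url.Values{
		"match[]": {`up`, `process_start_time_seconds{job="prometheus"}`},
		"start":   {"2025-01-01T00:00:00Z"},
		"end":     {"2025-01-01T01:00:00Z"},
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	// The returned label sets may include series with no samples in the
	// (approximate) start/end window, per the note above.
	fmt.Println(string(body))
}
```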
documentation/examples/remote_storage/go.mod
@@ -113,8 +113,8 @@ require (
 	google.golang.org/protobuf v1.36.8 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apimachinery v0.32.3 // indirect
-	k8s.io/client-go v0.32.3 // indirect
+	k8s.io/apimachinery v0.33.5 // indirect
+	k8s.io/client-go v0.33.5 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect

documentation/examples/remote_storage/go.sum
@@ -140,16 +140,14 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
 github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
 github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
-github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
 github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -160,8 +158,8 @@ github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3
 github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
 github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
 github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
 github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg=
@@ -471,21 +469,23 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
-k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
-k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
-k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
-k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
-k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
+k8s.io/api v0.33.5 h1:YR+uhYj05jdRpcksv8kjSliW+v9hwXxn6Cv10aR8Juw=
+k8s.io/api v0.33.5/go.mod h1:2gzShdwXKT5yPGiqrTrn/U/nLZ7ZyT4WuAj3XGDVgVs=
+k8s.io/apimachinery v0.33.5 h1:NiT64hln4TQXeYR18/ES39OrNsjGz8NguxsBgp+6QIo=
+k8s.io/apimachinery v0.33.5/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
+k8s.io/client-go v0.33.5 h1:I8BdmQGxInpkMEnJvV6iG7dqzP3JRlpZZlib3OMFc3o=
+k8s.io/client-go v0.33.5/go.mod h1:W8PQP4MxbM4ypgagVE65mUUqK1/ByQkSALF9tzuQ6u0=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
 sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
go.mod: 65 lines changed
@@ -27,7 +27,7 @@ require (
 	github.com/envoyproxy/go-control-plane/envoy v1.32.4
 	github.com/envoyproxy/protoc-gen-validate v1.2.1
 	github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
-	github.com/fsnotify/fsnotify v1.8.0
+	github.com/fsnotify/fsnotify v1.9.0
 	github.com/go-openapi/strfmt v0.23.0
 	github.com/go-zookeeper/zk v1.0.4
 	github.com/gogo/protobuf v1.3.2
@@ -93,36 +93,13 @@ require (
 	google.golang.org/grpc v1.73.0
 	google.golang.org/protobuf v1.36.8
 	gopkg.in/yaml.v3 v3.0.1
-	k8s.io/api v0.32.3
-	k8s.io/apimachinery v0.32.3
-	k8s.io/client-go v0.32.3
+	k8s.io/api v0.33.5
+	k8s.io/apimachinery v0.33.5
+	k8s.io/client-go v0.33.5
 	k8s.io/klog v1.0.0
 	k8s.io/klog/v2 v2.130.1
 )

-require (
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
-	github.com/cenkalti/backoff/v5 v5.0.2 // indirect
-	github.com/containerd/errdefs v1.0.0 // indirect
-	github.com/containerd/errdefs/pkg v0.3.0 // indirect
-	github.com/gobwas/glob v0.2.3 // indirect
-	github.com/hashicorp/go-version v1.7.0 // indirect
-	github.com/moby/sys/atomicwriter v0.1.0 // indirect
-	github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
-	go.opentelemetry.io/collector/featuregate v1.35.0 // indirect
-	go.opentelemetry.io/collector/internal/telemetry v0.129.0 // indirect
-	go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect
-	go.opentelemetry.io/otel/log v0.12.2 // indirect
-	gopkg.in/yaml.v2 v2.4.0 // indirect
-)
-
 require (
 	cloud.google.com/go/auth v0.16.2 // indirect
 	cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
@@ -132,8 +109,19 @@ require (
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cenkalti/backoff/v5 v5.0.2 // indirect
 	github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect
+	github.com/containerd/errdefs v1.0.0 // indirect
+	github.com/containerd/errdefs/pkg v0.3.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -156,15 +144,14 @@ require (
 	github.com/go-openapi/validate v0.24.0 // indirect
 	github.com/go-resty/resty/v2 v2.16.5 // indirect
 	github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
+	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/google/gnostic-models v0.6.9 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
-	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/s2a-go v0.1.9 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
 	github.com/googleapis/gax-go/v2 v2.14.2 // indirect
-	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
 	github.com/hashicorp/cronexpr v1.1.2 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -174,6 +161,7 @@ require (
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
 	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
+	github.com/hashicorp/go-version v1.7.0 // indirect
 	github.com/hashicorp/golang-lru v0.6.0 // indirect
 	github.com/hashicorp/serf v0.10.1 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
@@ -193,6 +181,7 @@ require (
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
+	github.com/moby/sys/atomicwriter v0.1.0 // indirect
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -209,6 +198,7 @@ require (
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/otlptranslator v0.0.2
 	github.com/prometheus/procfs v0.16.1 // indirect
+	github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
@@ -217,7 +207,11 @@ require (
 	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
 	go.opentelemetry.io/collector/confmap v1.35.0 // indirect
 	go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 // indirect
+	go.opentelemetry.io/collector/featuregate v1.35.0 // indirect
+	go.opentelemetry.io/collector/internal/telemetry v0.129.0 // indirect
 	go.opentelemetry.io/collector/pipeline v0.129.0 // indirect
+	go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect
+	go.opentelemetry.io/otel/log v0.12.2 // indirect
 	go.opentelemetry.io/proto/otlp v1.6.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/crypto v0.41.0 // indirect
@@ -231,11 +225,13 @@ require (
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gotest.tools/v3 v3.0.3 // indirect
-	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
+	k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
+	sigs.k8s.io/randfill v1.0.0 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )

@@ -247,6 +243,3 @@ exclude (
 	github.com/grpc-ecosystem/grpc-gateway v1.14.7
 	google.golang.org/api v0.30.0
 )
-
-// Pin until https://github.com/fsnotify/fsnotify/issues/656 is resolved.
-replace github.com/fsnotify/fsnotify v1.8.0 => github.com/fsnotify/fsnotify v1.7.0
go.sum: 41 lines changed
@@ -141,8 +141,8 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
 github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -198,10 +198,10 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6
 github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
 github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -212,8 +212,6 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX
 github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
 github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
-github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18=
 github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
 github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
@@ -226,8 +224,8 @@ github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3
 github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
 github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
 github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
@@ -716,23 +714,26 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
 gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
 gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
-k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
-k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
-k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
-k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
-k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
-k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
+k8s.io/api v0.33.5 h1:YR+uhYj05jdRpcksv8kjSliW+v9hwXxn6Cv10aR8Juw=
+k8s.io/api v0.33.5/go.mod h1:2gzShdwXKT5yPGiqrTrn/U/nLZ7ZyT4WuAj3XGDVgVs=
+k8s.io/apimachinery v0.33.5 h1:NiT64hln4TQXeYR18/ES39OrNsjGz8NguxsBgp+6QIo=
+k8s.io/apimachinery v0.33.5/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
+k8s.io/client-go v0.33.5 h1:I8BdmQGxInpkMEnJvV6iG7dqzP3JRlpZZlib3OMFc3o=
+k8s.io/client-go v0.33.5/go.mod h1:W8PQP4MxbM4ypgagVE65mUUqK1/ByQkSALF9tzuQ6u0=
 k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
+sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
 sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
@@ -798,23 +798,24 @@ func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] {
 // create false positives here.
 func (h *FloatHistogram) Validate() error {
 	var nCount, pCount float64
-	if h.UsesCustomBuckets() {
+	switch {
+	case IsCustomBucketsSchema(h.Schema):
 		if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
 			return fmt.Errorf("custom buckets: %w", err)
 		}
 		if h.ZeroCount != 0 {
-			return errors.New("custom buckets: must have zero count of 0")
+			return ErrHistogramCustomBucketsZeroCount
 		}
 		if h.ZeroThreshold != 0 {
-			return errors.New("custom buckets: must have zero threshold of 0")
+			return ErrHistogramCustomBucketsZeroThresh
 		}
 		if len(h.NegativeSpans) > 0 {
-			return errors.New("custom buckets: must not have negative spans")
+			return ErrHistogramCustomBucketsNegSpans
 		}
 		if len(h.NegativeBuckets) > 0 {
-			return errors.New("custom buckets: must not have negative buckets")
+			return ErrHistogramCustomBucketsNegBuckets
 		}
-	} else {
+	case IsExponentialSchema(h.Schema):
 		if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
 			return fmt.Errorf("positive side: %w", err)
 		}
@@ -826,8 +827,10 @@ func (h *FloatHistogram) Validate() error {
 			return fmt.Errorf("negative side: %w", err)
 		}
 		if h.CustomValues != nil {
-			return errors.New("histogram with exponential schema must not have custom bounds")
+			return ErrHistogramExpSchemaCustomBounds
 		}
+	default:
+		return InvalidSchemaError(h.Schema)
 	}
 	err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
 	if err != nil {
@@ -21,24 +21,41 @@ import (
 )
 
 const (
-	ExponentialSchemaMax int32 = 8
-	ExponentialSchemaMin int32 = -4
-	CustomBucketsSchema  int32 = -53
+	ExponentialSchemaMax         int32 = 8
+	ExponentialSchemaMaxReserved int32 = 52
+	ExponentialSchemaMin         int32 = -4
+	ExponentialSchemaMinReserved int32 = -9
+	CustomBucketsSchema          int32 = -53
 )
 
 var (
-	ErrHistogramCountNotBigEnough     = errors.New("histogram's observation count should be at least the number of observations found in the buckets")
-	ErrHistogramCountMismatch         = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)")
-	ErrHistogramNegativeBucketCount   = errors.New("histogram has a bucket whose observation count is negative")
-	ErrHistogramSpanNegativeOffset    = errors.New("histogram has a span whose offset is negative")
-	ErrHistogramSpansBucketsMismatch  = errors.New("histogram spans specify different number of buckets than provided")
-	ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few")
-	ErrHistogramCustomBucketsInvalid  = errors.New("histogram custom bounds must be in strictly increasing order")
-	ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite")
-	ErrHistogramsIncompatibleSchema   = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas")
-	ErrHistogramsIncompatibleBounds   = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds")
+	ErrHistogramCountNotBigEnough       = errors.New("histogram's observation count should be at least the number of observations found in the buckets")
+	ErrHistogramCountMismatch           = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)")
+	ErrHistogramNegativeBucketCount     = errors.New("histogram has a bucket whose observation count is negative")
+	ErrHistogramSpanNegativeOffset      = errors.New("histogram has a span whose offset is negative")
+	ErrHistogramSpansBucketsMismatch    = errors.New("histogram spans specify different number of buckets than provided")
+	ErrHistogramCustomBucketsMismatch   = errors.New("histogram custom bounds are too few")
+	ErrHistogramCustomBucketsInvalid    = errors.New("histogram custom bounds must be in strictly increasing order")
+	ErrHistogramCustomBucketsInfinite   = errors.New("histogram custom bounds must be finite")
+	ErrHistogramsIncompatibleSchema     = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas")
+	ErrHistogramsIncompatibleBounds     = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds")
+	ErrHistogramCustomBucketsZeroCount  = errors.New("custom buckets: must have zero count of 0")
+	ErrHistogramCustomBucketsZeroThresh = errors.New("custom buckets: must have zero threshold of 0")
+	ErrHistogramCustomBucketsNegSpans   = errors.New("custom buckets: must not have negative spans")
+	ErrHistogramCustomBucketsNegBuckets = errors.New("custom buckets: must not have negative buckets")
+	ErrHistogramExpSchemaCustomBounds   = errors.New("histogram with exponential schema must not have custom bounds")
+	ErrHistogramsInvalidSchema          = fmt.Errorf("histogram has an invalid schema, which must be between %d and %d for exponential buckets, or %d for custom buckets", ExponentialSchemaMin, ExponentialSchemaMax, CustomBucketsSchema)
+	ErrHistogramsUnknownSchema          = fmt.Errorf("histogram has an unknown schema, which must be between %d and %d for exponential buckets, or %d for custom buckets", ExponentialSchemaMinReserved, ExponentialSchemaMaxReserved, CustomBucketsSchema)
 )
 
+func InvalidSchemaError(s int32) error {
+	return fmt.Errorf("%w, got schema %d", ErrHistogramsInvalidSchema, s)
+}
+
+func UnknownSchemaError(s int32) error {
+	return fmt.Errorf("%w, got schema %d", ErrHistogramsUnknownSchema, s)
+}
+
 func IsCustomBucketsSchema(s int32) bool {
 	return s == CustomBucketsSchema
 }
@@ -47,6 +64,20 @@ func IsExponentialSchema(s int32) bool {
 	return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax
 }
 
+func IsExponentialSchemaReserved(s int32) bool {
+	return s >= ExponentialSchemaMinReserved && s <= ExponentialSchemaMaxReserved
+}
+
+func IsValidSchema(s int32) bool {
+	return IsCustomBucketsSchema(s) || IsExponentialSchema(s)
+}
+
+// IsKnownSchema returns true if we know and accept the schema, but may need to
+// reduce resolution to the nearest supported schema.
+func IsKnownSchema(s int32) bool {
+	return IsCustomBucketsSchema(s) || IsExponentialSchemaReserved(s)
+}
+
 // BucketCount is a type constraint for the count in a bucket, which can be
 // float64 (for type FloatHistogram) or uint64 (for type Histogram).
 type BucketCount interface {
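The schema errors above are designed for `errors.Is` matching: `InvalidSchemaError` and `UnknownSchemaError` wrap their sentinel with `%w` and append the offending value. A standalone sketch with local stand-ins (not the package's exported names) shows why that matters to callers:

```go
package main

import (
	"errors"
	"fmt"
)

// Local stand-in for the ErrHistogramsInvalidSchema sentinel above.
var errInvalidSchema = errors.New("histogram has an invalid schema")

// invalidSchemaError mirrors InvalidSchemaError: wrap with %w, append the value.
func invalidSchemaError(s int32) error {
	return fmt.Errorf("%w, got schema %d", errInvalidSchema, s)
}

func main() {
	err := invalidSchemaError(10)
	fmt.Println(err)                              // histogram has an invalid schema, got schema 10
	fmt.Println(errors.Is(err, errInvalidSchema)) // true: %w keeps the sentinel in the chain
}
```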
@@ -14,7 +14,6 @@
 package histogram
 
 import (
-	"errors"
 	"fmt"
 	"math"
 	"slices"
@@ -425,23 +424,24 @@ func resize[T any](items []T, n int) []T {
 // the total h.Count).
 func (h *Histogram) Validate() error {
 	var nCount, pCount uint64
-	if h.UsesCustomBuckets() {
+	switch {
+	case IsCustomBucketsSchema(h.Schema):
 		if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
 			return fmt.Errorf("custom buckets: %w", err)
 		}
 		if h.ZeroCount != 0 {
-			return errors.New("custom buckets: must have zero count of 0")
+			return ErrHistogramCustomBucketsZeroCount
 		}
 		if h.ZeroThreshold != 0 {
-			return errors.New("custom buckets: must have zero threshold of 0")
+			return ErrHistogramCustomBucketsZeroThresh
 		}
 		if len(h.NegativeSpans) > 0 {
-			return errors.New("custom buckets: must not have negative spans")
+			return ErrHistogramCustomBucketsNegSpans
 		}
 		if len(h.NegativeBuckets) > 0 {
-			return errors.New("custom buckets: must not have negative buckets")
+			return ErrHistogramCustomBucketsNegBuckets
 		}
-	} else {
+	case IsExponentialSchema(h.Schema):
 		if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
 			return fmt.Errorf("positive side: %w", err)
 		}
@@ -453,8 +453,10 @@ func (h *Histogram) Validate() error {
 			return fmt.Errorf("negative side: %w", err)
 		}
 		if h.CustomValues != nil {
-			return errors.New("histogram with exponential schema must not have custom bounds")
+			return ErrHistogramExpSchemaCustomBounds
 		}
+	default:
+		return InvalidSchemaError(h.Schema)
 	}
 	err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
 	if err != nil {
@@ -1565,6 +1565,18 @@ func TestHistogramValidation(t *testing.T) {
 				CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8},
 			},
 		},
+		"schema too high": {
+			h: &Histogram{
+				Schema: 10,
+			},
+			errMsg: `histogram has an invalid schema, which must be between -4 and 8 for exponential buckets, or -53 for custom buckets, got schema 10`,
+		},
+		"schema too low": {
+			h: &Histogram{
+				Schema: -10,
+			},
+			errMsg: `histogram has an invalid schema, which must be between -4 and 8 for exponential buckets, or -53 for custom buckets, got schema -10`,
+		},
 	}
 
 	for testName, tc := range tests {
@@ -117,6 +117,18 @@ func rangeQueryCases() []benchCase {
 			expr:  "rate(sparse[1m])",
 			steps: 10000,
 		},
+		// Smoothed rate.
+		{
+			expr: "rate(a_X[1m] smoothed)",
+		},
+		{
+			expr:  "rate(a_X[1m] smoothed)",
+			steps: 10000,
+		},
+		{
+			expr:  "rate(sparse[1m] smoothed)",
+			steps: 10000,
+		},
 		// Holt-Winters and long ranges.
 		{
 			expr: "double_exponential_smoothing(a_X[1d], 0.3, 0.3)",
@@ -266,6 +278,10 @@ func rangeQueryCases() []benchCase {
 }
 
 func BenchmarkRangeQuery(b *testing.B) {
+	parser.EnableExtendedRangeSelectors = true
+	b.Cleanup(func() {
+		parser.EnableExtendedRangeSelectors = false
+	})
 	stor := teststorage.New(b)
 	stor.DisableCompactions() // Don't want auto-compaction disrupting timings.
 	defer stor.Close()

220 promql/engine.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"io"
 	"log/slog"
+	"maps"
 	"math"
 	"reflect"
 	"runtime"
@@ -124,6 +125,8 @@ var _ QueryLogger = (*logging.JSONFileLogger)(nil)
 
 // QueryLogger is an interface that can be used to log all the queries logged
 // by the engine.
+// logging.JSONFileLogger implements this interface, downstream users may use
+// different implementations.
 type QueryLogger interface {
 	slog.Handler
 	io.Closer
@@ -926,13 +929,27 @@ func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path
 		// because we want to exclude samples that are precisely the
 		// lookback delta before the eval time.
 		start -= durationMilliseconds(s.LookbackDelta) - 1
+		if n.Smoothed {
+			end += durationMilliseconds(s.LookbackDelta)
+		}
 	} else {
-		// For all matrix queries we want to ensure that we have
-		// (end-start) + range selected this way we have `range` data
-		// before the start time. We subtract one from the range to
-		// exclude samples positioned directly at the lower boundary of
-		// the range.
-		start -= durationMilliseconds(evalRange) - 1
+		// For matrix queries, adjust the start and end times to ensure the
+		// correct range of data is selected. For "anchored" selectors, extend
+		// the start time backwards by the lookback delta plus the evaluation
+		// range. For "smoothed" selectors, extend both the start and end times
+		// by the lookback delta, and also extend the start time by the
+		// evaluation range to cover the smoothing window. For standard range
+		// queries, extend the start time backwards by the range (minus one
+		// millisecond) to exclude samples exactly at the lower boundary.
+		switch {
+		case n.Anchored:
+			start -= durationMilliseconds(s.LookbackDelta+evalRange) - 1
+		case n.Smoothed:
+			start -= durationMilliseconds(s.LookbackDelta+evalRange) - 1
+			end += durationMilliseconds(s.LookbackDelta)
+		default:
+			start -= durationMilliseconds(evalRange) - 1
+		}
 	}
 
 	offsetMilliseconds := durationMilliseconds(n.OriginalOffset)
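To make the widened selection window concrete, here is a standalone sketch of the arithmetic in the switch above, using illustrative numbers (a 1m range, the default 5m lookback delta, evaluation at t=600s); it is not engine code, just the same millisecond math:

```go
package main

import "fmt"

func main() {
	const (
		ms        = int64(1000)
		evalTime  = 600 * ms // evaluation timestamp
		evalRange = 60 * ms  // [1m] range selector
		lookback  = 300 * ms // default 5m lookback delta
	)
	// Default: select (range - 1ms) of data ending at the eval time.
	start, end := evalTime-evalRange+1, evalTime
	fmt.Println("default: ", start, end) // 540001 600000
	// Smoothed: widen both edges by the lookback delta so boundary
	// values can be interpolated from samples just outside the range.
	start, end = evalTime-(lookback+evalRange)+1, evalTime+lookback
	fmt.Println("smoothed:", start, end) // 240001 900000
	// Anchored: only the start moves back; the end stays at eval time.
	start, end = evalTime-(lookback+evalRange)+1, evalTime
	fmt.Println("anchored:", start, end) // 240001 600000
}
```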
@@ -979,7 +996,6 @@ func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s
 			evalRange = 0
 			hints.By, hints.Grouping = extractGroupsFromPath(path)
 			n.UnexpandedSeriesSet = querier.Select(ctx, false, hints, n.LabelMatchers...)
-
 		case *parser.MatrixSelector:
 			evalRange = n.Range
 		}
@@ -1524,6 +1540,76 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
 	return result, annos
 }
 
+// smoothSeries is a helper function that smooths the series by interpolating the values
+// based on values before and after the timestamp.
+func (ev *evaluator) smoothSeries(series []storage.Series, offset time.Duration) Matrix {
+	dur := ev.endTimestamp - ev.startTimestamp
+
+	it := storage.NewBuffer(dur + 2*durationMilliseconds(ev.lookbackDelta))
+
+	offMS := offset.Milliseconds()
+	start := ev.startTimestamp - offMS
+	end := ev.endTimestamp - offMS
+	step := ev.interval
+	lb := durationMilliseconds(ev.lookbackDelta)
+
+	var chkIter chunkenc.Iterator
+	mat := make(Matrix, 0, len(series))
+
+	for _, s := range series {
+		ss := Series{Metric: s.Labels()}
+
+		chkIter = s.Iterator(chkIter)
+		it.Reset(chkIter)
+
+		var floats []FPoint
+		var hists []HPoint
+
+		for ts := start; ts <= end; ts += step {
+			matrixStart := ts - lb
+			matrixEnd := ts + lb
+
+			floats, hists = ev.matrixIterSlice(it, matrixStart, matrixEnd, floats, hists)
+			if len(floats) == 0 && len(hists) == 0 {
+				continue
+			}
+
+			if len(hists) > 0 {
+				// TODO: support native histograms.
+				ev.errorf("smoothed and anchored modifiers do not work with native histograms")
+			}
+
+			// Binary search for the first index with T >= ts.
+			i := sort.Search(len(floats), func(i int) bool { return floats[i].T >= ts })
+
+			switch {
+			case i < len(floats) && floats[i].T == ts:
+				// Exact match.
+				ss.Floats = append(ss.Floats, floats[i])
+
+			case i > 0 && i < len(floats):
+				// Interpolate between prev and next.
+				// TODO: detect if the sample is a counter, based on __type__ or metadata.
+				prev, next := floats[i-1], floats[i]
+				val := interpolate(prev, next, ts, false, false)
+				ss.Floats = append(ss.Floats, FPoint{F: val, T: ts})
+
+			case i > 0:
+				// No next point yet; carry forward previous value.
+				prev := floats[i-1]
+				ss.Floats = append(ss.Floats, FPoint{F: prev.F, T: ts})
+
+			default:
+				// i == 0 and floats[0].T > ts: there is no previous data yet; skip.
+			}
+		}
+
+		mat = append(mat, ss)
+	}
+
+	return mat
+}
+
 // evalSeries generates a Matrix between ev.startTimestamp and ev.endTimestamp (inclusive), each point spaced ev.interval apart, from series given offset.
 // For every storage.Series iterator in series, the method iterates in ev.interval sized steps from ev.startTimestamp until and including ev.endTimestamp,
 // collecting every corresponding sample (obtained via ev.vectorSelectorSingle) into a Series.
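The per-step decision in smoothSeries above (exact hit, interpolation, carry-forward, or skip) can be exercised in isolation. This sketch mirrors that switch with a local point type and hypothetical samples; it is illustrative, not engine code:

```go
package main

import (
	"fmt"
	"sort"
)

type point struct {
	t int64   // timestamp in milliseconds
	f float64 // value
}

func main() {
	floats := []point{{0, 1}, {10, 2}, {30, 4}}
	for _, ts := range []int64{10, 20, 40, -5} {
		// First index with a timestamp at or after ts, as in smoothSeries.
		i := sort.Search(len(floats), func(i int) bool { return floats[i].t >= ts })
		switch {
		case i < len(floats) && floats[i].t == ts:
			fmt.Println(ts, "-> exact match:", floats[i].f)
		case i > 0 && i < len(floats):
			prev, next := floats[i-1], floats[i]
			v := prev.f + (next.f-prev.f)*float64(ts-prev.t)/float64(next.t-prev.t)
			fmt.Println(ts, "-> interpolated:", v) // 20 -> 3
		case i > 0:
			fmt.Println(ts, "-> carried forward:", floats[i-1].f)
		default:
			fmt.Println(ts, "-> skipped: no sample at or before ts")
		}
	}
}
```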
@@ -1784,6 +1870,17 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
 			sel := arg.(*parser.MatrixSelector)
 			selVS := sel.VectorSelector.(*parser.VectorSelector)
 
+			switch {
+			case selVS.Anchored:
+				if _, ok := AnchoredSafeFunctions[e.Func.Name]; !ok {
+					ev.errorf("anchored modifier can only be used with: %s - not with %s", strings.Join(slices.Sorted(maps.Keys(AnchoredSafeFunctions)), ", "), e.Func.Name)
+				}
+			case selVS.Smoothed:
+				if _, ok := SmoothedSafeFunctions[e.Func.Name]; !ok {
+					ev.errorf("smoothed modifier can only be used with: %s - not with %s", strings.Join(slices.Sorted(maps.Keys(SmoothedSafeFunctions)), ", "), e.Func.Name)
+				}
+			}
+
 			ws, err := checkAndExpandSeriesSet(ctx, sel)
 			warnings.Merge(ws)
 			if err != nil {
@@ -1792,7 +1889,17 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
 			mat := make(Matrix, 0, len(selVS.Series)) // Output matrix.
 			offset := durationMilliseconds(selVS.Offset)
 			selRange := durationMilliseconds(sel.Range)
-			stepRange := min(selRange, ev.interval)
+
+			var stepRange int64
+			switch {
+			case selVS.Anchored:
+				stepRange = min(selRange+durationMilliseconds(ev.lookbackDelta), ev.interval)
+			case selVS.Smoothed:
+				stepRange = min(selRange+durationMilliseconds(2*ev.lookbackDelta), ev.interval)
+			default:
+				stepRange = min(selRange, ev.interval)
+			}
+
 			// Reuse objects across steps to save memory allocations.
 			var floats []FPoint
 			var histograms []HPoint
@@ -1800,7 +1907,18 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
 			inMatrix := make(Matrix, 1)
 			enh := &EvalNodeHelper{Out: make(Vector, 0, 1), enableDelayedNameRemoval: ev.enableDelayedNameRemoval}
 			// Process all the calls for one time series at a time.
-			it := storage.NewBuffer(selRange)
+			// For anchored and smoothed selectors, we need to iterate over a
+			// larger range than the query range to account for the lookback delta.
+			// For standard range queries, we iterate over the query range.
+			bufferRange := selRange
+			switch {
+			case selVS.Anchored:
+				bufferRange += durationMilliseconds(ev.lookbackDelta)
+			case selVS.Smoothed:
+				bufferRange += durationMilliseconds(2 * ev.lookbackDelta)
+			}
+
+			it := storage.NewBuffer(bufferRange)
 			var chkIter chunkenc.Iterator
 
 			// The last_over_time and first_over_time functions act like
@@ -1849,11 +1967,24 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
 				if ts == ev.startTimestamp || selVS.Timestamp == nil {
 					maxt := ts - offset
 					mint := maxt - selRange
+					switch {
+					case selVS.Anchored:
+						mint -= durationMilliseconds(ev.lookbackDelta)
+					case selVS.Smoothed:
+						mint -= durationMilliseconds(ev.lookbackDelta)
+						maxt += durationMilliseconds(ev.lookbackDelta)
+					}
 					floats, histograms = ev.matrixIterSlice(it, mint, maxt, floats, histograms)
 				}
 				if len(floats)+len(histograms) == 0 {
 					continue
 				}
+				if selVS.Anchored || selVS.Smoothed {
+					if len(histograms) > 0 {
+						// TODO: support native histograms.
+						ev.errorf("smoothed and anchored modifiers do not work with native histograms")
+					}
+				}
 				inMatrix[0].Floats = floats
 				inMatrix[0].Histograms = histograms
 				enh.Ts = ts
@@ -2052,6 +2183,10 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
 		if err != nil {
 			ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
 		}
+		if e.Smoothed {
+			mat := ev.smoothSeries(e.Series, e.Offset)
+			return mat, ws
+		}
 		mat := ev.evalSeries(ctx, e.Series, e.Offset, false)
 		return mat, ws
 
@@ -2348,10 +2483,23 @@ func (ev *evaluator) matrixSelector(ctx context.Context, node *parser.MatrixSele
 		offset = durationMilliseconds(vs.Offset)
 		maxt   = ev.startTimestamp - offset
 		mint   = maxt - durationMilliseconds(node.Range)
-		matrix = make(Matrix, 0, len(vs.Series))
-
-		it = storage.NewBuffer(durationMilliseconds(node.Range))
+		// matrixMint keeps the original mint for smoothed and anchored selectors.
+		matrixMint = mint
+		// matrixMaxt keeps the original maxt for smoothed and anchored selectors.
+		matrixMaxt = maxt
+		matrix      = make(Matrix, 0, len(vs.Series))
+		bufferRange = durationMilliseconds(node.Range)
 	)
+	switch {
+	case vs.Anchored:
+		bufferRange += durationMilliseconds(ev.lookbackDelta)
+		mint -= durationMilliseconds(ev.lookbackDelta)
+	case vs.Smoothed:
+		bufferRange += 2 * durationMilliseconds(ev.lookbackDelta)
+		mint -= durationMilliseconds(ev.lookbackDelta)
+		maxt += durationMilliseconds(ev.lookbackDelta)
+	}
+	it := storage.NewBuffer(bufferRange)
 	ws, err := checkAndExpandSeriesSet(ctx, node)
 	if err != nil {
 		ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
@@ -2370,6 +2518,18 @@ func (ev *evaluator) matrixSelector(ctx context.Context, node *parser.MatrixSele
 		}
 
 		ss.Floats, ss.Histograms = ev.matrixIterSlice(it, mint, maxt, nil, nil)
+		switch {
+		case vs.Anchored:
+			if ss.Histograms != nil {
+				ev.errorf("anchored modifier is not supported with histograms")
+			}
+			ss.Floats = extendFloats(ss.Floats, matrixMint, matrixMaxt, false)
+		case vs.Smoothed:
+			if ss.Histograms != nil {
+				ev.errorf("smoothed modifier is not supported with histograms")
+			}
+			ss.Floats = extendFloats(ss.Floats, matrixMint, matrixMaxt, true)
+		}
 		totalSize := int64(len(ss.Floats)) + int64(totalHPointSize(ss.Histograms))
 		ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, totalSize)
 
@@ -4035,3 +4195,39 @@ func (ev *evaluator) gatherVector(ts int64, input Matrix, output Vector, bufHelp
 
 	return output, bufHelpers
 }
+
+// extendFloats extends the floats to the given mint and maxt.
+// This function is used with matrix selectors that are smoothed or anchored.
+func extendFloats(floats []FPoint, mint, maxt int64, smoothed bool) []FPoint {
+	lastSampleIndex := len(floats) - 1
+
+	firstSampleIndex := max(0, sort.Search(lastSampleIndex, func(i int) bool { return floats[i].T > mint })-1)
+	if smoothed {
+		lastSampleIndex = sort.Search(lastSampleIndex, func(i int) bool { return floats[i].T >= maxt })
+	}
+
+	if floats[lastSampleIndex].T <= mint {
+		return []FPoint{}
+	}
+
+	// TODO: detect if the sample is a counter, based on __type__ or metadata.
+	left := pickOrInterpolateLeft(floats, firstSampleIndex, mint, smoothed, false)
+	right := pickOrInterpolateRight(floats, lastSampleIndex, maxt, smoothed, false)
+
+	// Filter out samples at boundaries or outside the range.
+	if floats[firstSampleIndex].T <= mint {
+		firstSampleIndex++
+	}
+	if floats[lastSampleIndex].T >= maxt {
+		lastSampleIndex--
+	}
+
+	// TODO: Preallocate the length of the new list.
+	out := make([]FPoint, 0)
+	// Create the new floats list with the boundary samples and the inner samples.
+	out = append(out, FPoint{T: mint, F: left})
+	out = append(out, floats[firstSampleIndex:lastSampleIndex+1]...)
+	out = append(out, FPoint{T: maxt, F: right})
+
+	return out
+}
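What extendFloats produces is easiest to see on data. The sketch below re-implements just the gauge interpolation (the `false` arguments above) for hypothetical samples; with a smoothed 20s window ending at t=30s, the left boundary lands exactly on a sample while the right boundary is synthesized between the two samples that straddle it:

```go
package main

import "fmt"

type point struct {
	t int64
	f float64
}

// lerp mirrors the gauge branch of interpolate (no counter handling).
func lerp(p1, p2 point, t int64) float64 {
	return p1.f + (p2.f-p1.f)*float64(t-p1.t)/float64(p2.t-p1.t)
}

func main() {
	samples := []point{{0, 0}, {10000, 5}, {20000, 100}, {40000, 2}}
	mint, maxt := int64(10000), int64(30000) // 20s window ending at t=30s

	left := samples[1].f                        // boundary hits the 10s sample exactly
	right := lerp(samples[2], samples[3], maxt) // 30s sits between 20s and 40s

	// extendFloats would emit: (10s, left), (20s, 100), (30s, right).
	fmt.Println(left, right) // 5 51
}
```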
@@ -1513,6 +1513,160 @@ load 10s
 	}
 }
 
+func TestExtendedRangeSelectors(t *testing.T) {
+	parser.EnableExtendedRangeSelectors = true
+	t.Cleanup(func() {
+		parser.EnableExtendedRangeSelectors = false
+	})
+
+	engine := newTestEngine(t)
+	storage := promqltest.LoadedStorage(t, `
+load 10s
+	metric 1+1x10
+	withreset 1+1x4 1+1x5
+	notregular 0 5 100 2 8
+`)
+	t.Cleanup(func() { storage.Close() })
+
+	tc := []struct {
+		query    string
+		t        time.Time
+		expected promql.Matrix
+	}{
+		{
+			query: "metric[10s] smoothed",
+			t:     time.Unix(10, 0),
+			expected: promql.Matrix{
+				promql.Series{
+					Floats: []promql.FPoint{{F: 1, T: 0}, {F: 2, T: 10000}},
+					Metric: labels.FromStrings("__name__", "metric"),
+				},
+			},
+		},
+		{
+			query: "metric[10s] smoothed",
+			t:     time.Unix(15, 0),
+			expected: promql.Matrix{
+				promql.Series{
+					Floats: []promql.FPoint{{F: 1.5, T: 5000}, {F: 2, T: 10000}, {F: 2.5, T: 15000}},
+					Metric: labels.FromStrings("__name__", "metric"),
+				},
+			},
+		},
+		{
+			query: "metric[10s] smoothed",
+			t:     time.Unix(5, 0),
+			expected: promql.Matrix{
+				promql.Series{
+					Floats: []promql.FPoint{{F: 1, T: -5000}, {F: 1, T: 0}, {F: 1.5, T: 5000}},
+					Metric: labels.FromStrings("__name__", "metric"),
+				},
+			},
+		},
+		{
+			query: "metric[10s] smoothed",
+			t:     time.Unix(105, 0),
+			expected: promql.Matrix{
+				promql.Series{
+					Floats: []promql.FPoint{{F: 10.5, T: 95000}, {F: 11, T: 100000}, {F: 11, T: 105000}},
+					Metric: labels.FromStrings("__name__", "metric"),
+				},
+			},
+		},
+		{
+			query: "withreset[10s] smoothed",
+			t:     time.Unix(45, 0),
+			expected: promql.Matrix{
+				promql.Series{
+					Floats: []promql.FPoint{{F: 4.5, T: 35000}, {F: 5, T: 40000}, {F: 3, T: 45000}},
+					Metric: labels.FromStrings("__name__", "withreset"),
+				},
+			},
+		},
+		{
+			query: "metric[10s] anchored",
+			t:     time.Unix(10, 0),
+			expected: promql.Matrix{
+				promql.Series{
+					Floats: []promql.FPoint{{F: 1, T: 0}, {F: 2, T: 10000}},
+					Metric: labels.FromStrings("__name__", "metric"),
+				},
+			},
+		},
+		{
+			query: "metric[10s] anchored",
+			t:     time.Unix(15, 0),
+			expected: promql.Matrix{
+				promql.Series{
+					Floats: []promql.FPoint{{F: 1, T: 5000}, {F: 2, T: 10000}, {F: 2, T: 15000}},
+					Metric: labels.FromStrings("__name__", "metric"),
+				},
+			},
+		},
+		{
+			query: "metric[10s] anchored",
+			t:     time.Unix(5, 0),
+			expected: promql.Matrix{
+				promql.Series{
+					Floats: []promql.FPoint{{F: 1, T: -5000}, {F: 1, T: 0}, {F: 1, T: 5000}},
+					Metric: labels.FromStrings("__name__", "metric"),
+				},
+			},
+		},
+		{
+			query: "metric[10s] anchored",
+			t:     time.Unix(105, 0),
+			expected: promql.Matrix{
+				promql.Series{
+					Floats: []promql.FPoint{{F: 10, T: 95000}, {F: 11, T: 100000}, {F: 11, T: 105000}},
+					Metric: labels.FromStrings("__name__", "metric"),
+				},
+			},
+		},
+		{
+			query: "withreset[10s] anchored",
+			t:     time.Unix(45, 0),
+			expected: promql.Matrix{
+				promql.Series{
+					Floats: []promql.FPoint{{F: 4, T: 35000}, {F: 5, T: 40000}, {F: 5, T: 45000}},
+					Metric: labels.FromStrings("__name__", "withreset"),
+				},
+			},
+		},
+		{
+			query: "notregular[20s] smoothed",
+			t:     time.Unix(30, 0),
+			expected: promql.Matrix{
+				promql.Series{
+					Floats: []promql.FPoint{{F: 5, T: 10000}, {F: 100, T: 20000}, {F: 2, T: 30000}},
+					Metric: labels.FromStrings("__name__", "notregular"),
+				},
+			},
+		},
+		{
+			query: "notregular[20s] anchored",
+			t:     time.Unix(30, 0),
+			expected: promql.Matrix{
+				promql.Series{
+					Floats: []promql.FPoint{{F: 5, T: 10000}, {F: 100, T: 20000}, {F: 2, T: 30000}},
+					Metric: labels.FromStrings("__name__", "notregular"),
+				},
+			},
+		},
+	}
+
+	for _, tc := range tc {
+		t.Run(tc.query, func(t *testing.T) {
+			engine = promqltest.NewTestEngine(t, false, 0, 100)
+			qry, err := engine.NewInstantQuery(context.Background(), storage, nil, tc.query, tc.t)
+			require.NoError(t, err)
+			res := qry.Exec(context.Background())
+			require.NoError(t, res.Err)
+			require.Equal(t, tc.expected, res.Value)
+		})
+	}
+}
+
 func TestAtModifier(t *testing.T) {
 	engine := newTestEngine(t)
 	storage := promqltest.LoadedStorage(t, `
@@ -3195,89 +3349,6 @@ func TestEngine_Close(t *testing.T) {
 	})
 }
 
-func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
-	engine := newTestEngine(t)
-
-	baseT := timestamp.Time(0)
-	storage := promqltest.LoadedStorage(t, `
-		load 1m
-			some_metric{env="1"} 0+1x4
-			some_metric{env="2"} 0+2x4
-			some_metric{env="3"} {{count:0}}+{{count:1}}x4
-			some_metric_with_stale_marker 0 1 stale 3
-	`)
-	t.Cleanup(func() { require.NoError(t, storage.Close()) })
-
-	testCases := map[string]struct {
-		expr     string
-		expected promql.Matrix
-		ts       time.Time
-	}{
-		"matches series with points in range": {
-			expr: "some_metric[2m]",
-			ts:   baseT.Add(2 * time.Minute),
-			expected: promql.Matrix{
-				{
-					Metric: labels.FromStrings("__name__", "some_metric", "env", "1"),
-					Floats: []promql.FPoint{
-						{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1},
-						{T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 2},
-					},
-				},
-				{
-					Metric: labels.FromStrings("__name__", "some_metric", "env", "2"),
-					Floats: []promql.FPoint{
-						{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 2},
-						{T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 4},
-					},
-				},
-				{
-					Metric: labels.FromStrings("__name__", "some_metric", "env", "3"),
-					Histograms: []promql.HPoint{
-						{T: timestamp.FromTime(baseT.Add(time.Minute)), H: &histogram.FloatHistogram{Count: 1, CounterResetHint: histogram.NotCounterReset}},
-						{T: timestamp.FromTime(baseT.Add(2 * time.Minute)), H: &histogram.FloatHistogram{Count: 2, CounterResetHint: histogram.NotCounterReset}},
-					},
-				},
-			},
-		},
-		"matches no series": {
-			expr:     "some_nonexistent_metric[1m]",
-			ts:       baseT,
-			expected: promql.Matrix{},
-		},
-		"no samples in range": {
-			expr:     "some_metric[1m]",
-			ts:       baseT.Add(20 * time.Minute),
-			expected: promql.Matrix{},
-		},
-		"metric with stale marker": {
-			expr: "some_metric_with_stale_marker[3m]",
-			ts:   baseT.Add(3 * time.Minute),
-			expected: promql.Matrix{
-				{
-					Metric: labels.FromStrings("__name__", "some_metric_with_stale_marker"),
-					Floats: []promql.FPoint{
-						{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1},
-						{T: timestamp.FromTime(baseT.Add(3 * time.Minute)), F: 3},
-					},
-				},
-			},
-		},
-	}
-
-	for name, testCase := range testCases {
-		t.Run(name, func(t *testing.T) {
-			q, err := engine.NewInstantQuery(context.Background(), storage, nil, testCase.expr, testCase.ts)
-			require.NoError(t, err)
-			defer q.Close()
-
-			res := q.Exec(context.Background())
-			require.NoError(t, res.Err)
-			testutil.RequireEqual(t, testCase.expected, res.Value)
-		})
-	}
-}
-
 func TestQueryLookbackDelta(t *testing.T) {
 	var (
 		load = `load 5m
@@ -65,13 +65,127 @@ func funcTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (
 	}}, nil
 }
 
+// pickOrInterpolateLeft returns the value at the left boundary of the range.
+// If interpolation is needed (when smoothed is true and the first sample is before the range start),
+// it returns the interpolated value at the left boundary; otherwise, it returns the first sample's value.
+func pickOrInterpolateLeft(floats []FPoint, first int, rangeStart int64, smoothed, isCounter bool) float64 {
+	if smoothed && floats[first].T < rangeStart {
+		return interpolate(floats[first], floats[first+1], rangeStart, isCounter, true)
+	}
+	return floats[first].F
+}
+
+// pickOrInterpolateRight returns the value at the right boundary of the range.
+// If interpolation is needed (when smoothed is true and the last sample is after the range end),
+// it returns the interpolated value at the right boundary; otherwise, it returns the last sample's value.
+func pickOrInterpolateRight(floats []FPoint, last int, rangeEnd int64, smoothed, isCounter bool) float64 {
+	if smoothed && last > 0 && floats[last].T > rangeEnd {
+		return interpolate(floats[last-1], floats[last], rangeEnd, isCounter, false)
+	}
+	return floats[last].F
+}
+
+// interpolate performs linear interpolation between two points.
+// If isCounter is true and there is a counter reset:
+// - on the left edge, it sets the value to 0.
+// - on the right edge, it adds the left value to the right value.
+// It then calculates the interpolated value at the given timestamp.
+func interpolate(p1, p2 FPoint, t int64, isCounter, leftEdge bool) float64 {
+	y1 := p1.F
+	y2 := p2.F
+	if isCounter && y2 < y1 {
+		if leftEdge {
+			y1 = 0
+		} else {
+			y2 += y1
+		}
+	}
+
+	return y1 + (y2-y1)*float64(t-p1.T)/float64(p2.T-p1.T)
+}
+
+// correctForCounterResets calculates the correction for counter resets.
+// This function is only used for extendedRate functions with smoothed or anchored rates.
+func correctForCounterResets(left, right float64, points []FPoint) float64 {
+	var correction float64
+	prev := left
+	for _, p := range points {
+		if p.F < prev {
+			correction += prev
+		}
+		prev = p.F
+	}
+	if right < prev {
+		correction += prev
+	}
+	return correction
+}
+
+// extendedRate is a utility function for anchored/smoothed rate/increase/delta.
+// It calculates the rate (allowing for counter resets if isCounter is true),
+// extrapolates the first/last sample if needed, and returns
+// the result as either per-second (if isRate is true) or overall.
+func extendedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) {
+	var (
+		ms              = args[0].(*parser.MatrixSelector)
+		vs              = ms.VectorSelector.(*parser.VectorSelector)
+		samples         = vals[0]
+		f               = samples.Floats
+		lastSampleIndex = len(f) - 1
+		rangeStart      = enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
+		rangeEnd        = enh.Ts - durationMilliseconds(vs.Offset)
+		annos           annotations.Annotations
+		smoothed        = vs.Smoothed
+	)
+
+	firstSampleIndex := max(0, sort.Search(lastSampleIndex, func(i int) bool { return f[i].T > rangeStart })-1)
+	if smoothed {
+		lastSampleIndex = sort.Search(lastSampleIndex, func(i int) bool { return f[i].T >= rangeEnd })
+	}
+
+	if f[lastSampleIndex].T <= rangeStart {
+		return enh.Out, annos
+	}
+
+	left := pickOrInterpolateLeft(f, firstSampleIndex, rangeStart, smoothed, isCounter)
+	right := pickOrInterpolateRight(f, lastSampleIndex, rangeEnd, smoothed, isCounter)
+
+	resultFloat := right - left
+
+	if isCounter {
+		// We only need to consider samples exactly within the range
+		// for counter resets correction, as pickOrInterpolateLeft and
+		// pickOrInterpolateRight already handle the resets at boundaries.
+		if f[firstSampleIndex].T <= rangeStart {
+			firstSampleIndex++
+		}
+		if f[lastSampleIndex].T >= rangeEnd {
+			lastSampleIndex--
+		}
+
+		resultFloat += correctForCounterResets(left, right, f[firstSampleIndex:lastSampleIndex+1])
+	}
+	if isRate {
+		resultFloat /= ms.Range.Seconds()
+	}
+
+	return append(enh.Out, Sample{F: resultFloat}), annos
+}
+
 // extrapolatedRate is a utility function for rate/increase/delta.
 // It calculates the rate (allowing for counter resets if isCounter is true),
 // extrapolates if the first/last sample is close to the boundary, and returns
 // the result as either per-second (if isRate is true) or overall.
+//
+// Note: If the vector selector is smoothed or anchored, it will use the
+// extendedRate function instead.
 func extrapolatedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) {
 	ms := args[0].(*parser.MatrixSelector)
 	vs := ms.VectorSelector.(*parser.VectorSelector)
+	if vs.Anchored || vs.Smoothed {
+		return extendedRate(vals, args, enh, isCounter, isRate)
+	}
+
 	var (
 		samples    = vals[0]
 		rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
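A runnable sketch (a local re-implementation with hypothetical samples, not the package's code) of the reset accounting that extendedRate relies on: whenever the series drops, the pre-drop level is added back, which keeps increase-style results monotonic across counter resets:

```go
package main

import "fmt"

// correction mirrors correctForCounterResets above for plain values.
func correction(left, right float64, points []float64) float64 {
	var c float64
	prev := left
	for _, v := range points {
		if v < prev {
			c += prev // counter reset: add back the pre-reset level
		}
		prev = v
	}
	if right < prev {
		c += prev
	}
	return c
}

func main() {
	// Boundaries interpolated to left=4.5 and right=3, with one in-range
	// sample of 5: the drop 5 -> 3 is a reset, so 5 is added back.
	c := correction(4.5, 3, []float64{5})
	fmt.Println(c)           // 5
	fmt.Println(3 - 4.5 + c) // smoothed increase over the window: 3.5
}
```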
@@ -1548,8 +1662,21 @@ func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expression
 	return enh.Out, annos
 }
 
+// pickFirstSampleIndex returns the index of the last sample before
+// or at the range start, or 0 if none exist before the range start.
+// If the vector selector is not anchored, it always returns 0.
+func pickFirstSampleIndex(floats []FPoint, args parser.Expressions, enh *EvalNodeHelper) int {
+	ms := args[0].(*parser.MatrixSelector)
+	vs := ms.VectorSelector.(*parser.VectorSelector)
+	if !vs.Anchored {
+		return 0
+	}
+	rangeStart := enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
+	return max(0, sort.Search(len(floats)-1, func(i int) bool { return floats[i].T > rangeStart })-1)
+}
+
 // === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
-func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcResets(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	floats := matrixVal[0].Floats
 	histograms := matrixVal[0].Histograms
 	resets := 0
@@ -1558,7 +1685,8 @@ func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNod
 	}
 
 	var prevSample, curSample Sample
-	for iFloat, iHistogram := 0, 0; iFloat < len(floats) || iHistogram < len(histograms); {
+	firstSampleIndex := pickFirstSampleIndex(floats, args, enh)
+	for iFloat, iHistogram := firstSampleIndex, 0; iFloat < len(floats) || iHistogram < len(histograms); {
 		switch {
 		// Process a float sample if no histogram sample remains or its timestamp is earlier.
 		// Process a histogram sample if no float sample remains or its timestamp is earlier.
@@ -1571,7 +1699,7 @@ func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNod
 			iHistogram++
 		}
 		// Skip the comparison for the first sample, just initialize prevSample.
-		if iFloat+iHistogram == 1 {
+		if iFloat+iHistogram == 1+firstSampleIndex {
 			prevSample = curSample
 			continue
 		}
@@ -1594,7 +1722,7 @@ func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNod
 }
 
 // === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
-func funcChanges(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcChanges(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	floats := matrixVal[0].Floats
 	histograms := matrixVal[0].Histograms
 	changes := 0
@@ -1603,7 +1731,8 @@ func funcChanges(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNo
 	}
 
 	var prevSample, curSample Sample
-	for iFloat, iHistogram := 0, 0; iFloat < len(floats) || iHistogram < len(histograms); {
+	firstSampleIndex := pickFirstSampleIndex(floats, args, enh)
+	for iFloat, iHistogram := firstSampleIndex, 0; iFloat < len(floats) || iHistogram < len(histograms); {
 		switch {
 		// Process a float sample if no histogram sample remains or its timestamp is earlier.
 		// Process a histogram sample if no float sample remains or its timestamp is earlier.
@@ -1616,7 +1745,7 @@ func funcChanges(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNo
 			iHistogram++
 		}
 		// Skip the comparison for the first sample, just initialize prevSample.
-		if iFloat+iHistogram == 1 {
+		if iFloat+iHistogram == 1+firstSampleIndex {
 			prevSample = curSample
 			continue
 		}
@@ -1920,6 +2049,26 @@ var AtModifierUnsafeFunctions = map[string]struct{}{
 	"timestamp": {},
 }
 
+// AnchoredSafeFunctions are the functions that can be used with the anchored
+// modifier. Anchored modifier returns matrices with samples outside of the
+// boundaries, so not every function can be used with it.
+var AnchoredSafeFunctions = map[string]struct{}{
+	"resets":   {},
+	"changes":  {},
+	"rate":     {},
+	"increase": {},
+	"delta":    {},
+}
+
+// SmoothedSafeFunctions are the functions that can be used with the smoothed
+// modifier. Smoothed modifier returns matrices with samples outside of the
+// boundaries, so not every function can be used with it.
+var SmoothedSafeFunctions = map[string]struct{}{
+	"rate":     {},
+	"increase": {},
+	"delta":    {},
+}
+
 type vectorByValueHeap Vector
 
 func (s vectorByValueHeap) Len() int {
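The two allow-lists above are what the evaluator consults before running a call (see the switch added in engine.go earlier in this diff). A minimal sketch of that gate, with the map mirrored locally rather than imported:

```go
package main

import "fmt"

// Mirrors AnchoredSafeFunctions above.
var anchoredSafe = map[string]struct{}{
	"resets": {}, "changes": {}, "rate": {}, "increase": {}, "delta": {},
}

func main() {
	for _, fn := range []string{"rate", "last_over_time"} {
		if _, ok := anchoredSafe[fn]; ok {
			fmt.Printf("%s: anchored matrix argument accepted\n", fn)
		} else {
			fmt.Printf("%s: anchored matrix argument rejected\n", fn)
		}
	}
}
```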
@@ -79,3 +79,24 @@ func TestKahanSumInc(t *testing.T) {
 		})
 	}
 }
+
+func TestInterpolate(t *testing.T) {
+	tests := []struct {
+		p1, p2    FPoint
+		t         int64
+		isCounter bool
+		expected  float64
+	}{
+		{FPoint{T: 1, F: 100}, FPoint{T: 2, F: 200}, 1, false, 100},
+		{FPoint{T: 0, F: 100}, FPoint{T: 2, F: 200}, 1, false, 150},
+		{FPoint{T: 0, F: 200}, FPoint{T: 2, F: 100}, 1, false, 150},
+		{FPoint{T: 0, F: 200}, FPoint{T: 2, F: 0}, 1, true, 200},
+		{FPoint{T: 0, F: 200}, FPoint{T: 2, F: 100}, 1, true, 250},
+		{FPoint{T: 0, F: 500}, FPoint{T: 2, F: 100}, 1, true, 550},
+		{FPoint{T: 0, F: 500}, FPoint{T: 10, F: 0}, 1, true, 500},
+	}
+	for _, test := range tests {
+		result := interpolate(test.p1, test.p2, test.t, test.isCounter, false)
+		require.Equal(t, test.expected, result)
+	}
+}
@@ -226,6 +226,11 @@ type VectorSelector struct {
 	// This is the case when VectorSelector is used to represent the info function's second argument.
 	BypassEmptyMatcherCheck bool
 
+	// Anchored is true when the VectorSelector is anchored.
+	Anchored bool
+	// Smoothed is true when the VectorSelector is smoothed.
+	Smoothed bool
+
 	PosRange posrange.PositionRange
 }
 
@@ -141,6 +141,8 @@ GROUP_LEFT
 GROUP_RIGHT
 IGNORING
 OFFSET
+SMOOTHED
+ANCHORED
 ON
 WITHOUT
 %token keywordsEnd
@@ -187,7 +189,7 @@ START_METRIC_SELECTOR
 %type <int> int
 %type <uint> uint
 %type <float> number series_value signed_number signed_or_unsigned_number
-%type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector duration_expr paren_duration_expr positive_duration_expr offset_duration_expr
+%type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr anchored_expr smoothed_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector duration_expr paren_duration_expr positive_duration_expr offset_duration_expr
 
 %start start
 
@@ -230,6 +232,8 @@ expr :
                 | matrix_selector
                 | number_duration_literal
                 | offset_expr
+                | anchored_expr
+                | smoothed_expr
                 | paren_expr
                 | string_literal
                 | subquery_expr
@@ -464,6 +468,20 @@ offset_expr: expr OFFSET offset_duration_expr
                 { yylex.(*parser).unexpected("offset", "number, duration, or step()"); $$ = $1 }
                 ;
 
+/*
+ * Anchored and smoothed modifiers
+ */
+
+anchored_expr: expr ANCHORED
+                {
+                        yylex.(*parser).setAnchored($1)
+                }
+
+smoothed_expr: expr SMOOTHED
+                {
+                        yylex.(*parser).setSmoothed($1)
+                }
+
 /*
  * @ modifiers.
  */
 
File diff suppressed because it is too large.
@@ -129,6 +129,8 @@ var key = map[string]ItemType{
 
 	// Keywords.
 	"offset":   OFFSET,
+	"smoothed": SMOOTHED,
+	"anchored": ANCHORED,
 	"by":       BY,
 	"without":  WITHOUT,
 	"on":       ON,
@@ -42,6 +42,9 @@ var parserPool = sync.Pool{
 // ExperimentalDurationExpr is a flag to enable experimental duration expression parsing.
 var ExperimentalDurationExpr bool
 
+// EnableExtendedRangeSelectors is a flag to enable experimental extended range selectors.
+var EnableExtendedRangeSelectors bool
+
 type Parser interface {
 	ParseExpr() (Expr, error)
 	Close()
@@ -1021,6 +1024,52 @@ func (p *parser) addOffsetExpr(e Node, expr *DurationExpr) {
 	*endPosp = p.lastClosing
 }
 
+func (p *parser) setAnchored(e Node) {
+	if !EnableExtendedRangeSelectors {
+		p.addParseErrf(e.PositionRange(), "anchored modifier is experimental and not enabled")
+		return
+	}
+	switch s := e.(type) {
+	case *VectorSelector:
+		s.Anchored = true
+		if s.Smoothed {
+			p.addParseErrf(e.PositionRange(), "anchored and smoothed modifiers cannot be used together")
+		}
+	case *MatrixSelector:
+		s.VectorSelector.(*VectorSelector).Anchored = true
+		if s.VectorSelector.(*VectorSelector).Smoothed {
+			p.addParseErrf(e.PositionRange(), "anchored and smoothed modifiers cannot be used together")
+		}
+	case *SubqueryExpr:
+		p.addParseErrf(e.PositionRange(), "anchored modifier is not supported for subqueries")
+	default:
+		p.addParseErrf(e.PositionRange(), "anchored modifier not implemented")
+	}
+}
+
+func (p *parser) setSmoothed(e Node) {
+	if !EnableExtendedRangeSelectors {
+		p.addParseErrf(e.PositionRange(), "smoothed modifier is experimental and not enabled")
+		return
+	}
+	switch s := e.(type) {
+	case *VectorSelector:
+		s.Smoothed = true
+		if s.Anchored {
+			p.addParseErrf(e.PositionRange(), "anchored and smoothed modifiers cannot be used together")
+		}
+	case *MatrixSelector:
+		s.VectorSelector.(*VectorSelector).Smoothed = true
+		if s.VectorSelector.(*VectorSelector).Anchored {
+			p.addParseErrf(e.PositionRange(), "anchored and smoothed modifiers cannot be used together")
+		}
+	case *SubqueryExpr:
+		p.addParseErrf(e.PositionRange(), "smoothed modifier is not supported for subqueries")
+	default:
+		p.addParseErrf(e.PositionRange(), "smoothed modifier not implemented")
+	}
+}
+
 // setTimestamp is used to set the timestamp from the @ modifier in the generated parser.
 func (p *parser) setTimestamp(e Node, ts float64) {
 	if math.IsInf(ts, -1) || math.IsInf(ts, 1) || math.IsNaN(ts) ||
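Because both setters bail out unless EnableExtendedRangeSelectors is set, the new keywords are opt-in at parse time. A sketch of that behaviour (import path assumed from the repository layout; the error text is the one added above):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Flag off: parsing fails with "smoothed modifier is experimental and not enabled".
	_, err := parser.ParseExpr("rate(metric[1m] smoothed)")
	fmt.Println(err)

	// Flag on: the expression parses, and String() renders the modifier back.
	parser.EnableExtendedRangeSelectors = true
	expr, err := parser.ParseExpr("rate(metric[1m] smoothed)")
	fmt.Println(expr, err) // rate(metric[1m] smoothed) <nil>
}
```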
@@ -263,11 +263,18 @@ func (node *MatrixSelector) String() string {
 	vecSelector.Timestamp = nil
 	vecSelector.StartOrEnd = 0
 
+	extendedAttribute := ""
+	switch {
+	case vecSelector.Anchored:
+		extendedAttribute = " anchored"
+	case vecSelector.Smoothed:
+		extendedAttribute = " smoothed"
+	}
 	rangeStr := model.Duration(node.Range).String()
 	if node.RangeExpr != nil {
 		rangeStr = node.RangeExpr.String()
 	}
-	str := fmt.Sprintf("%s[%s]%s%s", vecSelector.String(), rangeStr, at, offset)
+	str := fmt.Sprintf("%s[%s]%s%s%s", vecSelector.String(), rangeStr, extendedAttribute, at, offset)
 
 	vecSelector.OriginalOffset, vecSelector.OriginalOffsetExpr, vecSelector.Timestamp, vecSelector.StartOrEnd = offsetVal, offsetExprVal, atVal, preproc
 
@@ -380,6 +387,12 @@ func (node *VectorSelector) String() string {
 		b.WriteString(" @ end()")
 	}
+	switch {
+	case node.Anchored:
+		b.WriteString(" anchored")
+	case node.Smoothed:
+		b.WriteString(" smoothed")
+	}
 	switch {
 	case node.OriginalOffsetExpr != nil:
 		b.WriteString(" offset ")
 		node.OriginalOffsetExpr.writeTo(b)
@@ -48,6 +48,11 @@ func TestConcurrentRangeQueries(t *testing.T) {
 	}
 	// Enable experimental functions testing
 	parser.EnableExperimentalFunctions = true
+	parser.EnableExtendedRangeSelectors = true
+	t.Cleanup(func() {
+		parser.EnableExperimentalFunctions = false
+		parser.EnableExtendedRangeSelectors = false
+	})
 	engine := promqltest.NewTestEngineWithOpts(t, opts)
 
 	const interval = 10000 // 10s interval.
@@ -106,8 +106,44 @@ eval range from <start> to <end> step <step> <query>
 * `<start>` and `<end>` specify the time range of the range query, and use the same syntax as `<time>`
 * `<step>` is the step of the range query, and uses the same syntax as `<time>` (eg. `30s`)
 * `<expect>` (optional) specifies expected annotations, errors, or result ordering.
+* `<expect range vector>` (optional) for an instant query you can specify expected range vector timestamps
+* `<expect string> "<string>"` (optional) for matching a string literal
 * `<series>` and `<points>` specify the expected values, and follow the same syntax as for `load` above
 
+### `expect string`
+
+This can be used to specify that a string literal is the expected result.
+
+Note that this is only supported on instant queries.
+
+For example:
+
+```
+eval instant at 50m ("Foo")
+  expect string "Foo"
+```
+
+The expected string value must be within quotes. Double or back quotes are supported.
+
+### `expect range vector`
+
+This can be used to specify the expected timestamps on a range vector resulting from an instant query.
+
+```
+expect range vector <start> to <end> step <step>
+```
+
+For example:
+```
+load 10s
+  some_metric{env="a"} 1+1x5
+  some_metric{env="b"} 2+2x5
+eval instant at 1m some_metric[1m]
+  expect range vector from 10s to 1m step 10s
+  some_metric{env="a"} 2 3 4 5 6
+  some_metric{env="b"} 4 6 8 10 12
+```
+
 ### `expect` Syntax
 
 ```
@ -53,11 +53,14 @@ var (
|
||||
patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn|info))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`)
|
||||
patExpect = regexp.MustCompile(`^expect\s+(ordered|fail|warn|no_warn|info|no_info)(?:\s+(regex|msg):(.+))?$`)
|
||||
patMatchAny = regexp.MustCompile(`^.*$`)
|
||||
patExpectRange = regexp.MustCompile(`^` + rangeVectorPrefix + `\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+)$`)
|
||||
)
|
||||
|
||||
const (
|
||||
defaultEpsilon = 0.000001 // Relative error allowed for sample values.
|
||||
DefaultMaxSamplesPerQuery = 10000
|
||||
rangeVectorPrefix = "expect range vector"
|
||||
expectStringPrefix = "expect string"
|
||||
)
|
||||
|
||||
type TBRun interface {
|
||||
@ -120,9 +123,11 @@ func RunBuiltinTestsWithStorage(t TBRun, engine promql.QueryEngine, newStorage f
|
||||
t.Cleanup(func() {
|
||||
parser.EnableExperimentalFunctions = false
|
||||
parser.ExperimentalDurationExpr = false
|
||||
parser.EnableExtendedRangeSelectors = false
|
||||
})
|
||||
parser.EnableExperimentalFunctions = true
|
||||
parser.ExperimentalDurationExpr = true
|
||||
parser.EnableExtendedRangeSelectors = true
|
||||
|
||||
files, err := fs.Glob(testsFs, "*/*.test")
|
||||
require.NoError(t, err)
|
||||
@ -314,7 +319,58 @@ func validateExpectedCmds(cmd *evalCmd) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*test) parseEval(lines []string, i int) (int, *evalCmd, error) {
|
||||
// Given an expected range vector definition, parse the line and return the start & end times and the step duration.
|
||||
// ie parse a line such as "expect range vector from 10s to 1m step 10s".
|
||||
// The from and to are parsed as durations and their values added to epoch(0) to form a time.Time.
|
||||
// The step is parsed as a duration and returned as a time.Duration.
|
||||
func (t *test) parseExpectRangeVector(line string) (*time.Time, *time.Time, *time.Duration, error) {
|
||||
parts := patExpectRange.FindStringSubmatch(line)
|
||||
if len(parts) != 4 {
|
||||
return nil, nil, nil, fmt.Errorf("invalid range vector definition %q", line)
|
||||
}
|
||||
|
||||
from := parts[1]
|
||||
to := parts[2]
|
||||
step := parts[3]
|
||||
|
||||
parsedFrom, parsedTo, parsedStep, err := t.parseDurations(from, to, step)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
start := testStartTime.Add(time.Duration(*parsedFrom))
|
||||
end := testStartTime.Add(time.Duration(*parsedTo))
|
||||
stepDuration := time.Duration(*parsedStep)
|
||||
|
||||
return &start, &end, &stepDuration, nil
|
||||
}

// parseDurations parses the given from, to and step strings to Durations.
// Additionally, a check is performed to ensure to is not before from.
func (*test) parseDurations(from, to, step string) (*model.Duration, *model.Duration, *model.Duration, error) {
	parsedFrom, err := model.ParseDuration(from)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("invalid start timestamp definition %q: %w", from, err)
	}

	parsedTo, err := model.ParseDuration(to)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("invalid end timestamp definition %q: %w", to, err)
	}

	if parsedTo < parsedFrom {
		return nil, nil, nil, fmt.Errorf("invalid test definition, end timestamp (%s) is before start timestamp (%s)", to, from)
	}

	parsedStep, err := model.ParseDuration(step)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("invalid step definition %q: %w", step, err)
	}

	return &parsedFrom, &parsedTo, &parsedStep, nil
}

func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
	instantParts := patEvalInstant.FindStringSubmatch(lines[i])
	rangeParts := patEvalRange.FindStringSubmatch(lines[i])

@ -355,10 +411,11 @@ func (*test) parseEval(lines []string, i int) (int, *evalCmd, error) {
	}

	var cmd *evalCmd
	var offset model.Duration

	if isInstant {
		at := instantParts[2]
		offset, err := model.ParseDuration(at)
		offset, err = model.ParseDuration(at)
		if err != nil {
			return i, nil, formatErr("invalid timestamp definition %q: %s", at, err)
		}
@ -369,26 +426,12 @@ func (*test) parseEval(lines []string, i int) (int, *evalCmd, error) {
		to := rangeParts[3]
		step := rangeParts[4]

		parsedFrom, err := model.ParseDuration(from)
		parsedFrom, parsedTo, parsedStep, err := t.parseDurations(from, to, step)
		if err != nil {
			return i, nil, formatErr("invalid start timestamp definition %q: %s", from, err)
			return i, nil, formatErr(err.Error())
		}

		parsedTo, err := model.ParseDuration(to)
		if err != nil {
			return i, nil, formatErr("invalid end timestamp definition %q: %s", to, err)
		}

		if parsedTo < parsedFrom {
			return i, nil, formatErr("invalid test definition, end timestamp (%s) is before start timestamp (%s)", to, from)
		}

		parsedStep, err := model.ParseDuration(step)
		if err != nil {
			return i, nil, formatErr("invalid step definition %q: %s", step, err)
		}

		cmd = newRangeEvalCmd(expr, testStartTime.Add(time.Duration(parsedFrom)), testStartTime.Add(time.Duration(parsedTo)), time.Duration(parsedStep), i+1)
		cmd = newRangeEvalCmd(expr, testStartTime.Add(time.Duration(*parsedFrom)), testStartTime.Add(time.Duration(*parsedTo)), time.Duration(*parsedStep), i+1)
	}

	switch mod {
@ -404,6 +447,8 @@ func (*test) parseEval(lines []string, i int) (int, *evalCmd, error) {
		cmd.info = true
	}

	var expectRangeVector bool

	for j := 1; i+1 < len(lines); j++ {
		i++
		defLine := lines[i]
@ -426,6 +471,32 @@ func (*test) parseEval(lines []string, i int) (int, *evalCmd, error) {
			break
		}

		if strings.HasPrefix(defLine, rangeVectorPrefix) {
			start, end, step, err := t.parseExpectRangeVector(defLine)
			if err != nil {
				return i, nil, formatErr("%w", err)
			}

			expectRangeVector = true
			cmd.start = *start
			cmd.end = *end
			cmd.step = *step
			cmd.eval = *end
			cmd.excludeFromRangeQuery = true

			continue
		}

		if strings.HasPrefix(defLine, expectStringPrefix) {
			expectString, err := parseAsStringLiteral(defLine)
			if err != nil {
				return i, nil, formatErr("%w", err)
			}
			cmd.expectedString = expectString
			cmd.excludeFromRangeQuery = true
			continue
		}

		// This would still allow a metric named 'expect' if it is written as 'expect{}'.
		if strings.Split(defLine, " ")[0] == "expect" {
			annoType, expectedAnno, err := parseExpect(defLine)
@ -450,15 +521,35 @@ func (*test) parseEval(lines []string, i int) (int, *evalCmd, error) {
			return i, nil, err
		}

		// Currently, we are not expecting any matrices.
		if len(vals) > 1 && isInstant {
			return i, nil, formatErr("expecting multiple values in instant evaluation not allowed")
		// Only allow a range vector for an instant query where we have defined the expected range vector timestamps.
		if len(vals) > 1 && isInstant && !expectRangeVector {
			return i, nil, formatErr("expecting multiple values in instant evaluation not allowed. consider using 'expect range vector' directive to enable a range vector result for an instant query")
		}
		cmd.expectMetric(j, metric, vals...)
	}
	return i, cmd, nil
}

// parseAsStringLiteral returns the expected string from an expect string expression.
// A bare "expect string" line with no literal is not valid - a quoted string literal is required.
func parseAsStringLiteral(line string) (string, error) {
	if line == expectStringPrefix {
		return "", errors.New("expected string literal not valid - a quoted string literal is required")
	}

	str := strings.TrimPrefix(line, expectStringPrefix+" ")
	if len(str) == 0 {
		return "", errors.New("expected string literal not valid - a quoted string literal is required")
	}

	str, err := strconv.Unquote(str)
	if err != nil {
		return "", errors.New("expected string literal not valid - check that the string is correctly quoted")
	}

	return str, nil
}
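Since the literal goes through strconv.Unquote, both double-quoted and backquoted Go literals are accepted, while bare words are rejected. A quick standalone illustration:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, in := range []string{`"Foo"`, "`Foo`", "Foo"} {
		s, err := strconv.Unquote(in)
		fmt.Printf("%s -> %q, err=%v\n", in, s, err)
	}
	// "Foo" -> "Foo", err=<nil>
	// `Foo` -> "Foo", err=<nil>
	// Foo   -> "", err=invalid syntax
}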

// getLines returns trimmed lines after removing the comments.
func getLines(input string) []string {
	lines := strings.Split(input, "\n")
@ -692,6 +783,7 @@ type evalCmd struct {
	end   time.Time
	step  time.Duration
	line  int
	eval  time.Time

	isRange                   bool // if false, instant query
	fail, warn, ordered, info bool
@ -703,6 +795,12 @@ type evalCmd struct {
	metrics      map[uint64]labels.Labels
	expectScalar bool
	expected     map[uint64]entry

	// We expect a string literal - this is set instead of expected.
	expectedString string

	// If true and this is an instant query, then we will not test this in a range query scenario.
	excludeFromRangeQuery bool
}

func (ev *evalCmd) isOrdered() bool {
@ -772,6 +870,7 @@ func newInstantEvalCmd(expr string, start time.Time, line int) *evalCmd {
	return &evalCmd{
		expr:  expr,
		start: start,
		eval:  start,
		line:  line,

		metrics: map[uint64]labels.Labels{},
@ -1016,7 +1115,10 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
		if !almost.Equal(exp0.Value, val.V, defaultEpsilon) {
			return fmt.Errorf("expected scalar %v but got %v", exp0.Value, val.V)
		}

	case promql.String:
		if ev.expectedString != val.V {
			return fmt.Errorf("expected string \"%v\" but got \"%v\"", ev.expectedString, val.V)
		}
	default:
		panic(fmt.Errorf("promql.Test.compareResult: unexpected result type %T", result))
	}
@ -1354,11 +1456,12 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
}

func (t *test) execInstantEval(cmd *evalCmd, engine promql.QueryEngine) error {
	queries, err := atModifierTestCases(cmd.expr, cmd.start)
	queries, err := atModifierTestCases(cmd.expr, cmd.eval)
	if err != nil {
		return err
	}
	queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...)
	queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.eval}}, queries...)

	for _, iq := range queries {
		if err := t.runInstantQuery(iq, cmd, engine); err != nil {
			return err
@ -1395,6 +1498,12 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
		return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
	}

	// This query has been explicitly excluded from range query testing,
	// i.e. it could be that the query result is not an instant vector or scalar.
	if cmd.excludeFromRangeQuery {
		return nil
	}

	// Check query returns same result in range mode,
	// by checking against the middle step.
	q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)

@ -948,6 +948,144 @@ eval instant at 0m http_requests
`,
			expectedError: `error in eval http_requests (line 12): invalid expect lines, multiple expect fail lines are not allowed`,
		},
		"instant query with string literal": {
			input: `
eval instant at 50m ("Foo")
  expect string "Foo"
`,
		},
		"instant query with string literal with leading space": {
			input: `
eval instant at 50m (" Foo")
  expect string " Foo"
`,
		},
		"instant query with string literal with trailing space": {
			input: `
eval instant at 50m ("Foo ")
  expect string "Foo "
`,
		},
		"instant query with string literal as space": {
			input: `
eval instant at 50m (" ")
  expect string " "
`,
		},
		"instant query with string literal with empty string": {
			input: `
eval instant at 50m ("")
  expect string
`,
			expectedError: `error in eval ("") (line 3): expected string literal not valid - a quoted string literal is required`,
		},
		"instant query with string literal with correctly quoted empty string": {
			input: `
eval instant at 50m ("")
  expect string ""
`,
		},
		"instant query with string literal - not quoted": {
			input: `
eval instant at 50m ("Foo")
  expect string Foo
`,
			expectedError: `error in eval ("Foo") (line 3): expected string literal not valid - check that the string is correctly quoted`,
		},
		"instant query with empty string literal": {
			input: `
eval instant at 50m ("Foo")
  expect string ""
`,
			expectedError: `error in eval ("Foo") (line 2): expected string "" but got "Foo"`,
		},
		"instant query with error string literal": {
			input: `
eval instant at 50m ("Foo")
  expect string "Bar"
`,
			expectedError: `error in eval ("Foo") (line 2): expected string "Bar" but got "Foo"`,
		},
		"instant query with range result - result does not have a series that is expected": {
			input: `
load 10s
  some_metric{env="a"} 1+1x5

eval instant at 1m some_metric[1m]
  expect range vector from 10s to 1m step 10s
  some_metric{env="a"} 2 3 4 5 6
  some_metric{env="b"} 4 6 8 10 12
`,
			expectedError: `error in eval some_metric[1m] (line 5): expected metric {__name__="some_metric", env="b"} not found`,
		},
		"instant query with range result - result has a series which is not expected": {
			input: `
load 10s
  some_metric{env="a"} 1+1x5
  some_metric{env="b"} 1+1x5

eval instant at 1m some_metric[1m]
  expect range vector from 10s to 1m step 10s
  some_metric{env="a"} 2 3 4 5 6
`,
			expectedError: `error in eval some_metric[1m] (line 6): unexpected metric {__name__="some_metric", env="b"} in result, has 5 float points [2 @[10000] 3 @[20000] 4 @[30000] 5 @[40000] 6 @[50000]] and 0 histogram points []`,
		},
		"instant query with range result - result has a value that is not expected": {
			input: `
load 10s
  some_metric{env="a"} 1+1x5

eval instant at 1m some_metric[1m]
  expect range vector from 10s to 1m step 10s
  some_metric{env="a"} 9 3 4 5 6
`,
			expectedError: `error in eval some_metric[1m] (line 5): expected float value at index 0 (t=10000) for {__name__="some_metric", env="a"} to be 9, but got 2 (result has 5 float points [2 @[10000] 3 @[20000] 4 @[30000] 5 @[40000] 6 @[50000]] and 0 histogram points [])`,
		},
		"instant query with range result - invalid expect range vector directive": {
			input: `
load 10s
  some_metric{env="a"} 1+1x5

eval instant at 1m some_metric[1m]
  expect range vector from 10s
  some_metric{env="a"} 2 3 4 5 6
`,
			expectedError: `error in eval some_metric[1m] (line 6): invalid range vector definition "expect range vector from 10s"`,
		},
		"instant query with range result - result matches expected value": {
			input: `
load 1m
  some_metric{env="1"} 0+1x4
  some_metric{env="2"} 0+2x4

eval instant at 2m some_metric[2m]
  expect range vector from 1m to 2m step 60s
  some_metric{env="1"} 1 2
  some_metric{env="2"} 2 4
`,
		},
		"instant query with range result - result is missing a sample": {
			input: `
load 1m
  some_metric_with_stale_marker 0 1 stale 3

eval instant at 3m some_metric_with_stale_marker[3m]
  expect range vector from 1m to 3m step 60s
  some_metric_with_stale_marker{} 1 2 3
`,
			expectedError: `error in eval some_metric_with_stale_marker[3m] (line 5): expected 3 float points and 0 histogram points for {__name__="some_metric_with_stale_marker"}, but got 2 float points [1 @[60000] 3 @[180000]] and 0 histogram points []`,
		},
		"instant query with range result - result has a sample where none is expected": {
			input: `
load 1m
  some_metric_with_stale_marker 0 1 2 3

eval instant at 3m some_metric_with_stale_marker[3m]
  expect range vector from 1m to 3m step 60s
  some_metric_with_stale_marker{} 1 _ 3
`,
			expectedError: `error in eval some_metric_with_stale_marker[3m] (line 5): expected 2 float points and 0 histogram points for {__name__="some_metric_with_stale_marker"}, but got 3 float points [1 @[60000] 2 @[120000] 3 @[180000]] and 0 histogram points []`,
		},
	}

	for name, testCase := range testCases {

414
promql/promqltest/testdata/extended_vectors.test
vendored
Normal file
@ -0,0 +1,414 @@
# Reference from PROM-52: Complete dataset

load 15s
  metric 1+1x4 9+1x4

eval instant at 5s increase(metric[1m])

eval instant at 20s increase(metric[1m])
  {} 1.833333333

eval instant at 35s increase(metric[1m])
  {} 2.833333333

eval instant at 50s increase(metric[1m])
  {} 4

eval instant at 65s increase(metric[1m])
  {} 4

eval instant at 80s increase(metric[1m])
  {} 8

eval instant at 95s increase(metric[1m])
  {} 8

eval instant at 110s increase(metric[1m])
  {} 8

eval instant at 125s increase(metric[1m])
  {} 4

eval instant at 5s increase(metric[1m] anchored)
  {} 0

eval instant at 20s increase(metric[1m] anchored)
  {} 1

eval instant at 35s increase(metric[1m] anchored)
  {} 2

eval instant at 50s increase(metric[1m] anchored)
  {} 3

eval instant at 65s increase(metric[1m] anchored)
  {} 4

eval instant at 80s increase(metric[1m] anchored)
  {} 7

eval instant at 95s increase(metric[1m] anchored)
  {} 7

eval instant at 110s increase(metric[1m] anchored)
  {} 7

eval instant at 125s increase(metric[1m] anchored)
  {} 7

eval instant at 5s increase(metric[1m] smoothed)
  {} 0.333333333

eval instant at 20s increase(metric[1m] smoothed)
  {} 1.333333333

eval instant at 35s increase(metric[1m] smoothed)
  {} 2.333333333

eval instant at 50s increase(metric[1m] smoothed)
  {} 3.333333333

eval instant at 65s increase(metric[1m] smoothed)
  {} 5

eval instant at 80s increase(metric[1m] smoothed)
  {} 7

eval instant at 95s increase(metric[1m] smoothed)
  {} 7

eval instant at 110s increase(metric[1m] smoothed)
  {} 7

eval instant at 125s increase(metric[1m] smoothed)
  {} 6

# Reference from PROM-52: Partial dataset

clear
load 15s
  metric 1+1x2 _ _ 9+1x4

eval instant at 5s increase(metric[1m])

eval instant at 20s increase(metric[1m])
  {} 1.833333333

eval instant at 35s increase(metric[1m])
  {} 2.833333333

eval instant at 50s increase(metric[1m])
  {} 3.166666666

eval instant at 65s increase(metric[1m])
  {} 2.166666666

eval instant at 80s increase(metric[1m])
  {} 8

eval instant at 95s increase(metric[1m])
  {} 1.833333333

eval instant at 110s increase(metric[1m])
  {} 2.833333333

eval instant at 125s increase(metric[1m])
  {} 4

eval instant at 5s increase(metric[1m] anchored)
  {} 0

eval instant at 20s increase(metric[1m] anchored)
  {} 1

eval instant at 35s increase(metric[1m] anchored)
  {} 2

eval instant at 50s increase(metric[1m] anchored)
  {} 2

eval instant at 65s increase(metric[1m] anchored)
  {} 2

eval instant at 80s increase(metric[1m] anchored)
  {} 7

eval instant at 95s increase(metric[1m] anchored)
  {} 7

eval instant at 110s increase(metric[1m] anchored)
  {} 8

eval instant at 125s increase(metric[1m] anchored)
  {} 9

eval instant at 5s increase(metric[1m] smoothed)
  {} 0.333333333

eval instant at 20s increase(metric[1m] smoothed)
  {} 1.333333333

eval instant at 35s increase(metric[1m] smoothed)
  {} 2.666666666

eval instant at 50s increase(metric[1m] smoothed)
  {} 4.666666666

eval instant at 65s increase(metric[1m] smoothed)
  {} 6.333333333

eval instant at 80s increase(metric[1m] smoothed)
  {} 7

eval instant at 95s increase(metric[1m] smoothed)
  {} 6.666666666

eval instant at 110s increase(metric[1m] smoothed)
  {} 5.666666666

eval instant at 125s increase(metric[1m] smoothed)
  {} 4.666666666

# Test that interval is left-open.

clear
load 1m
  metric 1 2 _ 4 5

eval instant at 2m increase(metric[1m] smoothed)
  {} 1

eval instant at 2m increase(metric[1m] anchored)

# Basic test with counter resets

clear
load 1m
  metric{id="1"} 1+1x4 1+1x4
  metric{id="2"} 3 2+2x9
  metric{id="3"} 5+3x2 3+3x6

eval instant at 1m30s increase(metric[1m])

eval instant at 1m30s increase(metric[1m] smoothed)
  {id="1"} 1
  {id="2"} 2
  {id="3"} 3

eval instant at 1m30s increase(metric[1m] anchored)
  {id="1"} 1
  {id="2"} 2
  {id="3"} 3

eval instant at 1m30s delta(metric[1m])

eval instant at 1m30s delta(metric[1m] anchored)
  {id="1"} 1
  {id="2"} -1
  {id="3"} 3

eval instant at 3m0s delta(metric[1m] anchored)
  {id="1"} 1
  {id="2"} 2
  {id="3"} -8

eval instant at 3m30s delta(metric[1m] anchored)
  {id="1"} 1
  {id="2"} 2
  {id="3"} -8

eval instant at 6m increase(metric[5m])
  {id="1"} 5
  {id="2"} 10
  {id="3"} 15

eval instant at 6m15s increase(metric[5m] smoothed)
  {id="1"} 5
  {id="2"} 10
  {id="3"} 15

eval instant at 6m increase(metric[5m] smoothed)
  {id="1"} 5
  {id="2"} 10
  {id="3"} 15

eval instant at 5m increase(metric[5m] anchored)
  {id="1"} 5
  {id="2"} 10
  {id="3"} 15

eval instant at 15m increase(metric[5m] anchored)

clear
load 1m
  metric{id="1"} 11 -1 100 0
  metric{id="2"} 0 0 100 0 0 11 -1

eval instant at 5m30s delta(metric[5m] smoothed)
  {id="1"} -5
  {id="2"} 5

eval instant at 5m45s delta(metric[5m] smoothed)
  {id="1"} -2
  {id="2"} 2

clear
load 1m
  metric{id="1"} 1+1x10
  metric{id="2"} 1 1+1x10
  metric{id="3"} 99-1x10
  metric{id="4"} 99 99-1x10

eval instant at 5m changes(metric[5m])
  {id="1"} 4
  {id="2"} 4
  {id="3"} 4
  {id="4"} 4

eval instant at 5m30s changes(metric[5m])
  {id="1"} 4
  {id="2"} 4
  {id="3"} 4
  {id="4"} 4

eval instant at 5m0s changes(metric[5m] anchored)
  {id="1"} 5
  {id="2"} 4
  {id="3"} 5
  {id="4"} 4

eval instant at 6m changes(metric[5m] anchored)
  {id="1"} 5
  {id="2"} 5
  {id="3"} 5
  {id="4"} 5

eval instant at 5m30s changes(metric[5m] anchored)
  {id="1"} 5
  {id="2"} 4
  {id="3"} 5
  {id="4"} 4

eval instant at 5m30s resets(metric[5m])
  {id="1"} 0
  {id="2"} 0
  {id="3"} 4
  {id="4"} 4

eval instant at 5m30s resets(metric[5m] anchored)
  {id="1"} 0
  {id="2"} 0
  {id="3"} 5
  {id="4"} 4

clear
load 1m
  metric{id="1"} 2 _ 1 _ _ _ _ _ 0
  metric{id="2"} 99-1x10

eval instant at 2m changes(metric[1m])
  {id="1"} 0
  {id="2"} 0

eval instant at 3m changes(metric[1m])
  {id="2"} 0

eval instant at 2m changes(metric[1m] anchored)
  {id="1"} 1
  {id="2"} 1

eval instant at 3m changes(metric[1m] anchored)
  {id="1"} 1
  {id="2"} 1

eval instant at 8m changes(metric[1m] anchored)
  {id="1"} 0
  {id="2"} 1

eval instant at 8m changes(metric[1m1ms] anchored)
  {id="1"} 1
  {id="2"} 2

eval instant at 2m resets(metric[1m])
  {id="1"} 0
  {id="2"} 0

eval instant at 3m resets(metric[1m])
  {id="2"} 0

eval instant at 2m resets(metric[1m] anchored)
  {id="1"} 1
  {id="2"} 1

eval instant at 3m resets(metric[1m] anchored)
  {id="1"} 1
  {id="2"} 1

eval instant at 8m resets(metric[1m] anchored)
  {id="1"} 0
  {id="2"} 1

eval instant at 8m resets(metric[1m1ms] anchored)
  {id="1"} 1
  {id="2"} 2

clear
load 1m
  metric 9 8 5 4

eval instant at 2m15s increase(metric[2m] smoothed)
  {} 12

clear
eval instant at 1m deriv(foo[3m] smoothed)
  expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with deriv

eval instant at 1m resets(foo[3m] smoothed)
  expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with resets

eval instant at 1m changes(foo[3m] smoothed)
  expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with changes

eval instant at 1m max_over_time(foo[3m] smoothed)
  expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with max_over_time

eval instant at 1m predict_linear(foo[3m] smoothed, 4)
  expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with predict_linear

eval instant at 1m deriv(foo[3m] anchored)
  expect fail msg: anchored modifier can only be used with: changes, delta, increase, rate, resets - not with deriv

eval instant at 1m resets(foo[3m] anchored)

eval instant at 1m changes(foo[3m] anchored)

eval instant at 1m max_over_time(foo[3m] anchored)
  expect fail msg: anchored modifier can only be used with: changes, delta, increase, rate, resets - not with max_over_time

eval instant at 1m predict_linear(foo[3m] anchored, 4)
  expect fail msg: anchored modifier can only be used with: changes, delta, increase, rate, resets - not with predict_linear

clear
load 10s
  metric 1+1x10
  withreset 1+1x4 1+1x5
  notregular 0 5 100 2 8

eval instant at 10s metric smoothed
  metric 2

eval instant at 15s metric smoothed
  metric 2.5

eval instant at 5s metric smoothed
  metric 1.5

eval instant at 105s metric smoothed
  metric 11

eval instant at 45s withreset smoothed
  withreset 3

eval instant at 30s notregular smoothed
  notregular 2
15
promql/promqltest/testdata/literals.test
vendored
@ -57,3 +57,18 @@ eval instant at 50m 0 / 0

eval instant at 50m 1 % 0
  NaN

eval instant at 50m ("Foo")
  expect string `Foo`

eval instant at 50m "Foo"
  expect string "Foo"

eval instant at 50m " Foo "
  expect string " Foo "

eval instant at 50m ("")
  expect string ""

eval instant at 50m ""
  expect string ""
@ -1677,3 +1677,18 @@ eval instant at 1m histogram_count(histogram unless histogram_quantile(0.5, hist
eval instant at 1m histogram_quantile(0.5, histogram unless histogram_count(histogram) == 0)
  {} 3.1748021039363987

clear

# Regression test for:
# https://github.com/prometheus/prometheus/issues/14172
# https://github.com/prometheus/prometheus/issues/15177
load 1m
  mixed_metric1 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}} 4 5 {{schema:0 sum:18 count:10 buckets:[3 4 3]}}
  mixed_metric2 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}}

# The order of the float vs native histograms is preserved.
eval range from 0 to 8m step 1m mixed_metric1
  mixed_metric1{} 1 2 3 {{count:4 sum:5 buckets:[1 2 1]}} {{count:6 sum:8 buckets:[1 4 1]}} 4 5 {{schema:0 sum:18 count:10 buckets:[3 4 3]}} {{schema:0 sum:18 count:10 buckets:[3 4 3]}}

eval range from 0 to 5m step 1m mixed_metric2
  mixed_metric2 1 2 3 {{count:4 sum:5 buckets:[1 2 1]}} {{count:6 sum:8 buckets:[1 4 1]}} {{count:6 sum:8 buckets:[1 4 1]}}

34
promql/promqltest/testdata/range_queries.test
vendored
@ -71,3 +71,37 @@ eval range from 0 to 2m step 1m requests * 2
  {job="1", __address__="bar"} 200 200 200

clear

load 10s
  some_metric{env="a"} 1+1x5
  some_metric{env="b"} 2+2x5

# Return a range vector - note the use of the expect range vector directive, which defines the expected range.
eval instant at 1m some_metric[1m]
  expect range vector from 10s to 1m step 10s
  some_metric{env="a"} 2 3 4 5 6
  some_metric{env="b"} 4 6 8 10 12

clear

load 1m
  some_metric{env="1"} 0+1x4
  some_metric{env="2"} 0+2x4
  some_metric{env="3"} {{count:0}}+{{count:1}}x4
  some_metric_with_stale_marker 0 1 stale 3

eval instant at 2m some_metric[2m]
  expect range vector from 1m to 2m step 60s
  some_metric{env="1"} 1 2
  some_metric{env="2"} 2 4
  some_metric{env="3"} {{count:1 counter_reset_hint:not_reset}} {{count:2 counter_reset_hint:not_reset}}

eval instant at 3m some_metric_with_stale_marker[3m]
  expect range vector from 1m to 3m step 60s
  some_metric_with_stale_marker{} 1 _ 3

eval instant at 1m some_nonexistent_metric[1m]
  expect range vector from 10s to 1m step 10s

eval instant at 10m some_metric[1m]
  expect range vector from 9m10s to 10m step 1m
@ -414,12 +414,12 @@ type maxSchemaAppender struct {

func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
	if h != nil {
		if histogram.IsExponentialSchema(h.Schema) && h.Schema > app.maxSchema {
		if histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > app.maxSchema {
			h = h.ReduceResolution(app.maxSchema)
		}
	}
	if fh != nil {
		if histogram.IsExponentialSchema(fh.Schema) && fh.Schema > app.maxSchema {
		if histogram.IsExponentialSchemaReserved(fh.Schema) && fh.Schema > app.maxSchema {
			fh = fh.ReduceResolution(app.maxSchema)
		}
	}

@ -34,8 +34,11 @@ jobs:
      - name: Install snmp_exporter/generator dependencies
        run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
        if: github.repository == 'prometheus/snmp_exporter'
      - name: Get golangci-lint version
        id: golangci-lint-version
        run: echo "version=$(make print-golangci-lint-version)" >> $GITHUB_OUTPUT
      - name: Lint
        uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
        with:
          args: --verbose
          version: v2.2.1
          version: ${{ steps.golangci-lint-version.outputs.version }}

@ -388,6 +388,7 @@ type concreteSeriesIterator struct {
	histogramsCur int
	curValType    chunkenc.ValueType
	series        *concreteSeries
	err           error
}

func newConcreteSeriesIterator(series *concreteSeries) chunkenc.Iterator {
@ -404,10 +405,14 @@ func (c *concreteSeriesIterator) reset(series *concreteSeries) {
	c.histogramsCur = -1
	c.curValType = chunkenc.ValNone
	c.series = series
	c.err = nil
}

// Seek implements storage.SeriesIterator.
func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
	if c.err != nil {
		return chunkenc.ValNone
	}
	if c.floatsCur == -1 {
		c.floatsCur = 0
	}
@ -439,7 +444,7 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
		if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp {
			c.curValType = chunkenc.ValFloat
		} else {
			c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur])
			c.curValType = chunkenc.ValHistogram
		}
		// When the timestamps do not overlap, the cursor for the non-selected sample type has advanced too
		// far; we decrement it back down here.
@ -453,11 +458,26 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
	case c.floatsCur < len(c.series.floats):
		c.curValType = chunkenc.ValFloat
	case c.histogramsCur < len(c.series.histograms):
		c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur])
		c.curValType = chunkenc.ValHistogram
	}
	if c.curValType == chunkenc.ValHistogram {
		h := &c.series.histograms[c.histogramsCur]
		c.curValType = getHistogramValType(h)
		c.err = validateHistogramSchema(h)
	}
	if c.err != nil {
		c.curValType = chunkenc.ValNone
	}
	return c.curValType
}

func validateHistogramSchema(h *prompb.Histogram) error {
	if histogram.IsKnownSchema(h.Schema) {
		return nil
	}
	return histogram.UnknownSchemaError(h.Schema)
}
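As a hedged illustration of how a caller observes this (the tests below assert that the latched error satisfies errors.Is against histogram.ErrHistogramsUnknownSchema; this helper is hypothetical and assumes the errors, chunkenc, and histogram imports used above):

// drainIterator is a hypothetical helper, not part of the patch: it consumes a
// series iterator and surfaces the unknown-schema error that Next()/Seek() latch.
func drainIterator(it chunkenc.Iterator) (int, error) {
	n := 0
	for it.Next() != chunkenc.ValNone {
		n++ // valid samples seen before the first invalid histogram, if any
	}
	if err := it.Err(); errors.Is(err, histogram.ErrHistogramsUnknownSchema) {
		return n, err // iteration stopped at a histogram with an unsupported schema
	}
	return n, it.Err()
}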

func getHistogramValType(h *prompb.Histogram) chunkenc.ValueType {
	if h.IsFloatHistogram() {
		return chunkenc.ValFloatHistogram
@ -480,14 +500,28 @@ func (c *concreteSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *hist
		panic("iterator is not on an integer histogram sample")
	}
	h := c.series.histograms[c.histogramsCur]
	return h.Timestamp, h.ToIntHistogram()
	mh := h.ToIntHistogram()
	if mh.Schema > histogram.ExponentialSchemaMax && mh.Schema <= histogram.ExponentialSchemaMaxReserved {
		// This is a very slow path, but it should only happen if the
		// sample is from a newer Prometheus version that supports higher
		// resolution.
		mh.ReduceResolution(histogram.ExponentialSchemaMax)
	}
	return h.Timestamp, mh
}

// AtFloatHistogram implements chunkenc.Iterator.
func (c *concreteSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
	if c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram {
		fh := c.series.histograms[c.histogramsCur]
		return fh.Timestamp, fh.ToFloatHistogram() // integer will be auto-converted.
		mfh := fh.ToFloatHistogram() // integer will be auto-converted.
		if mfh.Schema > histogram.ExponentialSchemaMax && mfh.Schema <= histogram.ExponentialSchemaMaxReserved {
			// This is a very slow path, but it should only happen if the
			// sample is from a newer Prometheus version that supports higher
			// resolution.
			mfh.ReduceResolution(histogram.ExponentialSchemaMax)
		}
		return fh.Timestamp, mfh
	}
	panic("iterator is not on a histogram sample")
}
@ -504,6 +538,9 @@ const noTS = int64(math.MaxInt64)

// Next implements chunkenc.Iterator.
func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
	if c.err != nil {
		return chunkenc.ValNone
	}
	peekFloatTS := noTS
	if c.floatsCur+1 < len(c.series.floats) {
		peekFloatTS = c.series.floats[c.floatsCur+1].Timestamp
@ -532,12 +569,21 @@ func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
		c.histogramsCur++
		c.curValType = chunkenc.ValFloat
	}

	if c.curValType == chunkenc.ValHistogram {
		h := &c.series.histograms[c.histogramsCur]
		c.curValType = getHistogramValType(h)
		c.err = validateHistogramSchema(h)
	}
	if c.err != nil {
		c.curValType = chunkenc.ValNone
	}
	return c.curValType
}

// Err implements chunkenc.Iterator.
func (*concreteSeriesIterator) Err() error {
	return nil
func (c *concreteSeriesIterator) Err() error {
	return c.err
}

// chunkedSeriesSet implements storage.SeriesSet.

@ -548,6 +548,79 @@ func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) {
	require.Equal(t, chunkenc.ValNone, it.Seek(1))
}

func TestConcreteSeriesIterator_InvalidHistogramSamples(t *testing.T) {
	for _, schema := range []int32{-100, 100} {
		t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
			h := prompb.FromIntHistogram(2, &testHistogram)
			h.Schema = schema
			fh := prompb.FromFloatHistogram(4, testHistogram.ToFloat(nil))
			fh.Schema = schema
			series := &concreteSeries{
				labels: labels.FromStrings("foo", "bar"),
				floats: []prompb.Sample{
					{Value: 1, Timestamp: 0},
					{Value: 2, Timestamp: 3},
				},
				histograms: []prompb.Histogram{
					h,
					fh,
				},
			}
			it := series.Iterator(nil)
			require.Equal(t, chunkenc.ValFloat, it.Next())
			require.Equal(t, chunkenc.ValNone, it.Next())
			require.Error(t, it.Err())
			require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema)

			it = series.Iterator(it)
			require.Equal(t, chunkenc.ValFloat, it.Next())
			require.Equal(t, chunkenc.ValNone, it.Next())
			require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema)

			it = series.Iterator(it)
			require.Equal(t, chunkenc.ValNone, it.Seek(1))
			require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema)

			it = series.Iterator(it)
			require.Equal(t, chunkenc.ValFloat, it.Seek(3))
			require.Equal(t, chunkenc.ValNone, it.Next())
			require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema)

			it = series.Iterator(it)
			require.Equal(t, chunkenc.ValNone, it.Seek(4))
			require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema)
		})
	}
}

func TestConcreteSeriesIterator_ReducesHighResolutionHistograms(t *testing.T) {
	for _, schema := range []int32{9, 52} {
		t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
			h := testHistogram.Copy()
			h.Schema = schema
			fh := h.ToFloat(nil)
			series := &concreteSeries{
				labels: labels.FromStrings("foo", "bar"),
				histograms: []prompb.Histogram{
					prompb.FromIntHistogram(1, h),
					prompb.FromFloatHistogram(2, fh),
				},
			}
			it := series.Iterator(nil)
			require.Equal(t, chunkenc.ValHistogram, it.Next())
			_, gotH := it.AtHistogram(nil)
			require.Equal(t, histogram.ExponentialSchemaMax, gotH.Schema)
			_, gotFH := it.AtFloatHistogram(nil)
			require.Equal(t, histogram.ExponentialSchemaMax, gotFH.Schema)
			require.Equal(t, chunkenc.ValFloatHistogram, it.Next())
			_, gotFH = it.AtFloatHistogram(nil)
			require.Equal(t, histogram.ExponentialSchemaMax, gotFH.Schema)
			require.Equal(t, chunkenc.ValNone, it.Next())
			require.NoError(t, it.Err())
		})
	}
}

func TestFromQueryResultWithDuplicates(t *testing.T) {
	ts1 := prompb.TimeSeries{
		Labels: []prompb.Label{

@ -139,7 +139,7 @@ func (b *combinedAppender) appendFloatOrHistogram(ls labels.Labels, meta metadat
		ref = 0
	}
	updateRefs := !exists || series.ct != ct
	if updateRefs && ct != 0 && b.ingestCTZeroSample {
	if updateRefs && ct != 0 && ct < t && b.ingestCTZeroSample {
		var newRef storage.SeriesRef
		if h != nil {
			newRef, err = b.app.AppendHistogramCTZeroSample(ref, ls, t, ct, h, nil)
@ -147,10 +147,14 @@ func (b *combinedAppender) appendFloatOrHistogram(ls labels.Labels, meta metadat
			newRef, err = b.app.AppendCTZeroSample(ref, ls, t, ct)
		}
		if err != nil {
			if !errors.Is(err, storage.ErrOutOfOrderCT) {
			if !errors.Is(err, storage.ErrOutOfOrderCT) && !errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
				// Even for the first sample OOO is a common scenario because
				// we can't tell if a CT was already ingested in a previous request.
				// We ignore the error.
				// ErrDuplicateSampleForTimestamp is also a common scenario because
				// unknown start times in OpenTelemetry are indicated by setting
				// the start time to the same as the first sample time.
				// https://opentelemetry.io/docs/specs/otel/metrics/data-model/#cumulative-streams-handling-unknown-start-time
				b.logger.Warn("Error when appending CT from OTLP", "err", err, "series", ls.String(), "created_timestamp", ct, "timestamp", t, "sample_type", sampleType(h))
			}
		} else {

@ -14,6 +14,7 @@
package prometheusremotewrite

import (
	"bytes"
	"context"
	"errors"
	"math"
@ -160,8 +161,10 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {

	testCases := map[string]struct {
		appendFunc        func(*testing.T, CombinedAppender)
		extraAppendFunc   func(*testing.T, CombinedAppender)
		expectedSamples   []sample
		expectedExemplars []exemplar.QueryResult
		expectedLogsForCT []string
	}{
		"single float sample, zero CT": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
@ -185,6 +188,10 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {
					f: 42.0,
				},
			},
			expectedLogsForCT: []string{
				"Error when appending CT from OTLP",
				"out of bound",
			},
		},
		"single float sample, normal CT": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
@ -212,6 +219,24 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {
				},
			},
		},
		"two float samples in different messages, CT same time as first sample": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), 42.0, nil))
			},
			extraAppendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.Add(time.Second).UnixMilli(), 43.0, nil))
			},
			expectedSamples: []sample{
				{
					t: now.UnixMilli(),
					f: 42.0,
				},
				{
					t: now.Add(time.Second).UnixMilli(),
					f: 43.0,
				},
			},
		},
		"single float sample, CT in the future of the sample": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), 42.0, nil))
@ -245,6 +270,10 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {
					h: tsdbutil.GenerateTestHistogram(42),
				},
			},
			expectedLogsForCT: []string{
				"Error when appending CT from OTLP",
				"out of bound",
			},
		},
		"single histogram sample, normal CT": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
@ -273,6 +302,24 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {
				},
			},
		},
		"two histogram samples in different messages, CT same time as first sample": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
			},
			extraAppendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.Add(time.Second).UnixMilli(), tsdbutil.GenerateTestHistogram(43), nil))
			},
			expectedSamples: []sample{
				{
					t: now.UnixMilli(),
					h: tsdbutil.GenerateTestHistogram(42),
				},
				{
					t: now.Add(time.Second).UnixMilli(),
					h: tsdbutil.GenerateTestHistogram(43),
				},
			},
		},
		"single histogram sample, CT in the future of the sample": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
@ -344,6 +391,11 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var expectedLogs []string
			if ingestCTZeroSample {
				expectedLogs = append(expectedLogs, tc.expectedLogsForCT...)
			}

			dir := t.TempDir()
			opts := tsdb.DefaultOptions()
			opts.EnableExemplarStorage = true
@ -354,15 +406,32 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {

			t.Cleanup(func() { db.Close() })

			var output bytes.Buffer
			logger := promslog.New(&promslog.Config{Writer: &output})

			ctx := context.Background()
			reg := prometheus.NewRegistry()
			cappMetrics := NewCombinedAppenderMetrics(reg)
			app := db.Appender(ctx)
			capp := NewCombinedAppender(app, promslog.NewNopLogger(), ingestCTZeroSample, NewCombinedAppenderMetrics(reg))

			capp := NewCombinedAppender(app, logger, ingestCTZeroSample, cappMetrics)
			tc.appendFunc(t, capp)

			require.NoError(t, app.Commit())

			if tc.extraAppendFunc != nil {
				app = db.Appender(ctx)
				capp = NewCombinedAppender(app, logger, ingestCTZeroSample, cappMetrics)
				tc.extraAppendFunc(t, capp)
				require.NoError(t, app.Commit())
			}

			if len(expectedLogs) > 0 {
				for _, expectedLog := range expectedLogs {
					require.Contains(t, output.String(), expectedLog)
				}
			} else {
				require.Empty(t, output.String(), "unexpected log output")
			}

			q, err := db.Querier(int64(math.MinInt64), int64(math.MaxInt64))
			require.NoError(t, err)

@ -86,16 +86,16 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont
func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, temporality pmetric.AggregationTemporality) (*histogram.Histogram, annotations.Annotations, error) {
	var annots annotations.Annotations
	scale := p.Scale()
	if scale < -4 {
	if scale < histogram.ExponentialSchemaMin {
		return nil, annots,
			fmt.Errorf("cannot convert exponential to native histogram."+
				" Scale must be >= -4, was %d", scale)
				" Scale must be >= %d, was %d", histogram.ExponentialSchemaMin, scale)
	}

	var scaleDown int32
	if scale > 8 {
		scaleDown = scale - 8
		scale = 8
	if scale > histogram.ExponentialSchemaMax {
		scaleDown = scale - histogram.ExponentialSchemaMax
		scale = histogram.ExponentialSchemaMax
	}

	pSpans, pDeltas := convertBucketsLayout(p.Positive().BucketCounts().AsRaw(), p.Positive().Offset(), scaleDown, true)

@ -117,6 +117,24 @@ func (*writeHandler) parseProtoMsg(contentType string) (config.RemoteWriteProtoM
	return config.RemoteWriteProtoMsgV1, nil
}

// isHistogramValidationError checks if the error is a native histogram validation error.
func isHistogramValidationError(err error) bool {
	// TODO: Consider adding a single histogram error type instead of individual sentinel errors.
	return errors.Is(err, histogram.ErrHistogramCountMismatch) ||
		errors.Is(err, histogram.ErrHistogramCountNotBigEnough) ||
		errors.Is(err, histogram.ErrHistogramNegativeBucketCount) ||
		errors.Is(err, histogram.ErrHistogramSpanNegativeOffset) ||
		errors.Is(err, histogram.ErrHistogramSpansBucketsMismatch) ||
		errors.Is(err, histogram.ErrHistogramCustomBucketsMismatch) ||
		errors.Is(err, histogram.ErrHistogramCustomBucketsInvalid) ||
		errors.Is(err, histogram.ErrHistogramCustomBucketsInfinite) ||
		errors.Is(err, histogram.ErrHistogramCustomBucketsZeroCount) ||
		errors.Is(err, histogram.ErrHistogramCustomBucketsZeroThresh) ||
		errors.Is(err, histogram.ErrHistogramCustomBucketsNegSpans) ||
		errors.Is(err, histogram.ErrHistogramCustomBucketsNegBuckets) ||
		errors.Is(err, histogram.ErrHistogramExpSchemaCustomBounds)
}

func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	contentType := r.Header.Get("Content-Type")
	if contentType == "" {
@ -190,6 +208,9 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
		// Indicate an out-of-order sample is a bad request to prevent retries.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	case isHistogramValidationError(err):
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	default:
		h.logger.Error("Error while remote writing the v1 request", "err", err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
@ -229,7 +250,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
	samplesWithInvalidLabels := 0
	samplesAppended := 0

	app := &timeLimitAppender{
	app := &remoteWriteAppender{
		Appender: h.appendable.Appender(ctx),
		maxTime:  timestamp.FromTime(time.Now().Add(maxAheadTime)),
	}
@ -344,7 +365,7 @@ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Hist
// NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors.
// Once we have a 5xx type of error, we immediately stop and roll back all appends.
func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ WriteResponseStats, errHTTPCode int, _ error) {
	app := &timeLimitAppender{
	app := &remoteWriteAppender{
		Appender: h.appendable.Appender(ctx),
		maxTime:  timestamp.FromTime(time.Now().Add(maxAheadTime)),
	}
@ -474,6 +495,11 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
			badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
			continue
		}
		if isHistogramValidationError(err) {
			h.logger.Error("Invalid histogram received", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp)
			badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
			continue
		}
		return 0, http.StatusInternalServerError, err
	}

@ -616,7 +642,7 @@ type rwExporter struct {

func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
	otlpCfg := rw.config().OTLPConfig
	app := &timeLimitAppender{
	app := &remoteWriteAppender{
		Appender: rw.appendable.Appender(ctx),
		maxTime:  timestamp.FromTime(time.Now().Add(maxAheadTime)),
	}
@ -719,13 +745,13 @@ func hasDelta(md pmetric.Metrics) bool {
	return false
}

type timeLimitAppender struct {
type remoteWriteAppender struct {
	storage.Appender

	maxTime int64
}

func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
func (app *remoteWriteAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
	if t > app.maxTime {
		return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
	}
@ -737,11 +763,18 @@ func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels,
	return ref, nil
}

func (app *timeLimitAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
func (app *remoteWriteAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
	if t > app.maxTime {
		return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
	}

	if h != nil && histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > histogram.ExponentialSchemaMax {
		h = h.ReduceResolution(histogram.ExponentialSchemaMax)
	}
	if fh != nil && histogram.IsExponentialSchemaReserved(fh.Schema) && fh.Schema > histogram.ExponentialSchemaMax {
		fh = fh.ReduceResolution(histogram.ExponentialSchemaMax)
	}

	ref, err := app.Appender.AppendHistogram(ref, l, t, h, fh)
	if err != nil {
		return 0, err
@ -749,7 +782,7 @@ func (app *timeLimitAppender) AppendHistogram(ref storage.SeriesRef, l labels.La
	return ref, nil
}
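For clarity, the schema clamp the appender applies can be read in isolation. A hedged standalone sketch, assuming the model/histogram constants used in the patch (schemas up to ExponentialSchemaMaxReserved are accepted from newer senders, ExponentialSchemaMax is the highest this binary stores):

// clampSchema is a hypothetical standalone version of the guard above: reserved
// exponential schemas above the supported maximum are merged down, halving the
// bucket resolution once per schema step, so the data is kept rather than rejected.
func clampSchema(h *histogram.Histogram) *histogram.Histogram {
	if h != nil && histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > histogram.ExponentialSchemaMax {
		return h.ReduceResolution(histogram.ExponentialSchemaMax)
	}
	return h
}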

func (app *timeLimitAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
func (app *remoteWriteAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
	if e.Ts > app.maxTime {
		return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
	}

@ -806,6 +806,94 @@ func TestCommitErr_V1Message(t *testing.T) {
	require.Equal(t, "commit error\n", string(body))
}

// Regression test for https://github.com/prometheus/prometheus/issues/17206
func TestHistogramValidationErrorHandling(t *testing.T) {
	testCases := []struct {
		desc     string
		hist     histogram.Histogram
		expected string
	}{
		{
			desc: "count mismatch",
			hist: histogram.Histogram{
				Schema:          2,
				ZeroThreshold:   1e-128,
				ZeroCount:       1,
				Count:           10,
				Sum:             20,
				PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
				PositiveBuckets: []int64{2},
				NegativeSpans:   []histogram.Span{{Offset: 0, Length: 1}},
				NegativeBuckets: []int64{3},
				// Total: 1 (zero) + 2 (positive) + 3 (negative) = 6, but Count = 10
			},
			expected: "histogram's observation count should equal",
		},
		{
			desc: "custom buckets zero count",
			hist: histogram.Histogram{
				Schema:          histogram.CustomBucketsSchema,
				Count:           10,
				Sum:             20,
				ZeroCount:       1, // Invalid: custom buckets must have zero count of 0
				PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
				PositiveBuckets: []int64{10},
				CustomValues:    []float64{1.0},
			},
			expected: "custom buckets: must have zero count of 0",
		},
	}

	for _, protoMsg := range []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} {
		protoName := "V1"
		if protoMsg == config.RemoteWriteProtoMsgV2 {
			protoName = "V2"
		}

		for _, tc := range testCases {
			testName := fmt.Sprintf("%s %s", protoName, tc.desc)
			t.Run(testName, func(t *testing.T) {
				dir := t.TempDir()
				opts := tsdb.DefaultOptions()
				opts.EnableNativeHistograms = true

				db, err := tsdb.Open(dir, nil, nil, opts, nil)
				require.NoError(t, err)
				t.Cleanup(func() { require.NoError(t, db.Close()) })

				handler := NewWriteHandler(promslog.NewNopLogger(), nil, db.Head(), []config.RemoteWriteProtoMsg{protoMsg}, false)
				recorder := httptest.NewRecorder()

				var buf []byte
				if protoMsg == config.RemoteWriteProtoMsgV1 {
					ts := []prompb.TimeSeries{{
						Labels:     []prompb.Label{{Name: "__name__", Value: "test"}},
						Histograms: []prompb.Histogram{prompb.FromIntHistogram(1, &tc.hist)},
					}}
					buf, _, _, err = buildWriteRequest(nil, ts, nil, nil, nil, nil, "snappy")
				} else {
					st := writev2.NewSymbolTable()
					ts := []writev2.TimeSeries{{
						LabelsRefs: st.SymbolizeLabels(labels.FromStrings("__name__", "test"), nil),
						Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &tc.hist)},
					}}
					buf, _, _, err = buildV2WriteRequest(promslog.NewNopLogger(), ts, st.Symbols(), nil, nil, nil, "snappy")
				}
				require.NoError(t, err)

				req := httptest.NewRequest(http.MethodPost, "/api/v1/write", bytes.NewReader(buf))
				req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[protoMsg])
				req.Header.Set("Content-Encoding", "snappy")

				handler.ServeHTTP(recorder, req)

				require.Equal(t, http.StatusBadRequest, recorder.Code)
				require.Contains(t, recorder.Body.String(), tc.expected)
			})
		}
	}
}

func TestCommitErr_V2Message(t *testing.T) {
	payload, _, _, err := buildV2WriteRequest(promslog.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
	require.NoError(t, err)
@ -1134,3 +1222,100 @@ func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, l labels.Labels
	m.samples = append(m.samples, mockSample{l, ct, 0})
	return storage.SeriesRef(hash), nil
}

var (
	highSchemaHistogram = &histogram.Histogram{
		Schema: 10,
		PositiveSpans: []histogram.Span{
			{
				Offset: -1,
				Length: 2,
			},
		},
		PositiveBuckets: []int64{1, 2},
		NegativeSpans: []histogram.Span{
			{
				Offset: 0,
				Length: 1,
			},
		},
		NegativeBuckets: []int64{1},
	}
	reducedSchemaHistogram = &histogram.Histogram{
		Schema: 8,
		PositiveSpans: []histogram.Span{
			{
				Offset: 0,
				Length: 1,
			},
		},
		PositiveBuckets: []int64{4},
		NegativeSpans: []histogram.Span{
			{
				Offset: 0,
				Length: 1,
			},
		},
		NegativeBuckets: []int64{1},
	}
)
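As a sanity check tying the two fixtures together, a hedged sketch of the same reduction that TestHistogramsReduction below exercises through the write handler (assuming the Copy and Equals methods on model/histogram.Histogram):

// Reducing the schema-10 fixture to the supported maximum should yield the
// schema-8 fixture; each schema step merges pairs of adjacent buckets.
func ExampleReduceResolution() {
	got := highSchemaHistogram.Copy().ReduceResolution(histogram.ExponentialSchemaMax)
	fmt.Println(got.Equals(reducedSchemaHistogram)) // expected: true
}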
|
||||
|
||||
func TestHistogramsReduction(t *testing.T) {
|
||||
for _, protoMsg := range []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} {
|
||||
t.Run(string(protoMsg), func(t *testing.T) {
|
||||
appendable := &mockAppendable{}
|
||||
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{protoMsg}, false)
|
||||
|
||||
var (
|
||||
err error
|
||||
payload []byte
|
||||
)
|
||||
|
||||
if protoMsg == config.RemoteWriteProtoMsgV1 {
|
||||
payload, _, _, err = buildWriteRequest(nil, []prompb.TimeSeries{
|
||||
{
|
||||
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric1"}},
|
||||
Histograms: []prompb.Histogram{prompb.FromIntHistogram(1, highSchemaHistogram)},
|
||||
},
|
||||
{
|
||||
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric2"}},
|
||||
Histograms: []prompb.Histogram{prompb.FromFloatHistogram(2, highSchemaHistogram.ToFloat(nil))},
|
||||
},
|
||||
}, nil, nil, nil, nil, "snappy")
|
||||
} else {
|
||||
payload, _, _, err = buildV2WriteRequest(promslog.NewNopLogger(), []writev2.TimeSeries{
|
||||
{
|
||||
LabelsRefs: []uint32{0, 1},
|
||||
Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, highSchemaHistogram)},
|
||||
},
|
||||
{
|
||||
LabelsRefs: []uint32{0, 2},
|
||||
Histograms: []writev2.Histogram{writev2.FromFloatHistogram(2, highSchemaHistogram.ToFloat(nil))},
|
||||
},
|
||||
}, []string{"__name__", "test_metric1", "test_metric2"},
|
||||
nil, nil, nil, "snappy")
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||
require.NoError(t, err)
|
||||
|
||||
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[protoMsg])
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
handler.ServeHTTP(recorder, req)
|
||||
|
||||
resp := recorder.Result()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, http.StatusNoContent, resp.StatusCode)
|
||||
require.Empty(t, body)
|
||||
|
||||
require.Len(t, appendable.histograms, 2)
|
||||
require.Equal(t, int64(1), appendable.histograms[0].t)
|
||||
require.Equal(t, reducedSchemaHistogram, appendable.histograms[0].h)
|
||||
require.Equal(t, int64(2), appendable.histograms[1].t)
|
||||
require.Equal(t, reducedSchemaHistogram.ToFloat(nil), appendable.histograms[1].fh)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
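The fixtures above encode the expected effect of schema reduction: each one-step reduction halves the resolution by merging adjacent bucket pairs, so going from schema 10 to schema 8 folds four neighbouring buckets into one. A minimal sketch, assuming ReduceResolution reduces in place and returns the histogram, as the iterator hunks below use it:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.Histogram{
		Schema:          10,
		PositiveSpans:   []histogram.Span{{Offset: -1, Length: 2}},
		PositiveBuckets: []int64{1, 2}, // Delta-encoded: absolute counts 1 and 3.
	}
	// Buckets -1 and 0 at schema 10 both map to bucket 0 at schema 8,
	// so all four observations end up in a single bucket, matching the
	// reducedSchemaHistogram fixture above.
	r := h.ReduceResolution(8)
	fmt.Println(r.Schema, r.PositiveSpans, r.PositiveBuckets) // 8 [{0 1}] [4]
}
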
@ -437,7 +437,7 @@ func (db *DB) resetWALReplayResources() {
func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef) (err error) {
	var (
		syms = labels.NewSymbolTable() // One table for the whole WAL.
		dec = record.NewDecoder(syms)
		dec     = record.NewDecoder(syms, db.logger)
		lastRef = chunks.HeadSeriesRef(db.nextRef.Load())

		decoded = make(chan any, 10)

@ -211,7 +211,7 @@ func TestCommit(t *testing.T) {
	// Read records from WAL and check for expected count of series, samples, and exemplars.
	var (
		r   = wlog.NewReader(sr)
		dec = record.NewDecoder(labels.NewSymbolTable())
		dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())

		walSeriesCount, walSamplesCount, walExemplarsCount, walHistogramCount, walFloatHistogramCount int
	)

@ -344,7 +344,7 @@ func TestRollback(t *testing.T) {
	// Read records from WAL and check for expected count of series and samples.
	var (
		r   = wlog.NewReader(sr)
		dec = record.NewDecoder(labels.NewSymbolTable())
		dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())

		walSeriesCount, walSamplesCount, walHistogramCount, walFloatHistogramCount, walExemplarsCount int
	)

@ -892,7 +892,7 @@ func TestStorage_DuplicateExemplarsIgnored(t *testing.T) {
	defer sr.Close()
	r := wlog.NewReader(sr)

	dec := record.NewDecoder(labels.NewSymbolTable())
	dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
	for r.Next() {
		rec := r.Record()
		if dec.Type(rec) == record.Exemplars {

@ -1332,7 +1332,7 @@ func readWALSamples(t *testing.T, walDir string) []*walSample {
	}(sr)

	r := wlog.NewReader(sr)
	dec := record.NewDecoder(labels.NewSymbolTable())
	dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())

	var (
		samples []record.RefSample

@ -866,7 +866,7 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
	}
	if fh == nil {
		it.atFloatHistogramCalled = true
		return it.t, &histogram.FloatHistogram{
		fh = &histogram.FloatHistogram{
			CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
			Count:            it.cnt.value,
			ZeroCount:        it.zCnt.value,

@ -879,6 +879,14 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
			NegativeBuckets: it.nBuckets,
			CustomValues:    it.customValues,
		}
		if fh.Schema > histogram.ExponentialSchemaMax && fh.Schema <= histogram.ExponentialSchemaMaxReserved {
			// This is a very slow path, but it should only happen if the
			// chunk is from a newer Prometheus version that supports higher
			// resolution.
			fh = fh.Copy()
			fh.ReduceResolution(histogram.ExponentialSchemaMax)
		}
		return it.t, fh
	}

	fh.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead)

@ -903,6 +911,13 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
	// Custom values are interned. The single copy is in this iterator.
	fh.CustomValues = it.customValues

	if fh.Schema > histogram.ExponentialSchemaMax && fh.Schema <= histogram.ExponentialSchemaMaxReserved {
		// This is a very slow path, but it should only happen if the
		// chunk is from a newer Prometheus version that supports higher
		// resolution.
		fh.ReduceResolution(histogram.ExponentialSchemaMax)
	}

	return it.t, fh
}

@ -954,6 +969,12 @@ func (it *floatHistogramIterator) Next() ValueType {
		it.err = err
		return ValNone
	}

	if !histogram.IsKnownSchema(schema) {
		it.err = histogram.UnknownSchemaError(schema)
		return ValNone
	}

	it.schema = schema
	it.zThreshold = zeroThreshold
	it.pSpans, it.nSpans = posSpans, negSpans

@ -14,6 +14,7 @@
package chunkenc

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"

@ -1462,3 +1463,62 @@ func TestFloatHistogramEmptyBucketsWithGaps(t *testing.T) {
	require.Equal(t, ValNone, it.Next())
	require.NoError(t, it.Err())
}

func TestFloatHistogramIteratorFailIfSchemaInValid(t *testing.T) {
	for _, schema := range []int32{-101, 101} {
		t.Run(fmt.Sprintf("schema %d", schema), func(t *testing.T) {
			h := &histogram.FloatHistogram{
				Schema:        schema,
				Count:         10,
				Sum:           15.0,
				ZeroThreshold: 1e-100,
				PositiveSpans: []histogram.Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []float64{1, 2, 3, 4},
			}

			c := NewFloatHistogramChunk()
			app, err := c.Appender()
			require.NoError(t, err)

			_, _, _, err = app.AppendFloatHistogram(nil, 1, h, false)
			require.NoError(t, err)

			it := c.Iterator(nil)
			require.Equal(t, ValNone, it.Next())
			require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema)
		})
	}
}

func TestFloatHistogramIteratorReduceSchema(t *testing.T) {
	for _, schema := range []int32{9, 52} {
		t.Run(fmt.Sprintf("schema %d", schema), func(t *testing.T) {
			h := &histogram.FloatHistogram{
				Schema:        schema,
				Count:         10,
				Sum:           15.0,
				ZeroThreshold: 1e-100,
				PositiveSpans: []histogram.Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []float64{1, 2, 3, 4},
			}

			c := NewFloatHistogramChunk()
			app, err := c.Appender()
			require.NoError(t, err)

			_, _, _, err = app.AppendFloatHistogram(nil, 1, h, false)
			require.NoError(t, err)

			it := c.Iterator(nil)
			require.Equal(t, ValFloatHistogram, it.Next())
			_, rh := it.AtFloatHistogram(nil)
			require.Equal(t, histogram.ExponentialSchemaMax, rh.Schema)
		})
	}
}

@ -921,7 +921,7 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
	}
	if h == nil {
		it.atHistogramCalled = true
		return it.t, &histogram.Histogram{
		h = &histogram.Histogram{
			CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
			Count:            it.cnt,
			ZeroCount:        it.zCnt,

@ -934,6 +934,14 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
			NegativeBuckets: it.nBuckets,
			CustomValues:    it.customValues,
		}
		if h.Schema > histogram.ExponentialSchemaMax && h.Schema <= histogram.ExponentialSchemaMaxReserved {
			// This is a very slow path, but it should only happen if the
			// chunk is from a newer Prometheus version that supports higher
			// resolution.
			h = h.Copy()
			h.ReduceResolution(histogram.ExponentialSchemaMax)
		}
		return it.t, h
	}

	h.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead)

@ -958,6 +966,13 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
	// Custom values are interned. The single copy is here in the iterator.
	h.CustomValues = it.customValues

	if h.Schema > histogram.ExponentialSchemaMax && h.Schema <= histogram.ExponentialSchemaMaxReserved {
		// This is a very slow path, but it should only happen if the
		// chunk is from a newer Prometheus version that supports higher
		// resolution.
		h.ReduceResolution(histogram.ExponentialSchemaMax)
	}

	return it.t, h
}

@ -967,7 +982,7 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
	}
	if fh == nil {
		it.atFloatHistogramCalled = true
		return it.t, &histogram.FloatHistogram{
		fh = &histogram.FloatHistogram{
			CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
			Count:            float64(it.cnt),
			ZeroCount:        float64(it.zCnt),

@ -980,6 +995,14 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
			NegativeBuckets: it.nFloatBuckets,
			CustomValues:    it.customValues,
		}
		if fh.Schema > histogram.ExponentialSchemaMax && fh.Schema <= histogram.ExponentialSchemaMaxReserved {
			// This is a very slow path, but it should only happen if the
			// chunk is from a newer Prometheus version that supports higher
			// resolution.
			fh = fh.Copy()
			fh.ReduceResolution(histogram.ExponentialSchemaMax)
		}
		return it.t, fh
	}

	fh.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead)

@ -1012,6 +1035,13 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
	// Custom values are interned. The single copy is here in the iterator.
	fh.CustomValues = it.customValues

	if fh.Schema > histogram.ExponentialSchemaMax && fh.Schema <= histogram.ExponentialSchemaMaxReserved {
		// This is a very slow path, but it should only happen if the
		// chunk is from a newer Prometheus version that supports higher
		// resolution.
		fh.ReduceResolution(histogram.ExponentialSchemaMax)
	}

	return it.t, fh
}

@ -1077,6 +1107,12 @@ func (it *histogramIterator) Next() ValueType {
		it.err = err
		return ValNone
	}

	if !histogram.IsKnownSchema(schema) {
		it.err = histogram.UnknownSchemaError(schema)
		return ValNone
	}

	it.schema = schema
	it.zThreshold = zeroThreshold
	it.pSpans, it.nSpans = posSpans, negSpans

@ -14,6 +14,7 @@
package chunkenc

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"

@ -1818,3 +1819,65 @@ func TestIntHistogramEmptyBucketsWithGaps(t *testing.T) {
	require.Equal(t, ValNone, it.Next())
	require.NoError(t, it.Err())
}

func TestHistogramIteratorFailIfSchemaInValid(t *testing.T) {
	for _, schema := range []int32{-101, 101} {
		t.Run(fmt.Sprintf("schema %d", schema), func(t *testing.T) {
			h := &histogram.Histogram{
				Schema:        schema,
				Count:         10,
				Sum:           15.0,
				ZeroThreshold: 1e-100,
				PositiveSpans: []histogram.Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 2, 3, 4},
			}

			c := NewHistogramChunk()
			app, err := c.Appender()
			require.NoError(t, err)

			_, _, _, err = app.AppendHistogram(nil, 1, h, false)
			require.NoError(t, err)

			it := c.Iterator(nil)
			require.Equal(t, ValNone, it.Next())
			require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema)
		})
	}
}

func TestHistogramIteratorReduceSchema(t *testing.T) {
	for _, schema := range []int32{9, 52} {
		t.Run(fmt.Sprintf("schema %d", schema), func(t *testing.T) {
			h := &histogram.Histogram{
				Schema:        schema,
				Count:         10,
				Sum:           15.0,
				ZeroThreshold: 1e-100,
				PositiveSpans: []histogram.Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 2, 3, 4},
			}

			c := NewHistogramChunk()
			app, err := c.Appender()
			require.NoError(t, err)

			_, _, _, err = app.AppendHistogram(nil, 1, h, false)
			require.NoError(t, err)

			it := c.Iterator(nil)
			require.Equal(t, ValHistogram, it.Next())
			_, rh := it.AtHistogram(nil)
			require.Equal(t, histogram.ExponentialSchemaMax, rh.Schema)

			_, rfh := it.AtFloatHistogram(nil)
			require.Equal(t, histogram.ExponentialSchemaMax, rfh.Schema)
		})
	}
}

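The test pairs above pin down a three-way decision the iterators now make. A sketch of it, under the assumption that the model/histogram helpers behave as the hunks use them (the fixtures put ExponentialSchemaMax at 8, with a reserved band reaching at least 52):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// decideSchema mirrors the iterator logic: schemas up to
// ExponentialSchemaMax are used as-is, schemas in the reserved band up
// to ExponentialSchemaMaxReserved are readable but reduced on the fly,
// and anything else is rejected with an unknown-schema error.
func decideSchema(s int32) (reduce bool, err error) {
	switch {
	case !histogram.IsKnownSchema(s):
		return false, histogram.UnknownSchemaError(s) // E.g. -101 or 101: Next() fails.
	case s > histogram.ExponentialSchemaMax && s <= histogram.ExponentialSchemaMaxReserved:
		return true, nil // E.g. 9 or 52: reduce to ExponentialSchemaMax before use.
	default:
		return false, nil // E.g. 8 or below: use as-is.
	}
}

func main() {
	for _, s := range []int32{8, 9, 52, 101} {
		reduce, err := decideSchema(s)
		fmt.Println(s, reduce, err)
	}
}
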
108
tsdb/db_test.go
@ -300,19 +300,87 @@ func TestDataNotAvailableAfterRollback(t *testing.T) {
	}()

	app := db.Appender(context.Background())
	_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 0)
	_, err := app.Append(0, labels.FromStrings("type", "float"), 0, 0)
	require.NoError(t, err)

	_, err = app.AppendHistogram(
		0, labels.FromStrings("type", "histogram"), 0,
		&histogram.Histogram{Count: 42, Sum: math.NaN()}, nil,
	)
	require.NoError(t, err)

	_, err = app.AppendHistogram(
		0, labels.FromStrings("type", "floathistogram"), 0,
		nil, &histogram.FloatHistogram{Count: 42, Sum: math.NaN()},
	)
	require.NoError(t, err)

	err = app.Rollback()
	require.NoError(t, err)

	querier, err := db.Querier(0, 1)
	for _, typ := range []string{"float", "histogram", "floathistogram"} {
		querier, err := db.Querier(0, 1)
		require.NoError(t, err)
		seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "type", typ))
		require.Equal(t, map[string][]chunks.Sample{}, seriesSet)
	}

	sr, err := wlog.NewSegmentsReader(db.head.wal.Dir())
	require.NoError(t, err)
	defer querier.Close()
	defer func() {
		require.NoError(t, sr.Close())
	}()

	seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	// Read records from WAL and check for expected count of series and samples.
	var (
		r   = wlog.NewReader(sr)
		dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())

	require.Equal(t, map[string][]chunks.Sample{}, seriesSet)
		walSeriesCount, walSamplesCount, walHistogramCount, walFloatHistogramCount, walExemplarsCount int
	)
	for r.Next() {
		rec := r.Record()
		switch dec.Type(rec) {
		case record.Series:
			var series []record.RefSeries
			series, err = dec.Series(rec, series)
			require.NoError(t, err)
			walSeriesCount += len(series)

		case record.Samples:
			var samples []record.RefSample
			samples, err = dec.Samples(rec, samples)
			require.NoError(t, err)
			walSamplesCount += len(samples)

		case record.Exemplars:
			var exemplars []record.RefExemplar
			exemplars, err = dec.Exemplars(rec, exemplars)
			require.NoError(t, err)
			walExemplarsCount += len(exemplars)

		case record.HistogramSamples, record.CustomBucketsHistogramSamples:
			var histograms []record.RefHistogramSample
			histograms, err = dec.HistogramSamples(rec, histograms)
			require.NoError(t, err)
			walHistogramCount += len(histograms)

		case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
			var floatHistograms []record.RefFloatHistogramSample
			floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
			require.NoError(t, err)
			walFloatHistogramCount += len(floatHistograms)

		default:
		}
	}

	// Check that only series get stored after calling Rollback.
	require.Equal(t, 3, walSeriesCount, "series should have been written to WAL")
	require.Equal(t, 0, walSamplesCount, "samples should not have been written to WAL")
	require.Equal(t, 0, walExemplarsCount, "exemplars should not have been written to WAL")
	require.Equal(t, 0, walHistogramCount, "histograms should not have been written to WAL")
	require.Equal(t, 0, walFloatHistogramCount, "float histograms should not have been written to WAL")
}

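The arithmetic behind the assertions: three series (one per sample type) are created eagerly at append time, so three series records survive the Rollback while every sample, histogram, and exemplar is dropped. A minimal sketch of the same observable behaviour, assuming this package's openTestDB test helper:

func TestRollbackKeepsSeriesSketch(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() { require.NoError(t, db.Close()) }()

	app := db.Appender(context.Background())
	_, err := app.Append(0, labels.FromStrings("type", "float"), 0, 0)
	require.NoError(t, err)
	// Rollback discards the buffered sample, but the series was already
	// created in the Head, so its record is still logged to the WAL.
	require.NoError(t, app.Rollback())
}
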
func TestDBAppenderAddRef(t *testing.T) {

@ -4504,7 +4572,7 @@ func testOOOWALWrite(t *testing.T,
	}()

	var records []any
	dec := record.NewDecoder(nil)
	dec := record.NewDecoder(nil, promslog.NewNopLogger())
	for r.Next() {
		rec := r.Record()
		switch typ := dec.Type(rec); typ {

@ -4856,10 +4924,7 @@ func TestMetadataAssertInMemoryData(t *testing.T) {
}

// TestMultipleEncodingsCommitOrder mainly serves to demonstrate what happens when committing a batch of samples for the
// same series when there are multiple encodings. Commit() will process all float samples before histogram samples. This
// means that if histograms are appended before floats, the histograms could be marked as OOO when they are committed.
// While possible, this shouldn't happen very often - you need the same series to be ingested as both a float and a
// histogram in a single write request.
// same series when there are multiple encodings. With issue #15177 fixed, this now all works as expected.
func TestMultipleEncodingsCommitOrder(t *testing.T) {
	opts := DefaultOptions()
	opts.OutOfOrderCapMax = 30

@ -4933,26 +4998,19 @@ func TestMultipleEncodingsCommitOrder(t *testing.T) {
		s := addSample(app, int64(i), chunkenc.ValFloat)
		expSamples = append(expSamples, s)
	}
	// These samples will be marked as OOO as their timestamps are less than the max timestamp for float samples in the
	// same batch.
	for i := 110; i < 120; i++ {
		s := addSample(app, int64(i), chunkenc.ValHistogram)
		expSamples = append(expSamples, s)
	}
	// These samples will be marked as OOO as their timestamps are less than the max timestamp for float samples in the
	// same batch.
	for i := 120; i < 130; i++ {
		s := addSample(app, int64(i), chunkenc.ValFloatHistogram)
		expSamples = append(expSamples, s)
	}
	// These samples will be marked as in-order as their timestamps are greater than the max timestamp for float
	// samples in the same batch.
	for i := 140; i < 150; i++ {
		s := addSample(app, int64(i), chunkenc.ValFloatHistogram)
		expSamples = append(expSamples, s)
	}
	// These samples will be marked as in-order, even though they're appended after the float histograms from ts 140-150
	// because float samples are processed first and these samples are in-order wrt to the float samples in the batch.
	// These samples will be marked as out-of-order.
	for i := 130; i < 135; i++ {
		s := addSample(app, int64(i), chunkenc.ValFloat)
		expSamples = append(expSamples, s)

@ -4964,8 +5022,8 @@ func TestMultipleEncodingsCommitOrder(t *testing.T) {
		return expSamples[i].T() < expSamples[j].T()
	})

	// oooCount = 20 because the histograms from 120 - 130 and float histograms from 120 - 130 are detected as OOO.
	verifySamples(100, 150, expSamples, 20)
	// oooCount = 5 for the samples 130 to 134.
	verifySamples(100, 150, expSamples, 5)

	// Append and commit some in-order histograms by themselves.
	app = db.Appender(context.Background())

@ -4975,8 +5033,8 @@ func TestMultipleEncodingsCommitOrder(t *testing.T) {
	}
	require.NoError(t, app.Commit())

	// oooCount remains at 20 as no new OOO samples have been added.
	verifySamples(100, 160, expSamples, 20)
	// oooCount remains at 5.
	verifySamples(100, 160, expSamples, 5)

	// Append and commit samples for all encoding types. This time all samples will be treated as OOO because samples
	// with newer timestamps have already been committed.

@ -5004,8 +5062,8 @@ func TestMultipleEncodingsCommitOrder(t *testing.T) {
		return expSamples[i].T() < expSamples[j].T()
	})

	// oooCount = 50 as we've added 30 more OOO samples.
	verifySamples(50, 160, expSamples, 50)
	// oooCount = 35 as we've added 30 more OOO samples.
	verifySamples(50, 160, expSamples, 35)
}

// TODO(codesome): test more samples incoming once compaction has started. To verify new samples after the start

@ -7030,7 +7088,7 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) {
	require.NoError(t, err)
	sr, err := wlog.NewSegmentsReader(originalWblDir)
	require.NoError(t, err)
	dec := record.NewDecoder(labels.NewSymbolTable())
	dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
	r, markers, addedRecs := wlog.NewReader(sr), 0, 0
	for r.Next() {
		rec := r.Record()

@ -86,7 +86,8 @@ type Head struct {
	exemplarMetrics *ExemplarMetrics
	exemplars       ExemplarStorage
	logger          *slog.Logger
	appendPool          zeropool.Pool[[]record.RefSample]
	refSeriesPool       zeropool.Pool[[]record.RefSeries]
	floatsPool          zeropool.Pool[[]record.RefSample]
	exemplarsPool       zeropool.Pool[[]exemplarWithSeriesRef]
	histogramsPool      zeropool.Pool[[]record.RefHistogramSample]
	floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample]

@ -164,13 +164,6 @@ func (h *Head) Appender(context.Context) storage.Appender {
func (h *Head) appender() *headAppender {
	minValidTime := h.appendableMinValidTime()
	appendID, cleanupAppendIDsBelow := h.iso.newAppendID(minValidTime) // Every appender gets an ID that is cleared upon commit/rollback.

	// Allocate the exemplars buffer only if exemplars are enabled.
	var exemplarsBuf []exemplarWithSeriesRef
	if h.opts.EnableExemplarStorage {
		exemplarsBuf = h.getExemplarBuffer()
	}

	return &headAppender{
		head:         h,
		minValidTime: minValidTime,

@ -178,12 +171,9 @@ func (h *Head) appender() *headAppender {
		maxt:          math.MinInt64,
		headMaxt:      h.MaxTime(),
		oooTimeWindow: h.opts.OutOfOrderTimeWindow.Load(),
		samples:         h.getAppendBuffer(),
		sampleSeries:    h.getSeriesBuffer(),
		exemplars:       exemplarsBuf,
		histograms:      h.getHistogramBuffer(),
		floatHistograms: h.getFloatHistogramBuffer(),
		metadata:        h.getMetadataBuffer(),
		seriesRefs:   h.getRefSeriesBuffer(),
		series:       h.getSeriesBuffer(),
		typesInBatch: map[chunks.HeadSeriesRef]sampleType{},
		appendID:              appendID,
		cleanupAppendIDsBelow: cleanupAppendIDsBelow,
	}

@ -213,16 +203,28 @@ func (h *Head) AppendableMinValidTime() (int64, bool) {
	return h.appendableMinValidTime(), true
}

func (h *Head) getAppendBuffer() []record.RefSample {
	b := h.appendPool.Get()
func (h *Head) getRefSeriesBuffer() []record.RefSeries {
	b := h.refSeriesPool.Get()
	if b == nil {
		return make([]record.RefSeries, 0, 512)
	}
	return b
}

func (h *Head) putRefSeriesBuffer(b []record.RefSeries) {
	h.refSeriesPool.Put(b[:0])
}

func (h *Head) getFloatBuffer() []record.RefSample {
	b := h.floatsPool.Get()
	if b == nil {
		return make([]record.RefSample, 0, 512)
	}
	return b
}

func (h *Head) putAppendBuffer(b []record.RefSample) {
	h.appendPool.Put(b[:0])
func (h *Head) putFloatBuffer(b []record.RefSample) {
	h.floatsPool.Put(b[:0])
}

func (h *Head) getExemplarBuffer() []exemplarWithSeriesRef {
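These getters and putters all follow the same generic pooling pattern: a zero-value pool hands back the zero value (a nil slice) when empty, and buffers are truncated on return so their capacity is recycled. A minimal, self-contained sketch using the zeropool package this file already imports:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/util/zeropool"
)

var floatsPool zeropool.Pool[[]float64]

// getBuffer hands out a pooled slice, or a fresh one with a sensible
// starting capacity when the pool is empty.
func getBuffer() []float64 {
	if b := floatsPool.Get(); b != nil {
		return b
	}
	return make([]float64, 0, 512)
}

// putBuffer truncates to length zero so the next user starts clean
// while the backing array's capacity is retained.
func putBuffer(b []float64) {
	floatsPool.Put(b[:0])
}

func main() {
	b := getBuffer()
	b = append(b, 1, 2, 3)
	putBuffer(b)
	// Typically true: capacity is recycled (the underlying sync.Pool
	// gives no hard guarantee, though).
	fmt.Println(cap(getBuffer()) >= 3)
}
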
@ -312,17 +314,30 @@ type exemplarWithSeriesRef struct {
	exemplar exemplar.Exemplar
}

type headAppender struct {
	head          *Head
	minValidTime  int64 // No samples below this timestamp are allowed.
	mint, maxt    int64
	headMaxt      int64 // We track it here to not take the lock for every sample appended.
	oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample.
// sampleType describes sample types we need to distinguish for append batching.
// We need separate types for everything that goes into a different WAL record
// type or into a different chunk encoding.
type sampleType byte

	seriesRefs   []record.RefSeries // New series records held by this appender.
	series       []*memSeries       // New series held by this appender (using corresponding slice indexes from seriesRefs)
	samples      []record.RefSample // New float samples held by this appender.
	sampleSeries []*memSeries       // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
const (
	stNone                       sampleType = iota // To mark that the sample type does not matter.
	stFloat                                        // All simple floats (counters, gauges, untyped). Goes to `floats`.
	stHistogram                                    // Native integer histograms with a standard exponential schema. Goes to `histograms`.
	stCustomBucketHistogram                        // Native integer histograms with custom bucket boundaries. Goes to `histograms`.
	stFloatHistogram                               // Native float histograms. Goes to `floatHistograms`.
	stCustomBucketFloatHistogram                   // Native float histograms with custom bucket boundaries. Goes to `floatHistograms`.
)

// appendBatch is used to partition all the appended data into batches that are
// "type clean", i.e. every series receives only samples of one type within the
// batch. Types in this regard are defined by the sampleType enum above.
// TODO(beorn7): The same concept could be extended to make sure every series in
// the batch has at most one metadata record. This is currently not implemented
// because it is unclear if it is needed at all. (Maybe we will remove metadata
// records altogether, see issue #15911.)
type appendBatch struct {
	floats               []record.RefSample               // New float samples held by this appender.
	floatSeries          []*memSeries                     // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
	histograms           []record.RefHistogramSample      // New histogram samples held by this appender.
	histogramSeries      []*memSeries                     // HistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
	floatHistograms      []record.RefFloatHistogramSample // New float histogram samples held by this appender.

@ -330,6 +345,42 @@ type headAppender struct {
	metadata       []record.RefMetadata    // New metadata held by this appender.
	metadataSeries []*memSeries            // Series corresponding to the metadata held by this appender.
	exemplars      []exemplarWithSeriesRef // New exemplars held by this appender.
}

// close returns all the slices to the pools in Head and nil's them.
func (b *appendBatch) close(h *Head) {
	h.putFloatBuffer(b.floats)
	b.floats = nil
	h.putSeriesBuffer(b.floatSeries)
	b.floatSeries = nil
	h.putHistogramBuffer(b.histograms)
	b.histograms = nil
	h.putSeriesBuffer(b.histogramSeries)
	b.histogramSeries = nil
	h.putFloatHistogramBuffer(b.floatHistograms)
	b.floatHistograms = nil
	h.putSeriesBuffer(b.floatHistogramSeries)
	b.floatHistogramSeries = nil
	h.putMetadataBuffer(b.metadata)
	b.metadata = nil
	h.putSeriesBuffer(b.metadataSeries)
	b.metadataSeries = nil
	h.putExemplarBuffer(b.exemplars)
	b.exemplars = nil
}

type headAppender struct {
	head          *Head
	minValidTime  int64 // No samples below this timestamp are allowed.
	mint, maxt    int64
	headMaxt      int64 // We track it here to not take the lock for every sample appended.
	oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample.

	seriesRefs []record.RefSeries // New series records held by this appender.
	series     []*memSeries       // New series held by this appender (using corresponding slice indexes from seriesRefs)
	batches    []*appendBatch     // Holds all the other data to append. (In regular cases, there should be only one of these.)

	typesInBatch map[chunks.HeadSeriesRef]sampleType // Which (one) sample type each series holds in the most recent batch.

	appendID, cleanupAppendIDsBelow uint64
	closed                          bool

@ -357,21 +408,27 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
		}
	}

	s.Lock()
	if value.IsStaleNaN(v) {
		// TODO(krajorama): reorganize Commit() to handle samples in append order
		// not floats first and then histograms. Then we could do this conversion
		// in commit. This code should move into Commit().
		switch {
		case s.lastHistogramValue != nil:
			s.Unlock()
		// If we have added a sample before with this same appender, we
		// can check the previously used type and turn a stale float
		// sample into a stale histogram sample or stale float histogram
		// sample as appropriate. This prevents an unnecessary creation
		// of a new batch. However, since other appenders might append
		// to the same series concurrently, this is not perfect but just
		// an optimization for the more likely case.
		switch a.typesInBatch[s.ref] {
		case stHistogram, stCustomBucketHistogram:
			return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil)
		case s.lastFloatHistogramValue != nil:
			s.Unlock()
		case stFloatHistogram, stCustomBucketFloatHistogram:
			return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v})
		}
		// Note that a series reference not yet in the map will come out
		// as stNone, but since we do not handle that case separately,
		// we do not need to check for the difference between "unknown
		// series" and "known series with stNone".
	}

	s.Lock()
	defer s.Unlock()
	// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
	// to skip that sample from the WAL and write only in the WBL.

@ -403,12 +460,13 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
		a.maxt = t
	}

	a.samples = append(a.samples, record.RefSample{
	b := a.getCurrentBatch(stFloat, s.ref)
	b.floats = append(b.floats, record.RefSample{
		Ref: s.ref,
		T:   t,
		V:   v,
	})
	a.sampleSeries = append(a.sampleSeries, s)
	b.floatSeries = append(b.floatSeries, s)
	return storage.SeriesRef(s.ref), nil
}

@ -448,8 +506,9 @@ func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Lab
	if ct > a.maxt {
		a.maxt = ct
	}
	a.samples = append(a.samples, record.RefSample{Ref: s.ref, T: ct, V: 0.0})
	a.sampleSeries = append(a.sampleSeries, s)
	b := a.getCurrentBatch(stFloat, s.ref)
	b.floats = append(b.floats, record.RefSample{Ref: s.ref, T: ct, V: 0.0})
	b.floatSeries = append(b.floatSeries, s)
	return storage.SeriesRef(s.ref), nil
}

@ -476,6 +535,65 @@ func (a *headAppender) getOrCreate(lset labels.Labels) (s *memSeries, created bo
	return s, created, nil
}

// getCurrentBatch returns the current batch if it fits the provided sampleType
// for the provided series. Otherwise, it adds a new batch and returns it.
func (a *headAppender) getCurrentBatch(st sampleType, s chunks.HeadSeriesRef) *appendBatch {
	h := a.head

	newBatch := func() *appendBatch {
		b := appendBatch{
			floats:               h.getFloatBuffer(),
			floatSeries:          h.getSeriesBuffer(),
			histograms:           h.getHistogramBuffer(),
			histogramSeries:      h.getSeriesBuffer(),
			floatHistograms:      h.getFloatHistogramBuffer(),
			floatHistogramSeries: h.getSeriesBuffer(),
			metadata:             h.getMetadataBuffer(),
			metadataSeries:       h.getSeriesBuffer(),
		}

		// Allocate the exemplars buffer only if exemplars are enabled.
		if h.opts.EnableExemplarStorage {
			b.exemplars = h.getExemplarBuffer()
		}
		clear(a.typesInBatch)
		if st != stNone {
			a.typesInBatch[s] = st
		}
		a.batches = append(a.batches, &b)
		return &b
	}

	// First batch ever. Create it.
	if len(a.batches) == 0 {
		return newBatch()
	}

	// TODO(beorn7): If we ever see that the a.typesInBatch map grows so
	// large that it matters for total memory consumption, we could limit
	// the batch size here, i.e. cut a new batch even without a type change.
	// Something like:
	// if len(a.typesInBatch) > limit {
	//     return newBatch()
	// }

	lastBatch := a.batches[len(a.batches)-1]
	if st == stNone {
		// Type doesn't matter, last batch will always do.
		return lastBatch
	}
	prevST, ok := a.typesInBatch[s]
	switch {
	case !ok: // New series. Add it to map and return current batch.
		a.typesInBatch[s] = st
		return lastBatch
	case prevST == st: // Old series, same type. Just return batch.
		return lastBatch
	}
	// An old series got a new type. Start new batch.
	return newBatch()
}

// appendable checks whether the given sample is valid for appending to the series.
// If the sample is valid and in-order, it returns false with no error.
// If the sample belongs to the out-of-order chunk, it returns true with no error.
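To make the batching rule above concrete: a batch is cut only when a series already seen in the current batch reappears with a different sample type; mixing types across different series stays within one batch. A self-contained toy version of the bookkeeping (stand-in types, not the real ones):

package main

import "fmt"

// Minimal stand-ins for the diff's types; illustrative only.
type sampleType byte

const (
	stFloat sampleType = iota + 1
	stHistogram
)

type seriesRef uint64

type appendOp struct {
	series seriesRef
	typ    sampleType
}

// countBatches applies the "type clean" rule from getCurrentBatch: a
// batch is cut as soon as a series already seen in the current batch
// shows up with a different sample type.
func countBatches(ops []appendOp) int {
	batches := 1
	types := map[seriesRef]sampleType{}
	for _, op := range ops {
		if prev, ok := types[op.series]; ok && prev != op.typ {
			batches++
			types = map[seriesRef]sampleType{} // Fresh map for the new batch.
		}
		types[op.series] = op.typ
	}
	return batches
}

func main() {
	ops := []appendOp{
		{1, stFloat}, {2, stHistogram}, // Mixed types across series: still one batch.
		{1, stHistogram}, // Series 1 changes type: a second batch is cut.
	}
	fmt.Println(countBatches(ops)) // 2
}
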
@ -638,7 +756,8 @@ func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels,
		return 0, err
	}

	a.exemplars = append(a.exemplars, exemplarWithSeriesRef{ref, e})
	b := a.getCurrentBatch(stNone, chunks.HeadSeriesRef(ref))
	b.exemplars = append(b.exemplars, exemplarWithSeriesRef{ref, e})

	return storage.SeriesRef(s.ref), nil
}

@ -667,11 +786,10 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
		}
	}

	var created bool
	s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
	if s == nil {
		var err error
		s, created, err = a.getOrCreate(lset)
		s, _, err = a.getOrCreate(lset)
		if err != nil {
			return 0, err
		}

@ -680,14 +798,6 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
	switch {
	case h != nil:
		s.Lock()

		// TODO(krajorama): reorganize Commit() to handle samples in append order
		// not floats first and then histograms. Then we would not need to do this.
		// This whole "if" should be removed.
		if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil {
			s.lastHistogramValue = &histogram.Histogram{}
		}

		// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
		// to skip that sample from the WAL and write only in the WBL.
		_, delta, err := s.appendableHistogram(t, h, a.headMaxt, a.minValidTime, a.oooTimeWindow)

@ -707,22 +817,19 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
			}
			return 0, err
		}
		a.histograms = append(a.histograms, record.RefHistogramSample{
		st := stHistogram
		if h.UsesCustomBuckets() {
			st = stCustomBucketHistogram
		}
		b := a.getCurrentBatch(st, s.ref)
		b.histograms = append(b.histograms, record.RefHistogramSample{
			Ref: s.ref,
			T:   t,
			H:   h,
		})
		a.histogramSeries = append(a.histogramSeries, s)
		b.histogramSeries = append(b.histogramSeries, s)
	case fh != nil:
		s.Lock()

		// TODO(krajorama): reorganize Commit() to handle samples in append order
		// not floats first and then histograms. Then we would not need to do this.
		// This whole "if" should be removed.
		if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil {
			s.lastFloatHistogramValue = &histogram.FloatHistogram{}
		}

		// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
		// to skip that sample from the WAL and write only in the WBL.
		_, delta, err := s.appendableFloatHistogram(t, fh, a.headMaxt, a.minValidTime, a.oooTimeWindow)

@ -742,12 +849,17 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
			}
			return 0, err
		}
		a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{
		st := stFloatHistogram
		if fh.UsesCustomBuckets() {
			st = stCustomBucketFloatHistogram
		}
		b := a.getCurrentBatch(st, s.ref)
		b.floatHistograms = append(b.floatHistograms, record.RefFloatHistogramSample{
			Ref: s.ref,
			T:   t,
			FH:  fh,
		})
		a.floatHistogramSeries = append(a.floatHistogramSeries, s)
		b.floatHistogramSeries = append(b.floatHistogramSeries, s)
	}

	if t < a.mint {

@ -769,11 +881,10 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l
		return 0, storage.ErrCTNewerThanSample
	}

	var created bool
	s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
	if s == nil {
		var err error
		s, created, err = a.getOrCreate(lset)
		s, _, err = a.getOrCreate(lset)
		if err != nil {
			return 0, err
		}

@ -784,16 +895,12 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l
		zeroHistogram := &histogram.Histogram{
			// The CTZeroSample represents a counter reset by definition.
			CounterResetHint: histogram.CounterReset,
			// Replicate other fields to avoid needless chunk creation.
			Schema:        h.Schema,
			ZeroThreshold: h.ZeroThreshold,
			CustomValues:  h.CustomValues,
		}
		s.Lock()

		// TODO(krajorama): reorganize Commit() to handle samples in append order
		// not floats first and then histograms. Then we would not need to do this.
		// This whole "if" should be removed.
		if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil {
			s.lastHistogramValue = zeroHistogram
		}

		// For CTZeroSamples OOO is not allowed.
		// We set it to true to make this implementation as close as possible to the float implementation.
		isOOO, _, err := s.appendableHistogram(ct, zeroHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow)

@ -815,26 +922,27 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l

		s.pendingCommit = true
		s.Unlock()
		a.histograms = append(a.histograms, record.RefHistogramSample{
		st := stHistogram
		if h.UsesCustomBuckets() {
			st = stCustomBucketHistogram
		}
		b := a.getCurrentBatch(st, s.ref)
		b.histograms = append(b.histograms, record.RefHistogramSample{
			Ref: s.ref,
			T:   ct,
			H:   zeroHistogram,
		})
		a.histogramSeries = append(a.histogramSeries, s)
		b.histogramSeries = append(b.histogramSeries, s)
	case fh != nil:
		zeroFloatHistogram := &histogram.FloatHistogram{
			// The CTZeroSample represents a counter reset by definition.
			CounterResetHint: histogram.CounterReset,
			// Replicate other fields to avoid needless chunk creation.
			Schema:        fh.Schema,
			ZeroThreshold: fh.ZeroThreshold,
			CustomValues:  fh.CustomValues,
		}
		s.Lock()

		// TODO(krajorama): reorganize Commit() to handle samples in append order
		// not floats first and then histograms. Then we would not need to do this.
		// This whole "if" should be removed.
		if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil {
			s.lastFloatHistogramValue = zeroFloatHistogram
		}

		// We set it to true to make this implementation as close as possible to the float implementation.
		isOOO, _, err := s.appendableFloatHistogram(ct, zeroFloatHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow) // OOO is not allowed for CTZeroSamples.
		if err != nil {

@ -855,12 +963,17 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l

		s.pendingCommit = true
		s.Unlock()
		a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{
		st := stFloatHistogram
		if fh.UsesCustomBuckets() {
			st = stCustomBucketFloatHistogram
		}
		b := a.getCurrentBatch(st, s.ref)
		b.floatHistograms = append(b.floatHistograms, record.RefFloatHistogramSample{
			Ref: s.ref,
			T:   ct,
			FH:  zeroFloatHistogram,
		})
		a.floatHistogramSeries = append(a.floatHistogramSeries, s)
		b.floatHistogramSeries = append(b.floatHistogramSeries, s)
	}

	if ct > a.maxt {

@ -889,13 +1002,14 @@ func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels,
	s.Unlock()

	if hasNewMetadata {
		a.metadata = append(a.metadata, record.RefMetadata{
		b := a.getCurrentBatch(stNone, s.ref)
		b.metadata = append(b.metadata, record.RefMetadata{
			Ref:  s.ref,
			Type: record.GetMetricType(meta.Type),
			Unit: meta.Unit,
			Help: meta.Help,
		})
		a.metadataSeries = append(a.metadataSeries, s)
		b.metadataSeries = append(b.metadataSeries, s)
	}

	return ref, nil

@ -932,66 +1046,68 @@ func (a *headAppender) log() error {
			return fmt.Errorf("log series: %w", err)
		}
	}
	if len(a.metadata) > 0 {
		rec = enc.Metadata(a.metadata, buf)
		buf = rec[:0]
	for _, b := range a.batches {
		if len(b.metadata) > 0 {
			rec = enc.Metadata(b.metadata, buf)
			buf = rec[:0]

			if err := a.head.wal.Log(rec); err != nil {
				return fmt.Errorf("log metadata: %w", err)
			}
		}
	if len(a.samples) > 0 {
		rec = enc.Samples(a.samples, buf)
		buf = rec[:0]

		if err := a.head.wal.Log(rec); err != nil {
			return fmt.Errorf("log samples: %w", err)
		}
	}
	if len(a.histograms) > 0 {
		var customBucketsHistograms []record.RefHistogramSample
		rec, customBucketsHistograms = enc.HistogramSamples(a.histograms, buf)
		buf = rec[:0]
		if len(rec) > 0 {
			if err := a.head.wal.Log(rec); err != nil {
				return fmt.Errorf("log histograms: %w", err)
				return fmt.Errorf("log metadata: %w", err)
			}
		}
		if len(b.floats) > 0 {
			rec = enc.Samples(b.floats, buf)
			buf = rec[:0]

		if len(customBucketsHistograms) > 0 {
			rec = enc.CustomBucketsHistogramSamples(customBucketsHistograms, buf)
			if err := a.head.wal.Log(rec); err != nil {
				return fmt.Errorf("log custom buckets histograms: %w", err)
				return fmt.Errorf("log samples: %w", err)
			}
		}
	}
	if len(a.floatHistograms) > 0 {
		var customBucketsFloatHistograms []record.RefFloatHistogramSample
		rec, customBucketsFloatHistograms = enc.FloatHistogramSamples(a.floatHistograms, buf)
		buf = rec[:0]
		if len(rec) > 0 {
			if err := a.head.wal.Log(rec); err != nil {
				return fmt.Errorf("log float histograms: %w", err)
		if len(b.histograms) > 0 {
			var customBucketsHistograms []record.RefHistogramSample
			rec, customBucketsHistograms = enc.HistogramSamples(b.histograms, buf)
			buf = rec[:0]
			if len(rec) > 0 {
				if err := a.head.wal.Log(rec); err != nil {
					return fmt.Errorf("log histograms: %w", err)
				}
			}

			if len(customBucketsHistograms) > 0 {
				rec = enc.CustomBucketsHistogramSamples(customBucketsHistograms, buf)
				if err := a.head.wal.Log(rec); err != nil {
					return fmt.Errorf("log custom buckets histograms: %w", err)
				}
			}
		}
		if len(b.floatHistograms) > 0 {
			var customBucketsFloatHistograms []record.RefFloatHistogramSample
			rec, customBucketsFloatHistograms = enc.FloatHistogramSamples(b.floatHistograms, buf)
			buf = rec[:0]
			if len(rec) > 0 {
				if err := a.head.wal.Log(rec); err != nil {
					return fmt.Errorf("log float histograms: %w", err)
				}
			}

		if len(customBucketsFloatHistograms) > 0 {
			rec = enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, buf)
			if err := a.head.wal.Log(rec); err != nil {
				return fmt.Errorf("log custom buckets float histograms: %w", err)
			if len(customBucketsFloatHistograms) > 0 {
				rec = enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, buf)
				if err := a.head.wal.Log(rec); err != nil {
					return fmt.Errorf("log custom buckets float histograms: %w", err)
				}
			}
		}
	}
	// Exemplars should be logged after samples (float/native histogram/etc),
	// otherwise it might happen that we send the exemplars in a remote write
	// batch before the samples, which in turn means the exemplar is rejected
	// for missing series, since series are created due to samples.
	if len(a.exemplars) > 0 {
		rec = enc.Exemplars(exemplarsForEncoding(a.exemplars), buf)
		buf = rec[:0]
		// Exemplars should be logged after samples (float/native histogram/etc),
		// otherwise it might happen that we send the exemplars in a remote write
		// batch before the samples, which in turn means the exemplar is rejected
		// for missing series, since series are created due to samples.
		if len(b.exemplars) > 0 {
			rec = enc.Exemplars(exemplarsForEncoding(b.exemplars), buf)
			buf = rec[:0]

		if err := a.head.wal.Log(rec); err != nil {
			return fmt.Errorf("log exemplars: %w", err)
			if err := a.head.wal.Log(rec); err != nil {
				return fmt.Errorf("log exemplars: %w", err)
			}
		}
	}
	return nil

@ -1040,10 +1156,10 @@ type appenderCommitContext struct {
	enc record.Encoder
}

// commitExemplars adds all exemplars from headAppender to the head's exemplar storage.
func (a *headAppender) commitExemplars() {
// commitExemplars adds all exemplars from the provided batch to the head's exemplar storage.
func (a *headAppender) commitExemplars(b *appendBatch) {
	// No errors logging to WAL, so pass the exemplars along to the in memory storage.
	for _, e := range a.exemplars {
	for _, e := range b.exemplars {
		s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref))
		if s == nil {
			// This is very unlikely to happen, but we have seen it in the wild.

@ -1147,9 +1263,9 @@ func handleAppendableError(err error, appended, oooRejected, oobRejected, tooOld
	}
}

// commitSamples processes and commits the samples in the headAppender to the series.
// It handles both in-order and out-of-order samples, updating the appenderCommitContext
// with the results of the append operations.
// commitFloats processes and commits the samples in the provided batch to the
// series. It handles both in-order and out-of-order samples, updating the
// appenderCommitContext with the results of the append operations.
//
// The function iterates over the samples in the headAppender and attempts to append each sample
// to its corresponding series. It handles various error cases such as out-of-order samples,

@ -1166,14 +1282,68 @@ func handleAppendableError(err error, appended, oooRejected, oobRejected, tooOld
// operations on the series after appending the samples.
//
// There are also specific functions to commit histograms and float histograms.
func (a *headAppender) commitSamples(acc *appenderCommitContext) {
func (a *headAppender) commitFloats(b *appendBatch, acc *appenderCommitContext) {
	var ok, chunkCreated bool
	var series *memSeries

	for i, s := range a.samples {
		series = a.sampleSeries[i]
	for i, s := range b.floats {
		series = b.floatSeries[i]
		series.Lock()

		if value.IsStaleNaN(s.V) {
			// If a float staleness marker had been appended for a
			// series that got a histogram or float histogram
			// appended before via this same appender, it would not
			// show up here because we had already converted it. We
			// end up here for two reasons: (1) This is the very
			// first sample for this series appended via this
			// appender. (2) A float sample was appended to this
			// series before via this same appender.
			//
			// In either case, we need to check the previous sample
			// in the memSeries to append the appropriately typed
			// staleness marker. This is obviously so in case (1).
			// In case (2), we would usually expect a float sample
			// as the previous sample, but there might be concurrent
			// appends that have added a histogram sample in the
			// meantime. (This will probably lead to OOO shenanigans
			// anyway, but that's a different story.)
			//
			// If the last sample in the memSeries is indeed a
			// float, we don't have to do anything special here and
			// just go on with the normal commit for a float sample.
			// However, if the last sample in the memSeries is a
			// histogram or float histogram, we have to convert the
			// staleness marker to a histogram (or float histogram,
			// respectively), and just add it at the end of the
			// histograms (or float histograms) in the same batch,
			// to be committed later in commitHistograms (or
			// commitFloatHistograms). The latter is fine because we
			// know there is no other histogram (or float histogram)
			// sample for this same series in this same batch
			// (because any such sample would have triggered a new
			// batch).
			switch {
			case series.lastHistogramValue != nil:
				b.histograms = append(b.histograms, record.RefHistogramSample{
					Ref: series.ref,
					T:   s.T,
					H:   &histogram.Histogram{Sum: s.V},
				})
				b.histogramSeries = append(b.histogramSeries, series)
				series.Unlock()
				continue
			case series.lastFloatHistogramValue != nil:
				b.floatHistograms = append(b.floatHistograms, record.RefFloatHistogramSample{
					Ref: series.ref,
					T:   s.T,
					FH:  &histogram.FloatHistogram{Sum: s.V},
				})
				b.floatHistogramSeries = append(b.floatHistogramSeries, series)
				series.Unlock()
				continue
			}
		}
		oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow)
		if err != nil {
			handleAppendableError(err, &acc.floatsAppended, &acc.floatOOORejected, &acc.floatOOBRejected, &acc.floatTooOldRejected)
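The staleness-marker conversion above boils down to re-typing a stale NaN so it lands in the same chunk encoding as the series' previous sample. A minimal, self-contained sketch of that decision (the two booleans stand in for the lastHistogramValue / lastFloatHistogramValue checks; the helper is hypothetical):

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/value"
)

// retypeStaleMarker re-issues a float staleness marker as a histogram
// (or float histogram) staleness marker, carrying the stale NaN in Sum,
// when the series' last sample was of that type.
func retypeStaleMarker(lastWasHistogram, lastWasFloatHistogram bool, v float64) (*histogram.Histogram, *histogram.FloatHistogram) {
	if !value.IsStaleNaN(v) {
		return nil, nil // Not a staleness marker; commit as a plain float.
	}
	switch {
	case lastWasHistogram:
		return &histogram.Histogram{Sum: v}, nil
	case lastWasFloatHistogram:
		return nil, &histogram.FloatHistogram{Sum: v}
	}
	return nil, nil
}

func main() {
	stale := math.Float64frombits(value.StaleNaN)
	h, _ := retypeStaleMarker(true, false, stale)
	fmt.Println(value.IsStaleNaN(h.Sum)) // true
}
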
@ -1261,15 +1431,24 @@ func (a *headAppender) commitSamples(acc *appenderCommitContext) {
		}
	}

// For details on the commitHistograms function, see the commitSamples docs.
func (a *headAppender) commitHistograms(acc *appenderCommitContext) {
// For details on the commitHistograms function, see the commitFloats docs.
func (a *headAppender) commitHistograms(b *appendBatch, acc *appenderCommitContext) {
	var ok, chunkCreated bool
	var series *memSeries

	for i, s := range a.histograms {
		series = a.histogramSeries[i]
	for i, s := range b.histograms {
		series = b.histogramSeries[i]
		series.Lock()

		// At this point, we could encounter a histogram staleness
		// marker that should better be a float staleness marker or a
		// float histogram staleness marker. This can only happen with
		// concurrent appenders appending to the same series _and_ doing
		// so in a mixed-type scenario. This case is expected to be very
		// rare, so we do not bother here to convert the staleness
		// marker. The worst case is that we need to cut a new chunk
		// just for the staleness marker.

		oooSample, _, err := series.appendableHistogram(s.T, s.H, a.headMaxt, a.minValidTime, a.oooTimeWindow)
		if err != nil {
			handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected)

@ -1361,15 +1540,24 @@ func (a *headAppender) commitHistograms(acc *appenderCommitContext) {
		}
	}

// For details on the commitFloatHistograms function, see the commitSamples docs.
func (a *headAppender) commitFloatHistograms(acc *appenderCommitContext) {
// For details on the commitFloatHistograms function, see the commitFloats docs.
func (a *headAppender) commitFloatHistograms(b *appendBatch, acc *appenderCommitContext) {
	var ok, chunkCreated bool
	var series *memSeries

	for i, s := range a.floatHistograms {
		series = a.floatHistogramSeries[i]
	for i, s := range b.floatHistograms {
		series = b.floatHistogramSeries[i]
		series.Lock()

		// At this point, we could encounter a float histogram staleness
		// marker that should better be a float staleness marker or an
		// integer histogram staleness marker. This can only happen with
		// concurrent appenders appending to the same series _and_ doing
		// so in a mixed-type scenario. This case is expected to be very
		// rare, so we do not bother here to convert the staleness
		// marker. The worst case is that we need to cut a new chunk
		// just for the staleness marker.

		oooSample, _, err := series.appendableFloatHistogram(s.T, s.FH, a.headMaxt, a.minValidTime, a.oooTimeWindow)
		if err != nil {
			handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected)

@ -1461,14 +1649,14 @@ func (a *headAppender) commitFloatHistograms(acc *appenderCommitContext) {
	}
}

// commitMetadata commits the metadata for each series in the headAppender.
// commitMetadata commits the metadata for each series in the provided batch.
// It iterates over the metadata slice and updates the corresponding series
// with the new metadata information. The series is locked during the update
// to ensure thread safety.
func (a *headAppender) commitMetadata() {
func commitMetadata(b *appendBatch) {
	var series *memSeries
	for i, m := range a.metadata {
		series = a.metadataSeries[i]
	for i, m := range b.metadata {
		series = b.metadataSeries[i]
		series.Lock()
		series.meta = &metadata.Metadata{Type: record.ToMetricType(m.Type), Unit: m.Unit, Help: m.Help}
		series.Unlock()

@ -1489,75 +1677,82 @@ func (a *headAppender) Commit() (err error) {
	if a.closed {
		return ErrAppenderClosed
	}
	defer func() { a.closed = true }()

	h := a.head

	defer func() {
		h.putRefSeriesBuffer(a.seriesRefs)
		h.putSeriesBuffer(a.series)
		a.closed = true
	}()

	if err := a.log(); err != nil {
		_ = a.Rollback() // Most likely the same error will happen again.
		return fmt.Errorf("write to WAL: %w", err)
	}

	if a.head.writeNotified != nil {
		a.head.writeNotified.Notify()
	if h.writeNotified != nil {
		h.writeNotified.Notify()
	}

	a.commitExemplars()

	defer a.head.metrics.activeAppenders.Dec()
	defer a.head.putAppendBuffer(a.samples)
	defer a.head.putSeriesBuffer(a.sampleSeries)
	defer a.head.putExemplarBuffer(a.exemplars)
	defer a.head.putHistogramBuffer(a.histograms)
	defer a.head.putFloatHistogramBuffer(a.floatHistograms)
	defer a.head.putMetadataBuffer(a.metadata)
	defer a.head.iso.closeAppend(a.appendID)

	acc := &appenderCommitContext{
		floatsAppended:     len(a.samples),
		histogramsAppended: len(a.histograms) + len(a.floatHistograms),
		inOrderMint:        math.MaxInt64,
		inOrderMaxt:        math.MinInt64,
		oooMinT:            math.MaxInt64,
		oooMaxT:            math.MinInt64,
		oooCapMax:          a.head.opts.OutOfOrderCapMax.Load(),
		inOrderMint: math.MaxInt64,
		inOrderMaxt: math.MinInt64,
		oooMinT:     math.MaxInt64,
		oooMaxT:     math.MinInt64,
		oooCapMax:   h.opts.OutOfOrderCapMax.Load(),
		appendChunkOpts: chunkOpts{
			chunkDiskMapper: a.head.chunkDiskMapper,
			chunkRange:      a.head.chunkRange.Load(),
			samplesPerChunk: a.head.opts.SamplesPerChunk,
			chunkDiskMapper: h.chunkDiskMapper,
			chunkRange:      h.chunkRange.Load(),
			samplesPerChunk: h.opts.SamplesPerChunk,
		},
	}

	for _, b := range a.batches {
		acc.floatsAppended += len(b.floats)
		acc.histogramsAppended += len(b.histograms) + len(b.floatHistograms)
		a.commitExemplars(b)
		defer b.close(h)
	}
	defer h.metrics.activeAppenders.Dec()
	defer h.iso.closeAppend(a.appendID)

	defer func() {
		for i := range acc.oooRecords {
			a.head.putBytesBuffer(acc.oooRecords[i][:0])
			h.putBytesBuffer(acc.oooRecords[i][:0])
		}
	}()
|
||||
|
||||
a.commitSamples(acc)
|
||||
a.commitHistograms(acc)
|
||||
a.commitFloatHistograms(acc)
|
||||
a.commitMetadata()
|
||||
for _, b := range a.batches {
|
||||
// Do not change the order of these calls. The staleness marker
|
||||
// handling depends on it.
|
||||
a.commitFloats(b, acc)
|
||||
a.commitHistograms(b, acc)
|
||||
a.commitFloatHistograms(b, acc)
|
||||
commitMetadata(b)
|
||||
}
|
||||
// Unmark all series as pending commit after all samples have been committed.
|
||||
a.unmarkCreatedSeriesAsPendingCommit()
|
||||
|
||||
a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOORejected))
|
||||
a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histoOOORejected))
|
||||
a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOBRejected))
|
||||
a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatTooOldRejected))
|
||||
a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatsAppended))
|
||||
a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histogramsAppended))
|
||||
a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.oooFloatsAccepted))
|
||||
a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.oooHistogramAccepted))
|
||||
a.head.updateMinMaxTime(acc.inOrderMint, acc.inOrderMaxt)
|
||||
a.head.updateMinOOOMaxOOOTime(acc.oooMinT, acc.oooMaxT)
|
||||
h.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOORejected))
|
||||
h.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histoOOORejected))
|
||||
h.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOBRejected))
|
||||
h.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatTooOldRejected))
|
||||
h.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatsAppended))
|
||||
h.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histogramsAppended))
|
||||
h.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.oooFloatsAccepted))
|
||||
h.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.oooHistogramAccepted))
|
||||
h.updateMinMaxTime(acc.inOrderMint, acc.inOrderMaxt)
|
||||
h.updateMinOOOMaxOOOTime(acc.oooMinT, acc.oooMaxT)
|
||||
|
||||
acc.collectOOORecords(a)
|
||||
if a.head.wbl != nil {
|
||||
if err := a.head.wbl.Log(acc.oooRecords...); err != nil {
|
||||
if h.wbl != nil {
|
||||
if err := h.wbl.Log(acc.oooRecords...); err != nil {
|
||||
// TODO(codesome): Currently WBL logging of ooo samples is best effort here since we cannot try logging
|
||||
// until we have found what samples become OOO. We can try having a metric for this failure.
|
||||
// Returning the error here is not correct because we have already put the samples into the memory,
|
||||
// hence the append/insert was a success.
|
||||
a.head.logger.Error("Failed to log out of order samples into the WAL", "err", err)
|
||||
h.logger.Error("Failed to log out of order samples into the WAL", "err", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
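The batch-ordered commit above is easiest to see from the caller's side. Below is a minimal sketch (illustrative only; appendOne and its arguments are assumptions, not part of this change, and the usual context, labels, and storage imports are assumed) of the Commit/Rollback contract that this code implements:

// appendOne sketches the caller-side contract of headAppender.Commit and
// Rollback: every append error is answered with a Rollback, and a
// successful batch is finished with exactly one Commit.
func appendOne(ctx context.Context, db storage.Appendable, lbls labels.Labels, ts int64, v float64) error {
	app := db.Appender(ctx)
	if _, err := app.Append(0, lbls, ts, v); err != nil {
		_ = app.Rollback() // Releases the per-batch buffers seen above.
		return err
	}
	// Commit processes each batch in order: floats, histograms,
	// float histograms, then metadata.
	return app.Commit()
}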
@ -2007,37 +2202,43 @@ func (a *headAppender) Rollback() (err error) {
if a.closed {
return ErrAppenderClosed
}
defer func() { a.closed = true }()
defer a.head.metrics.activeAppenders.Dec()
defer a.head.iso.closeAppend(a.appendID)
defer a.head.putSeriesBuffer(a.sampleSeries)
defer a.unmarkCreatedSeriesAsPendingCommit()
h := a.head
defer func() {
a.unmarkCreatedSeriesAsPendingCommit()
h.iso.closeAppend(a.appendID)
h.metrics.activeAppenders.Dec()
a.closed = true
h.putRefSeriesBuffer(a.seriesRefs)
h.putSeriesBuffer(a.series)
}()

var series *memSeries
for i := range a.samples {
series = a.sampleSeries[i]
series.Lock()
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
series.pendingCommit = false
series.Unlock()
for _, b := range a.batches {
for i := range b.floats {
series = b.floatSeries[i]
series.Lock()
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
series.pendingCommit = false
series.Unlock()
}
for i := range b.histograms {
series = b.histogramSeries[i]
series.Lock()
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
series.pendingCommit = false
series.Unlock()
}
for i := range b.floatHistograms {
series = b.floatHistogramSeries[i]
series.Lock()
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
series.pendingCommit = false
series.Unlock()
}
b.close(h)
}
for i := range a.histograms {
series = a.histogramSeries[i]
series.Lock()
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
series.pendingCommit = false
series.Unlock()
}
a.head.putAppendBuffer(a.samples)
a.head.putExemplarBuffer(a.exemplars)
a.head.putHistogramBuffer(a.histograms)
a.head.putFloatHistogramBuffer(a.floatHistograms)
a.head.putMetadataBuffer(a.metadata)
a.samples = nil
a.exemplars = nil
a.histograms = nil
a.metadata = nil

a.batches = a.batches[:0]
// Series are created in the head memory regardless of rollback. Thus we have
// to log them to the WAL in any case.
return a.log()
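The three rollback loops above repeat one and the same per-series reset. A compact sketch of that shared step (a hypothetical helper, shown only to make the pattern explicit; it is not part of the change):

// resetPendingCommit is equivalent to each of the three loops above: it
// drops isolation append IDs below the watermark and clears the
// pending-commit flag while holding the series lock.
func resetPendingCommit(series []*memSeries, cleanupBelow uint64) {
	for _, s := range series {
		s.Lock()
		s.cleanupAppendIDsBelow(cleanupBelow)
		s.pendingCommit = false
		s.Unlock()
	}
}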
@ -34,6 +34,7 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/prometheus/client_golang/prometheus"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"golang.org/x/sync/errgroup"
@ -185,7 +186,7 @@ func readTestWAL(t testing.TB, dir string) (recs []any) {
require.NoError(t, sr.Close())
}()

dec := record.NewDecoder(labels.NewSymbolTable())
dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
r := wlog.NewReader(sr)

for r.Next() {
@ -5336,8 +5337,6 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
samples []chunks.Sample
expChunks int
err error
// If this is empty, samples above will be taken instead of this.
addToExp []chunks.Sample
}{
// Histograms that end up in the expected samples are copied here so that we
// can independently set the CounterResetHint later.
@ -5377,43 +5376,29 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
samples: []chunks.Sample{sample{t: 100, fh: floatHists[4].Copy()}},
err: storage.ErrOutOfOrderSample,
},
// The next three tests all failed before #15177 was fixed.
{
// Combination of histograms and float64 in the same commit. The behaviour is undefined, but we want to also
// verify how TSDB would behave. Here the histogram is appended at the end, hence will be considered as out of order.
samples: []chunks.Sample{
sample{t: 400, f: 4},
sample{t: 500, h: hists[5]}, // This won't be committed.
sample{t: 500, h: hists[5]},
sample{t: 600, f: 6},
},
addToExp: []chunks.Sample{
sample{t: 400, f: 4},
sample{t: 600, f: 6},
},
expChunks: 7, // Only 1 new chunk for float64.
expChunks: 9, // Each of the three samples above creates a new chunk because the type changes.
},
{
// Here the histogram is appended at the end, hence the first histogram is out of order.
samples: []chunks.Sample{
sample{t: 700, h: hists[7]}, // Out of order w.r.t. the next float64 sample that is appended first.
sample{t: 700, h: hists[7]},
sample{t: 800, f: 8},
sample{t: 900, h: hists[9]},
},
addToExp: []chunks.Sample{
sample{t: 800, f: 8},
sample{t: 900, h: hists[9].Copy()},
},
expChunks: 8, // float64 added to old chunk, only 1 new for histograms.
expChunks: 12, // Again each sample creates a new chunk.
},
{
// Float histogram is appended at the end.
samples: []chunks.Sample{
sample{t: 1000, fh: floatHists[7]}, // Out of order w.r.t. the next histogram.
sample{t: 1000, fh: floatHists[7]},
sample{t: 1100, h: hists[9]},
},
addToExp: []chunks.Sample{
sample{t: 1100, h: hists[9].Copy()},
},
expChunks: 8,
expChunks: 14, // Even changes between float and integer histogram create new chunks.
},
}

@ -5431,11 +5416,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {

if a.err == nil {
require.NoError(t, app.Commit())
if len(a.addToExp) > 0 {
expResult = append(expResult, a.addToExp...)
} else {
expResult = append(expResult, a.samples...)
}
expResult = append(expResult, a.samples...)
checkExpChunks(a.expChunks)
} else {
require.NoError(t, app.Rollback())
@ -6751,7 +6732,27 @@ func TestHeadAppender_AppendFloatWithSameTimestampAsPreviousHistogram(t *testing

func TestHeadAppender_AppendCT(t *testing.T) {
testHistogram := tsdbutil.GenerateTestHistogram(1)
testHistogram.CounterResetHint = histogram.NotCounterReset
testFloatHistogram := tsdbutil.GenerateTestFloatHistogram(1)
testFloatHistogram.CounterResetHint = histogram.NotCounterReset
// TODO(beorn7): Once issue #15346 is fixed, the CounterResetHint of the
// following two zero histograms should be histogram.CounterReset.
testZeroHistogram := &histogram.Histogram{
Schema: testHistogram.Schema,
ZeroThreshold: testHistogram.ZeroThreshold,
PositiveSpans: testHistogram.PositiveSpans,
NegativeSpans: testHistogram.NegativeSpans,
PositiveBuckets: []int64{0, 0, 0, 0},
NegativeBuckets: []int64{0, 0, 0, 0},
}
testZeroFloatHistogram := &histogram.FloatHistogram{
Schema: testFloatHistogram.Schema,
ZeroThreshold: testFloatHistogram.ZeroThreshold,
PositiveSpans: testFloatHistogram.PositiveSpans,
NegativeSpans: testFloatHistogram.NegativeSpans,
PositiveBuckets: []float64{0, 0, 0, 0},
NegativeBuckets: []float64{0, 0, 0, 0},
}
type appendableSamples struct {
ts int64
fSample float64
@ -6783,12 +6784,10 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 101, h: testHistogram, ct: 1},
},
expectedSamples: func() []chunks.Sample {
hNoCounterReset := *testHistogram
hNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{
sample{t: 1, h: &histogram.Histogram{}},
sample{t: 1, h: testZeroHistogram},
sample{t: 100, h: testHistogram},
sample{t: 101, h: &hNoCounterReset},
sample{t: 101, h: testHistogram},
}
}(),
},
@ -6799,12 +6798,10 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 101, fh: testFloatHistogram, ct: 1},
},
expectedSamples: func() []chunks.Sample {
fhNoCounterReset := *testFloatHistogram
fhNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{
sample{t: 1, fh: &histogram.FloatHistogram{}},
sample{t: 1, fh: testZeroFloatHistogram},
sample{t: 100, fh: testFloatHistogram},
sample{t: 101, fh: &fhNoCounterReset},
sample{t: 101, fh: testFloatHistogram},
}
}(),
},
@ -6827,12 +6824,10 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 101, h: testHistogram, ct: 1},
},
expectedSamples: func() []chunks.Sample {
hNoCounterReset := *testHistogram
hNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{
sample{t: 1, h: &histogram.Histogram{}},
sample{t: 1, h: testZeroHistogram},
sample{t: 100, h: testHistogram},
sample{t: 101, h: &hNoCounterReset},
sample{t: 101, h: testHistogram},
}
}(),
},
@ -6843,12 +6838,10 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 101, fh: testFloatHistogram, ct: 1},
},
expectedSamples: func() []chunks.Sample {
fhNoCounterReset := *testFloatHistogram
fhNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{
sample{t: 1, fh: &histogram.FloatHistogram{}},
sample{t: 1, fh: testZeroFloatHistogram},
sample{t: 100, fh: testFloatHistogram},
sample{t: 101, fh: &fhNoCounterReset},
sample{t: 101, fh: testFloatHistogram},
}
}(),
},
@ -6872,9 +6865,9 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 102, h: testHistogram, ct: 101},
},
expectedSamples: []chunks.Sample{
sample{t: 1, h: &histogram.Histogram{}},
sample{t: 1, h: testZeroHistogram},
sample{t: 100, h: testHistogram},
sample{t: 101, h: &histogram.Histogram{CounterResetHint: histogram.UnknownCounterReset}},
sample{t: 101, h: testZeroHistogram},
sample{t: 102, h: testHistogram},
},
},
@ -6885,9 +6878,9 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 102, fh: testFloatHistogram, ct: 101},
},
expectedSamples: []chunks.Sample{
sample{t: 1, fh: &histogram.FloatHistogram{}},
sample{t: 1, fh: testZeroFloatHistogram},
sample{t: 100, fh: testFloatHistogram},
sample{t: 101, fh: &histogram.FloatHistogram{CounterResetHint: histogram.UnknownCounterReset}},
sample{t: 101, fh: testZeroFloatHistogram},
sample{t: 102, fh: testFloatHistogram},
},
},
@ -6910,12 +6903,10 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 101, h: testHistogram, ct: 100},
},
expectedSamples: func() []chunks.Sample {
hNoCounterReset := *testHistogram
hNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{
sample{t: 1, h: &histogram.Histogram{}},
sample{t: 1, h: testZeroHistogram},
sample{t: 100, h: testHistogram},
sample{t: 101, h: &hNoCounterReset},
sample{t: 101, h: testHistogram},
}
}(),
},
@ -6926,12 +6917,10 @@ func TestHeadAppender_AppendCT(t *testing.T) {
{ts: 101, fh: testFloatHistogram, ct: 100},
},
expectedSamples: func() []chunks.Sample {
fhNoCounterReset := *testFloatHistogram
fhNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{
sample{t: 1, fh: &histogram.FloatHistogram{}},
sample{t: 1, fh: testZeroFloatHistogram},
sample{t: 100, fh: testFloatHistogram},
sample{t: 101, fh: &fhNoCounterReset},
sample{t: 101, fh: testFloatHistogram},
}
}(),
},
@ -155,7 +155,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
go func() {
defer close(decoded)
var err error
dec := record.NewDecoder(syms)
dec := record.NewDecoder(syms, h.logger)
for r.Next() {
switch dec.Type(r.Record()) {
case record.Series:
@ -767,7 +767,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch

go func() {
defer close(decodedCh)
dec := record.NewDecoder(syms)
dec := record.NewDecoder(syms, h.logger)
for r.Next() {
var err error
rec := r.Record()
@ -1572,7 +1572,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie
refSeries map[chunks.HeadSeriesRef]*memSeries
exemplarBuf []record.RefExemplar
syms = labels.NewSymbolTable() // New table for the whole snapshot.
dec = record.NewDecoder(syms)
dec = record.NewDecoder(syms, h.logger)
)

wg.Add(concurrency)
@ -18,6 +18,7 @@ package record
import (
"errors"
"fmt"
"log/slog"
"math"

"github.com/prometheus/common/model"
@ -202,10 +203,11 @@ type RefMmapMarker struct {
// Decoder decodes series, sample, metadata and tombstone records.
type Decoder struct {
builder labels.ScratchBuilder
logger *slog.Logger
}

func NewDecoder(*labels.SymbolTable) Decoder { // FIXME remove t
return Decoder{builder: labels.NewScratchBuilder(0)}
func NewDecoder(_ *labels.SymbolTable, logger *slog.Logger) Decoder { // FIXME remove t
return Decoder{builder: labels.NewScratchBuilder(0), logger: logger}
}

// Type returns the type of the record.
@ -433,7 +435,7 @@ func (*Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMarke
return markers, nil
}

func (*Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) {
func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) {
dec := encoding.Decbuf{B: rec}
t := Type(dec.Byte())
if t != HistogramSamples && t != CustomBucketsHistogramSamples {
@ -457,6 +459,18 @@ func (*Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([
}

DecodeHistogram(&dec, rh.H)

if !histogram.IsKnownSchema(rh.H.Schema) {
d.logger.Warn("skipping histogram with unknown schema in WAL record", "schema", rh.H.Schema, "timestamp", rh.T)
continue
}
if rh.H.Schema > histogram.ExponentialSchemaMax && rh.H.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// record is from a newer Prometheus version that supports higher
// resolution.
rh.H.ReduceResolution(histogram.ExponentialSchemaMax)
}

histograms = append(histograms, rh)
}

@ -525,7 +539,7 @@ func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) {
}
}

func (*Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) {
func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) {
dec := encoding.Decbuf{B: rec}
t := Type(dec.Byte())
if t != FloatHistogramSamples && t != CustomBucketsFloatHistogramSamples {
@ -549,6 +563,18 @@ func (*Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogram
}

DecodeFloatHistogram(&dec, rh.FH)

if !histogram.IsKnownSchema(rh.FH.Schema) {
d.logger.Warn("skipping histogram with unknown schema in WAL record", "schema", rh.FH.Schema, "timestamp", rh.T)
continue
}
if rh.FH.Schema > histogram.ExponentialSchemaMax && rh.FH.Schema <= histogram.ExponentialSchemaMaxReserved {
// This is a very slow path, but it should only happen if the
// record is from a newer Prometheus version that supports higher
// resolution.
rh.FH.ReduceResolution(histogram.ExponentialSchemaMax)
}

histograms = append(histograms, rh)
}
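The decoder now takes a logger at construction time, and histogram records are filtered by schema while decoding. A minimal usage sketch in the style of the tests below (rec is assumed to hold an encoded histogram-samples record; it is not defined in this change):

dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
samples, err := dec.HistogramSamples(rec, nil)
// Samples with an unknown schema are dropped and logged via the decoder's
// logger; schemas above ExponentialSchemaMax but within the reserved range
// are instead reduced to ExponentialSchemaMax.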
@ -15,11 +15,13 @@
package record

import (
"bytes"
"fmt"
"math/rand"
"testing"

"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"

"github.com/prometheus/prometheus/model/histogram"
@ -32,7 +34,7 @@ import (

func TestRecord_EncodeDecode(t *testing.T) {
var enc Encoder
dec := NewDecoder(labels.NewSymbolTable())
dec := NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())

series := []RefSeries{
{
@ -224,11 +226,151 @@ func TestRecord_EncodeDecode(t *testing.T) {
require.Equal(t, floatHistograms, decGaugeFloatHistograms)
}

func TestRecord_DecodeInvalidHistogramSchema(t *testing.T) {
for _, schema := range []int32{-100, 100} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder

var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.HistogramSamples(histograms, nil)
decHistograms, err := dec.HistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Empty(t, decHistograms)
require.Contains(t, output.String(), "skipping histogram with unknown schema in WAL record")
})
}
}

func TestRecord_DecodeInvalidFloatHistogramSchema(t *testing.T) {
for _, schema := range []int32{-100, 100} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder

var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefFloatHistogramSample{
{
Ref: 56,
T: 1234,
FH: &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.FloatHistogramSamples(histograms, nil)
decHistograms, err := dec.FloatHistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Empty(t, decHistograms)
require.Contains(t, output.String(), "skipping histogram with unknown schema in WAL record")
})
}
}

func TestRecord_DecodeTooHighResolutionHistogramSchema(t *testing.T) {
for _, schema := range []int32{9, 52} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder

var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.HistogramSamples(histograms, nil)
decHistograms, err := dec.HistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Len(t, decHistograms, 1)
require.Equal(t, histogram.ExponentialSchemaMax, decHistograms[0].H.Schema)
})
}
}

func TestRecord_DecodeTooHighResolutionFloatHistogramSchema(t *testing.T) {
for _, schema := range []int32{9, 52} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder

var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefFloatHistogramSample{
{
Ref: 56,
T: 1234,
FH: &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.FloatHistogramSamples(histograms, nil)
decHistograms, err := dec.FloatHistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Len(t, decHistograms, 1)
require.Equal(t, histogram.ExponentialSchemaMax, decHistograms[0].FH.Schema)
})
}
}

// TestRecord_Corrupted ensures that corrupted records return the correct error.
// Bugfix check for pull/521 and pull/523.
func TestRecord_Corrupted(t *testing.T) {
var enc Encoder
dec := NewDecoder(labels.NewSymbolTable())
dec := NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())

t.Run("Test corrupted series record", func(t *testing.T) {
series := []RefSeries{
@ -156,7 +156,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He
exemplars []record.RefExemplar
metadata []record.RefMetadata
st = labels.NewSymbolTable() // Needed for decoding; labels do not outlive this function.
dec = record.NewDecoder(st)
dec = record.NewDecoder(st, logger)
enc record.Encoder
buf []byte
recs [][]byte

@ -311,7 +311,7 @@ func TestCheckpoint(t *testing.T) {
require.NoError(t, err)
defer sr.Close()

dec := record.NewDecoder(labels.NewSymbolTable())
dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
var series []record.RefSeries
var metadata []record.RefMetadata
r := NewReader(sr)

@ -494,7 +494,7 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error {
// Also used with readCheckpoint - implements segmentReadFn.
func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
var (
dec = record.NewDecoder(labels.NewSymbolTable()) // One table per WAL segment means it won't grow indefinitely.
dec = record.NewDecoder(labels.NewSymbolTable(), w.logger) // One table per WAL segment means it won't grow indefinitely.
series []record.RefSeries
samples []record.RefSample
samplesToSend []record.RefSample
@ -647,7 +647,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
// Used with readCheckpoint - implements segmentReadFn.
func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error {
var (
dec = record.NewDecoder(labels.NewSymbolTable()) // Needed for decoding; labels do not outlive this function.
dec = record.NewDecoder(labels.NewSymbolTable(), w.logger) // Needed for decoding; labels do not outlive this function.
series []record.RefSeries
)
for r.Next() && !isClosed(w.quit) {
@ -27,8 +27,8 @@ var _ slog.Handler = (*JSONFileLogger)(nil)

var _ io.Closer = (*JSONFileLogger)(nil)

// JSONFileLogger represents a logger that writes JSON to a file. It implements
// the slog.Handler interface, as well as the io.Closer interface.
// JSONFileLogger represents a logger that writes JSON to a file.
// It implements the promql.QueryLogger interface.
type JSONFileLogger struct {
handler slog.Handler
file *os.File
134
web/federate.go
@ -190,8 +190,13 @@ Loop:
isHistogram := s.H != nil
formatType := format.FormatType()
if isHistogram &&
formatType != expfmt.TypeProtoDelim && formatType != expfmt.TypeProtoText && formatType != expfmt.TypeProtoCompact {
// Can't serve the native histogram.
!s.H.UsesCustomBuckets() &&
formatType != expfmt.TypeProtoDelim &&
formatType != expfmt.TypeProtoText &&
formatType != expfmt.TypeProtoCompact {
// Can't serve a native histogram with a non-protobuf format.
// (We can serve an NHCB, though, as it is converted to a
// classic histogram for federation.)
// TODO(codesome): Serve them when other protocols get the native histogram support.
continue
}
@ -208,20 +213,30 @@ Loop:
}
if l.Name == labels.MetricName {
nameSeen = true
if l.Value == lastMetricName && // We already have the name in the current MetricDescriptor, and we ignore nameless metrics.
lastWasHistogram == isHistogram && // The sample type matches (float vs histogram).
// If it was a histogram, the histogram type (counter vs gauge) also matches.
(!isHistogram || lastHistogramWasGauge == (s.H.CounterResetHint == histogram.GaugeType)) {
// We already have the name in the current MetricDescriptor,
// and we ignore nameless metrics.
if l.Value == lastMetricName &&
// The sample type matches (float vs histogram).
lastWasHistogram == isHistogram &&
// If it was a histogram, the histogram type
// (counter vs gauge) also matches.
(!isHistogram ||
lastHistogramWasGauge == (s.H.CounterResetHint == histogram.GaugeType)) {
return nil
}

// Since we now check for the sample type and type of histogram above, we will end up
// creating multiple metric families for the same metric name. This would technically be
// an invalid exposition. But since the consumer of this is Prometheus, and Prometheus can
// parse it fine, we allow it and bend the rules to make federation possible in those cases.
// Since we now check for the sample type and
// type of histogram above, we will end up
// creating multiple metric families for the
// same metric name. This would technically be
// an invalid exposition. But since the consumer
// of this is Prometheus, and Prometheus can
// parse it fine, we allow it and bend the rules
// to make federation possible in those cases.

// Need to start a new MetricDescriptor. Ship off the old one (if any) before
// creating the new one.
// Need to start a new MetricDescriptor. Ship
// off the old one (if any) before creating the
// new one.
if protMetricFam != nil {
if err := enc.Encode(protMetricFam); err != nil {
return err
@ -278,32 +293,10 @@ Loop:
}
} else {
lastHistogramWasGauge = s.H.CounterResetHint == histogram.GaugeType
protMetric.Histogram = &dto.Histogram{
SampleCountFloat: proto.Float64(s.H.Count),
SampleSum: proto.Float64(s.H.Sum),
Schema: proto.Int32(s.H.Schema),
ZeroThreshold: proto.Float64(s.H.ZeroThreshold),
ZeroCountFloat: proto.Float64(s.H.ZeroCount),
NegativeCount: s.H.NegativeBuckets,
PositiveCount: s.H.PositiveBuckets,
}
if len(s.H.PositiveSpans) > 0 {
protMetric.Histogram.PositiveSpan = make([]*dto.BucketSpan, len(s.H.PositiveSpans))
for i, sp := range s.H.PositiveSpans {
protMetric.Histogram.PositiveSpan[i] = &dto.BucketSpan{
Offset: proto.Int32(sp.Offset),
Length: proto.Uint32(sp.Length),
}
}
}
if len(s.H.NegativeSpans) > 0 {
protMetric.Histogram.NegativeSpan = make([]*dto.BucketSpan, len(s.H.NegativeSpans))
for i, sp := range s.H.NegativeSpans {
protMetric.Histogram.NegativeSpan[i] = &dto.BucketSpan{
Offset: proto.Int32(sp.Offset),
Length: proto.Uint32(sp.Length),
}
}
if s.H.UsesCustomBuckets() {
protMetric.Histogram = makeClassicHistogram(s.H)
} else {
protMetric.Histogram = makeNativeHistogram(s.H)
}
}
lastWasHistogram = isHistogram
@ -317,3 +310,68 @@ Loop:
}
}
}

// makeNativeHistogram creates a dto.Histogram representing a native histogram.
// Use only for standard exponential schemas.
func makeNativeHistogram(h *histogram.FloatHistogram) *dto.Histogram {
result := &dto.Histogram{
SampleCountFloat: proto.Float64(h.Count),
SampleSum: proto.Float64(h.Sum),
Schema: proto.Int32(h.Schema),
ZeroThreshold: proto.Float64(h.ZeroThreshold),
ZeroCountFloat: proto.Float64(h.ZeroCount),
NegativeCount: h.NegativeBuckets,
PositiveCount: h.PositiveBuckets,
}
if len(h.PositiveSpans) > 0 {
result.PositiveSpan = make([]*dto.BucketSpan, len(h.PositiveSpans))
for i, sp := range h.PositiveSpans {
result.PositiveSpan[i] = &dto.BucketSpan{
Offset: proto.Int32(sp.Offset),
Length: proto.Uint32(sp.Length),
}
}
}
if len(h.NegativeSpans) > 0 {
result.NegativeSpan = make([]*dto.BucketSpan, len(h.NegativeSpans))
for i, sp := range h.NegativeSpans {
result.NegativeSpan[i] = &dto.BucketSpan{
Offset: proto.Int32(sp.Offset),
Length: proto.Uint32(sp.Length),
}
}
}
return result
}

// makeClassicHistogram creates a dto.Histogram representing a classic
// histogram. Use only for NHCB (schema -53).
func makeClassicHistogram(h *histogram.FloatHistogram) *dto.Histogram {
result := &dto.Histogram{
SampleCountFloat: proto.Float64(h.Count),
SampleSum: proto.Float64(h.Sum),
}
result.Bucket = make([]*dto.Bucket, len(h.CustomValues))
var (
cumulativeCount float64
bucketIter = h.PositiveBucketIterator()
bucketAvailable = bucketIter.Next()
)
for i, le := range h.CustomValues {
for bucketAvailable && int(bucketIter.At().Index) < i {
bucketAvailable = bucketIter.Next()
}
if bucketAvailable && int(bucketIter.At().Index) == i {
cumulativeCount += bucketIter.At().Count
}
result.Bucket[i] = &dto.Bucket{
UpperBound: proto.Float64(le),
CumulativeCountFloat: proto.Float64(cumulativeCount),
}
}
// Note that we do not add the +Inf bucket explicitly. In the protobuf
// exposition format, it is optional. For other exposition formats, the
// code converting the protobuf created here into the actual exposition
// payload will add the +Inf bucket.
return result
}
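To sanity-check the conversion in makeClassicHistogram, here is the arithmetic for the nhcb fixture used in the test below, as a standalone sketch (not part of the change): the delta-encoded PositiveBuckets {3, -1, -1} laid out by spans {0,2} and {2,1} decode to absolute counts 3, 2, and 1 at bucket indexes 0, 1, and 4.

package main

import "fmt"

func main() {
	// Absolute counts per CustomValues index for the nhcb fixture.
	counts := map[int]float64{0: 3, 1: 2, 4: 1}
	bounds := []float64{0.1, 0.2, 0.5, 1, 2}
	cum := 0.0
	for i, le := range bounds {
		cum += counts[i]
		fmt.Printf("le=%g cumulative=%g\n", le, cum)
	}
	// Prints cumulative counts 3, 5, 5, 5, 6, matching the expected
	// federation samples; the +Inf bucket (6) stays implicit, as noted
	// in makeClassicHistogram.
}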
@ -340,8 +340,19 @@ func TestFederationWithNativeHistograms(t *testing.T) {
},
NegativeBuckets: []int64{2, 2, -2, 0},
}
nhcb := &histogram.Histogram{
Count: 6,
Sum: 1.234,
Schema: -53,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
},
PositiveBuckets: []int64{3, -1, -1},
CustomValues: []float64{0.1, 0.2, 0.5, 1, 2},
}
app := db.Appender(context.Background())
for i := range 6 {
for i := range 7 {
l := labels.FromStrings("__name__", "test_metric", "foo", strconv.Itoa(i))
expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", strconv.Itoa(i))
var err error
@ -360,6 +371,56 @@ func TestFederationWithNativeHistograms(t *testing.T) {
H: histWithoutZeroBucket.ToFloat(nil),
Metric: expL,
})
case 6:
_, err = app.AppendHistogram(0, l, 100*60*1000, nhcb.Copy(), nil)
expL = labels.FromStrings("__name__", "test_metric_count", "instance", "", "foo", strconv.Itoa(i))
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 6,
Metric: expL,
})
expL = labels.FromStrings("__name__", "test_metric_sum", "instance", "", "foo", strconv.Itoa(i))
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 1.234,
Metric: expL,
})
expL = labels.FromStrings("__name__", "test_metric_bucket", "instance", "", "foo", strconv.Itoa(i), "le", "0.1")
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 3,
Metric: expL,
})
expL = labels.FromStrings("__name__", "test_metric_bucket", "instance", "", "foo", strconv.Itoa(i), "le", "0.2")
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 5,
Metric: expL,
})
expL = labels.FromStrings("__name__", "test_metric_bucket", "instance", "", "foo", strconv.Itoa(i), "le", "0.5")
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 5,
Metric: expL,
})
expL = labels.FromStrings("__name__", "test_metric_bucket", "instance", "", "foo", strconv.Itoa(i), "le", "1.0")
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 5,
Metric: expL,
})
expL = labels.FromStrings("__name__", "test_metric_bucket", "instance", "", "foo", strconv.Itoa(i), "le", "2.0")
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 6,
Metric: expL,
})
expL = labels.FromStrings("__name__", "test_metric_bucket", "instance", "", "foo", strconv.Itoa(i), "le", "+Inf")
expVec = append(expVec, promql.Sample{
T: 100 * 60 * 1000,
F: 6,
Metric: expL,
})
default:
hist.ZeroCount++
hist.Count++