Merge pull request #18195 from prometheus/st-main-sync2

Merge main to `feature/start-time`
This commit is contained in:
Bartlomiej Plotka 2026-02-25 18:56:20 +00:00 committed by GitHub
commit dfd6647002
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
58 changed files with 2492 additions and 1199 deletions

View File

@ -12,7 +12,7 @@ jobs:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0

View File

@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'prometheus'
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0

View File

@ -20,7 +20,7 @@ jobs:
# Don't run it on dependabot PRs either as humans would take control in case a bump introduces a breaking change.
if: (github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community') && github.event.pull_request.user.login != 'dependabot[bot]'
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- env:
PR_DESCRIPTION: ${{ github.event.pull_request.body }}
run: |

View File

@ -18,7 +18,7 @@ jobs:
# should also be updated.
image: quay.io/prometheus/golang-builder:1.25-base
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
@ -36,7 +36,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder:1.25-base
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
@ -61,7 +61,7 @@ jobs:
# The go version in this image should be N-1 wrt test_go.
image: quay.io/prometheus/golang-builder:1.25-base
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- run: make build
@ -80,7 +80,7 @@ jobs:
image: quay.io/prometheus/golang-builder:1.25-base
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
@ -99,7 +99,7 @@ jobs:
name: Go tests on Windows
runs-on: windows-latest
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
@ -118,7 +118,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder:1.25-base
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- run: go install ./cmd/promtool/.
@ -138,7 +138,7 @@ jobs:
# should also be updated.
image: quay.io/prometheus/golang-builder:1.25-base
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
@ -166,7 +166,7 @@ jobs:
matrix:
thread: [ 0, 1, 2 ]
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
@ -193,7 +193,7 @@ jobs:
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
@ -232,7 +232,7 @@ jobs:
image: quay.io/prometheus/golang-builder:1.25-base
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
@ -246,7 +246,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Install Go
@ -296,7 +296,7 @@ jobs:
needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
@ -315,7 +315,7 @@ jobs:
||
(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
@ -332,7 +332,7 @@ jobs:
needs: [test_ui, codeql]
steps:
- name: Checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4

View File

@ -24,7 +24,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false

View File

@ -18,7 +18,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Set docker hub repo name
@ -42,7 +42,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Set quay.io org name

View File

@ -13,7 +13,7 @@ jobs:
fuzz_test: [FuzzParseMetricText, FuzzParseOpenMetric, FuzzParseMetricSelector, FuzzParseExpr]
steps:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Install Go

View File

@ -14,7 +14,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- run: ./scripts/sync_repo_files.sh

View File

@ -21,7 +21,7 @@ jobs:
steps:
- name: "Checkout code"
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false

View File

@ -55,7 +55,7 @@ ifneq ($(shell command -v gotestsum 2> /dev/null),)
endif
endif
PROMU_VERSION ?= 0.17.0
PROMU_VERSION ?= 0.18.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT :=

View File

@ -719,6 +719,9 @@ func main() {
if cfgFile.StorageConfig.TSDBConfig.Retention.Size > 0 {
cfg.tsdb.MaxBytes = cfgFile.StorageConfig.TSDBConfig.Retention.Size
}
if cfgFile.StorageConfig.TSDBConfig.Retention.Percentage > 0 {
cfg.tsdb.MaxPercentage = cfgFile.StorageConfig.TSDBConfig.Retention.Percentage
}
}
}
@ -772,9 +775,9 @@ func main() {
cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/")
if !agentMode {
if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 {
if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 && cfg.tsdb.MaxPercentage == 0 {
cfg.tsdb.RetentionDuration = defaultRetentionDuration
logger.Info("No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration)
logger.Info("No time, size or percentage retention was set so using the default time retention", "duration", defaultRetentionDuration)
}
// Check for overflows. This limits our max retention to 100y.
@ -787,6 +790,20 @@ func main() {
logger.Warn("Time retention value is too high. Limiting to: " + y.String())
}
if cfg.tsdb.MaxPercentage > 100 {
cfg.tsdb.MaxPercentage = 100
logger.Warn("Percentage retention value is too high. Limiting to: 100%")
}
if cfg.tsdb.MaxPercentage > 0 {
if cfg.tsdb.MaxBytes > 0 {
logger.Warn("storage.tsdb.retention.size is ignored, because storage.tsdb.retention.percentage is specified")
}
if prom_runtime.FsSize(localStoragePath) == 0 {
fmt.Fprintln(os.Stderr, fmt.Errorf("unable to detect total capacity of metric storage at %s, please disable retention percentage (%d%%)", localStoragePath, cfg.tsdb.MaxPercentage))
os.Exit(2)
}
}
// Max block size settings.
if cfg.tsdb.MaxBlockDuration == 0 {
maxBlockDuration, err := model.ParseDuration("31d")
@ -960,6 +977,7 @@ func main() {
cfg.web.Context = ctxWeb
cfg.web.TSDBRetentionDuration = cfg.tsdb.RetentionDuration
cfg.web.TSDBMaxBytes = cfg.tsdb.MaxBytes
cfg.web.TSDBMaxPercentage = cfg.tsdb.MaxPercentage
cfg.web.TSDBDir = localStoragePath
cfg.web.LocalStorage = localStorage
cfg.web.Storage = fanoutStorage
@ -1379,7 +1397,7 @@ func main() {
return fmt.Errorf("opening storage failed: %w", err)
}
switch fsType := prom_runtime.Statfs(localStoragePath); fsType {
switch fsType := prom_runtime.FsType(localStoragePath); fsType {
case "NFS_SUPER_MAGIC":
logger.Warn("This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.", "fs_type", fsType)
default:
@ -1391,6 +1409,7 @@ func main() {
"MinBlockDuration", cfg.tsdb.MinBlockDuration,
"MaxBlockDuration", cfg.tsdb.MaxBlockDuration,
"MaxBytes", cfg.tsdb.MaxBytes,
"MaxPercentage", cfg.tsdb.MaxPercentage,
"NoLockfile", cfg.tsdb.NoLockfile,
"RetentionDuration", cfg.tsdb.RetentionDuration,
"WALSegmentSize", cfg.tsdb.WALSegmentSize,
@ -1440,7 +1459,7 @@ func main() {
return fmt.Errorf("opening storage failed: %w", err)
}
switch fsType := prom_runtime.Statfs(localStoragePath); fsType {
switch fsType := prom_runtime.FsType(localStoragePath); fsType {
case "NFS_SUPER_MAGIC":
logger.Warn(fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
default:
@ -1965,6 +1984,7 @@ type tsdbOptions struct {
MaxBlockChunkSegmentSize units.Base2Bytes
RetentionDuration model.Duration
MaxBytes units.Base2Bytes
MaxPercentage uint
NoLockfile bool
WALCompressionType compression.Type
HeadChunksWriteQueueSize int
@ -1993,6 +2013,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
MaxBlockChunkSegmentSize: int64(opts.MaxBlockChunkSegmentSize),
RetentionDuration: int64(time.Duration(opts.RetentionDuration) / time.Millisecond),
MaxBytes: int64(opts.MaxBytes),
MaxPercentage: opts.MaxPercentage,
NoLockfile: opts.NoLockfile,
WALCompression: opts.WALCompressionType,
HeadChunksWriteQueueSize: opts.HeadChunksWriteQueueSize,

View File

@ -145,6 +145,8 @@
"=~": true,
">": true,
">=": true,
">/": true,
"</": true,
"@": true,
"^": true,
"and": true,

View File

@ -1092,6 +1092,9 @@ type TSDBRetentionConfig struct {
// Maximum number of bytes that can be stored for blocks.
Size units.Base2Bytes `yaml:"size,omitempty"`
// Maximum percentage of disk used for TSDB storage.
Percentage uint `yaml:"percentage,omitempty"`
}
// TSDBConfig configures runtime reloadable configuration options.

View File

@ -1737,8 +1737,9 @@ var expectedConf = &Config{
OutOfOrderTimeWindowFlag: model.Duration(30 * time.Minute),
StaleSeriesCompactionThreshold: 0.5,
Retention: &TSDBRetentionConfig{
Time: model.Duration(24 * time.Hour),
Size: 1 * units.GiB,
Time: model.Duration(24 * time.Hour),
Size: 1 * units.GiB,
Percentage: 28,
},
},
},

View File

@ -457,6 +457,7 @@ storage:
retention:
time: 1d
size: 1GB
percentage: 28
tracing:
endpoint: "localhost:4317"

View File

@ -224,7 +224,12 @@ func (d *EC2Discovery) ec2Client(ctx context.Context) (ec2Client, error) {
cfg.Credentials = aws.NewCredentialsCache(assumeProvider)
}
d.ec2 = ec2.NewFromConfig(cfg)
d.ec2 = ec2.NewFromConfig(cfg, func(options *ec2.Options) {
if d.cfg.Endpoint != "" {
options.BaseEndpoint = &d.cfg.Endpoint
}
options.HTTPClient = httpClient
})
return d.ec2, nil
}
@ -234,8 +239,15 @@ func (d *EC2Discovery) refreshAZIDs(ctx context.Context) error {
if err != nil {
return err
}
if azs.AvailabilityZones == nil {
d.azToAZID = make(map[string]string)
return nil
}
d.azToAZID = make(map[string]string, len(azs.AvailabilityZones))
for _, az := range azs.AvailabilityZones {
if az.ZoneName == nil || az.ZoneId == nil {
continue
}
d.azToAZID[*az.ZoneName] = *az.ZoneId
}
return nil

View File

@ -188,7 +188,12 @@ func (d *LightsailDiscovery) lightsailClient(ctx context.Context) (*lightsail.Cl
cfg.Credentials = aws.NewCredentialsCache(assumeProvider)
}
d.lightsail = lightsail.NewFromConfig(cfg)
d.lightsail = lightsail.NewFromConfig(cfg, func(options *lightsail.Options) {
if d.cfg.Endpoint != "" {
options.BaseEndpoint = &d.cfg.Endpoint
}
options.HTTPClient = httpClient
})
return d.lightsail, nil
}

View File

@ -499,6 +499,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
WaitTime: watchTimeout,
AllowStale: srv.discovery.allowStale,
NodeMeta: srv.discovery.watchedNodeMeta,
Filter: srv.discovery.watchedFilter,
}
t0 := time.Now()

View File

@ -240,6 +240,8 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {
response = ServiceTestAnswer
case "/v1/health/service/test?wait=120000ms":
response = ServiceTestAnswer
case "/v1/health/service/test?filter=NodeMeta.rack_name+%3D%3D+%222304%22&wait=120000ms":
response = ServiceTestAnswer
case "/v1/health/service/other?wait=120000ms":
response = `[]`
case "/v1/catalog/services?node-meta=rack_name%3A2304&stale=&wait=120000ms":
@ -392,6 +394,54 @@ func TestFilterOption(t *testing.T) {
cancel()
}
// TestFilterOnHealthEndpoint verifies that filter is passed to health service endpoint.
func TestFilterOnHealthEndpoint(t *testing.T) {
filterReceived := false
stub := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
response := ""
switch r.URL.Path {
case "/v1/agent/self":
response = AgentAnswer
case "/v1/health/service/test":
// Verify filter parameter is present in the query
filter := r.URL.Query().Get("filter")
if filter == `Node.Meta.rack_name == "2304"` {
filterReceived = true
}
response = ServiceTestAnswer
default:
t.Errorf("Unhandled consul call: %s", r.URL)
}
w.Header().Add("X-Consul-Index", "1")
w.Write([]byte(response))
}))
defer stub.Close()
stuburl, err := url.Parse(stub.URL)
require.NoError(t, err)
config := &SDConfig{
Server: stuburl.Host,
Services: []string{"test"},
Filter: `Node.Meta.rack_name == "2304"`,
RefreshInterval: model.Duration(1 * time.Second),
}
d := newDiscovery(t, config)
ctx, cancel := context.WithCancel(context.Background())
ch := make(chan []*targetgroup.Group)
go func() {
d.Run(ctx, ch)
close(ch)
}()
checkOneTarget(t, <-ch)
cancel()
// Verify the filter was actually sent to the health endpoint
require.True(t, filterReceived, "Filter parameter should be sent to health service endpoint")
}
func TestGetDatacenterShouldReturnError(t *testing.T) {
for _, tc := range []struct {
handler func(http.ResponseWriter, *http.Request)

View File

@ -3285,6 +3285,10 @@ sigv4:
# AWS Role ARN, an alternative to using AWS API keys.
[ role_arn: <string> ]
# AWS External ID used when assuming a role.
# Can only be used with role_arn.
[ external_id: <string> ]
# Defines the FIPS mode for the AWS STS endpoint.
# Requires Prometheus >= 2.54.0
# Note: FIPS STS selection should be configured via use_fips_sts_endpoint rather than environment variables. (The problem report that motivated this: AWS_USE_FIPS_ENDPOINT no longer works.)
@ -3496,6 +3500,10 @@ sigv4:
# AWS Role ARN, an alternative to using AWS API keys.
[ role_arn: <string> ]
# AWS External ID used when assuming a role.
# Can only be used with role_arn.
[ external_id: <string> ]
# Defines the FIPS mode for the AWS STS endpoint.
# Requires Prometheus >= 2.54.0
# Note: FIPS STS selection should be configured via use_fips_sts_endpoint rather than environment variables. (The problem report that motivated this: AWS_USE_FIPS_ENDPOINT no longer works.)
@ -3688,6 +3696,14 @@ with this feature.
# This option takes precedence over the deprecated command-line flag --storage.tsdb.retention.size.
[ size: <size> | default = 0 ]
# Maximum percent of total disk space allowed for storage of blocks. Alternative to `size` and
# behaves the same as if size was calculated by hand as a percentage of the total storage capacity.
# Prometheus will fail to start if this config is enabled, but it fails to query the total storage capacity.
# The total disk space allowed will automatically adapt to volume resize.
# If set to 0 or not set, percentage-based retention is disabled.
#
# This is an experimental feature, this behaviour could change or be removed in the future.
[ percentage: <uint> | default = 0 ]
```
### `<exemplars>`

View File

@ -126,6 +126,25 @@ samples. Operations involving histogram samples result in the removal of the
corresponding vector elements from the output vector, flagged by an
info-level annotation.
### Histogram trim operators
The following binary histogram trim operators exist in Prometheus:
* `</` (trim upper): removes all observations above a threshold value
* `>/` (trim lower): removes all observations below a threshold value
Histogram trim operators are defined between vector/scalar and vector/vector value pairs,
where the left hand side is a native histogram (either exponential or NHCB),
and the right hand side is a float threshold value.
In case the threshold value is not aligned to one of the bucket boundaries of the histogram,
either linear (for NHCB and zero buckets of exponential histogram) or exponential (for non zero
bucket of exponential histogram) interpolation is applied to compute the estimated count
of observations that remain in the bucket containing the threshold.
In case when some observations get trimmed, the new sum of observation values is recomputed
(approximately) based on the remaining observations.
### Comparison binary operators
The following binary comparison operators exist in Prometheus:

4
go.mod
View File

@ -97,7 +97,7 @@ require (
golang.org/x/text v0.34.0
google.golang.org/api v0.267.0
google.golang.org/genproto/googleapis/api v0.0.0-20260217215200-42d3e9bedb6d
google.golang.org/grpc v1.78.0
google.golang.org/grpc v1.79.1
google.golang.org/protobuf v1.36.11
k8s.io/api v0.35.1
k8s.io/apimachinery v0.35.1
@ -151,7 +151,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e // indirect
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect

8
go.sum
View File

@ -115,8 +115,8 @@ github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObk
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e h1:gt7U1Igw0xbJdyaCM5H2CnlAlPSkzrhsebQB6WQWjLA=
github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI=
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w=
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
@ -776,8 +776,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20260217215200-42d3e9bedb6d h1:
google.golang.org/genproto/googleapis/api v0.0.0-20260217215200-42d3e9bedb6d/go.mod h1:48U2I+QQUYhsFrg2SY6r+nJzeOtjey7j//WBESw+qyQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY=
google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=

View File

@ -2110,3 +2110,337 @@ func (h *FloatHistogram) HasOverflow() bool {
}
return false
}
// TrimBuckets trims native histogram buckets.
func (h *FloatHistogram) TrimBuckets(rhs float64, isUpperTrim bool) *FloatHistogram {
var (
trimmedHist = h.Copy()
updatedCount, updatedSum float64
trimmedBuckets bool
isCustomBucket = trimmedHist.UsesCustomBuckets()
hasPositive, hasNegative bool
)
if isUpperTrim {
// Calculate the fraction to keep for buckets that contain the trim value.
// For TRIM_UPPER, we keep observations below the trim point (rhs).
// Example: histogram </ float.
for i, iter := 0, trimmedHist.PositiveBucketIterator(); iter.Next(); i++ {
bucket := iter.At()
if bucket.Count == 0 {
continue
}
hasPositive = true
switch {
case bucket.Upper <= rhs:
// Bucket is entirely below the trim point - keep all.
updatedCount += bucket.Count
bucketMidpoint := computeMidpoint(bucket.Lower, bucket.Upper, true, isCustomBucket)
updatedSum += bucketMidpoint * bucket.Count
case bucket.Lower < rhs:
// Bucket contains the trim point - interpolate.
keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, true, isCustomBucket)
updatedCount += keepCount
updatedSum += bucketMidpoint * keepCount
if trimmedHist.PositiveBuckets[i] != keepCount {
trimmedHist.PositiveBuckets[i] = keepCount
trimmedBuckets = true
}
default:
// Bucket is entirely above the trim point - discard.
trimmedHist.PositiveBuckets[i] = 0
trimmedBuckets = true
}
}
for i, iter := 0, trimmedHist.NegativeBucketIterator(); iter.Next(); i++ {
bucket := iter.At()
if bucket.Count == 0 {
continue
}
hasNegative = true
switch {
case bucket.Upper <= rhs:
// Bucket is entirely below the trim point - keep all.
updatedCount += bucket.Count
bucketMidpoint := computeMidpoint(bucket.Lower, bucket.Upper, false, isCustomBucket)
updatedSum += bucketMidpoint * bucket.Count
case bucket.Lower < rhs:
// Bucket contains the trim point - interpolate.
keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, false, isCustomBucket)
updatedCount += keepCount
updatedSum += bucketMidpoint * keepCount
if trimmedHist.NegativeBuckets[i] != keepCount {
trimmedHist.NegativeBuckets[i] = keepCount
trimmedBuckets = true
}
default:
trimmedHist.NegativeBuckets[i] = 0
trimmedBuckets = true
}
}
} else { // !isUpperTrim
// For TRIM_LOWER, we keep observations above the trim point (rhs).
// Example: histogram >/ float.
for i, iter := 0, trimmedHist.PositiveBucketIterator(); iter.Next(); i++ {
bucket := iter.At()
if bucket.Count == 0 {
continue
}
hasPositive = true
switch {
case bucket.Lower >= rhs:
// Bucket is entirely below the trim point - keep all.
updatedCount += bucket.Count
bucketMidpoint := computeMidpoint(bucket.Lower, bucket.Upper, true, isCustomBucket)
updatedSum += bucketMidpoint * bucket.Count
case bucket.Upper > rhs:
// Bucket contains the trim point - interpolate.
keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, true, isCustomBucket)
updatedCount += keepCount
updatedSum += bucketMidpoint * keepCount
if trimmedHist.PositiveBuckets[i] != keepCount {
trimmedHist.PositiveBuckets[i] = keepCount
trimmedBuckets = true
}
default:
trimmedHist.PositiveBuckets[i] = 0
trimmedBuckets = true
}
}
for i, iter := 0, trimmedHist.NegativeBucketIterator(); iter.Next(); i++ {
bucket := iter.At()
if bucket.Count == 0 {
continue
}
hasNegative = true
switch {
case bucket.Lower >= rhs:
// Bucket is entirely below the trim point - keep all.
updatedCount += bucket.Count
bucketMidpoint := computeMidpoint(bucket.Lower, bucket.Upper, false, isCustomBucket)
updatedSum += bucketMidpoint * bucket.Count
case bucket.Upper > rhs:
// Bucket contains the trim point - interpolate.
keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, false, isCustomBucket)
updatedCount += keepCount
updatedSum += bucketMidpoint * keepCount
if trimmedHist.NegativeBuckets[i] != keepCount {
trimmedHist.NegativeBuckets[i] = keepCount
trimmedBuckets = true
}
default:
trimmedHist.NegativeBuckets[i] = 0
trimmedBuckets = true
}
}
}
// Handle the zero count bucket.
if trimmedHist.ZeroCount > 0 {
keepCount, bucketMidpoint := computeZeroBucketTrim(trimmedHist.ZeroBucket(), rhs, hasNegative, hasPositive, isUpperTrim)
if trimmedHist.ZeroCount != keepCount {
trimmedHist.ZeroCount = keepCount
trimmedBuckets = true
}
updatedSum += bucketMidpoint * keepCount
updatedCount += keepCount
}
if trimmedBuckets {
// Only update the totals in case some bucket(s) were fully (or partially) trimmed.
trimmedHist.Count = updatedCount
trimmedHist.Sum = updatedSum
trimmedHist.Compact(0)
}
return trimmedHist
}
func handleInfinityBuckets(isUpperTrim bool, b Bucket[float64], rhs float64) (underCount, bucketMidpoint float64) {
zeroIfInf := func(x float64) float64 {
if math.IsInf(x, 0) {
return 0
}
return x
}
// Case 1: Bucket with lower bound -Inf.
if math.IsInf(b.Lower, -1) {
// TRIM_UPPER (</) - remove values greater than rhs
if isUpperTrim {
if rhs >= b.Upper {
// As the rhs is greater than the upper bound, we keep the entire current bucket.
return b.Count, 0
}
if rhs > 0 && b.Upper > 0 && !math.IsInf(b.Upper, 1) {
// If upper is finite and positive, we treat lower as 0 (despite it de facto being -Inf).
// This is only possible with NHCB, so we can always use linear interpolation.
return b.Count * rhs / b.Upper, rhs / 2
}
if b.Upper <= 0 {
return b.Count, rhs
}
// Otherwise, we are targeting a valid trim, but as we don't know the exact distribution of values that belongs to an infinite bucket, we need to remove the entire bucket.
return 0, zeroIfInf(b.Upper)
}
// TRIM_LOWER (>/) - remove values less than rhs
if rhs <= b.Lower {
// Impossible to happen because the lower bound is -Inf. Returning the entire current bucket.
return b.Count, 0
}
if rhs >= 0 && b.Upper > rhs && !math.IsInf(b.Upper, 1) {
// If upper is finite and positive, we treat lower as 0 (despite it de facto being -Inf).
// This is only possible with NHCB, so we can always use linear interpolation.
return b.Count * (1 - rhs/b.Upper), (rhs + b.Upper) / 2
}
// Otherwise, we are targeting a valid trim, but as we don't know the exact distribution of values that belongs to an infinite bucket, we need to remove the entire bucket.
return 0, zeroIfInf(b.Upper)
}
// Case 2: Bucket with upper bound +Inf.
if math.IsInf(b.Upper, 1) {
if isUpperTrim {
// TRIM_UPPER (</) - remove values greater than rhs.
// We don't care about lower here, because:
// when rhs >= lower and the bucket extends to +Inf, some values in this bucket could be > rhs, so we conservatively remove the entire bucket;
// when rhs < lower, all values in this bucket are >= lower > rhs, so all values should be removed.
return 0, zeroIfInf(b.Lower)
}
// TRIM_LOWER (>/) - remove values less than rhs.
if rhs >= b.Lower {
return b.Count, rhs
}
// lower < rhs: we are inside the infinity bucket, but as we don't know the exact distribution of values, we conservatively remove the entire bucket.
return 0, zeroIfInf(b.Lower)
}
panic(fmt.Errorf("one of the bounds must be infinite for handleInfinityBuckets, got %v", b))
}
// computeSplit calculates the portion of the bucket's count <= rhs (trim point).
func computeSplit(b Bucket[float64], rhs float64, isPositive, isLinear bool) float64 {
if rhs <= b.Lower {
return 0
}
if rhs >= b.Upper {
return b.Count
}
var fraction float64
switch {
case isLinear:
fraction = (rhs - b.Lower) / (b.Upper - b.Lower)
default:
// Exponential interpolation.
logLower := math.Log2(math.Abs(b.Lower))
logUpper := math.Log2(math.Abs(b.Upper))
logV := math.Log2(math.Abs(rhs))
if isPositive {
fraction = (logV - logLower) / (logUpper - logLower)
} else {
fraction = 1 - ((logV - logUpper) / (logLower - logUpper))
}
}
return b.Count * fraction
}
func computeZeroBucketTrim(zeroBucket Bucket[float64], rhs float64, hasNegative, hasPositive, isUpperTrim bool) (float64, float64) {
var (
lower = zeroBucket.Lower
upper = zeroBucket.Upper
)
if hasNegative && !hasPositive {
upper = 0
}
if hasPositive && !hasNegative {
lower = 0
}
var fraction, midpoint float64
if isUpperTrim {
if rhs <= lower {
return 0, 0
}
if rhs >= upper {
return zeroBucket.Count, (lower + upper) / 2
}
fraction = (rhs - lower) / (upper - lower)
midpoint = (lower + rhs) / 2
} else { // lower trim
if rhs <= lower {
return zeroBucket.Count, (lower + upper) / 2
}
if rhs >= upper {
return 0, 0
}
fraction = (upper - rhs) / (upper - lower)
midpoint = (rhs + upper) / 2
}
return zeroBucket.Count * fraction, midpoint
}
func computeBucketTrim(b Bucket[float64], rhs float64, isUpperTrim, isPositive, isCustomBucket bool) (float64, float64) {
if math.IsInf(b.Lower, -1) || math.IsInf(b.Upper, 1) {
return handleInfinityBuckets(isUpperTrim, b, rhs)
}
underCount := computeSplit(b, rhs, isPositive, isCustomBucket)
if isUpperTrim {
return underCount, computeMidpoint(b.Lower, rhs, isPositive, isCustomBucket)
}
return b.Count - underCount, computeMidpoint(rhs, b.Upper, isPositive, isCustomBucket)
}
func computeMidpoint(survivingIntervalLowerBound, survivingIntervalUpperBound float64, isPositive, isLinear bool) float64 {
if math.IsInf(survivingIntervalLowerBound, 0) {
if math.IsInf(survivingIntervalUpperBound, 0) {
return 0
}
if survivingIntervalUpperBound > 0 {
return survivingIntervalUpperBound / 2
}
return survivingIntervalUpperBound
} else if math.IsInf(survivingIntervalUpperBound, 0) {
return survivingIntervalLowerBound
}
if isLinear {
return (survivingIntervalLowerBound + survivingIntervalUpperBound) / 2
}
geoMean := math.Sqrt(math.Abs(survivingIntervalLowerBound * survivingIntervalUpperBound))
if isPositive {
return geoMean
}
return -geoMean
}

View File

@ -1223,10 +1223,12 @@ type EvalNodeHelper struct {
lblResultBuf []byte
// For binary vector matching.
rightSigs map[int]Sample
matchedSigs map[int]map[uint64]struct{}
resultMetric map[string]labels.Labels
numSigs int
rightSigs []Sample
sigsPresent []bool
matchedSigs []map[uint64]struct{}
matchedSigsPresent []bool
resultMetric map[string]labels.Labels
numSigs int
// For info series matching.
rightStrSigs map[string]Sample
@ -1235,6 +1237,44 @@ type EvalNodeHelper struct {
enableDelayedNameRemoval bool
}
// resetSigsPresent returns enh.sigsPresent with every entry cleared, sized to
// enh.numSigs, reallocating only when the cached slice does not fit.
func (enh *EvalNodeHelper) resetSigsPresent() []bool {
	// Compare against numSigs rather than just checking for an empty slice:
	// if numSigs changed since the last use, reusing the stale slice would
	// cause out-of-range indexing in callers.
	if len(enh.sigsPresent) != enh.numSigs {
		enh.sigsPresent = make([]bool, enh.numSigs)
	} else {
		clear(enh.sigsPresent)
	}
	return enh.sigsPresent
}
// resetMatchedSigsPresent returns enh.matchedSigsPresent with every entry
// cleared, sized to enh.numSigs, reallocating only when the cached slice does
// not fit.
func (enh *EvalNodeHelper) resetMatchedSigsPresent() []bool {
	// Compare against numSigs rather than just checking for an empty slice:
	// a stale size from a previous use would cause out-of-range indexing.
	if len(enh.matchedSigsPresent) != enh.numSigs {
		enh.matchedSigsPresent = make([]bool, enh.numSigs)
	} else {
		clear(enh.matchedSigsPresent)
	}
	return enh.matchedSigsPresent
}
// resetRightSigs returns enh.rightSigs with every entry zeroed, sized to
// enh.numSigs, reallocating only when the cached slice does not fit.
func (enh *EvalNodeHelper) resetRightSigs() []Sample {
	// Use a length comparison instead of a nil check: if numSigs changed since
	// the slice was allocated, reusing it would cause out-of-range indexing.
	if len(enh.rightSigs) != enh.numSigs {
		enh.rightSigs = make([]Sample, enh.numSigs)
	} else {
		clear(enh.rightSigs)
	}
	return enh.rightSigs
}
// resetMatchedSigs returns enh.matchedSigs sized to enh.numSigs with every
// inner set emptied. Existing inner maps are kept (emptied in place) so they
// can be reused without reallocation.
func (enh *EvalNodeHelper) resetMatchedSigs() []map[uint64]struct{} {
	// Use a length comparison instead of a nil check: if numSigs changed since
	// the slice was allocated, reusing it would cause out-of-range indexing.
	if len(enh.matchedSigs) != enh.numSigs {
		enh.matchedSigs = make([]map[uint64]struct{}, enh.numSigs)
	} else {
		// clear on a map empties it but keeps it non-nil, preserving the
		// caller's "nil means never matched" distinction only across slots
		// that were never populated.
		for i := range enh.matchedSigs {
			clear(enh.matchedSigs[i])
		}
	}
	return enh.matchedSigs
}
func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) {
if enh.lb == nil {
enh.lb = labels.NewBuilder(lbls)
@ -2835,7 +2875,7 @@ func (*evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, lh
}
// Ordinals of signatures present on the right-hand side.
rightSigOrdinalsPresent := make([]bool, enh.numSigs)
rightSigOrdinalsPresent := enh.resetSigsPresent()
for _, sh := range rhsh {
rightSigOrdinalsPresent[sh.sigOrdinal] = true
}
@ -2861,7 +2901,7 @@ func (*evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhs
return enh.Out
}
leftSigOrdinalsPresent := make([]bool, enh.numSigs)
leftSigOrdinalsPresent := enh.resetSigsPresent()
// Add everything from the left-hand-side Vector.
for i, ls := range lhs {
leftSigOrdinalsPresent[lhsh[i].sigOrdinal] = true
@ -2888,7 +2928,7 @@ func (*evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatching,
}
// Ordinals of signatures present on the right-hand side.
rightSigOrdinalsPresent := make([]bool, enh.numSigs)
rightSigOrdinalsPresent := enh.resetSigsPresent()
for _, sh := range rhsh {
rightSigOrdinalsPresent[sh.sigOrdinal] = true
}
@ -2920,19 +2960,16 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
}
// All samples from the rhs by their join signature ordinal.
if enh.rightSigs == nil {
enh.rightSigs = make(map[int]Sample, len(enh.Out))
} else {
clear(enh.rightSigs)
}
rightSigs := enh.rightSigs
rightSigs := enh.resetRightSigs()
rightSigsPresent := enh.resetSigsPresent()
// Add all rhs samples to a map so we can easily find matches later.
for i, rs := range rhs {
sigOrd := rhsh[i].sigOrdinal
// The rhs is guaranteed to be the 'one' side. Having multiple samples
// with the same signature means that the matching is many-to-many.
if duplSample, found := rightSigs[sigOrd]; found {
if rightSigsPresent[sigOrd] {
duplSample := rightSigs[sigOrd]
// oneSide represents which side of the vector represents the 'one' in the many-to-one relationship.
oneSide := "right"
if matching.Card == parser.CardOneToMany {
@ -2944,16 +2981,22 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
";many-to-many matching not allowed: matching labels must be unique on one side", matchedLabels.String(), oneSide, rs.Metric.String(), duplSample.Metric.String())
}
rightSigs[sigOrd] = rs
rightSigsPresent[sigOrd] = true
}
// Tracks the matching by signature ordinals. For one-to-one operations the value is nil.
// For many-to-one the value is a set of hashes to detect duplicated result elements.
if enh.matchedSigs == nil {
enh.matchedSigs = make(map[int]map[uint64]struct{}, len(rightSigs))
var (
// Tracks the match-signature for one-to-one operations.
matchedSigsPresent []bool
// Tracks the match-signature for many-to-one operations, the value is a set of signatures
// to detect duplicated result elements.
matchedSigs []map[uint64]struct{}
)
if matching.Card == parser.CardOneToOne {
matchedSigsPresent = enh.resetMatchedSigsPresent()
} else {
clear(enh.matchedSigs)
matchedSigs = enh.resetMatchedSigs()
}
matchedSigs := enh.matchedSigs
var lastErr error
@ -2982,26 +3025,26 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
}
}
metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh)
if !ev.enableDelayedNameRemoval && returnBool {
metric = metric.DropReserved(schema.IsMetadataLabel)
}
insertedSigs, exists := matchedSigs[sigOrd]
dropMetricName := !ev.enableDelayedNameRemoval && returnBool
metric := resultMetric(ls.Metric, rs.Metric, op, matching, dropMetricName, enh)
if matching.Card == parser.CardOneToOne {
if exists {
if matchedSigsPresent[sigOrd] {
ev.errorf("multiple matches for labels: many-to-one matching must be explicit (group_left/group_right)")
}
matchedSigs[sigOrd] = nil // Set existence to true.
matchedSigsPresent[sigOrd] = true
} else {
// In many-to-one matching the grouping labels have to ensure a unique metric
// for the result Vector. Check whether those labels have already been added for
// the same matching labels.
insertSig := metric.Hash()
if !exists {
insertedSigs = map[uint64]struct{}{}
matchedSigs[sigOrd] = insertedSigs
} else if _, duplicate := insertedSigs[insertSig]; duplicate {
if matchedSigs[sigOrd] == nil {
matchedSigs[sigOrd] = map[uint64]struct{}{}
}
insertedSigs := matchedSigs[sigOrd]
if _, duplicate := insertedSigs[insertSig]; duplicate {
ev.errorf("multiple matches for labels: grouping labels must ensure unique matches")
}
insertedSigs[insertSig] = struct{}{}
@ -3024,8 +3067,12 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
for i, ls := range lhs {
sigOrd := lhsh[i].sigOrdinal
rs, found := rightSigs[sigOrd] // Look for a match in the rhs Vector.
if !found {
var rs Sample
if rightSigsPresent[sigOrd] {
// Found a match in the rhs.
rs = rightSigs[sigOrd]
} else {
// Have to fall back to the fill value.
fill := matching.FillValues.RHS
if fill == nil {
continue
@ -3042,8 +3089,11 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
// For any rhs samples which have not been matched, check if we need to
// perform the operation with a fill value from the lhs.
if fill := matching.FillValues.LHS; fill != nil {
for sigOrd, rs := range rightSigs {
if _, matched := matchedSigs[sigOrd]; matched {
for i, rs := range rhs {
sigOrd := rhsh[i].sigOrdinal
if (matching.Card == parser.CardOneToOne && matchedSigsPresent[sigOrd]) ||
(matching.Card != parser.CardOneToOne && matchedSigs[sigOrd] != nil) {
continue // Already matched.
}
ls := Sample{
@ -3060,7 +3110,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
// resultMetric returns the metric for the given sample(s) based on the Vector
// binary operation and the matching options.
func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.VectorMatching, enh *EvalNodeHelper) labels.Labels {
func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.VectorMatching, dropMetricName bool, enh *EvalNodeHelper) labels.Labels {
if enh.resultMetric == nil {
enh.resultMetric = make(map[string]labels.Labels, len(enh.Out))
}
@ -3078,7 +3128,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V
str := string(enh.lblResultBuf)
enh.resetBuilder(lhs)
if changesMetricSchema(op) {
if dropMetricName || changesMetricSchema(op) {
// Setting empty Metadata causes the deletion of those if they exists.
schema.Metadata{}.SetToLabels(enh.lb)
}
@ -3216,6 +3266,8 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
return lhs, nil, lhs <= rhs, nil, nil
case parser.ATAN2:
return math.Atan2(lhs, rhs), nil, true, nil, nil
case parser.TRIM_LOWER, parser.TRIM_UPPER:
return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("float", parser.ItemTypeStr[op], "float", pos)
}
}
case hlhs == nil && hrhs != nil:
@ -3223,7 +3275,7 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
switch op {
case parser.MUL:
return 0, hrhs.Copy().Mul(lhs).Compact(0), true, nil, nil
case parser.ADD, parser.SUB, parser.DIV, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2:
case parser.ADD, parser.SUB, parser.DIV, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.TRIM_LOWER, parser.TRIM_UPPER, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2:
return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("float", parser.ItemTypeStr[op], "histogram", pos)
}
}
@ -3234,6 +3286,10 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
return 0, hlhs.Copy().Mul(rhs).Compact(0), true, nil, nil
case parser.DIV:
return 0, hlhs.Copy().Div(rhs).Compact(0), true, nil, nil
case parser.TRIM_UPPER:
return 0, hlhs.TrimBuckets(rhs, true), true, nil, nil
case parser.TRIM_LOWER:
return 0, hlhs.TrimBuckets(rhs, false), true, nil, nil
case parser.ADD, parser.SUB, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2:
return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("histogram", parser.ItemTypeStr[op], "float", pos)
}
@ -3274,7 +3330,7 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
case parser.NEQ:
// This operation expects that both histograms are compacted.
return 0, hlhs, !hlhs.Equals(hrhs), nil, nil
case parser.MUL, parser.DIV, parser.POW, parser.MOD, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2:
case parser.MUL, parser.DIV, parser.POW, parser.MOD, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2, parser.TRIM_LOWER, parser.TRIM_UPPER:
return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("histogram", parser.ItemTypeStr[op], "histogram", pos)
}
}

View File

@ -98,6 +98,8 @@ EQLC
EQL_REGEX
GTE
GTR
TRIM_UPPER
TRIM_LOWER
LAND
LOR
LSS
@ -200,7 +202,7 @@ START_METRIC_SELECTOR
// Operators are listed with increasing precedence.
%left LOR
%left LAND LUNLESS
%left EQLC GTE GTR LSS LTE NEQ
%left EQLC GTE GTR LSS LTE NEQ TRIM_UPPER TRIM_LOWER
%left ADD SUB
%left MUL DIV MOD ATAN2
%right POW
@ -291,6 +293,8 @@ binary_expr : expr ADD bin_modifier expr { $$ = yylex.(*parser).newBinar
| expr EQLC bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
| expr GTE bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
| expr GTR bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
| expr TRIM_UPPER bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
| expr TRIM_LOWER bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
| expr LAND bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
| expr LOR bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
| expr LSS bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }

File diff suppressed because it is too large Load Diff

View File

@ -189,21 +189,23 @@ var ItemTypeStr = map[ItemType]string{
TIMES: "x",
SPACE: "<space>",
SUB: "-",
ADD: "+",
MUL: "*",
MOD: "%",
DIV: "/",
EQLC: "==",
NEQ: "!=",
LTE: "<=",
LSS: "<",
GTE: ">=",
GTR: ">",
EQL_REGEX: "=~",
NEQ_REGEX: "!~",
POW: "^",
AT: "@",
SUB: "-",
ADD: "+",
MUL: "*",
MOD: "%",
DIV: "/",
EQLC: "==",
NEQ: "!=",
LTE: "<=",
LSS: "<",
GTE: ">=",
GTR: ">",
TRIM_UPPER: "</",
TRIM_LOWER: ">/",
EQL_REGEX: "=~",
NEQ_REGEX: "!~",
POW: "^",
AT: "@",
}
func init() {
@ -446,6 +448,9 @@ func lexStatements(l *Lexer) stateFn {
if t := l.peek(); t == '=' {
l.next()
l.emit(LTE)
} else if t := l.peek(); t == '/' {
l.next()
l.emit(TRIM_UPPER)
} else {
l.emit(LSS)
}
@ -453,6 +458,9 @@ func lexStatements(l *Lexer) stateFn {
if t := l.peek(); t == '=' {
l.next()
l.emit(GTE)
} else if t := l.peek(); t == '/' {
l.next()
l.emit(TRIM_LOWER)
} else {
l.emit(GTR)
}

View File

@ -2020,3 +2020,356 @@ eval instant at 1m irate(nhcb_add_bucket[2m]) * 60
expect no_warn
expect no_info
{} {{schema:-53 sum:56 count:15 custom_values:[2 3 4 6] buckets:[1 0 1 5 8] counter_reset_hint:gauge}}
# Test native histogram with trim operators ("</": TRIM_UPPER, ">/": TRIM_LOWER)
load 1m
h_test {{schema:0 sum:123.75 count:34 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1 2]}}
eval instant at 1m h_test >/ -Inf
h_test {{schema:0 sum:123.75 count:34 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1 2]}}
eval instant at 1m h_test </ +Inf
h_test {{schema:0 sum:123.75 count:34 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1 2]}}
eval instant at 1m h_test >/ +Inf
h_test {{schema:0 z_bucket_w:0.001}}
eval instant at 1m h_test </ -Inf
h_test {{schema:0 z_bucket_w:0.001}}
eval instant at 1m h_test >/ 0
h_test {{schema:0 sum:120.20840280171308 count:30.5 z_bucket:0.5 z_bucket_w:0.001 buckets:[2 4 8 16]}}
eval instant at 1m h_test </ 0
h_test {{schema:0 sum:-3.53578390593273768 count:3.5 z_bucket:0.5 z_bucket_w:0.001 n_buckets:[1 2]}}
# Exponential buckets: trim uses exponential interpolation if cutoff is inside a bucket
# Trim at sqrt(2) yields half the area between 1 and 2 boundaries.
eval instant at 1m h_test </ 1.4142135624
h_test {{count:8 sum:0.2570938865989847 z_bucket:1 z_bucket_w:0.001 buckets:[2 2] n_buckets:[1 2]}}
eval instant at 1m h_test >/ 1.4142135624
h_test {{count:26 sum:116.50067065070982 z_bucket_w:0.001 buckets:[0 2 8 16]}}
load 1m
h_test_2 {{schema:2 sum:12.8286080906 count:28 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 3 1]}}
eval instant at 1m h_test_2 </ 1.13
h_test_2 {{schema:2 count:13.410582181123704 sum:-9.385798726068233 z_bucket:1 z_bucket_w:0.001 buckets:[1 1.410582181123704] n_buckets:[1 5 3 1]}}
eval instant at 1m h_test_2 >/ 1.13
h_test_2 {{schema:2 count:14.589417818876296 sum:22.168126492693734 z_bucket_w:0.001 offset:1 buckets:[0.589417818876296 4 7 3]}}
eval instant at 1m h_test_2 >/ -1.3
h_test_2 {{schema:2 count:25.54213947904476 sum:16.29588491217537 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 1.54213947904476]}}
eval instant at 1m h_test_2 </ -1.3
h_test_2 {{schema:2 count:2.45786052095524 sum:-3.5189307983595066 z_bucket_w:0.001 n_offset:2 n_buckets:[1.45786052095524 1]}}
# Exponential buckets: trim on bucket boundary uses no interpolation.
eval instant at 1m h_test </ 2
h_test{} {{count:10 sum:3.5355339059327373 z_bucket:1 z_bucket_w:0.001 buckets:[2 4] n_buckets:[1 2]}}
eval instant at 1m h_test >/ 2
h_test{} {{count:24 sum:113.13708498984761 z_bucket_w:0.001 offset:2 buckets:[8 16]}}
eval instant at 1m h_test >/ -1
h_test{} {{count:32 sum:119.50104602052653 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1]}}
eval instant at 1m h_test </ -1
h_test{} {{count:2 sum:-2.8284271247461903 z_bucket_w:0.001 n_offset:1 n_buckets:[2]}}
# Exponential buckets: trim zero bucket that is positive-biased (because of the presence of positive buckets).
load 1m
h_positive_buckets {{schema:0 sum:8.0210678118654755 count:12 z_bucket:2 z_bucket_w:0.5 buckets:[10]}}
eval instant at 1m h_positive_buckets >/ 0.5
h_positive_buckets {{schema:0 count:10 sum:7.0710678118654755 z_bucket:0 z_bucket_w:0.5 buckets:[10]}}
eval instant at 1m h_positive_buckets >/ 0.1
h_positive_buckets {{schema:0 count:11.6 sum:7.551067811865476 z_bucket:1.6 z_bucket_w:0.5 buckets:[10]}}
eval instant at 1m h_positive_buckets >/ 0
h_positive_buckets {{schema:0 sum:8.0210678118654755 count:12 z_bucket:2 z_bucket_w:0.5 buckets:[10]}}
eval instant at 1m h_positive_buckets </ 0.5
h_positive_buckets {{schema:0 count:2 sum:0.5 z_bucket:2 z_bucket_w:0.5}}
eval instant at 1m h_positive_buckets </ 0.1
h_positive_buckets {{schema:0 count:0.4 sum:0.02 z_bucket:0.4 z_bucket_w:0.5}}
eval instant at 1m h_positive_buckets </ 0
h_positive_buckets {{schema:0 z_bucket_w:0.5}}
# Exponential buckets: trim zero bucket that is negative-biased (because of the presence of negative buckets).
load 1m
h_negative_buckets {{schema:0 sum:-8.0210678118654755 count:12 z_bucket:2 z_bucket_w:0.5 n_buckets:[10]}}
eval instant at 1m h_negative_buckets </ -0.5
h_negative_buckets {{schema:0 count:10 sum:-7.0710678118654755 z_bucket:0 z_bucket_w:0.5 n_buckets:[10]}}
eval instant at 1m h_negative_buckets </ -0.1
h_negative_buckets {{schema:0 count:11.6 sum:-7.551067811865476 z_bucket:1.6 z_bucket_w:0.5 n_buckets:[10]}}
eval instant at 1m h_negative_buckets </ 0
h_negative_buckets {{schema:0 sum:-8.0210678118654755 count:12 z_bucket:2 z_bucket_w:0.5 n_buckets:[10]}}
eval instant at 1m h_negative_buckets >/ -0.5
h_negative_buckets {{schema:0 count:2 sum:-0.5 z_bucket:2 z_bucket_w:0.5}}
eval instant at 1m h_negative_buckets >/ -0.1
h_negative_buckets {{schema:0 count:0.4 sum:-0.02 z_bucket:0.4 z_bucket_w:0.5}}
eval instant at 1m h_negative_buckets >/ 0
h_negative_buckets {{schema:0 z_bucket_w:0.5}}
# Exponential buckets: trim zero bucket when there are no other buckets.
load 1m
zero_bucket_only {{schema:0 count:5 sum:0 z_bucket:5 z_bucket_w:0.1 }}
eval instant at 1m zero_bucket_only >/ 0.1
zero_bucket_only {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.1 }}
eval instant at 1m zero_bucket_only </ 0.1
zero_bucket_only {{schema:0 count:5 sum:0 z_bucket:5 z_bucket_w:0.1 }}
eval instant at 1m zero_bucket_only >/ 0.05
zero_bucket_only {{schema:0 count:1.25 sum:0.09375 z_bucket:1.25 z_bucket_w:0.1 }}
eval instant at 1m zero_bucket_only </ 0.05
zero_bucket_only {{schema:0 count:3.75 sum:-0.09375 z_bucket:3.75 z_bucket_w:0.1 }}
eval instant at 1m zero_bucket_only >/ 0
zero_bucket_only {{schema:0 count:2.5 sum:0.125 z_bucket:2.5 z_bucket_w:0.1 }}
eval instant at 1m zero_bucket_only </ 0
zero_bucket_only {{schema:0 count:2.5 sum:-0.125 z_bucket:2.5 z_bucket_w:0.1 }}
eval instant at 1m zero_bucket_only >/ -0.05
zero_bucket_only {{schema:0 count:3.75 sum:0.09375 z_bucket:3.75 z_bucket_w:0.1 }}
eval instant at 1m zero_bucket_only </ -0.05
zero_bucket_only {{schema:0 count:1.25 sum:-0.09375 z_bucket:1.25 z_bucket_w:0.1 }}
eval instant at 1m zero_bucket_only </ -0.1
zero_bucket_only {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.1 }}
eval instant at 1m zero_bucket_only >/ -0.1
zero_bucket_only {{schema:0 count:5 sum:0 z_bucket:5 z_bucket_w:0.1 }}
load 1m
cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}}
# Custom buckets: trim on bucket boundary without interpolation
eval instant at 1m cbh </ 15
cbh{} {{schema:-53 count:11 sum:97.5 custom_values:[5 10 15 20] buckets:[1 6 4]}}
eval instant at 1m cbh >/ 15
cbh{} {{schema:-53 count:4 sum:72.5 custom_values:[5 10 15 20] offset:3 buckets:[3 1]}}
# Custom buckets: trim uses linear interpolation if cutoff is inside a bucket
eval instant at 1m cbh </ 13
cbh{} {{schema:-53 count:9.4 sum:75.1 custom_values:[5 10 15 20] buckets:[1 6 2.4]}}
eval instant at 1m cbh >/ 13
cbh{} {{schema:-53 count:5.6 sum:94.9 custom_values:[5 10 15 20] offset:2 buckets:[1.6 3 1]}}
eval instant at 1m cbh </ 7.5
cbh{} {{schema:-53 count:4 sum:21.25 custom_values:[5 10 15 20] buckets:[1 3]}}
# Custom buckets: trim drops +Inf bucket entirely even if cutoff is above its lower bound
eval instant at 1m cbh </ 50
cbh{} {{schema:-53 count:14 sum:150.0 custom_values:[5 10 15 20] buckets:[1 6 4 3]}}
eval instant at 1m cbh </ -Inf
cbh{} {{schema:-53 custom_values:[5 10 15 20]}}
eval instant at 1m cbh >/ +Inf
cbh{} {{schema:-53 custom_values:[5 10 15 20]}}
eval instant at 1m cbh </ +Inf
cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}}
eval instant at 1m cbh >/ -Inf
cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}}
# Noop
eval instant at 1m cbh >/ 0
cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}}
eval instant at 1m cbh </ 0
cbh {{schema:-53 custom_values:[5 10 15 20]}}
# Custom buckets: negative values
load 1m
cbh_has_neg {{schema:-53 sum:172.5 count:15 custom_values:[-10 5 10 15 20] buckets:[2 1 6 4 3 1]}}
eval instant at 1m cbh_has_neg </ 2
cbh_has_neg{} {{schema:-53 count:2.8 sum:-23.2 custom_values:[-10 5 10 15 20] buckets:[2 0.8]}}
eval instant at 1m cbh_has_neg </ -4
cbh_has_neg{} {{schema:-53 count:2.4 sum:-22.8 custom_values:[-10 5 10 15 20] buckets:[2 0.4]}}
eval instant at 1m cbh_has_neg </ -15
cbh_has_neg{} {{schema:-53 count:2 sum:-30 custom_values:[-10 5 10 15 20] buckets:[2]}}
load 1m
zero_bucket {{schema:0 sum:-6.75 z_bucket:5 z_bucket_w:0.01 buckets:[2 3] n_buckets:[1 2 3]}}
# Zero Bucket Edge Case: Interpolation Around Zero
eval instant at 1m zero_bucket </ -0.005
zero_bucket{} {{count:7.25 sum:-12.03019028017131 z_bucket:1.25 z_bucket_w:0.01 n_buckets:[1 2 3]}}
eval instant at 1m zero_bucket >/ 0
zero_bucket{} {{count:7.5 sum:5.669354249492381 z_bucket:2.5 z_bucket_w:0.01 buckets:[2 3]}}
load 1m
cbh_one_bucket {{schema:-53 sum:100.0 count:100 buckets:[100]}}
# Skip [-Inf; +Inf] bucket (100).
eval instant at 1m cbh_one_bucket </ 10.0
cbh_one_bucket {{schema:-53 sum:0.0 count:0 buckets:[0]}}
# Skip [-Inf; +Inf] bucket (100).
eval instant at 1m cbh_one_bucket >/ 10.0
cbh_one_bucket {{schema:-53 sum:0.0 count:0 buckets:[0]}}
# Keep [-Inf; +Inf] bucket (100).
eval instant at 1m cbh_one_bucket </ +Inf
cbh_one_bucket {{schema:-53 sum:100 count:100 buckets:[100]}}
# Skip [-Inf; +Inf] bucket (100).
eval instant at 1m cbh_one_bucket >/ +Inf
cbh_one_bucket {{schema:-53 sum:0 count:0 buckets:[0]}}
# Keep [-Inf; +Inf] bucket (100).
eval instant at 1m cbh_one_bucket >/ -Inf
cbh_one_bucket {{schema:-53 sum:100 count:100 buckets:[100]}}
# Skip [-Inf; +Inf] bucket (100).
eval instant at 1m cbh_one_bucket </ -Inf
cbh_one_bucket {{schema:-53 sum:0 count:0 buckets:[0]}}
load 1m
cbh_two_buckets_split_at_zero {{schema:-53 sum:33.0 count:100 custom_values:[0] buckets:[1 100]}}
# Skip (0; +Inf] bucket (100).
eval instant at 1m cbh_two_buckets_split_at_zero </ 10.0
cbh_two_buckets_split_at_zero {{schema:-53 sum:0.0 count:1 custom_values:[0] buckets:[1 0]}}
# Skip (0; +Inf] bucket (100).
eval instant at 1m cbh_two_buckets_split_at_zero </ 0.0
cbh_two_buckets_split_at_zero {{schema:-53 sum:0.0 count:1 custom_values:[0] buckets:[1 0]}}
# Skip both buckets (1, 100).
eval instant at 1m cbh_two_buckets_split_at_zero </ -10.0
cbh_two_buckets_split_at_zero {{schema:-53 sum:-10.0 count:1 custom_values:[0] buckets:[1 0]}}
# Skip [-Inf, 0] bucket (1).
eval instant at 1m cbh_two_buckets_split_at_zero >/ -10.0
cbh_two_buckets_split_at_zero {{schema:-53 sum:0.0 count:100 custom_values:[0] buckets:[0 100]}}
# Skip [-Inf, 0] bucket (1).
eval instant at 1m cbh_two_buckets_split_at_zero >/ 0.0
cbh_two_buckets_split_at_zero {{schema:-53 sum:0.0 count:100 custom_values:[0] buckets:[0 100]}}
# Skip first bucket.
eval instant at 1m cbh_two_buckets_split_at_zero >/ 10.0
cbh_two_buckets_split_at_zero {{schema:-53 sum:1000.0 count:100 custom_values:[0] buckets:[0 100]}}
load 1m
cbh_two_buckets_split_at_positive {{schema:-53 sum:33 count:101 custom_values:[5] buckets:[1 100]}}
# Skip (5, +Inf] bucket (100).
eval instant at 1m cbh_two_buckets_split_at_positive </ 10.0
cbh_two_buckets_split_at_positive {{schema:-53 sum:2.5 count:1 custom_values:[5] buckets:[1 0]}}
# Skip (5, +Inf] bucket (100) and 3/5 of [0, 5] bucket (0.6 * 3.5).
eval instant at 1m cbh_two_buckets_split_at_positive </ 2.0
cbh_two_buckets_split_at_positive {{schema:-53 sum:0.4 count:0.4 custom_values:[5] buckets:[0.4 0]}}
# Skip both buckets (1 and 100).
eval instant at 1m cbh_two_buckets_split_at_positive </ 0.0
cbh_two_buckets_split_at_positive {{schema:-53 custom_values:[5]}}
# Skip both buckets (1 and 100).
eval instant at 1m cbh_two_buckets_split_at_positive </ -10.0
cbh_two_buckets_split_at_positive {{schema:-53 sum:0.0 count:0 custom_values:[5] buckets:[0 0]}}
# Skip [0, 5] bucket (1).
eval instant at 1m cbh_two_buckets_split_at_positive >/ -10.0
cbh_two_buckets_split_at_positive {{schema:-53 sum:500.0 count:100 custom_values:[5] buckets:[0 100]}}
# Noop.
eval instant at 1m cbh_two_buckets_split_at_positive >/ 0.0
cbh_two_buckets_split_at_positive {{schema:-53 sum:33.0 count:101 custom_values:[5] buckets:[1 100]}}
# Keep (5, +Inf] bucket (100) and 3/5 of [0, 5] bucket (0.6 * 3.5).
eval instant at 1m cbh_two_buckets_split_at_positive >/ 2.0
cbh_two_buckets_split_at_positive {{schema:-53 sum:502.1 count:100.6 custom_values:[5] buckets:[0.6 100]}}
# Skip first bucket.
eval instant at 1m cbh_two_buckets_split_at_positive >/ 10.0
cbh_two_buckets_split_at_positive {{schema:-53 sum:1000.0 count:100 custom_values:[5] buckets:[0 100]}}
load 1m
cbh_two_buckets_split_at_negative {{schema:-53 sum:33 count:101 custom_values:[-5] buckets:[1 100]}}
# Skip (-5, +Inf] bucket (100).
eval instant at 1m cbh_two_buckets_split_at_negative </ 10.0
cbh_two_buckets_split_at_negative {{schema:-53 sum:-5.0 count:1 custom_values:[-5] buckets:[1 0]}}
# Skip (-5, +Inf] bucket (100).
eval instant at 1m cbh_two_buckets_split_at_negative </ 0.0
cbh_two_buckets_split_at_negative {{schema:-53 sum:-5 count:1 custom_values:[-5] buckets:[1 0]}}
# Skip (-5; +Inf] bucket (100).
eval instant at 1m cbh_two_buckets_split_at_negative </ -2.0
cbh_two_buckets_split_at_negative {{schema:-53 sum:-5.0 count:1 custom_values:[-5] buckets:[1 0]}}
# Skip (-5, +Inf] bucket (100).
eval instant at 1m cbh_two_buckets_split_at_negative </ -10.0
cbh_two_buckets_split_at_negative {{schema:-53 sum:-10.0 count:1 custom_values:[-5] buckets:[1 0]}}
# Skip [-Inf, -5] bucket (1).
eval instant at 1m cbh_two_buckets_split_at_negative >/ -10.0
cbh_two_buckets_split_at_negative {{schema:-53 sum:-500 count:100 custom_values:[-5] buckets:[0 100]}}
# Skip [-Inf, -5] bucket (1).
eval instant at 1m cbh_two_buckets_split_at_negative >/ -2.0
cbh_two_buckets_split_at_negative {{schema:-53 sum:-200 count:100 custom_values:[-5] buckets:[0 100]}}
# Skip [-Inf, -5] bucket (1).
eval instant at 1m cbh_two_buckets_split_at_negative >/ 0.0
cbh_two_buckets_split_at_negative {{schema:-53 sum:0.0 count:100 custom_values:[-5] buckets:[0 100]}}
# Skip [-Inf, -5] bucket (1).
eval instant at 1m cbh_two_buckets_split_at_negative >/ 10.0
cbh_two_buckets_split_at_negative {{schema:-53 sum:1000.0 count:100 custom_values:[-5] buckets:[0 100]}}
load 1m
cbh_for_join{label="a"} {{schema:-53 sum:33 count:101 custom_values:[5] buckets:[1 100]}}
cbh_for_join{label="b"} {{schema:-53 sum:66 count:202 custom_values:[5] buckets:[2 200]}}
cbh_for_join{label="c"} {{schema:-53 sum:99 count:303 custom_values:[5] buckets:[3 300]}}
float_for_join{label="a"} 1
float_for_join{label="b"} 4
eval instant at 1m cbh_for_join >/ on (label) float_for_join
{label="a"} {{schema:-53 count:100.8 sum:502.4 custom_values:[5] buckets:[0.8 100]}}
{label="b"} {{schema:-53 count:200.4 sum:1001.8 custom_values:[5] buckets:[0.4 200]}}
clear

View File

@ -4,17 +4,27 @@
"config:recommended"
],
"separateMultipleMajor": true,
"baseBranches": ["main"],
"baseBranchPatterns": [
"main"
],
"postUpdateOptions": [
"gomodTidy",
"gomodUpdateImportPaths"
],
"schedule": ["* * 21 * *"],
"schedule": [
"* * 21 * *"
],
"timezone": "UTC",
"github-actions": {
"managerFilePatterns": ["scripts/**"]
"managerFilePatterns": [
"scripts/**"
]
},
"prBodyNotes": ["```release-notes","NONE","```"],
"prBodyNotes": [
"```release-notes",
"NONE",
"```"
],
"prConcurrentLimit": 20,
"prHourlyLimit": 5,
"packageRules": [
@ -27,31 +37,49 @@
},
{
"description": "Don't update prometheus-io namespace packages",
"matchPackageNames": ["@prometheus-io/**"],
"matchPackageNames": [
"@prometheus-io/**"
],
"enabled": false
},
{
"description": "Group AWS Go dependencies",
"matchManagers": ["gomod"],
"matchPackageNames": ["github.com/aws/**"],
"matchManagers": [
"gomod"
],
"matchPackageNames": [
"github.com/aws/**"
],
"groupName": "AWS Go dependencies"
},
{
"description": "Group Azure Go dependencies",
"matchManagers": ["gomod"],
"matchPackageNames": ["github.com/Azure/**"],
"matchManagers": [
"gomod"
],
"matchPackageNames": [
"github.com/Azure/**"
],
"groupName": "Azure Go dependencies"
},
{
"description": "Group Kubernetes Go dependencies",
"matchManagers": ["gomod"],
"matchPackageNames": ["k8s.io/**"],
"matchManagers": [
"gomod"
],
"matchPackageNames": [
"k8s.io/**"
],
"groupName": "Kubernetes Go dependencies"
},
{
"description": "Group OpenTelemetry Go dependencies",
"matchManagers": ["gomod"],
"matchPackageNames": ["go.opentelemetry.io/**"],
"matchManagers": [
"gomod"
],
"matchPackageNames": [
"go.opentelemetry.io/**"
],
"groupName": "OpenTelemetry Go dependencies"
},
{
@ -60,7 +88,10 @@
"web/ui/mantine-ui/package.json"
],
"groupName": "Mantine UI",
"matchUpdateTypes": ["minor", "patch"],
"matchUpdateTypes": [
"minor",
"patch"
],
"enabled": true
},
{
@ -69,7 +100,10 @@
"web/ui/react-app/package.json"
],
"groupName": "React App",
"matchUpdateTypes": ["minor", "patch"],
"matchUpdateTypes": [
"minor",
"patch"
],
"enabled": true
},
{
@ -78,14 +112,19 @@
"web/ui/module/**/package.json"
],
"groupName": "Modules",
"matchUpdateTypes": ["minor", "patch"],
"matchUpdateTypes": [
"minor",
"patch"
],
"enabled": true
}
],
"branchPrefix": "deps-update/",
"vulnerabilityAlerts": {
"enabled": true,
"labels": ["security-update"]
"labels": [
"security-update"
]
},
"osvVulnerabilityAlerts": true,
"dependencyDashboardApproval": false

View File

@ -105,6 +105,7 @@ type scrapePool struct {
activeTargets map[uint64]*Target
droppedTargets []*Target // Subject to KeepDroppedTargets limit.
droppedTargetsCount int // Count of all dropped targets.
scrapeFailureLogger FailureLogger
// newLoop injection for testing purposes.
injectTestNewLoop func(scrapeLoopOptions) loop
@ -112,9 +113,6 @@ type scrapePool struct {
metrics *scrapeMetrics
buffers *pool.Pool
offsetSeed uint64
scrapeFailureLogger FailureLogger
scrapeFailureLoggerMtx sync.RWMutex
}
type labelLimits struct {
@ -224,26 +222,18 @@ func (sp *scrapePool) DroppedTargetsCount() int {
}
func (sp *scrapePool) SetScrapeFailureLogger(l FailureLogger) {
sp.scrapeFailureLoggerMtx.Lock()
defer sp.scrapeFailureLoggerMtx.Unlock()
sp.targetMtx.Lock()
defer sp.targetMtx.Unlock()
if l != nil {
l = slog.New(l).With("job_name", sp.config.JobName).Handler().(FailureLogger)
}
sp.scrapeFailureLogger = l
sp.targetMtx.Lock()
defer sp.targetMtx.Unlock()
for _, s := range sp.loops {
s.setScrapeFailureLogger(sp.scrapeFailureLogger)
}
}
func (sp *scrapePool) getScrapeFailureLogger() FailureLogger {
sp.scrapeFailureLoggerMtx.RLock()
defer sp.scrapeFailureLoggerMtx.RUnlock()
return sp.scrapeFailureLogger
}
// stop terminates all scrape loops and returns after they all terminated.
func (sp *scrapePool) stop() {
sp.mtx.Lock()
@ -323,6 +313,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
sp.targetMtx.Lock()
forcedErr := sp.refreshTargetLimitErr()
scrapeFailureLogger := sp.scrapeFailureLogger
for fp, oldLoop := range sp.loops {
var cache *scrapeCache
if oc := oldLoop.getCache(); reuseCache && oc != nil {
@ -364,7 +355,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
wg.Done()
newLoop.setForcedError(forcedErr)
newLoop.setScrapeFailureLogger(sp.getScrapeFailureLogger())
newLoop.setScrapeFailureLogger(scrapeFailureLogger)
newLoop.run(nil)
}(oldLoop, newLoop)

View File

@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"io"
"log/slog"
"maps"
"math"
"net/http"
@ -6804,3 +6805,54 @@ func testDropsSeriesFromMetricRelabeling(t *testing.T, appV2 bool) {
require.NoError(t, app.Commit())
}
// noopFailureLogger is a minimal FailureLogger implementation for testing.
type noopFailureLogger struct{}
func (noopFailureLogger) Enabled(context.Context, slog.Level) bool { return true }
func (noopFailureLogger) Handle(context.Context, slog.Record) error { return nil }
func (noopFailureLogger) WithAttrs([]slog.Attr) slog.Handler { return noopFailureLogger{} }
func (noopFailureLogger) WithGroup(string) slog.Handler { return noopFailureLogger{} }
func (noopFailureLogger) Close() error { return nil }
// TestScrapePoolSetScrapeFailureLoggerRace is a regression test for concurrent
// access to scrapeFailureLogger. Both must use targetMtx for synchronization.
func TestScrapePoolSetScrapeFailureLoggerRace(t *testing.T) {
var (
app = teststorage.NewAppendable()
cfg = &config.ScrapeConfig{
JobName: "test",
ScrapeInterval: model.Duration(100 * time.Millisecond),
ScrapeTimeout: model.Duration(50 * time.Millisecond),
MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
}
sp, err = newScrapePool(cfg, app, nil, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
)
require.NoError(t, err)
defer sp.stop()
// Create a target group with a target.
tg := &targetgroup.Group{
Targets: []model.LabelSet{
{model.AddressLabel: "127.0.0.1:9090"},
},
}
var wg sync.WaitGroup
wg.Go(func() {
for range 100 {
sp.SetScrapeFailureLogger(noopFailureLogger{})
sp.SetScrapeFailureLogger(nil)
}
})
wg.Go(func() {
for range 100 {
sp.Sync([]*targetgroup.Group{tg})
}
})
wg.Wait()
}

View File

@ -3,6 +3,7 @@
name: golangci-lint
on:
push:
branches: [main, master, 'release-*']
paths:
- "go.sum"
- "go.mod"
@ -10,6 +11,7 @@ on:
- "scripts/errcheck_excludes.txt"
- ".github/workflows/golangci-lint.yml"
- ".golangci.yml"
tags: ['v*']
pull_request:
permissions: # added using https://github.com/step-security/secure-repo
@ -24,13 +26,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Install Go
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: 1.25.x
go-version: 1.26.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'

View File

@ -27,7 +27,6 @@ import (
"time"
"github.com/gogo/protobuf/proto"
"github.com/google/go-cmp/cmp"
remoteapi "github.com/prometheus/client_golang/exp/api/remote"
"github.com/prometheus/client_golang/prometheus"
client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
@ -69,6 +68,7 @@ func newHighestTimestampMetric() *maxTimestamp {
func TestBasicContentNegotiation(t *testing.T) {
t.Parallel()
queueConfig := config.DefaultQueueConfig
queueConfig.BatchSendDeadline = model.Duration(100 * time.Millisecond)
queueConfig.MaxShards = 1
@ -139,20 +139,8 @@ func TestBasicContentNegotiation(t *testing.T) {
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
defer s.Close()
var (
series []record.RefSeries
metadata []record.RefMetadata
samples []record.RefSample
)
recs := generateRecords(recCase{series: 1, samplesPerSeries: 1})
// Generates same series in both cases.
samples, series = createTimeseries(1, 1)
metadata = createSeriesMetadata(series)
// Apply new config.
queueConfig.Capacity = len(samples)
queueConfig.MaxSamplesPerSend = len(samples)
// For now we only ever have a single rw config in this test.
conf.RemoteWriteConfigs[0].ProtobufMessage = tc.senderProtoMsg
require.NoError(t, s.ApplyConfig(conf))
hash, err := toHash(writeConfig)
@ -163,18 +151,18 @@ func TestBasicContentNegotiation(t *testing.T) {
c.injectErrors(tc.injectErrs)
qm.SetClient(c)
qm.StoreSeries(series, 0)
qm.StoreMetadata(metadata)
qm.StoreSeries(recs.series, 0)
qm.StoreMetadata(recs.metadata)
// Do we expect some data back?
if !tc.expectFail {
c.expectSamples(samples, series)
c.expectSamples(recs.samples, recs.series)
} else {
c.expectSamples(nil, nil)
}
// Schedule send.
qm.Append(samples)
qm.Append(recs.samples)
if !tc.expectFail {
// No error expected, so wait for data.
@ -200,8 +188,7 @@ func TestBasicContentNegotiation(t *testing.T) {
}
func TestSampleDelivery(t *testing.T) {
// Let's create an even number of send batches, so we don't run into the
// batch timeout case.
t.Parallel()
n := 3
queueConfig := config.DefaultQueueConfig
@ -220,100 +207,75 @@ func TestSampleDelivery(t *testing.T) {
writeConfig,
},
}
for _, tc := range []struct {
protoMsg remoteapi.WriteMessageType
name string
samples bool
exemplars bool
histograms bool
floatHistograms bool
}{
{protoMsg: remoteapi.WriteV1MessageType, samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"},
{protoMsg: remoteapi.WriteV1MessageType, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"},
{protoMsg: remoteapi.WriteV1MessageType, samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only"},
{protoMsg: remoteapi.WriteV1MessageType, samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"},
{protoMsg: remoteapi.WriteV1MessageType, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"},
for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} {
for _, rc := range []recCase{
{series: n, samplesPerSeries: n, histogramsPerSeries: 0, floatHistogramsPerSeries: 0, exemplarsPerSeries: 0, name: "samples only"},
{series: n, samplesPerSeries: 0, histogramsPerSeries: n, floatHistogramsPerSeries: 0, exemplarsPerSeries: 0, name: "histograms only"},
{series: n, samplesPerSeries: 0, histogramsPerSeries: 0, floatHistogramsPerSeries: n, exemplarsPerSeries: 0, name: "float histograms only"},
{series: n, samplesPerSeries: 0, histogramsPerSeries: 0, floatHistogramsPerSeries: 0, exemplarsPerSeries: n, name: "exemplars only"},
{series: n, samplesPerSeries: n, histogramsPerSeries: n, floatHistogramsPerSeries: n, exemplarsPerSeries: n, name: "all"},
} {
t.Run(fmt.Sprintf("proto=%s/case=%s", protoMsg, rc.name), func(t *testing.T) {
dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
defer s.Close()
{protoMsg: remoteapi.WriteV2MessageType, samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"},
{protoMsg: remoteapi.WriteV2MessageType, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"},
{protoMsg: remoteapi.WriteV2MessageType, samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only"},
{protoMsg: remoteapi.WriteV2MessageType, samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"},
{protoMsg: remoteapi.WriteV2MessageType, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"},
} {
t.Run(fmt.Sprintf("%s-%s", tc.protoMsg, tc.name), func(t *testing.T) {
dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
defer s.Close()
recs := generateRecords(rc)
var (
series []record.RefSeries
metadata []record.RefMetadata
samples []record.RefSample
exemplars []record.RefExemplar
histograms []record.RefHistogramSample
floatHistograms []record.RefFloatHistogramSample
)
var (
series = recs.series
metadata = recs.metadata
samples = recs.samples
exemplars = recs.exemplars
histograms = recs.histograms
floatHistograms = recs.floatHistograms
)
// Generates same series in both cases.
if tc.samples {
samples, series = createTimeseries(n, n)
}
if tc.exemplars {
exemplars, series = createExemplars(n, n)
}
if tc.histograms {
histograms, _, series = createHistograms(n, n, false)
}
if tc.floatHistograms {
_, floatHistograms, series = createHistograms(n, n, true)
}
metadata = createSeriesMetadata(series)
// Apply new config.
queueConfig.Capacity = n
queueConfig.MaxSamplesPerSend = n / 2
conf.RemoteWriteConfigs[0].ProtobufMessage = protoMsg
require.NoError(t, s.ApplyConfig(conf))
hash, err := toHash(writeConfig)
require.NoError(t, err)
qm := s.rws.queues[hash]
// Apply new config.
queueConfig.Capacity = len(samples)
queueConfig.MaxSamplesPerSend = len(samples) / 2
// For now we only ever have a single rw config in this test.
conf.RemoteWriteConfigs[0].ProtobufMessage = tc.protoMsg
require.NoError(t, s.ApplyConfig(conf))
hash, err := toHash(writeConfig)
require.NoError(t, err)
qm := s.rws.queues[hash]
c := NewTestWriteClient(protoMsg)
qm.SetClient(c)
c := NewTestWriteClient(tc.protoMsg)
qm.SetClient(c)
qm.StoreSeries(series, 0)
qm.StoreMetadata(metadata)
qm.StoreSeries(series, 0)
qm.StoreMetadata(metadata)
// Send first half of data.
c.expectSamples(samples[:len(samples)/2], series)
c.expectExemplars(exemplars[:len(exemplars)/2], series)
c.expectHistograms(histograms[:len(histograms)/2], series)
c.expectFloatHistograms(floatHistograms[:len(floatHistograms)/2], series)
if protoMsg == remoteapi.WriteV2MessageType && len(metadata) > 0 {
c.expectMetadataForBatch(metadata, series, samples[:len(samples)/2], exemplars[:len(exemplars)/2], histograms[:len(histograms)/2], floatHistograms[:len(floatHistograms)/2])
}
qm.Append(samples[:len(samples)/2])
qm.AppendExemplars(exemplars[:len(exemplars)/2])
qm.AppendHistograms(histograms[:len(histograms)/2])
qm.AppendFloatHistograms(floatHistograms[:len(floatHistograms)/2])
c.waitForExpectedData(t, 30*time.Second)
// Send first half of data.
c.expectSamples(samples[:len(samples)/2], series)
c.expectExemplars(exemplars[:len(exemplars)/2], series)
c.expectHistograms(histograms[:len(histograms)/2], series)
c.expectFloatHistograms(floatHistograms[:len(floatHistograms)/2], series)
if tc.protoMsg == remoteapi.WriteV2MessageType && len(metadata) > 0 {
c.expectMetadataForBatch(metadata, series, samples[:len(samples)/2], exemplars[:len(exemplars)/2], histograms[:len(histograms)/2], floatHistograms[:len(floatHistograms)/2])
}
qm.Append(samples[:len(samples)/2])
qm.AppendExemplars(exemplars[:len(exemplars)/2])
qm.AppendHistograms(histograms[:len(histograms)/2])
qm.AppendFloatHistograms(floatHistograms[:len(floatHistograms)/2])
c.waitForExpectedData(t, 30*time.Second)
// Send second half of data.
c.expectSamples(samples[len(samples)/2:], series)
c.expectExemplars(exemplars[len(exemplars)/2:], series)
c.expectHistograms(histograms[len(histograms)/2:], series)
c.expectFloatHistograms(floatHistograms[len(floatHistograms)/2:], series)
if tc.protoMsg == remoteapi.WriteV2MessageType && len(metadata) > 0 {
c.expectMetadataForBatch(metadata, series, samples[len(samples)/2:], exemplars[len(exemplars)/2:], histograms[len(histograms)/2:], floatHistograms[len(floatHistograms)/2:])
}
qm.Append(samples[len(samples)/2:])
qm.AppendExemplars(exemplars[len(exemplars)/2:])
qm.AppendHistograms(histograms[len(histograms)/2:])
qm.AppendFloatHistograms(floatHistograms[len(floatHistograms)/2:])
c.waitForExpectedData(t, 30*time.Second)
})
// Send second half of data.
c.expectSamples(samples[len(samples)/2:], series)
c.expectExemplars(exemplars[len(exemplars)/2:], series)
c.expectHistograms(histograms[len(histograms)/2:], series)
c.expectFloatHistograms(floatHistograms[len(floatHistograms)/2:], series)
if protoMsg == remoteapi.WriteV2MessageType && len(metadata) > 0 {
c.expectMetadataForBatch(metadata, series, samples[len(samples)/2:], exemplars[len(exemplars)/2:], histograms[len(histograms)/2:], floatHistograms[len(floatHistograms)/2:])
}
qm.Append(samples[len(samples)/2:])
qm.AppendExemplars(exemplars[len(exemplars)/2:])
qm.AppendHistograms(histograms[len(histograms)/2:])
qm.AppendFloatHistograms(floatHistograms[len(floatHistograms)/2:])
c.waitForExpectedData(t, 30*time.Second)
})
}
}
}
@ -386,50 +348,50 @@ func TestWALMetadataDelivery(t *testing.T) {
},
}
num := 3
_, series := createTimeseries(0, num)
metadata := createSeriesMetadata(series)
n := 3
recs := generateRecords(recCase{series: n, samplesPerSeries: n})
require.NoError(t, s.ApplyConfig(conf))
hash, err := toHash(writeConfig)
require.NoError(t, err)
qm := s.rws.queues[hash]
c := NewTestWriteClient(remoteapi.WriteV1MessageType)
c := NewTestWriteClient(remoteapi.WriteV2MessageType)
qm.SetClient(c)
qm.StoreSeries(series, 0)
qm.StoreMetadata(metadata)
qm.StoreSeries(recs.series, 0)
qm.StoreMetadata(recs.metadata)
require.Len(t, qm.seriesLabels, num)
require.Len(t, qm.seriesMetadata, num)
require.Len(t, qm.seriesLabels, n)
require.Len(t, qm.seriesMetadata, n)
c.expectSamples(recs.samples, recs.series)
c.expectMetadataForBatch(recs.metadata, recs.series, recs.samples, nil, nil, nil)
qm.Append(recs.samples)
c.waitForExpectedData(t, 30*time.Second)
}
func TestSampleDeliveryTimeout(t *testing.T) {
for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} {
t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
// Let's send one less sample than batch size, and wait the timeout duration
n := 9
samples, series := createTimeseries(n, n)
recs := generateRecords(recCase{series: 10, samplesPerSeries: 10})
cfg := testDefaultQueueConfig()
mcfg := config.DefaultMetadataConfig
cfg.MaxShards = 1
c := NewTestWriteClient(protoMsg)
m := newTestQueueManager(t, cfg, mcfg, defaultFlushDeadline, c, protoMsg)
m.StoreSeries(series, 0)
m.StoreSeries(recs.series, 0)
m.Start()
defer m.Stop()
// Send the samples twice, waiting for the samples in the meantime.
c.expectSamples(samples, series)
m.Append(samples)
c.expectSamples(recs.samples, recs.series)
m.Append(recs.samples)
c.waitForExpectedData(t, 30*time.Second)
c.expectSamples(samples, series)
m.Append(samples)
c.expectSamples(recs.samples, recs.series)
m.Append(recs.samples)
c.waitForExpectedData(t, 30*time.Second)
})
}
@ -441,29 +403,16 @@ func TestSampleDeliveryOrder(t *testing.T) {
t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
ts := 10
n := config.DefaultQueueConfig.MaxSamplesPerSend * ts
samples := make([]record.RefSample, 0, n)
series := make([]record.RefSeries, 0, n)
for i := range n {
name := fmt.Sprintf("test_metric_%d", i%ts)
samples = append(samples, record.RefSample{
Ref: chunks.HeadSeriesRef(i),
T: int64(i),
V: float64(i),
})
series = append(series, record.RefSeries{
Ref: chunks.HeadSeriesRef(i),
Labels: labels.FromStrings("__name__", name),
})
}
recs := generateRecords(recCase{series: n, samplesPerSeries: 1})
c, m := newTestClientAndQueueManager(t, defaultFlushDeadline, protoMsg)
c.expectSamples(samples, series)
m.StoreSeries(series, 0)
c.expectSamples(recs.samples, recs.series)
m.StoreSeries(recs.series, 0)
m.Start()
defer m.Stop()
// These should be received by the client.
m.Append(samples)
m.Append(recs.samples)
c.waitForExpectedData(t, 30*time.Second)
})
}
@ -481,14 +430,15 @@ func TestShutdown(t *testing.T) {
mcfg := config.DefaultMetadataConfig
m := newTestQueueManager(t, cfg, mcfg, deadline, c, protoMsg)
// Send 2x batch size, so we know it will need at least two sends.
n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend
samples, series := createTimeseries(n, n)
m.StoreSeries(series, 0)
recs := generateRecords(recCase{series: n / 1000, samplesPerSeries: 1000})
m.StoreSeries(recs.series, 0)
m.Start()
// Append blocks to guarantee delivery, so we do it in the background.
go func() {
m.Append(samples)
m.Append(recs.samples)
}()
synctest.Wait()
@ -545,33 +495,35 @@ func TestSeriesReset(t *testing.T) {
func TestReshard(t *testing.T) {
t.Parallel()
for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} {
t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
size := 10 // Make bigger to find more races.
nSeries := 6
nSamples := config.DefaultQueueConfig.Capacity * size
samples, series := createTimeseries(nSamples, nSeries)
samplesPerSeries := config.DefaultQueueConfig.Capacity * size
recs := generateRecords(recCase{series: nSeries, samplesPerSeries: samplesPerSeries})
t.Logf("about to send %v samples", len(recs.samples))
cfg := config.DefaultQueueConfig
cfg.MaxShards = 1
c := NewTestWriteClient(protoMsg)
m := newTestQueueManager(t, cfg, config.DefaultMetadataConfig, defaultFlushDeadline, c, protoMsg)
c.expectSamples(samples, series)
m.StoreSeries(series, 0)
c.expectSamples(recs.samples, recs.series)
m.StoreSeries(recs.series, 0)
m.Start()
defer m.Stop()
go func() {
for i := 0; i < len(samples); i += config.DefaultQueueConfig.Capacity {
sent := m.Append(samples[i : i+config.DefaultQueueConfig.Capacity])
for i := 0; i < len(recs.samples); i += config.DefaultQueueConfig.Capacity {
sent := m.Append(recs.samples[i : i+config.DefaultQueueConfig.Capacity])
require.True(t, sent, "samples not sent")
time.Sleep(100 * time.Millisecond)
}
}()
for i := 1; i < len(samples)/config.DefaultQueueConfig.Capacity; i++ {
for i := 1; i < len(recs.samples)/config.DefaultQueueConfig.Capacity; i++ {
m.shards.stop()
m.shards.start(i)
time.Sleep(100 * time.Millisecond)
@ -625,7 +577,7 @@ func TestReshardPartialBatch(t *testing.T) {
t.Parallel()
for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} {
t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
samples, series := createTimeseries(1, 10)
recs := generateRecords(recCase{series: 1, samplesPerSeries: 10})
c := NewTestBlockedWriteClient()
@ -637,14 +589,14 @@ func TestReshardPartialBatch(t *testing.T) {
cfg.BatchSendDeadline = model.Duration(batchSendDeadline)
m := newTestQueueManager(t, cfg, mcfg, flushDeadline, c, protoMsg)
m.StoreSeries(series, 0)
m.StoreSeries(recs.series, 0)
m.Start()
for range 100 {
done := make(chan struct{})
go func() {
m.Append(samples)
m.Append(recs.samples)
time.Sleep(batchSendDeadline)
m.shards.stop()
m.shards.start(1)
@ -670,7 +622,7 @@ func TestReshardPartialBatch(t *testing.T) {
func TestQueueFilledDeadlock(t *testing.T) {
for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} {
t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
samples, series := createTimeseries(50, 1)
recs := generateRecords(recCase{series: 50, samplesPerSeries: 1})
c := NewNopWriteClient()
@ -684,7 +636,7 @@ func TestQueueFilledDeadlock(t *testing.T) {
cfg.BatchSendDeadline = model.Duration(batchSendDeadline)
m := newTestQueueManager(t, cfg, mcfg, flushDeadline, c, protoMsg)
m.StoreSeries(series, 0)
m.StoreSeries(recs.series, 0)
m.Start()
defer m.Stop()
@ -692,7 +644,7 @@ func TestQueueFilledDeadlock(t *testing.T) {
done := make(chan struct{})
go func() {
time.Sleep(batchSendDeadline)
m.Append(samples)
m.Append(recs.samples)
done <- struct{}{}
}()
select {
@ -782,7 +734,7 @@ func TestDisableReshardOnRetry(t *testing.T) {
defer onStoreCalled()
var (
fakeSamples, fakeSeries = createTimeseries(100, 100)
recs = generateRecords(recCase{series: 100, samplesPerSeries: 100})
cfg = config.DefaultQueueConfig
mcfg = config.DefaultMetadataConfig
@ -805,14 +757,14 @@ func TestDisableReshardOnRetry(t *testing.T) {
)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, 0, newPool(), newHighestTimestampMetric(), nil, false, false, false, remoteapi.WriteV1MessageType)
m.StoreSeries(fakeSeries, 0)
m.StoreSeries(recs.series, 0)
// Attempt to samples while the manager is running. We immediately stop the
// manager after the recoverable error is generated to prevent the manager
// from resharding itself.
m.Start()
{
m.Append(fakeSamples)
m.Append(recs.samples)
select {
case <-onStoredContext.Done():
@ -838,35 +790,132 @@ func TestDisableReshardOnRetry(t *testing.T) {
}, time.Minute, retryAfter, "shouldReshard should have been re-enabled")
}
func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([]record.RefSample, []record.RefSeries) {
samples := make([]record.RefSample, 0, numSamples)
series := make([]record.RefSeries, 0, numSeries)
lb := labels.NewScratchBuilder(1 + len(extraLabels))
for i := range numSeries {
name := fmt.Sprintf("test_metric_%d", i)
for j := range numSamples {
samples = append(samples, record.RefSample{
Ref: chunks.HeadSeriesRef(i),
T: int64(j),
V: float64(i),
})
}
// Create Labels that is name of series plus any extra labels supplied.
lb.Reset()
lb.Add(labels.MetricName, name)
rand.Shuffle(len(extraLabels), func(i, j int) {
extraLabels[i], extraLabels[j] = extraLabels[j], extraLabels[i]
})
for _, l := range extraLabels {
lb.Add(l.Name, l.Value)
}
lb.Sort()
series = append(series, record.RefSeries{
Ref: chunks.HeadSeriesRef(i),
Labels: lb.Labels(),
})
type recCase struct {
name string
series int
samplesPerSeries int
histogramsPerSeries int
floatHistogramsPerSeries int
exemplarsPerSeries int
extraLabels []labels.Label
labelsFn func(lb *labels.ScratchBuilder, i int) labels.Labels
tsFn func(i, j int) int64
}
type records struct {
series []record.RefSeries
samples []record.RefSample
histograms []record.RefHistogramSample
floatHistograms []record.RefFloatHistogramSample
exemplars []record.RefExemplar
metadata []record.RefMetadata
}
func newTestHist(i int) *histogram.Histogram {
return &histogram.Histogram{
Schema: 2,
ZeroThreshold: 1e-128,
ZeroCount: 0,
Count: 2,
Sum: 0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{int64(i) + 1},
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
NegativeBuckets: []int64{int64(-i) - 1},
}
}
func generateRecords(c recCase) (ret records) {
ret.series = make([]record.RefSeries, c.series)
ret.metadata = make([]record.RefMetadata, c.series)
ret.samples = make([]record.RefSample, c.series*c.samplesPerSeries)
ret.histograms = make([]record.RefHistogramSample, c.series*c.histogramsPerSeries)
ret.floatHistograms = make([]record.RefFloatHistogramSample, c.series*c.floatHistogramsPerSeries)
ret.exemplars = make([]record.RefExemplar, c.series*c.exemplarsPerSeries)
if c.labelsFn == nil {
c.labelsFn = func(lb *labels.ScratchBuilder, i int) labels.Labels {
// Create series with labels that contains name of series plus any extra labels supplied.
name := fmt.Sprintf("test_metric_%d", i)
lb.Reset()
lb.Add(model.MetricNameLabel, name)
for _, l := range c.extraLabels {
lb.Add(l.Name, l.Value)
}
lb.Sort()
return lb.Labels()
}
}
if c.tsFn == nil {
c.tsFn = func(_, j int) int64 { return int64(j) }
}
lb := labels.NewScratchBuilder(1 + len(c.extraLabels))
for i := range ret.series {
ret.series[i] = record.RefSeries{
Ref: chunks.HeadSeriesRef(i),
Labels: c.labelsFn(&lb, i),
}
ret.metadata[i] = record.RefMetadata{
Ref: chunks.HeadSeriesRef(i),
Type: uint8(record.Counter),
Unit: "unit text",
Help: "help text",
}
for j := range c.samplesPerSeries {
ret.samples[i*c.samplesPerSeries+j] = record.RefSample{
Ref: chunks.HeadSeriesRef(i),
T: c.tsFn(i, j),
V: float64(i),
}
}
h := newTestHist(i)
for j := range c.histogramsPerSeries {
ret.histograms[i*c.histogramsPerSeries+j] = record.RefHistogramSample{
Ref: chunks.HeadSeriesRef(i),
T: c.tsFn(i, j),
H: h,
}
}
for j := range c.floatHistogramsPerSeries {
ret.floatHistograms[i*c.floatHistogramsPerSeries+j] = record.RefFloatHistogramSample{
Ref: chunks.HeadSeriesRef(i),
T: c.tsFn(i, j),
FH: h.ToFloat(nil),
}
}
for j := range c.exemplarsPerSeries {
ret.exemplars[i*c.exemplarsPerSeries+j] = record.RefExemplar{
Ref: chunks.HeadSeriesRef(i),
T: c.tsFn(i, j),
V: float64(i),
Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i)),
}
}
}
return ret
}
// BenchmarkGenerateRecords checks data generator performance.
// Recommended CLI:
/*
export bench=genRecs && go test ./storage/remote/... \
-run '^$' -bench '^BenchmarkGenerateRecords' \
-benchtime 1s -count 6 -cpu 2 -timeout 999m -benchmem \
| tee ${bench}.txt
*/
func BenchmarkGenerateRecords(b *testing.B) {
n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend
b.ReportAllocs()
b.ResetTimer()
for b.Loop() {
// This will generate 16M samples and 4k series.
generateRecords(recCase{series: n, samplesPerSeries: n})
}
return samples, series
}
func createProtoTimeseriesWithOld(numSamples, baseTs int64) []prompb.TimeSeries {
@ -893,88 +942,6 @@ func createProtoTimeseriesWithOld(numSamples, baseTs int64) []prompb.TimeSeries
return samples
}
func createExemplars(numExemplars, numSeries int) ([]record.RefExemplar, []record.RefSeries) {
exemplars := make([]record.RefExemplar, 0, numExemplars)
series := make([]record.RefSeries, 0, numSeries)
for i := range numSeries {
name := fmt.Sprintf("test_metric_%d", i)
for j := range numExemplars {
e := record.RefExemplar{
Ref: chunks.HeadSeriesRef(i),
T: int64(j),
V: float64(i),
Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i)),
}
exemplars = append(exemplars, e)
}
series = append(series, record.RefSeries{
Ref: chunks.HeadSeriesRef(i),
Labels: labels.FromStrings("__name__", name),
})
}
return exemplars, series
}
func createHistograms(numSamples, numSeries int, floatHistogram bool) ([]record.RefHistogramSample, []record.RefFloatHistogramSample, []record.RefSeries) {
histograms := make([]record.RefHistogramSample, 0, numSamples)
floatHistograms := make([]record.RefFloatHistogramSample, 0, numSamples)
series := make([]record.RefSeries, 0, numSeries)
for i := range numSeries {
name := fmt.Sprintf("test_metric_%d", i)
for j := range numSamples {
hist := &histogram.Histogram{
Schema: 2,
ZeroThreshold: 1e-128,
ZeroCount: 0,
Count: 2,
Sum: 0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{int64(i) + 1},
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
NegativeBuckets: []int64{int64(-i) - 1},
}
if floatHistogram {
fh := record.RefFloatHistogramSample{
Ref: chunks.HeadSeriesRef(i),
T: int64(j),
FH: hist.ToFloat(nil),
}
floatHistograms = append(floatHistograms, fh)
} else {
h := record.RefHistogramSample{
Ref: chunks.HeadSeriesRef(i),
T: int64(j),
H: hist,
}
histograms = append(histograms, h)
}
}
series = append(series, record.RefSeries{
Ref: chunks.HeadSeriesRef(i),
Labels: labels.FromStrings("__name__", name),
})
}
if floatHistogram {
return nil, floatHistograms, series
}
return histograms, nil, series
}
func createSeriesMetadata(series []record.RefSeries) []record.RefMetadata {
metas := make([]record.RefMetadata, 0, len(series))
for _, s := range series {
metas = append(metas, record.RefMetadata{
Ref: s.Ref,
Type: uint8(record.Counter),
Unit: "unit text",
Help: "help text",
})
}
return metas
}
func getSeriesIDFromRef(r record.RefSeries) string {
return r.Labels.String()
}
@ -1417,7 +1384,7 @@ func BenchmarkSampleSend(b *testing.B) {
const numSamples = 1
const numSeries = 10000
samples, series := createTimeseries(numSamples, numSeries, extraLabels...)
recs := generateRecords(recCase{series: numSeries, samplesPerSeries: numSamples, extraLabels: extraLabels})
c := NewNopWriteClient()
@ -1431,7 +1398,7 @@ func BenchmarkSampleSend(b *testing.B) {
for _, format := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} {
b.Run(string(format), func(b *testing.B) {
m := newTestQueueManager(b, cfg, mcfg, defaultFlushDeadline, c, format)
m.StoreSeries(series, 0)
m.StoreSeries(recs.series, 0)
// These should be received by the client.
m.Start()
@ -1439,8 +1406,8 @@ func BenchmarkSampleSend(b *testing.B) {
b.ResetTimer()
for i := 0; b.Loop(); i++ {
m.Append(samples)
m.UpdateSeriesSegment(series, i+1) // simulate what wlog.Watcher.garbageCollectSeries does
m.Append(recs.samples)
m.UpdateSeriesSegment(recs.series, i+1) // simulate what wlog.Watcher.garbageCollectSeries does
m.SeriesReset(i + 1)
}
// Do not include shutdown
@ -1482,7 +1449,7 @@ func BenchmarkStoreSeries(b *testing.B) {
// numSeries chosen to be big enough that StoreSeries dominates creating a new queue manager.
const numSeries = 1000
_, series := createTimeseries(0, numSeries, extraLabels...)
recs := generateRecords(recCase{series: numSeries, samplesPerSeries: 0, extraLabels: extraLabels})
for _, tc := range testCases {
b.Run(tc.name, func(b *testing.B) {
@ -1497,7 +1464,7 @@ func BenchmarkStoreSeries(b *testing.B) {
m.externalLabels = tc.externalLabels
m.relabelConfigs = tc.relabelConfigs
m.StoreSeries(series, 0)
m.StoreSeries(recs.series, 0)
}
})
}
@ -2007,7 +1974,25 @@ func TestDropOldTimeSeries(t *testing.T) {
size := 10
nSeries := 6
nSamples := config.DefaultQueueConfig.Capacity * size
samples, newSamples, series := createTimeseriesWithOldSamples(nSamples, nSeries)
pastRecs := generateRecords(recCase{
series: nSeries,
samplesPerSeries: (nSamples / nSeries) / 2, // Half data is past.
tsFn: func(_, j int) int64 {
past := timestamp.FromTime(time.Now().Add(-5 * time.Minute))
return past + int64(j)
},
})
newRecs := generateRecords(recCase{
series: nSeries,
samplesPerSeries: (nSamples / nSeries) / 2, // Half data is past.
tsFn: func(_, j int) int64 {
return time.Now().UnixMilli() + int64(j)
},
})
series := pastRecs.series // Series is the same for both old and new.
newSamples := newRecs.samples
samples := append(pastRecs.samples, newRecs.samples...)
c := NewTestWriteClient(protoMsg)
c.expectSamples(newSamples, series)
@ -2036,9 +2021,14 @@ func TestIsSampleOld(t *testing.T) {
// Simulates scenario in which remote write endpoint is down and a subset of samples is dropped due to age limit while backoffing.
func TestSendSamplesWithBackoffWithSampleAgeLimit(t *testing.T) {
const (
maxSamplesPerSend = 10
maxLabels = 9
)
t.Parallel()
for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} {
t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
maxSamplesPerSend := 10
sampleAgeLimit := time.Second * 2
cfg := config.DefaultQueueConfig
@ -2060,18 +2050,38 @@ func TestSendSamplesWithBackoffWithSampleAgeLimit(t *testing.T) {
m.Start()
batchID := 0
expectedSamples := map[string][]prompb.Sample{}
appendData := func(numberOfSeries int, timeAdd time.Duration, shouldBeDropped bool) {
t.Log(">>>> Appending series ", numberOfSeries, " as batch ID ", batchID, " with timeAdd ", timeAdd, " and should be dropped ", shouldBeDropped)
samples, series := createTimeseriesWithRandomLabelCount(strconv.Itoa(batchID), numberOfSeries, timeAdd, 9)
m.StoreSeries(series, batchID)
sent := m.Append(samples)
// Use a fixed rand source so tests are consistent.
r := rand.New(rand.NewSource(99))
recs := generateRecords(recCase{
series: numberOfSeries,
samplesPerSeries: 1,
tsFn: func(_, _ int) int64 {
return time.Now().Add(timeAdd).UnixMilli()
},
labelsFn: func(lb *labels.ScratchBuilder, i int) labels.Labels {
lb.Reset()
labelsCount := r.Intn(maxLabels)
lb.Add("__name__", "batch_"+strconv.Itoa(batchID)+"_id_"+strconv.Itoa(i))
for j := 1; j < labelsCount+1; j++ {
// same for both name and value
label := "batch_" + strconv.Itoa(batchID) + "_label_" + strconv.Itoa(j)
lb.Add(label, label)
}
return lb.Labels()
},
})
m.StoreSeries(recs.series, batchID)
sent := m.Append(recs.samples)
require.True(t, sent, "samples not sent")
if !shouldBeDropped {
for _, s := range samples {
tsID := getSeriesIDFromRef(series[s.Ref])
expectedSamples[tsID] = append(c.expectedSamples[tsID], prompb.Sample{
for _, s := range recs.samples {
tsID := getSeriesIDFromRef(recs.series[s.Ref])
c.expectedSamples[tsID] = append(c.expectedSamples[tsID], prompb.Sample{
Timestamp: s.T,
Value: s.V,
})
@ -2081,95 +2091,30 @@ func TestSendSamplesWithBackoffWithSampleAgeLimit(t *testing.T) {
}
timeShift := -time.Millisecond * 5
// Inject RW error.
c.SetReturnError(RecoverableError{context.DeadlineExceeded, defaultBackoff})
// Send current samples in various intervals.
appendData(maxSamplesPerSend/2, timeShift, true)
time.Sleep(sampleAgeLimit)
appendData(maxSamplesPerSend/2, timeShift, true)
time.Sleep(sampleAgeLimit / 10)
appendData(maxSamplesPerSend/2, timeShift, true)
time.Sleep(2 * sampleAgeLimit)
// Eventually all the above data must be ignored as 2x sampleAgeLimit passed.
// Now send, quickly re-enable RW target and send another batch.
// We should expect all the data from those two below batches.
appendData(2*maxSamplesPerSend, timeShift, false)
time.Sleep(sampleAgeLimit / 2)
c.SetReturnError(nil)
appendData(5, timeShift, false)
m.Stop()
if diff := cmp.Diff(expectedSamples, c.receivedSamples); diff != "" {
t.Errorf("mismatch (-want +got):\n%s", diff)
}
require.Equal(t, c.expectedSamples, c.receivedSamples)
})
}
}
// createTimeseriesWithRandomLabelCount builds seriesCount sample/series pairs
// whose timestamps are offset from now by timeAdd. Each series carries a
// deterministic (fixed-seed) random number of identical name/value labels,
// capped below maxLabels.
func createTimeseriesWithRandomLabelCount(id string, seriesCount int, timeAdd time.Duration, maxLabels int) ([]record.RefSample, []record.RefSeries) {
	// Use a fixed rand source so tests are consistent.
	rng := rand.New(rand.NewSource(99))
	samples := make([]record.RefSample, 0, seriesCount)
	series := make([]record.RefSeries, 0, seriesCount)
	for i := 0; i < seriesCount; i++ {
		samples = append(samples, record.RefSample{
			Ref: chunks.HeadSeriesRef(i),
			T:   time.Now().Add(timeAdd).UnixMilli(),
			V:   rng.Float64(),
		})
		numLabels := rng.Intn(maxLabels)
		builder := labels.NewScratchBuilder(1 + numLabels)
		builder.Add("__name__", "batch_"+id+"_id_"+strconv.Itoa(i))
		for j := 1; j <= numLabels; j++ {
			// same for both name and value
			label := "batch_" + id + "_label_" + strconv.Itoa(j)
			builder.Add(label, label)
		}
		series = append(series, record.RefSeries{
			Ref:    chunks.HeadSeriesRef(i),
			Labels: builder.Labels(),
		})
	}
	return samples, series
}
// createTimeseriesWithOldSamples builds numSeries series, each with numSamples
// samples: the first half timestamped 5 minutes in the past, the second half
// at the current time. It returns (all samples, only the recent samples, the
// series records). Extra labels, if any, are added to every series.
func createTimeseriesWithOldSamples(numSamples, numSeries int, extraLabels ...labels.Label) ([]record.RefSample, []record.RefSample, []record.RefSeries) {
	newSamples := make([]record.RefSample, 0, numSamples)
	samples := make([]record.RefSample, 0, numSamples)
	series := make([]record.RefSeries, 0, numSeries)
	lb := labels.NewScratchBuilder(1 + len(extraLabels))
	for i := 0; i < numSeries; i++ {
		ref := chunks.HeadSeriesRef(i)
		// First half of the samples lies 5 minutes in the past.
		past := timestamp.FromTime(time.Now().Add(-5 * time.Minute))
		for j := 0; j < numSamples/2; j++ {
			samples = append(samples, record.RefSample{Ref: ref, T: past + int64(j), V: float64(i)})
		}
		// Second half uses current timestamps; those are also tracked
		// separately as the "new" samples.
		for j := 0; j < numSamples/2; j++ {
			s := record.RefSample{Ref: ref, T: time.Now().UnixMilli() + int64(j), V: float64(i)}
			samples = append(samples, s)
			newSamples = append(newSamples, s)
		}
		// Create Labels that is name of series plus any extra labels supplied.
		lb.Reset()
		lb.Add(labels.MetricName, fmt.Sprintf("test_metric_%d", i))
		for _, l := range extraLabels {
			lb.Add(l.Name, l.Value)
		}
		lb.Sort()
		series = append(series, record.RefSeries{Ref: ref, Labels: lb.Labels()})
	}
	return samples, newSamples, series
}
// filterTsLimit reports whether the first sample of ts is strictly older than
// the given millisecond limit. A series with no samples is never considered
// beyond the limit.
func filterTsLimit(limit int64, ts prompb.TimeSeries) bool {
	// Guard against an index panic when the sample slice is empty.
	if len(ts.Samples) == 0 {
		return false
	}
	return limit > ts.Samples[0].Timestamp
}
@ -2659,7 +2604,7 @@ func TestHighestTimestampOnAppend(t *testing.T) {
t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
nSamples := 11 * config.DefaultQueueConfig.Capacity
nSeries := 3
samples, series := createTimeseries(nSamples, nSeries)
recs := generateRecords(recCase{series: nSeries, samplesPerSeries: nSamples / nSeries})
_, m := newTestClientAndQueueManager(t, defaultFlushDeadline, protoMsg)
m.Start()
@ -2667,13 +2612,14 @@ func TestHighestTimestampOnAppend(t *testing.T) {
require.Equal(t, 0.0, m.metrics.highestTimestamp.Get())
m.StoreSeries(series, 0)
require.True(t, m.Append(samples))
m.StoreSeries(recs.series, 0)
require.True(t, m.Append(recs.samples))
// Check that Append sets the highest timestamp correctly.
highestTs := float64((nSamples - 1) / 1000)
require.Greater(t, highestTs, 0.0)
require.Equal(t, highestTs, m.metrics.highestTimestamp.Get())
// NOTE: generateRecords yields nSamples/nSeries samples (36666), with <i for samplesPerSeries> timestamp.
// This gives the highest timestamp of 36666/1000 (seconds).
const expectedHighestTsSeconds = 36.0
require.Equal(t, expectedHighestTsSeconds, m.metrics.highestTimestamp.Get())
})
}
}

View File

@ -291,7 +291,7 @@ func writeMetaFile(logger *slog.Logger, dir string, meta *BlockMeta) (int64, err
jsonMeta, err := json.MarshalIndent(meta, "", "\t")
if err != nil {
return 0, err
return 0, errors.Join(err, f.Close())
}
n, err := f.Write(jsonMeta)

View File

@ -27,6 +27,7 @@ import (
)
func TestChunkWriteQueue_GettingChunkFromQueue(t *testing.T) {
t.Parallel()
var blockWriterWg sync.WaitGroup
blockWriterWg.Add(1)
@ -55,6 +56,7 @@ func TestChunkWriteQueue_GettingChunkFromQueue(t *testing.T) {
}
func TestChunkWriteQueue_WritingThroughQueue(t *testing.T) {
t.Parallel()
var (
gotSeriesRef HeadSeriesRef
gotMint, gotMaxt int64
@ -97,6 +99,7 @@ func TestChunkWriteQueue_WritingThroughQueue(t *testing.T) {
}
func TestChunkWriteQueue_WrappingAroundSizeLimit(t *testing.T) {
t.Parallel()
sizeLimit := 100
unblockChunkWriterCh := make(chan struct{}, sizeLimit)
@ -183,6 +186,7 @@ func TestChunkWriteQueue_WrappingAroundSizeLimit(t *testing.T) {
}
func TestChunkWriteQueue_HandlerErrorViaCallback(t *testing.T) {
t.Parallel()
testError := errors.New("test error")
chunkWriter := func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool, bool) error {
return testError

View File

@ -24,6 +24,7 @@ import (
)
func TestReaderWithInvalidBuffer(t *testing.T) {
t.Parallel()
b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81})
r := &Reader{bs: []ByteSlice{b}}
@ -32,6 +33,7 @@ func TestReaderWithInvalidBuffer(t *testing.T) {
}
func TestWriterWithDefaultSegmentSize(t *testing.T) {
t.Parallel()
chk1, err := ChunkFromSamples([]Sample{
sample{t: 10, f: 11},
sample{t: 20, f: 12},

View File

@ -45,6 +45,7 @@ func TestMain(m *testing.M) {
}
func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
t.Parallel()
hrw := createChunkDiskMapper(t, "")
defer func() {
require.NoError(t, hrw.Close())
@ -181,6 +182,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
// * The active file is not deleted even if the passed time makes it eligible to be deleted.
// * Non-empty current file leads to creation of another file after truncation.
func TestChunkDiskMapper_Truncate(t *testing.T) {
t.Parallel()
hrw := createChunkDiskMapper(t, "")
defer func() {
require.NoError(t, hrw.Close())
@ -275,6 +277,7 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
// This test exposes https://github.com/prometheus/prometheus/issues/7412 where the truncation
// simply deleted all empty files instead of stopping once it encountered a non-empty file.
func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
t.Parallel()
hrw := createChunkDiskMapper(t, "")
defer func() {
require.NoError(t, hrw.Close())
@ -359,6 +362,7 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
}
func TestChunkDiskMapper_Truncate_WriteQueueRaceCondition(t *testing.T) {
t.Parallel()
hrw := createChunkDiskMapper(t, "")
t.Cleanup(func() {
require.NoError(t, hrw.Close())
@ -411,6 +415,7 @@ func TestChunkDiskMapper_Truncate_WriteQueueRaceCondition(t *testing.T) {
// TestHeadReadWriter_TruncateAfterFailedIterateChunks tests for
// https://github.com/prometheus/prometheus/issues/7753
func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) {
t.Parallel()
hrw := createChunkDiskMapper(t, "")
defer func() {
require.NoError(t, hrw.Close())
@ -442,6 +447,7 @@ func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) {
}
func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
t.Parallel()
hrw := createChunkDiskMapper(t, "")
timeRange := 0

View File

@ -62,6 +62,7 @@ func (q *writeJobQueue) assertInvariants(t *testing.T) {
}
func TestQueuePushPopSingleGoroutine(t *testing.T) {
t.Parallel()
seed := time.Now().UnixNano()
t.Log("seed:", seed)
r := rand.New(rand.NewSource(seed))
@ -115,6 +116,7 @@ func TestQueuePushPopSingleGoroutine(t *testing.T) {
}
func TestQueuePushBlocksOnFullQueue(t *testing.T) {
t.Parallel()
queue := newWriteJobQueue(5, 5)
pushTime := make(chan time.Time)
@ -152,6 +154,7 @@ func TestQueuePushBlocksOnFullQueue(t *testing.T) {
}
func TestQueuePopBlocksOnEmptyQueue(t *testing.T) {
t.Parallel()
queue := newWriteJobQueue(5, 5)
popTime := make(chan time.Time)
@ -192,6 +195,7 @@ func TestQueuePopBlocksOnEmptyQueue(t *testing.T) {
}
func TestQueuePopUnblocksOnClose(t *testing.T) {
t.Parallel()
queue := newWriteJobQueue(5, 5)
popTime := make(chan time.Time)
@ -231,6 +235,7 @@ func TestQueuePopUnblocksOnClose(t *testing.T) {
}
func TestQueuePopAfterCloseReturnsAllElements(t *testing.T) {
t.Parallel()
const count = 10
queue := newWriteJobQueue(count, count)
@ -257,6 +262,7 @@ func TestQueuePopAfterCloseReturnsAllElements(t *testing.T) {
}
func TestQueuePushPopManyGoroutines(t *testing.T) {
t.Parallel()
const readGoroutines = 5
const writeGoroutines = 10
const writes = 500
@ -303,6 +309,7 @@ func TestQueuePushPopManyGoroutines(t *testing.T) {
}
func TestQueueSegmentIsKeptEvenIfEmpty(t *testing.T) {
t.Parallel()
queue := newWriteJobQueue(1024, 64)
require.True(t, queue.push(chunkWriteJob{seriesRef: 1}))

View File

@ -47,6 +47,7 @@ import (
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
"github.com/prometheus/prometheus/util/features"
prom_runtime "github.com/prometheus/prometheus/util/runtime"
)
const (
@ -126,6 +127,11 @@ type Options struct {
// the current size of the database.
MaxBytes int64
// Maximum % of disk space to use for blocks to be retained.
// 0 or less means disabled.
// If both MaxBytes and MaxPercentage are set, percentage prevails.
MaxPercentage uint
// NoLockfile disables creation and consideration of a lock file.
NoLockfile bool
@ -257,6 +263,9 @@ type Options struct {
// StaleSeriesCompactionThreshold is a number between 0.0-1.0 indicating the % of stale series in
// the in-memory Head block. If the % of stale series crosses this threshold, stale series compaction is run immediately.
StaleSeriesCompactionThreshold float64
// FsSizeFunc is a function returning the total disk size for a given path.
FsSizeFunc FsSizeFunc
}
type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error)
@ -267,6 +276,8 @@ type BlockQuerierFunc func(b BlockReader, mint, maxt int64) (storage.Querier, er
type BlockChunkQuerierFunc func(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error)
type FsSizeFunc func(path string) uint64
// DB handles reads and writes of time series falling into
// a hashed partition of a series DB.
type DB struct {
@ -328,6 +339,8 @@ type DB struct {
blockQuerierFunc BlockQuerierFunc
blockChunkQuerierFunc BlockChunkQuerierFunc
fsSizeFunc FsSizeFunc
}
type dbMetrics struct {
@ -344,6 +357,7 @@ type dbMetrics struct {
tombCleanTimer prometheus.Histogram
blocksBytes prometheus.Gauge
maxBytes prometheus.Gauge
maxPercentage prometheus.Gauge
retentionDuration prometheus.Gauge
staleSeriesCompactionsTriggered prometheus.Counter
staleSeriesCompactionsFailed prometheus.Counter
@ -424,6 +438,10 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
Name: "prometheus_tsdb_retention_limit_bytes",
Help: "Max number of bytes to be retained in the tsdb blocks, configured 0 means disabled",
})
m.maxPercentage = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_tsdb_retention_limit_percentage",
Help: "Max percentage of total storage space to be retained in the tsdb blocks, configured 0 means disabled",
})
m.retentionDuration = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_tsdb_retention_limit_seconds",
Help: "How long to retain samples in storage.",
@ -464,6 +482,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
m.tombCleanTimer,
m.blocksBytes,
m.maxBytes,
m.maxPercentage,
m.retentionDuration,
m.staleSeriesCompactionsTriggered,
m.staleSeriesCompactionsFailed,
@ -669,6 +688,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue
head: head,
blockQuerierFunc: NewBlockQuerier,
blockChunkQuerierFunc: NewBlockChunkQuerier,
fsSizeFunc: prom_runtime.FsSize,
}, nil
}
@ -1007,6 +1027,12 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn
db.blockChunkQuerierFunc = opts.BlockChunkQuerierFunc
}
if opts.FsSizeFunc == nil {
db.fsSizeFunc = prom_runtime.FsSize
} else {
db.fsSizeFunc = opts.FsSizeFunc
}
var wal, wbl *wlog.WL
segmentSize := wlog.DefaultSegmentSize
// Wal is enabled.
@ -1067,6 +1093,7 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn
db.metrics = newDBMetrics(db, r)
maxBytes := max(opts.MaxBytes, 0)
db.metrics.maxBytes.Set(float64(maxBytes))
db.metrics.maxPercentage.Set(float64(max(opts.MaxPercentage, 0)))
db.metrics.retentionDuration.Set((time.Duration(opts.RetentionDuration) * time.Millisecond).Seconds())
// Calling db.reload() calls db.reloadBlocks() which requires cmtx to be locked.
@ -1259,6 +1286,10 @@ func (db *DB) ApplyConfig(conf *config.Config) error {
db.opts.MaxBytes = int64(conf.StorageConfig.TSDBConfig.Retention.Size)
db.metrics.maxBytes.Set(float64(db.opts.MaxBytes))
}
if conf.StorageConfig.TSDBConfig.Retention.Percentage > 0 {
db.opts.MaxPercentage = conf.StorageConfig.TSDBConfig.Retention.Percentage
db.metrics.maxPercentage.Set(float64(db.opts.MaxPercentage))
}
db.retentionMtx.Unlock()
}
} else {
@ -1304,11 +1335,11 @@ func (db *DB) getRetentionDuration() int64 {
return db.opts.RetentionDuration
}
// getMaxBytes returns the current max bytes setting in a thread-safe manner.
func (db *DB) getMaxBytes() int64 {
// getRetentionSettings returns max bytes and max percentage settings in a thread-safe manner.
func (db *DB) getRetentionSettings() (int64, uint) {
db.retentionMtx.RLock()
defer db.retentionMtx.RUnlock()
return db.opts.MaxBytes
return db.opts.MaxBytes, db.opts.MaxPercentage
}
// dbAppender wraps the DB's head appender and triggers compactions on commit
@ -1968,9 +1999,25 @@ func BeyondTimeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc
// BeyondSizeRetention returns those blocks which are beyond the size retention
// set in the db options.
func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struct{}) {
// Size retention is disabled or no blocks to work with.
maxBytes := db.getMaxBytes()
if len(blocks) == 0 || maxBytes <= 0 {
// No blocks to work with
if len(blocks) == 0 {
return deletable
}
maxBytes, maxPercentage := db.getRetentionSettings()
// Max percentage prevails over max size.
if maxPercentage > 0 {
diskSize := db.fsSizeFunc(db.dir)
if diskSize <= 0 {
db.logger.Warn("Unable to retrieve filesystem size of database directory, skip percentage limitation and default to fixed size limitation", "dir", db.dir)
} else {
maxBytes = int64(uint64(maxPercentage) * diskSize / 100)
}
}
// Size retention is disabled.
if maxBytes <= 0 {
return deletable
}

View File

@ -9618,3 +9618,39 @@ func TestStaleSeriesCompactionWithZeroSeries(t *testing.T) {
// Should still have no blocks since there was nothing to compact.
require.Empty(t, db.Blocks())
}
// TestBeyondSizeRetentionWithPercentage checks that size retention honours a
// percentage-of-disk limit supplied via MaxPercentage and a stubbed FsSizeFunc.
func TestBeyondSizeRetentionWithPercentage(t *testing.T) {
	const maxBlock = 100
	const numBytesChunks = 1024
	const diskSize = maxBlock * numBytesChunks

	opts := DefaultOptions()
	opts.MaxPercentage = 10
	// Stub the filesystem size so the percentage limit is deterministic.
	opts.FsSizeFunc = func(_ string) uint64 {
		return uint64(diskSize)
	}
	db := newTestDB(t, withOpts(opts))
	require.Zero(t, db.Head().Size())

	// MaxPercentage blocks of numBytesChunks bytes each exactly fill the
	// 10% budget, so nothing should be deletable yet.
	blocks := make([]*Block, 0, opts.MaxPercentage+1)
	for range opts.MaxPercentage {
		blocks = append(blocks, &Block{
			numBytesChunks: numBytesChunks,
			meta:           BlockMeta{ULID: ulid.Make()},
		})
	}
	deletable := BeyondSizeRetention(db, blocks)
	require.Empty(t, deletable)

	// One more block pushes the total over the limit; exactly one block
	// must become deletable. Name the local distinctly so it does not
	// shadow the imported ulid package.
	extraULID := ulid.Make()
	blocks = append(blocks, &Block{
		numBytesChunks: numBytesChunks,
		meta:           BlockMeta{ULID: extraULID},
	})
	deletable = BeyondSizeRetention(db, blocks)
	require.Len(t, deletable, 1)
	require.Contains(t, deletable, extraULID)
}

View File

@ -11,12 +11,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build openbsd || windows || netbsd || solaris
//go:build openbsd || netbsd || solaris
package runtime
// Statfs returns the file system type (Unix only)
// syscall.Statfs_t isn't available on openbsd
func Statfs(path string) string {
// FsType returns the file system type or "unknown" if unsupported.
// These platforms lack syscall.Statfs_t, so no detection is attempted.
func FsType(_ string) string {
	return "unknown"
}
// FsSize returns the file system size or 0 if unsupported.
// These platforms lack syscall.Statfs_t, so no size can be reported.
func FsSize(_ string) uint64 {
	return 0
}

View File

@ -20,8 +20,7 @@ import (
"syscall"
)
// Statfs returns the file system type (Unix only).
func Statfs(path string) string {
func FsType(path string) string {
// Types of file systems that may be returned by `statfs`
fsTypes := map[int64]string{
0xadf5: "ADFS_SUPER_MAGIC",
@ -67,6 +66,7 @@ func Statfs(path string) string {
0x012FF7B4: "XENIX_SUPER_MAGIC",
0x58465342: "XFS_SUPER_MAGIC",
0x012FD16D: "_XIAFS_SUPER_MAGIC",
0x794c7630: "OVERLAYFS_SUPER_MAGIC",
}
var fs syscall.Statfs_t
@ -82,3 +82,12 @@ func Statfs(path string) string {
}
return strconv.FormatInt(localType, 16)
}
// FsSize returns the total size in bytes of the filesystem containing path,
// or 0 when the statfs call fails (e.g. the path does not exist).
func FsSize(path string) uint64 {
	var fs syscall.Statfs_t
	err := syscall.Statfs(path, &fs)
	if err != nil {
		return 0
	}
	// Total capacity = filesystem block size * total number of blocks.
	return uint64(fs.Bsize) * fs.Blocks
}

View File

@ -20,8 +20,8 @@ import (
"syscall"
)
// Statfs returns the file system type (Unix only)
func Statfs(path string) string {
// FsType returns the file system type (Unix only).
func FsType(path string) string {
// Types of file systems that may be returned by `statfs`
fsTypes := map[int32]string{
0xadf5: "ADFS_SUPER_MAGIC",
@ -63,6 +63,7 @@ func Statfs(path string) string {
0x012FF7B4: "XENIX_SUPER_MAGIC",
0x58465342: "XFS_SUPER_MAGIC",
0x012FD16D: "_XIAFS_SUPER_MAGIC",
0x794c7630: "OVERLAYFS_SUPER_MAGIC",
}
var fs syscall.Statfs_t
@ -75,3 +76,13 @@ func Statfs(path string) string {
}
return strconv.Itoa(int(fs.Type))
}
// FsSize returns the file system size (Unix only).
// On failure (e.g. a nonexistent path) it returns 0.
func FsSize(path string) uint64 {
	var stat syscall.Statfs_t
	if err := syscall.Statfs(path, &stat); err != nil {
		return 0
	}
	// Total capacity = block size * total number of blocks.
	return uint64(stat.Bsize) * stat.Blocks
}

View File

@ -20,8 +20,7 @@ import (
"syscall"
)
// Statfs returns the file system type (Unix only)
func Statfs(path string) string {
func FsType(path string) string {
// Types of file systems that may be returned by `statfs`
fsTypes := map[uint32]string{
0xadf5: "ADFS_SUPER_MAGIC",
@ -63,6 +62,7 @@ func Statfs(path string) string {
0x012FF7B4: "XENIX_SUPER_MAGIC",
0x58465342: "XFS_SUPER_MAGIC",
0x012FD16D: "_XIAFS_SUPER_MAGIC",
0x794c7630: "OVERLAYFS_SUPER_MAGIC",
}
var fs syscall.Statfs_t
@ -75,3 +75,12 @@ func Statfs(path string) string {
}
return strconv.Itoa(int(fs.Type))
}
// FsSize returns the total size in bytes of the filesystem containing path,
// or 0 when the statfs call fails (e.g. the path does not exist).
func FsSize(path string) uint64 {
	var fs syscall.Statfs_t
	err := syscall.Statfs(path, &fs)
	if err != nil {
		return 0
	}
	// Total capacity = filesystem block size * total number of blocks.
	return uint64(fs.Bsize) * fs.Blocks
}

View File

@ -0,0 +1,58 @@
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows && !openbsd && !netbsd && !solaris
package runtime
import (
"os"
"testing"
"github.com/grafana/regexp"
"github.com/stretchr/testify/require"
)
var regexpFsType = regexp.MustCompile("^[A-Z][A-Z0-9_]*_MAGIC$")
// TestFsType checks that a real directory resolves to a *_MAGIC filesystem
// name and that invalid paths fall through to the raw zero statfs type.
func TestFsType(t *testing.T) {
	path, err := os.Getwd()
	require.NoError(t, err)
	require.Regexp(t, regexpFsType, FsType(path))

	require.Equal(t, "0", FsType("/no/where/to/be/found"))
	require.Equal(t, "0", FsType(" %% not event a real path\n\n"))
}
// TestFsSize checks that the working directory reports a positive filesystem
// size and that invalid paths report 0 instead of failing.
func TestFsSize(t *testing.T) {
	path, err := os.Getwd()
	require.NoError(t, err)
	require.Positive(t, FsSize(path))

	require.Equal(t, uint64(0), FsSize("/no/where/to/be/found"))
	require.Equal(t, uint64(0), FsSize(" %% not event a real path\n\n"))
}

View File

@ -0,0 +1,56 @@
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
package runtime
import (
"os"
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var (
dll = windows.MustLoadDLL("kernel32.dll")
getDiskFreeSpaceExW = dll.MustFindProc("GetDiskFreeSpaceExW")
)
// FsType is a stub on Windows and always reports "unknown".
func FsType(_ string) string {
	return "unknown"
}
func FsSize(path string) uint64 {
// Ensure the path exists.
if _, err := os.Stat(path); err != nil {
return 0
}
var avail int64
var total int64
var free int64
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getdiskfreespaceexa
ret, _, _ := getDiskFreeSpaceExW.Call(
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path))),
uintptr(unsafe.Pointer(&avail)),
uintptr(unsafe.Pointer(&total)),
uintptr(unsafe.Pointer(&free)))
if ret == 0 || uint64(free) > uint64(total) {
return 0
}
return uint64(total)
}

View File

@ -0,0 +1,49 @@
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
package runtime
import (
"os"
"testing"
"github.com/stretchr/testify/require"
)
// TestFsType checks that the Windows stub reports "unknown" for every input.
func TestFsType(t *testing.T) {
	path, err := os.Getwd()
	require.NoError(t, err)
	require.Equal(t, "unknown", FsType(path))
	require.Equal(t, "unknown", FsType("A:\\no\\where\\to\\be\\found"))
}
func TestFsSize(t *testing.T) {
var size uint64
size = FsSize("C:\\")
require.Positive(t, size)
size = FsSize("c:\\no\\where\\to\\be\\found")
require.Equal(t, uint64(0), size)
size = FsSize(" %% not event a real path\n\n")
require.Equal(t, uint64(0), size)
}

View File

@ -40,6 +40,8 @@ export enum binaryOperatorType {
neq = "!=",
gtr = ">",
lss = "<",
trimUpper = "</",
trimLower = ">/",
gte = ">=",
lte = "<=",
and = "and",

View File

@ -4,13 +4,12 @@ go 1.25.5
require (
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853
github.com/prometheus/prometheus v0.308.1
github.com/prometheus/prometheus v0.309.1
github.com/russross/blackfriday/v2 v2.1.0
)
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/dennwc/varint v1.0.0 // indirect

View File

@ -108,8 +108,8 @@ github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEo
github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/prometheus v0.308.1 h1:ApMNI/3/es3Ze90Z7CMb+wwU2BsSYur0m5VKeqHj7h4=
github.com/prometheus/prometheus v0.308.1/go.mod h1:aHjYCDz9zKRyoUXvMWvu13K9XHOkBB12XrEqibs3e0A=
github.com/prometheus/prometheus v0.309.1 h1:jutK6eCYDpWdPTUbVbkcQsNCMO9CCkSwjQRMLds4jSo=
github.com/prometheus/prometheus v0.309.1/go.mod h1:d+dOGiVhuNDa4MaFXHVdnUBy/CzqlcNTooR8oM1wdTU=
github.com/prometheus/sigv4 v0.4.1 h1:EIc3j+8NBea9u1iV6O5ZAN8uvPq2xOIUPcqCTivHuXs=
github.com/prometheus/sigv4 v0.4.1/go.mod h1:eu+ZbRvsc5TPiHwqh77OWuCnWK73IdkETYY46P4dXOU=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=

View File

@ -37,6 +37,8 @@ const binOpPrecedence = {
[binaryOperatorType.lss]: 4,
[binaryOperatorType.gte]: 4,
[binaryOperatorType.lte]: 4,
[binaryOperatorType.trimLower]: 4,
[binaryOperatorType.trimUpper]: 4,
[binaryOperatorType.and]: 5,
[binaryOperatorType.or]: 6,
[binaryOperatorType.unless]: 5,

View File

@ -60,6 +60,8 @@ import {
LimitK,
LimitRatio,
CountValues,
TrimLower,
TrimUpper,
} from '@prometheus-io/lezer-promql';
import { Completion, CompletionContext, CompletionResult } from '@codemirror/autocomplete';
import { EditorState } from '@codemirror/state';
@ -579,6 +581,8 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode, pos: num
case Eql:
case Gte:
case Gtr:
case TrimLower:
case TrimUpper:
case Lte:
case Lss:
case And:

View File

@ -26,6 +26,8 @@ export const binOpTerms = [
{ label: '>=' },
{ label: '>' },
{ label: '<' },
{ label: '</' },
{ label: '>/' },
{ label: '<=' },
{ label: '!=' },
{ label: 'atan2' },

View File

@ -49,12 +49,14 @@ import {
StepInvariantExpr,
SubqueryExpr,
Topk,
TrimLower,
TrimUpper,
UnaryExpr,
Unless,
UnquotedLabelMatcher,
VectorSelector,
} from '@prometheus-io/lezer-promql';
import { containsAtLeastOneChild } from './path-finder';
import { containsAtLeastOneChild, containsChild } from './path-finder';
import { getType } from './type';
import { buildLabelMatchers } from './matcher';
import { EditorState } from '@codemirror/state';
@ -215,6 +217,8 @@ export class Parser {
const rt = this.checkAST(rExpr);
const boolModifierUsed = node.getChild(BoolModifier);
const isComparisonOperator = containsAtLeastOneChild(node, Eql, Neq, Lte, Lss, Gte, Gtr);
const isTrimLowerOperator = containsChild(node, TrimLower);
const isTrimUpperOperator = containsChild(node, TrimUpper);
const isSetOperator = containsAtLeastOneChild(node, And, Or, Unless);
// BOOL modifier check
@ -223,8 +227,14 @@ export class Parser {
this.addDiagnostic(node, 'bool modifier can only be used on comparison operators');
}
} else {
if (isComparisonOperator && lt === ValueType.scalar && rt === ValueType.scalar) {
this.addDiagnostic(node, 'comparisons between scalars must use BOOL modifier');
if (lt === ValueType.scalar && rt === ValueType.scalar) {
if (isComparisonOperator) {
this.addDiagnostic(node, 'comparisons between scalars must use BOOL modifier');
} else if (isTrimLowerOperator) {
this.addDiagnostic(node, 'operator ">/" not allowed for Scalar operations');
} else if (isTrimUpperOperator) {
this.addDiagnostic(node, 'operator "</" not allowed for Scalar operations');
}
}
}

View File

@ -88,6 +88,8 @@ BinaryExpr {
expr !eql Eql binModifiers expr |
expr !eql Gte binModifiers expr |
expr !eql Gtr binModifiers expr |
expr !eql TrimUpper binModifiers expr |
expr !eql TrimLower binModifiers expr |
expr !eql Lte binModifiers expr |
expr !eql Lss binModifiers expr |
expr !eql Neq binModifiers expr |
@ -338,6 +340,8 @@ NumberDurationLiteralInDurationContext {
Lss { "<" }
Gte { ">=" }
Gtr { ">" }
TrimUpper { "</" }
TrimLower { ">/" }
EqlRegex { "=~" }
EqlSingle { "=" }
NeqRegex { "!~" }

View File

@ -716,3 +716,31 @@ rate(caddy_http_requests_total[5m] smoothed)
==>
PromQL(FunctionCall(FunctionIdentifier(Rate),FunctionCallBody(SmoothedExpr(MatrixSelector(VectorSelector(Identifier),NumberDurationLiteralInDurationContext),Smoothed))))
# TrimUpper binary operator
metric1 </ metric2
==>
PromQL(
BinaryExpr(
VectorSelector(Identifier),
TrimUpper,
VectorSelector(Identifier)
)
)
# TrimLower binary operator
metric1 >/ metric2
==>
PromQL(
BinaryExpr(
VectorSelector(Identifier),
TrimLower,
VectorSelector(Identifier)
)
)

View File

@ -263,6 +263,7 @@ type Options struct {
TSDBRetentionDuration model.Duration
TSDBDir string
TSDBMaxBytes units.Base2Bytes
TSDBMaxPercentage uint
LocalStorage LocalStorage
Storage storage.Storage
ExemplarStorage storage.ExemplarQueryable
@ -874,6 +875,12 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) {
}
status.StorageRetention += h.options.TSDBMaxBytes.String()
}
if h.options.TSDBMaxPercentage != 0 {
if status.StorageRetention != "" {
status.StorageRetention += " or "
}
status.StorageRetention = status.StorageRetention + strconv.FormatUint(uint64(h.options.TSDBMaxPercentage), 10) + "%"
}
metrics, err := h.gatherer.Gather()
if err != nil {