From c76e78d0a4191e0cdf21d100e2b285a60c8a4f40 Mon Sep 17 00:00:00 2001 From: Laurent Dufresne Date: Thu, 19 Feb 2026 14:04:31 +0100 Subject: [PATCH] Add test for percentage-based retention Signed-off-by: Laurent Dufresne --- tsdb/db.go | 16 +++++++++++++++- tsdb/db_test.go | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/tsdb/db.go b/tsdb/db.go index ee234db352..b0076bed23 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -263,6 +263,9 @@ type Options struct { // StaleSeriesCompactionThreshold is a number between 0.0-1.0 indicating the % of stale series in // the in-memory Head block. If the % of stale series crosses this threshold, stale series compaction is run immediately. StaleSeriesCompactionThreshold float64 + + // FsSizeFunc is a function returning the total disk size for a given path. + FsSizeFunc FsSizeFunc } type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) @@ -273,6 +276,8 @@ type BlockQuerierFunc func(b BlockReader, mint, maxt int64) (storage.Querier, er type BlockChunkQuerierFunc func(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) +type FsSizeFunc func(path string) uint64 + // DB handles reads and writes of time series falling into // a hashed partition of a seriedb. 
type DB struct { @@ -334,6 +339,8 @@ type DB struct { blockQuerierFunc BlockQuerierFunc blockChunkQuerierFunc BlockChunkQuerierFunc + + fsSizeFunc FsSizeFunc } type dbMetrics struct { @@ -681,6 +688,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue head: head, blockQuerierFunc: NewBlockQuerier, blockChunkQuerierFunc: NewBlockChunkQuerier, + fsSizeFunc: prom_runtime.FsSize, }, nil } @@ -1015,6 +1023,12 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn db.blockChunkQuerierFunc = opts.BlockChunkQuerierFunc } + if opts.FsSizeFunc == nil { + db.fsSizeFunc = prom_runtime.FsSize + } else { + db.fsSizeFunc = opts.FsSizeFunc + } + var wal, wbl *wlog.WL segmentSize := wlog.DefaultSegmentSize // Wal is enabled. @@ -2009,7 +2023,7 @@ func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc // Max percentage prevails over max size. if maxPercentage > 0 { - diskSize := prom_runtime.FsSize(db.dir) + diskSize := db.fsSizeFunc(db.dir) if diskSize <= 0 { db.logger.Warn("Unable to retrieve filesystem size of database directory, skip percentage limitation and default to fixed size limitation", "dir", db.dir) } else { diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 18e969f952..ad66945541 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -9611,3 +9611,39 @@ func TestStaleSeriesCompactionWithZeroSeries(t *testing.T) { // Should still have no blocks since there was nothing to compact. 
require.Empty(t, db.Blocks()) } + +func TestBeyondSizeRetentionWithPercentage(t *testing.T) { + const maxBlock = 100 + const numBytesChunks = 1024 + const diskSize = maxBlock * numBytesChunks + + opts := DefaultOptions() + opts.MaxPercentage = 10 + opts.FsSizeFunc = func(_ string) uint64 { + return uint64(diskSize) + } + + db := newTestDB(t, withOpts(opts)) + require.Zero(t, db.Head().Size()) + + blocks := make([]*Block, 0, opts.MaxPercentage+1) + for range opts.MaxPercentage { + blocks = append(blocks, &Block{ + numBytesChunks: numBytesChunks, + meta: BlockMeta{ULID: ulid.Make()}, + }) + } + + deletable := BeyondSizeRetention(db, blocks) + require.Empty(t, deletable) + + ulid := ulid.Make() + blocks = append(blocks, &Block{ + numBytesChunks: numBytesChunks, + meta: BlockMeta{ULID: ulid}, + }) + + deletable = BeyondSizeRetention(db, blocks) + require.Len(t, deletable, 1) + require.Contains(t, deletable, ulid) +}