Added test for percentage-based retention

Signed-off-by: Laurent Dufresne <laurent.dufresne@grafana.com>
This commit is contained in:
Laurent Dufresne 2026-02-19 14:04:31 +01:00
parent 971143edac
commit c76e78d0a4
2 changed files with 51 additions and 1 deletions

View file

@ -263,6 +263,9 @@ type Options struct {
// StaleSeriesCompactionThreshold is a number between 0.0-1.0 indicating the % of stale series in
// the in-memory Head block. If the % of stale series crosses this threshold, stale series compaction is run immediately.
StaleSeriesCompactionThreshold float64
// FsSizeFunc is a function returning the total disk size for a given path.
FsSizeFunc FsSizeFunc
}
type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error)
@ -273,6 +276,8 @@ type BlockQuerierFunc func(b BlockReader, mint, maxt int64) (storage.Querier, er
type BlockChunkQuerierFunc func(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error)
type FsSizeFunc func(path string) uint64
// DB handles reads and writes of time series falling into
// a hashed partition of a series database.
type DB struct {
@ -334,6 +339,8 @@ type DB struct {
blockQuerierFunc BlockQuerierFunc
blockChunkQuerierFunc BlockChunkQuerierFunc
fsSizeFunc FsSizeFunc
}
type dbMetrics struct {
@ -681,6 +688,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue
head: head,
blockQuerierFunc: NewBlockQuerier,
blockChunkQuerierFunc: NewBlockChunkQuerier,
fsSizeFunc: prom_runtime.FsSize,
}, nil
}
@ -1015,6 +1023,12 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn
db.blockChunkQuerierFunc = opts.BlockChunkQuerierFunc
}
if opts.FsSizeFunc == nil {
db.fsSizeFunc = prom_runtime.FsSize
} else {
db.fsSizeFunc = opts.FsSizeFunc
}
var wal, wbl *wlog.WL
segmentSize := wlog.DefaultSegmentSize
// Wal is enabled.
@ -2009,7 +2023,7 @@ func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc
// Max percentage prevails over max size.
if maxPercentage > 0 {
diskSize := prom_runtime.FsSize(db.dir)
diskSize := db.fsSizeFunc(db.dir)
if diskSize <= 0 {
db.logger.Warn("Unable to retrieve filesystem size of database directory, skip percentage limitation and default to fixed size limitation", "dir", db.dir)
} else {

View file

@ -9611,3 +9611,39 @@ func TestStaleSeriesCompactionWithZeroSeries(t *testing.T) {
// Should still have no blocks since there was nothing to compact.
require.Empty(t, db.Blocks())
}
// TestBeyondSizeRetentionWithPercentage verifies that percentage-based size
// retention marks blocks deletable only once total block size exceeds
// MaxPercentage of the (stubbed) filesystem size.
func TestBeyondSizeRetentionWithPercentage(t *testing.T) {
	const (
		maxBlock       = 100
		numBytesChunks = 1024
		// Total "disk" size reported by the stubbed FsSizeFunc.
		diskSize = maxBlock * numBytesChunks
	)

	opts := DefaultOptions()
	opts.MaxPercentage = 10
	// Stub the filesystem-size probe so the test does not depend on the
	// real disk the test runs on.
	opts.FsSizeFunc = func(_ string) uint64 {
		return uint64(diskSize)
	}

	db := newTestDB(t, withOpts(opts))
	require.Zero(t, db.Head().Size())

	// Build exactly enough blocks to sit at the 10% budget
	// (MaxPercentage blocks of numBytesChunks bytes each).
	blocks := make([]*Block, 0, opts.MaxPercentage+1)
	for range opts.MaxPercentage {
		blocks = append(blocks, &Block{
			numBytesChunks: numBytesChunks,
			meta:           BlockMeta{ULID: ulid.Make()},
		})
	}

	// At exactly the budget nothing should be deletable.
	deletable := BeyondSizeRetention(db, blocks)
	require.Empty(t, deletable)

	// One block over the budget: exactly that block must be marked
	// deletable. Use a distinct name — the original shadowed the ulid
	// package with a local variable, which would break any later use of
	// the package in this scope.
	overBudgetULID := ulid.Make()
	blocks = append(blocks, &Block{
		numBytesChunks: numBytesChunks,
		meta:           BlockMeta{ULID: overBudgetULID},
	})
	deletable = BeyondSizeRetention(db, blocks)
	require.Len(t, deletable, 1)
	require.Contains(t, deletable, overBudgetULID)
}