restorer: scale file cache with worker count

This commit is contained in:
Michael Eischer 2025-11-30 11:01:01 +01:00
parent 24fcfeafcb
commit 760d0220f4
2 changed files with 9 additions and 9 deletions

View file

@ -79,12 +79,10 @@ func newFileRestorer(dst string,
workerCount := int(connections)
return &fileRestorer{
idx: idx,
blobsLoader: blobsLoader,
startWarmup: startWarmup,
// use a large number of buckets to minimize bucket contention in filesWriter
// buckets are relatively cheap, so we can afford to have a lot of them
filesWriter: newFilesWriter(1024, allowRecursiveDelete),
idx: idx,
blobsLoader: blobsLoader,
startWarmup: startWarmup,
filesWriter: newFilesWriter(workerCount, allowRecursiveDelete),
zeroChunk: repository.ZeroChunk(),
sparse: sparse,
progress: progress,

View file

@ -37,12 +37,14 @@ type partialFile struct {
}
func newFilesWriter(count int, allowRecursiveDelete bool) *filesWriter {
buckets := make([]filesWriterBucket, count)
for b := 0; b < count; b++ {
// use a large number of buckets to minimize bucket contention
// creating a new file can be slow, so make sure that files typically end up in different buckets.
buckets := make([]filesWriterBucket, 1024)
for b := 0; b < len(buckets); b++ {
buckets[b].files = make(map[string]*partialFile)
}
cache, err := simplelru.NewLRU[string, *partialFile](50, func(_ string, wr *partialFile) {
cache, err := simplelru.NewLRU[string, *partialFile](count+50, func(_ string, wr *partialFile) {
// close the file only when it is not in use
if wr.users == 0 {
_ = wr.Close()