diff --git a/internal/archiver/buffer.go b/internal/archiver/buffer.go
index d5bfb46b3..0a6ae6d8f 100644
--- a/internal/archiver/buffer.go
+++ b/internal/archiver/buffer.go
@@ -1,5 +1,7 @@
 package archiver
 
+import "sync"
+
 // buffer is a reusable buffer. After the buffer has been used, Release should
 // be called so the underlying slice is put back into the pool.
 type buffer struct {
@@ -14,41 +16,32 @@ func (b *buffer) Release() {
 		return
 	}
 
-	select {
-	case pool.ch <- b:
-	default:
-	}
+	pool.pool.Put(b)
 }
 
 // bufferPool implements a limited set of reusable buffers.
 type bufferPool struct {
-	ch          chan *buffer
+	pool        sync.Pool
 	defaultSize int
 }
 
 // newBufferPool initializes a new buffer pool. The pool stores at most max
 // items. New buffers are created with defaultSize. Buffers that have grown
 // larger are not put back.
-func newBufferPool(max int, defaultSize int) *bufferPool {
+func newBufferPool(defaultSize int) *bufferPool {
 	b := &bufferPool{
-		ch:          make(chan *buffer, max),
 		defaultSize: defaultSize,
 	}
+	b.pool = sync.Pool{New: func() any {
+		return &buffer{
+			Data: make([]byte, defaultSize),
+			pool: b,
+		}
+	}}
 	return b
 }
 
 // Get returns a new buffer, either from the pool or newly allocated.
 func (pool *bufferPool) Get() *buffer {
-	select {
-	case buf := <-pool.ch:
-		return buf
-	default:
-	}
-
-	b := &buffer{
-		Data: make([]byte, pool.defaultSize),
-		pool: pool,
-	}
-
-	return b
+	return pool.pool.Get().(*buffer)
 }
diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go
index 4d0603e34..84e175d82 100644
--- a/internal/archiver/file_saver.go
+++ b/internal/archiver/file_saver.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"runtime"
 	"sync"
 
 	"github.com/restic/chunker"
@@ -34,16 +33,11 @@ type fileSaver struct {
 // started, it is stopped when ctx is cancelled.
 func newFileSaver(ctx context.Context, wg *errgroup.Group, uploader restic.BlobSaverAsync, pol chunker.Pol, fileWorkers uint) *fileSaver {
 	ch := make(chan saveFileJob)
-
-	// TODO find a way to get rid of this parameter
-	blobWorkers := uint(runtime.GOMAXPROCS(0))
-	debug.Log("new file saver with %v file workers and %v blob workers", fileWorkers, blobWorkers)
-
-	poolSize := fileWorkers + blobWorkers
+	debug.Log("new file saver with %v file workers", fileWorkers)
 
 	s := &fileSaver{
 		uploader:     uploader,
-		saveFilePool: newBufferPool(int(poolSize), chunker.MaxSize),
+		saveFilePool: newBufferPool(chunker.MaxSize),
 		pol:          pol,
 		ch:           ch,