Mirror of https://github.com/restic/restic.git
tree.Nodes will be replaced by an iterator that loads and serializes tree nodes on demand. Thus, the processing moves from StreamTrees into the callback. Schedule the callbacks onto the workers used by StreamTrees for proper load distribution.
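As a hedged illustration of that direction (not restic's actual code; NodeIterator, Next, and forEachNode are made-up names), the sketch below shows the intended shape: the tree callback pulls nodes one at a time from an on-demand iterator, so decoding and per-node work both happen on the StreamTrees worker that invoked the callback.

// Hypothetical sketch only: NodeIterator and Next are illustrative names,
// not restic's API. *Node is assumed to be the existing node type of this
// package.
type NodeIterator interface {
	// Next loads and decodes the next node on demand; ok is false once the
	// tree has been fully consumed.
	Next() (node *Node, ok bool, err error)
}

// forEachNode shows the callback shape after the change: all per-node work
// stays inside the StreamTrees callback and therefore runs on the worker
// goroutine that loaded the tree.
func forEachNode(it NodeIterator, visit func(*Node) error) error {
	for {
		node, ok, err := it.Next()
		if err != nil {
			return err
		}
		if !ok {
			return nil
		}
		if err := visit(node); err != nil {
			return err
		}
	}
}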
42 lines · 1.1 KiB · Go
package data

import (
	"context"
	"sync"

	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/ui/progress"
)

// FindUsedBlobs traverses the given tree IDs and adds all seen blobs (trees and
// data blobs) to the set blobs. Already seen tree blobs will not be visited again.
func FindUsedBlobs(ctx context.Context, repo restic.Loader, treeIDs restic.IDs, blobs restic.FindBlobSet, p *progress.Counter) error {
	var lock sync.Mutex

	return StreamTrees(ctx, repo, treeIDs, p, func(treeID restic.ID) bool {
		// locking is necessary because the tree callback below concurrently adds data blobs
		lock.Lock()
		h := restic.BlobHandle{ID: treeID, Type: restic.TreeBlob}
		blobReferenced := blobs.Has(h)
		// noop if already referenced
		blobs.Insert(h)
		lock.Unlock()
		return blobReferenced
	}, func(_ restic.ID, err error, tree *Tree) error {
		if err != nil {
			return err
		}

		for _, node := range tree.Nodes {
			lock.Lock()
			switch node.Type {
			case NodeTypeFile:
				for _, blob := range node.Content {
					blobs.Insert(restic.BlobHandle{ID: blob, Type: restic.DataBlob})
				}
			}
			lock.Unlock()
		}
		return nil
	})
}
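For orientation, a minimal usage sketch under stated assumptions: collectUsedBlobs is illustrative and not part of restic. It assumes it lives next to FindUsedBlobs in this package, that the set returned by restic.NewBlobSet satisfies restic.FindBlobSet via its Has/Insert methods, and that repo is any value implementing restic.Loader.

// collectUsedBlobs is a usage sketch, not restic code. Assumptions: the set
// returned by restic.NewBlobSet satisfies restic.FindBlobSet, and repo is any
// restic.Loader (for example a repository opened by the prune or check
// commands).
func collectUsedBlobs(ctx context.Context, repo restic.Loader, snapshotTrees restic.IDs, p *progress.Counter) (restic.BlobSet, error) {
	usedBlobs := restic.NewBlobSet()
	// every tree and data blob reachable from the given root trees ends up
	// in usedBlobs; anything not in the set is a candidate for removal
	if err := FindUsedBlobs(ctx, repo, snapshotTrees, usedBlobs, p); err != nil {
		return nil, err
	}
	return usedBlobs, nil
}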