| repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated |
|---|---|---|---|---|---|---|---|---|
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/upgrade_repo.go | internal/repository/upgrade_repo.go | package repository
import (
"context"
"fmt"
"os"
"path/filepath"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/restic"
)
type upgradeRepoV2Error struct {
UploadNewConfigError error
ReuploadOldConfigError error
BackupFilePath string
}
func (err *upgradeRepoV2Error) Error() string {
if err.ReuploadOldConfigError != nil {
return fmt.Sprintf("error uploading config (%v), re-uploading old config filed failed as well (%v), but there is a backup of the config file in %v", err.UploadNewConfigError, err.ReuploadOldConfigError, err.BackupFilePath)
}
return fmt.Sprintf("error uploading config (%v), re-uploaded old config was successful, there is a backup of the config file in %v", err.UploadNewConfigError, err.BackupFilePath)
}
func (err *upgradeRepoV2Error) Unwrap() error {
// consider the original upload error as the primary cause
return err.UploadNewConfigError
}
func upgradeRepository(ctx context.Context, repo *Repository) error {
h := backend.Handle{Type: backend.ConfigFile}
if !repo.be.Properties().HasAtomicReplace {
// remove the original file for backends which do not support atomic overwriting
err := repo.be.Remove(ctx, h)
if err != nil {
return fmt.Errorf("remove config failed: %w", err)
}
}
// upgrade config
cfg := repo.Config()
cfg.Version = 2
err := restic.SaveConfig(ctx, &internalRepository{repo}, cfg)
if err != nil {
return fmt.Errorf("save new config file failed: %w", err)
}
return nil
}
func UpgradeRepo(ctx context.Context, repo *Repository) error {
if repo.Config().Version != 1 {
return fmt.Errorf("repository has version %v, only upgrades from version 1 are supported", repo.Config().Version)
}
tempdir, err := os.MkdirTemp("", "restic-migrate-upgrade-repo-v2-")
if err != nil {
return fmt.Errorf("create temp dir failed: %w", err)
}
h := backend.Handle{Type: restic.ConfigFile}
// read raw config file and save it to a temp dir, just in case
rawConfigFile, err := repo.LoadRaw(ctx, restic.ConfigFile, restic.ID{})
if err != nil {
return fmt.Errorf("load config file failed: %w", err)
}
backupFileName := filepath.Join(tempdir, "config")
err = os.WriteFile(backupFileName, rawConfigFile, 0600)
if err != nil {
return fmt.Errorf("write config file backup to %v failed: %w", tempdir, err)
}
// run the upgrade
err = upgradeRepository(ctx, repo)
if err != nil {
// build an error we can return to the caller
repoError := &upgradeRepoV2Error{
UploadNewConfigError: err,
BackupFilePath: backupFileName,
}
// try contingency methods, reupload the original file
_ = repo.be.Remove(ctx, h)
err = repo.be.Save(ctx, h, backend.NewByteReader(rawConfigFile, nil))
if err != nil {
repoError.ReuploadOldConfigError = err
}
return repoError
}
_ = os.Remove(backupFileName)
_ = os.Remove(tempdir)
return nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
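The `Unwrap` method above makes the original upload failure visible to `errors.Is` and `errors.As`. A standalone sketch (not restic code; the error type and sentinel below are illustrative) of why returning `UploadNewConfigError` as the wrapped cause matters to callers:

```go
package main

import (
	"errors"
	"fmt"
)

// upgradeError mirrors the Unwrap shape of upgradeRepoV2Error: the original
// upload failure is treated as the primary cause of the combined error.
type upgradeError struct {
	uploadErr  error
	backupPath string
}

func (e *upgradeError) Error() string {
	return fmt.Sprintf("error uploading config (%v), backup kept in %v", e.uploadErr, e.backupPath)
}

func (e *upgradeError) Unwrap() error { return e.uploadErr }

// errBackendUnavailable is a hypothetical sentinel, not part of restic.
var errBackendUnavailable = errors.New("backend unavailable")

func main() {
	var err error = &upgradeError{uploadErr: errBackendUnavailable, backupPath: "/tmp/config"}
	// errors.Is walks the Unwrap chain and finds the root cause.
	fmt.Println(errors.Is(err, errBackendUnavailable)) // true
}
```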
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/warmup_test.go | internal/repository/warmup_test.go | package repository
import (
"context"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/mock"
"github.com/restic/restic/internal/restic"
)
func TestWarmupRepository(t *testing.T) {
warmupCalls := [][]backend.Handle{}
warmupWaitCalls := [][]backend.Handle{}
simulateWarmingUp := false
be := mock.NewBackend()
be.WarmupFn = func(ctx context.Context, handles []backend.Handle) ([]backend.Handle, error) {
warmupCalls = append(warmupCalls, handles)
if simulateWarmingUp {
return handles, nil
}
return []backend.Handle{}, nil
}
be.WarmupWaitFn = func(ctx context.Context, handles []backend.Handle) error {
warmupWaitCalls = append(warmupWaitCalls, handles)
return nil
}
repo, _ := New(be, Options{})
id1, _ := restic.ParseID("1111111111111111111111111111111111111111111111111111111111111111")
id2, _ := restic.ParseID("2222222222222222222222222222222222222222222222222222222222222222")
id3, _ := restic.ParseID("3333333333333333333333333333333333333333333333333333333333333333")
job, err := repo.StartWarmup(context.TODO(), restic.NewIDSet(id1, id2))
if err != nil {
t.Fatalf("error when starting warmup: %v", err)
}
if len(warmupCalls) != 1 {
t.Fatalf("expected %d calls to warmup, got %d", 1, len(warmupCalls))
}
if len(warmupCalls[0]) != 2 {
t.Fatalf("expected warmup on %d handles, got %d", 2, len(warmupCalls[0]))
}
if job.HandleCount() != 0 {
t.Fatalf("expected all files to be warm, got %d cold", job.HandleCount())
}
simulateWarmingUp = true
job, err = repo.StartWarmup(context.TODO(), restic.NewIDSet(id3))
if err != nil {
t.Fatalf("error when starting warmup: %v", err)
}
if len(warmupCalls) != 2 {
t.Fatalf("expected %d calls to warmup, got %d", 2, len(warmupCalls))
}
if len(warmupCalls[1]) != 1 {
t.Fatalf("expected warmup on %d handles, got %d", 1, len(warmupCalls[1]))
}
if job.HandleCount() != 1 {
t.Fatalf("expected %d file to be warming up, got %d", 1, job.HandleCount())
}
if err := job.Wait(context.TODO()); err != nil {
t.Fatalf("error when waiting warmup: %v", err)
}
if len(warmupWaitCalls) != 1 {
t.Fatalf("expected %d calls to warmupWait, got %d", 1, len(warmupCalls))
}
if len(warmupWaitCalls[0]) != 1 {
t.Fatalf("expected warmupWait to be called with %d handles, got %d", 1, len(warmupWaitCalls[0]))
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/lock_test.go | internal/repository/lock_test.go | package repository
import (
"context"
"fmt"
"os"
"runtime"
"strings"
"sync"
"testing"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/mem"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
type backendWrapper func(r backend.Backend) (backend.Backend, error)
func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*Repository, backend.Backend) {
be := backend.Backend(mem.New())
// initialize repo
TestRepositoryWithBackend(t, be, 0, Options{})
// reopen repository to allow injecting a backend wrapper
if wrapper != nil {
var err error
be, err = wrapper(be)
rtest.OK(t, err)
}
return TestOpenBackend(t, be), be
}
func checkedLockRepo(ctx context.Context, t *testing.T, repo *Repository, lockerInst *locker, retryLock time.Duration) (*Unlocker, context.Context) {
lock, wrappedCtx, err := lockerInst.Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {})
rtest.OK(t, err)
rtest.OK(t, wrappedCtx.Err())
if lock.info.lock.Stale() {
t.Fatal("lock returned stale lock")
}
return lock, wrappedCtx
}
func TestLock(t *testing.T) {
t.Parallel()
repo, _ := openLockTestRepo(t, nil)
lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, lockerInst, 0)
lock.Unlock()
if wrappedCtx.Err() == nil {
t.Fatal("unlock did not cancel context")
}
}
func TestLockCancel(t *testing.T) {
t.Parallel()
repo, _ := openLockTestRepo(t, nil)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
lock, wrappedCtx := checkedLockRepo(ctx, t, repo, lockerInst, 0)
cancel()
if wrappedCtx.Err() == nil {
t.Fatal("canceled parent context did not cancel context")
}
// Unlock should not crash
lock.Unlock()
}
func TestLockConflict(t *testing.T) {
t.Parallel()
repo, be := openLockTestRepo(t, nil)
repo2 := TestOpenBackend(t, be)
lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {})
rtest.OK(t, err)
defer lock.Unlock()
_, _, err = Lock(context.Background(), repo2, false, 0, func(msg string) {}, func(format string, args ...interface{}) {})
if err == nil {
t.Fatal("second lock should have failed")
}
rtest.Assert(t, restic.IsAlreadyLocked(err), "unexpected error %v", err)
}
type writeOnceBackend struct {
backend.Backend
written bool
}
func (b *writeOnceBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
if b.written {
return fmt.Errorf("fail after first write")
}
b.written = true
return b.Backend.Save(ctx, h, rd)
}
func TestLockFailedRefresh(t *testing.T) {
t.Parallel()
repo, _ := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) {
return &writeOnceBackend{Backend: r}, nil
})
// reduce locking intervals to be suitable for testing
li := &locker{
retrySleepStart: lockerInst.retrySleepStart,
retrySleepMax: lockerInst.retrySleepMax,
refreshInterval: 20 * time.Millisecond,
refreshabilityTimeout: 100 * time.Millisecond,
}
lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0)
select {
case <-wrappedCtx.Done():
// expected lock refresh failure
case <-time.After(time.Second):
t.Fatal("failed lock refresh did not cause context cancellation")
}
// Unlock should not crash
lock.Unlock()
}
type loggingBackend struct {
backend.Backend
t *testing.T
}
func (b *loggingBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
b.t.Logf("save %v @ %v", h, time.Now())
err := b.Backend.Save(ctx, h, rd)
b.t.Logf("save finished %v @ %v", h, time.Now())
return err
}
func TestLockSuccessfulRefresh(t *testing.T) {
t.Parallel()
repo, _ := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) {
return &loggingBackend{
Backend: r,
t: t,
}, nil
})
t.Logf("test for successful lock refresh %v", time.Now())
// reduce locking intervals to be suitable for testing
li := &locker{
retrySleepStart: lockerInst.retrySleepStart,
retrySleepMax: lockerInst.retrySleepMax,
refreshInterval: 60 * time.Millisecond,
refreshabilityTimeout: 500 * time.Millisecond,
}
lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0)
select {
case <-wrappedCtx.Done():
// don't call t.Fatal to allow the lock to be properly cleaned up
t.Error("lock refresh failed", time.Now())
// Dump full stacktrace
buf := make([]byte, 1024*1024)
n := runtime.Stack(buf, true)
buf = buf[:n]
t.Log(string(buf))
case <-time.After(2 * li.refreshabilityTimeout):
// expected lock refresh to work
}
// Unlock should not crash
lock.Unlock()
}
type slowBackend struct {
backend.Backend
m sync.Mutex
sleep time.Duration
}
func (b *slowBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
b.m.Lock()
sleep := b.sleep
b.m.Unlock()
time.Sleep(sleep)
return b.Backend.Save(ctx, h, rd)
}
func TestLockSuccessfulStaleRefresh(t *testing.T) {
t.Parallel()
var sb *slowBackend
repo, _ := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) {
sb = &slowBackend{Backend: r}
return sb, nil
})
t.Logf("test for successful lock refresh %v", time.Now())
// reduce locking intervals to be suitable for testing
li := &locker{
retrySleepStart: lockerInst.retrySleepStart,
retrySleepMax: lockerInst.retrySleepMax,
refreshInterval: 10 * time.Millisecond,
refreshabilityTimeout: 50 * time.Millisecond,
}
lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0)
// delay lock refreshing long enough that the lock would expire
sb.m.Lock()
sb.sleep = li.refreshabilityTimeout + li.refreshInterval
sb.m.Unlock()
select {
case <-wrappedCtx.Done():
// don't call t.Fatal to allow the lock to be properly cleaned up
t.Error("lock refresh failed", time.Now())
case <-time.After(li.refreshabilityTimeout):
}
// reset slow backend
sb.m.Lock()
sb.sleep = 0
sb.m.Unlock()
debug.Log("normal lock period has expired")
select {
case <-wrappedCtx.Done():
// don't call t.Fatal to allow the lock to be properly cleaned up
t.Error("lock refresh failed", time.Now())
case <-time.After(3 * li.refreshabilityTimeout):
// expected lock refresh to work
}
// Unlock should not crash
lock.Unlock()
}
func TestLockWaitTimeout(t *testing.T) {
t.Parallel()
repo, _ := openLockTestRepo(t, nil)
elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {})
rtest.OK(t, err)
defer elock.Unlock()
retryLock := 200 * time.Millisecond
start := time.Now()
_, _, err = Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {})
duration := time.Since(start)
rtest.Assert(t, err != nil,
"create normal lock with exclusively locked repo didn't return an error")
rtest.Assert(t, strings.Contains(err.Error(), "repository is already locked exclusively"),
"create normal lock with exclusively locked repo didn't return the correct error")
rtest.Assert(t, retryLock <= duration && duration < retryLock*3/2,
"create normal lock with exclusively locked repo didn't wait for the specified timeout")
}
func TestLockWaitCancel(t *testing.T) {
t.Parallel()
repo, _ := openLockTestRepo(t, nil)
elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {})
rtest.OK(t, err)
defer elock.Unlock()
retryLock := 200 * time.Millisecond
cancelAfter := 40 * time.Millisecond
start := time.Now()
ctx, cancel := context.WithCancel(context.TODO())
time.AfterFunc(cancelAfter, cancel)
_, _, err = Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {})
duration := time.Since(start)
rtest.Assert(t, err != nil,
"create normal lock with exclusively locked repo didn't return an error")
rtest.Assert(t, strings.Contains(err.Error(), "context canceled"),
"create normal lock with exclusively locked repo didn't return the correct error")
rtest.Assert(t, cancelAfter <= duration && duration < retryLock-10*time.Millisecond,
"create normal lock with exclusively locked repo didn't return in time, duration %v", duration)
}
func TestLockWaitSuccess(t *testing.T) {
t.Parallel()
repo, _ := openLockTestRepo(t, nil)
elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {})
rtest.OK(t, err)
retryLock := 200 * time.Millisecond
unlockAfter := 40 * time.Millisecond
time.AfterFunc(unlockAfter, func() {
elock.Unlock()
})
lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {})
rtest.OK(t, err)
lock.Unlock()
}
func createFakeLock(repo *Repository, t time.Time, pid int) (restic.ID, error) {
hostname, err := os.Hostname()
if err != nil {
return restic.ID{}, err
}
newLock := &restic.Lock{Time: t, PID: pid, Hostname: hostname}
return restic.SaveJSONUnpacked(context.TODO(), &internalRepository{repo}, restic.LockFile, &newLock)
}
func lockExists(repo restic.Lister, t testing.TB, lockID restic.ID) bool {
var exists bool
rtest.OK(t, repo.List(context.TODO(), restic.LockFile, func(id restic.ID, size int64) error {
if id == lockID {
exists = true
}
return nil
}))
return exists
}
func removeLock(repo *Repository, id restic.ID) error {
return (&internalRepository{repo}).RemoveUnpacked(context.TODO(), restic.LockFile, id)
}
func TestLockWithStaleLock(t *testing.T) {
repo := TestRepository(t)
id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid())
rtest.OK(t, err)
id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid())
rtest.OK(t, err)
id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500000)
rtest.OK(t, err)
processed, err := RemoveStaleLocks(context.TODO(), repo)
rtest.OK(t, err)
rtest.Assert(t, lockExists(repo, t, id1) == false,
"stale lock still exists after RemoveStaleLocks was called")
rtest.Assert(t, lockExists(repo, t, id2) == true,
"non-stale lock was removed by RemoveStaleLocks")
rtest.Assert(t, lockExists(repo, t, id3) == false,
"stale lock still exists after RemoveStaleLocks was called")
rtest.Assert(t, processed == 2,
"number of locks removed does not match: expected %d, got %d",
2, processed)
rtest.OK(t, removeLock(repo, id2))
}
func TestRemoveAllLocks(t *testing.T) {
repo := TestRepository(t)
id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid())
rtest.OK(t, err)
id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid())
rtest.OK(t, err)
id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500000)
rtest.OK(t, err)
processed, err := RemoveAllLocks(context.TODO(), repo)
rtest.OK(t, err)
rtest.Assert(t, lockExists(repo, t, id1) == false,
"lock still exists after RemoveAllLocks was called")
rtest.Assert(t, lockExists(repo, t, id2) == false,
"lock still exists after RemoveAllLocks was called")
rtest.Assert(t, lockExists(repo, t, id3) == false,
"lock still exists after RemoveAllLocks was called")
rtest.Assert(t, processed == 3,
"number of locks removed does not match: expected %d, got %d",
3, processed)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/packer_uploader.go | internal/repository/packer_uploader.go | package repository
import (
"context"
"github.com/restic/restic/internal/restic"
"golang.org/x/sync/errgroup"
)
// savePacker implements saving a pack in the repository.
type savePacker interface {
savePacker(ctx context.Context, t restic.BlobType, p *packer) error
}
type uploadTask struct {
packer *packer
tpe restic.BlobType
}
type packerUploader struct {
uploadQueue chan uploadTask
}
func newPackerUploader(ctx context.Context, wg *errgroup.Group, repo savePacker, connections uint) *packerUploader {
pu := &packerUploader{
uploadQueue: make(chan uploadTask),
}
for i := 0; i < int(connections); i++ {
wg.Go(func() error {
for {
select {
case t, ok := <-pu.uploadQueue:
if !ok {
return nil
}
err := repo.savePacker(ctx, t.tpe, t.packer)
if err != nil {
return err
}
case <-ctx.Done():
return ctx.Err()
}
}
})
}
return pu
}
func (pu *packerUploader) QueuePacker(ctx context.Context, t restic.BlobType, p *packer) (err error) {
select {
case <-ctx.Done():
return ctx.Err()
case pu.uploadQueue <- uploadTask{tpe: t, packer: p}:
}
return nil
}
func (pu *packerUploader) TriggerShutdown() {
close(pu.uploadQueue)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
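newPackerUploader is a small worker pool: `connections` errgroup goroutines drain one shared channel until it is closed by TriggerShutdown or the group context fails. A stripped-down, runnable sketch of the same pattern (task type and counts are illustrative; requires golang.org/x/sync/errgroup):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	wg, ctx := errgroup.WithContext(context.Background())
	queue := make(chan int)

	// Fixed number of workers, mirroring the connections loop above.
	for i := 0; i < 3; i++ {
		wg.Go(func() error {
			for {
				select {
				case task, ok := <-queue:
					if !ok {
						return nil // queue closed: clean shutdown
					}
					fmt.Println("processed task", task)
				case <-ctx.Done():
					return ctx.Err() // another worker failed or caller cancelled
				}
			}
		})
	}

	// Producers should select on ctx.Done() when sending, as QueuePacker
	// does above; omitted here because no worker can fail in this sketch.
	for i := 0; i < 5; i++ {
		queue <- i
	}
	close(queue) // the equivalent of TriggerShutdown
	if err := wg.Wait(); err != nil {
		fmt.Println("upload failed:", err)
	}
}
```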
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/prune_internal_test.go | internal/repository/prune_internal_test.go | package repository
import (
"context"
"math"
"math/rand"
"testing"
"time"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui/progress"
)
// TestPruneMaxUnusedDuplicate checks that MaxUnused correctly accounts for duplicates.
//
// Create a repository containing blobs a to d that are stored in packs as follows:
// - a, d
// - b, d
// - c, d
// All blobs should be kept during prune, but the duplicates should be gone afterwards.
// The special construction ensures that each pack contains a used, non-duplicate blob.
// This ensures that special cases that delete completely duplicate pack files do not
// apply.
func TestPruneMaxUnusedDuplicate(t *testing.T) {
seed := time.Now().UnixNano()
random := rand.New(rand.NewSource(seed))
t.Logf("rand initialized with seed %d", seed)
repo, _, _ := TestRepositoryWithVersion(t, 0)
// ensure blobs are assembled into packs as expected
repo.packerCount = 1
// large blobs to prevent repacking due to too small packsize
const blobSize = 1024 * 1024
bufs := [][]byte{}
for i := 0; i < 4; i++ {
// use uniform length for simpler control via MaxUnusedBytes
buf := make([]byte, blobSize)
random.Read(buf)
bufs = append(bufs, buf)
}
keep := restic.NewBlobSet()
for _, blobs := range [][][]byte{
{bufs[0], bufs[3]},
{bufs[1], bufs[3]},
{bufs[2], bufs[3]},
} {
rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
for _, blob := range blobs {
id, _, _, err := uploader.SaveBlob(ctx, restic.DataBlob, blob, restic.ID{}, true)
keep.Insert(restic.BlobHandle{Type: restic.DataBlob, ID: id})
rtest.OK(t, err)
}
return nil
}))
}
opts := PruneOptions{
MaxRepackBytes: math.MaxUint64,
// non-zero number of unused bytes, that is nevertheless smaller than a single blob
// setting this to zero would bypass the unused/duplicate size accounting that should
// be tested here
MaxUnusedBytes: func(used uint64) (unused uint64) { return blobSize / 2 },
}
plan, err := PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository, usedBlobs restic.FindBlobSet) error {
for blob := range keep {
usedBlobs.Insert(blob)
}
return nil
}, &progress.NoopPrinter{})
rtest.OK(t, err)
rtest.OK(t, plan.Execute(context.TODO(), &progress.NoopPrinter{}))
rsize := plan.Stats().Size
remainingUnusedSize := rsize.Duplicate + rsize.Unused - rsize.Remove - rsize.Repackrm
maxUnusedSize := opts.MaxUnusedBytes(rsize.Used)
rtest.Assert(t, remainingUnusedSize <= maxUnusedSize, "too much unused data remains got %v, expected less than %v", remainingUnusedSize, maxUnusedSize)
// divide by blobSize to ignore pack file overhead
rtest.Equals(t, rsize.Used/blobSize, uint64(4))
rtest.Equals(t, rsize.Duplicate/blobSize, uint64(2))
rtest.Equals(t, rsize.Unused, uint64(0))
rtest.Equals(t, rsize.Remove, uint64(0))
rtest.Equals(t, rsize.Repack/blobSize, uint64(4))
rtest.Equals(t, rsize.Repackrm/blobSize, uint64(2))
rtest.Equals(t, rsize.Unref, uint64(0))
rtest.Equals(t, rsize.Uncompressed, uint64(0))
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/warmup.go | internal/repository/warmup.go | package repository
import (
"context"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/restic"
)
type WarmupJob struct {
repo *Repository
handlesWarmingUp []backend.Handle
}
// HandleCount returns the number of handles that are currently warming up.
func (job *WarmupJob) HandleCount() int {
return len(job.handlesWarmingUp)
}
// Wait waits for all handles to be warm.
func (job *WarmupJob) Wait(ctx context.Context) error {
return job.repo.be.WarmupWait(ctx, job.handlesWarmingUp)
}
// StartWarmup creates a new warmup job, requesting the backend to warmup the specified packs.
func (r *Repository) StartWarmup(ctx context.Context, packs restic.IDSet) (restic.WarmupJob, error) {
handles := make([]backend.Handle, 0, len(packs))
for pack := range packs {
handles = append(
handles,
backend.Handle{Type: restic.PackFile, Name: pack.String()},
)
}
handlesWarmingUp, err := r.be.Warmup(ctx, handles)
return &WarmupJob{
repo: r,
handlesWarmingUp: handlesWarmingUp,
}, err
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/testing.go | internal/repository/testing.go | package repository
import (
"context"
"fmt"
"os"
"sync"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/local"
"github.com/restic/restic/internal/backend/mem"
"github.com/restic/restic/internal/backend/retry"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
"github.com/restic/chunker"
)
type logger interface {
Logf(format string, args ...interface{})
}
var paramsOnce sync.Once
// TestUseLowSecurityKDFParameters configures low-security KDF parameters for testing.
func TestUseLowSecurityKDFParameters(t logger) {
t.Logf("using low-security KDF parameters for test")
paramsOnce.Do(func() {
params = &crypto.Params{
N: 128,
R: 1,
P: 1,
}
})
}
// TestBackend returns a fully configured in-memory backend.
func TestBackend(_ testing.TB) backend.Backend {
return mem.New()
}
const testChunkerPol = chunker.Pol(0x3DA3358B4DC173)
// TestRepositoryWithBackend returns a repository initialized with a test
// password. If be is nil, an in-memory backend is used. A constant polynomial
// is used for the chunker and low-security test parameters.
func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, opts Options) (*Repository, backend.Backend) {
t.Helper()
TestUseLowSecurityKDFParameters(t)
restic.TestDisableCheckPolynomial(t)
if be == nil {
be = TestBackend(t)
}
repo, err := New(be, opts)
if err != nil {
t.Fatalf("TestRepository(): new repo failed: %v", err)
}
if version == 0 {
version = restic.StableRepoVersion
}
pol := testChunkerPol
err = repo.Init(context.TODO(), version, test.TestPassword, &pol)
if err != nil {
t.Fatalf("TestRepository(): initialize repo failed: %v", err)
}
return repo, be
}
// TestRepository returns a repository initialized with a test password on an
// in-memory backend. When the environment variable RESTIC_TEST_REPO is set to
// a non-existing directory, a local backend is created there and this is used
// instead. The directory is not removed, but left there for inspection.
func TestRepository(t testing.TB) *Repository {
t.Helper()
repo, _, _ := TestRepositoryWithVersion(t, 0)
return repo
}
func TestRepositoryWithVersion(t testing.TB, version uint) (*Repository, restic.Unpacked[restic.FileType], backend.Backend) {
t.Helper()
dir := os.Getenv("RESTIC_TEST_REPO")
opts := Options{}
var repo *Repository
var be backend.Backend
if dir != "" {
_, err := os.Stat(dir)
if err != nil {
lbe, err := local.Create(context.TODO(), local.Config{Path: dir}, t.Logf)
if err != nil {
t.Fatalf("error creating local backend at %v: %v", dir, err)
}
repo, be = TestRepositoryWithBackend(t, lbe, version, opts)
} else {
t.Logf("directory at %v already exists, using mem backend", dir)
}
} else {
repo, be = TestRepositoryWithBackend(t, nil, version, opts)
}
return repo, &internalRepository{repo}, be
}
func TestFromFixture(t testing.TB, repoFixture string) (*Repository, backend.Backend, func()) {
repodir, cleanup := test.Env(t, repoFixture)
repo, be := TestOpenLocal(t, repodir)
return repo, be, cleanup
}
// TestOpenLocal opens a local repository.
func TestOpenLocal(t testing.TB, dir string) (*Repository, backend.Backend) {
var be backend.Backend
be, err := local.Open(context.TODO(), local.Config{Path: dir, Connections: 2}, t.Logf)
if err != nil {
t.Fatal(err)
}
be = retry.New(be, 3, nil, nil)
return TestOpenBackend(t, be), be
}
func TestOpenBackend(t testing.TB, be backend.Backend) *Repository {
repo, err := New(be, Options{})
if err != nil {
t.Fatal(err)
}
err = repo.SearchKey(context.TODO(), test.TestPassword, 10, "")
if err != nil {
t.Fatal(err)
}
return repo
}
type VersionedTest func(t *testing.T, version uint)
func TestAllVersions(t *testing.T, test VersionedTest) {
for version := restic.MinRepoVersion; version <= restic.MaxRepoVersion; version++ {
t.Run(fmt.Sprintf("v%d", version), func(t *testing.T) {
test(t, uint(version))
})
}
}
type VersionedBenchmark func(b *testing.B, version uint)
func BenchmarkAllVersions(b *testing.B, bench VersionedBenchmark) {
for version := restic.MinRepoVersion; version <= restic.MaxRepoVersion; version++ {
b.Run(fmt.Sprintf("v%d", version), func(b *testing.B) {
bench(b, uint(version))
})
}
}
func TestNewLock(_ *testing.T, repo *Repository, exclusive bool) (*restic.Lock, error) {
// TODO get rid of this test helper
return restic.NewLock(context.TODO(), &internalRepository{repo}, exclusive)
}
// TestCheckRepo runs the checker on repo.
func TestCheckRepo(t testing.TB, repo *Repository) {
chkr := NewChecker(repo)
hints, errs := chkr.LoadIndex(context.TODO(), nil)
if len(errs) != 0 {
t.Fatalf("errors loading index: %v", errs)
}
if len(hints) != 0 {
t.Fatalf("errors loading index: %v", hints)
}
// packs
errChan := make(chan error)
go chkr.Packs(context.TODO(), errChan)
for err := range errChan {
t.Error(err)
}
// read data
errChan = make(chan error)
go chkr.ReadPacks(context.TODO(), func(packs map[restic.ID]int64) map[restic.ID]int64 {
return packs
}, nil, errChan)
for err := range errChan {
t.Error(err)
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/repository.go | internal/repository/repository.go | package repository
import (
"bytes"
"context"
"fmt"
"io"
"math"
"runtime"
"sort"
"sync"
"github.com/klauspost/compress/zstd"
"github.com/restic/chunker"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/cache"
"github.com/restic/restic/internal/backend/dryrun"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository/index"
"github.com/restic/restic/internal/repository/pack"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
"golang.org/x/sync/errgroup"
)
const MinPackSize = 4 * 1024 * 1024
const DefaultPackSize = 16 * 1024 * 1024
const MaxPackSize = 128 * 1024 * 1024
// Repository is used to access a repository in a backend.
type Repository struct {
be backend.Backend
cfg restic.Config
key *crypto.Key
keyID restic.ID
idx *index.MasterIndex
cache *cache.Cache
opts Options
packerWg *errgroup.Group
mainWg *errgroup.Group
blobSaver *sync.WaitGroup
uploader *packerUploader
treePM *packerManager
dataPM *packerManager
packerCount int
allocEnc sync.Once
allocDec sync.Once
enc *zstd.Encoder
dec *zstd.Decoder
}
// internalRepository allows using SaveUnpacked and RemoveUnpacked with all FileTypes
type internalRepository struct {
*Repository
}
type Options struct {
Compression CompressionMode
PackSize uint
NoExtraVerify bool
}
// CompressionMode configures whether data should be compressed.
type CompressionMode uint
// Constants for the different compression levels.
const (
CompressionAuto CompressionMode = 0
CompressionOff CompressionMode = 1
CompressionMax CompressionMode = 2
CompressionFastest CompressionMode = 3
CompressionBetter CompressionMode = 4
CompressionInvalid CompressionMode = 5
)
// Set implements the method needed for pflag command flag parsing.
func (c *CompressionMode) Set(s string) error {
switch s {
case "auto":
*c = CompressionAuto
case "off":
*c = CompressionOff
case "max":
*c = CompressionMax
case "fastest":
*c = CompressionFastest
case "better":
*c = CompressionBetter
default:
*c = CompressionInvalid
return fmt.Errorf("invalid compression mode %q, must be one of (auto|off|fastest|better|max)", s)
}
return nil
}
func (c *CompressionMode) String() string {
switch *c {
case CompressionAuto:
return "auto"
case CompressionOff:
return "off"
case CompressionMax:
return "max"
case CompressionFastest:
return "fastest"
case CompressionBetter:
return "better"
default:
return "invalid"
}
}
func (c *CompressionMode) Type() string {
return "mode"
}
// New returns a new repository with backend be.
func New(be backend.Backend, opts Options) (*Repository, error) {
if opts.Compression == CompressionInvalid {
return nil, errors.New("invalid compression mode")
}
if opts.PackSize == 0 {
opts.PackSize = DefaultPackSize
}
if opts.PackSize > MaxPackSize {
return nil, fmt.Errorf("pack size larger than limit of %v MiB", MaxPackSize/1024/1024)
} else if opts.PackSize < MinPackSize {
return nil, fmt.Errorf("pack size smaller than minimum of %v MiB", MinPackSize/1024/1024)
}
repo := &Repository{
be: be,
opts: opts,
idx: index.NewMasterIndex(),
packerCount: defaultPackerCount,
}
return repo, nil
}
// setConfig assigns the given config and updates the repository parameters accordingly
func (r *Repository) setConfig(cfg restic.Config) {
r.cfg = cfg
}
// Config returns the repository configuration.
func (r *Repository) Config() restic.Config {
return r.cfg
}
// PackSize return the target size of a pack file when uploading
func (r *Repository) PackSize() uint {
return r.opts.PackSize
}
// UseCache replaces the backend with the wrapped cache.
func (r *Repository) UseCache(c *cache.Cache, errorLog func(string, ...interface{})) {
if c == nil {
return
}
debug.Log("using cache")
r.cache = c
r.be = c.Wrap(r.be, errorLog)
}
func (r *Repository) Cache() *cache.Cache {
return r.cache
}
// SetDryRun sets the repo backend into dry-run mode.
func (r *Repository) SetDryRun() {
r.be = dryrun.New(r.be)
}
func (r *Repository) Checker() *Checker {
return NewChecker(r)
}
// LoadUnpacked loads and decrypts the file with the given type and ID.
func (r *Repository) LoadUnpacked(ctx context.Context, t restic.FileType, id restic.ID) ([]byte, error) {
debug.Log("load %v with id %v", t, id)
if t == restic.ConfigFile {
id = restic.ID{}
}
buf, err := r.LoadRaw(ctx, t, id)
if err != nil {
return nil, err
}
nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():]
plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil)
if err != nil {
return nil, err
}
if t != restic.ConfigFile {
return r.decompressUnpacked(plaintext)
}
return plaintext, nil
}
type haver interface {
Has(backend.Handle) bool
}
// sortCachedPacksFirst moves all cached pack files to the front of blobs.
func sortCachedPacksFirst(cache haver, blobs []restic.PackedBlob) {
if cache == nil {
return
}
// no need to sort a list with one element
if len(blobs) == 1 {
return
}
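// cached shares the backing array of blobs: appending to it rewrites the
// prefix of blobs in place, while the remaining entries are collected in
// noncached and copied back behind the cached ones below.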
cached := blobs[:0]
noncached := make([]restic.PackedBlob, 0, len(blobs)/2)
for _, blob := range blobs {
if cache.Has(backend.Handle{Type: restic.PackFile, Name: blob.PackID.String()}) {
cached = append(cached, blob)
continue
}
noncached = append(noncached, blob)
}
copy(blobs[len(cached):], noncached)
}
// LoadBlob loads a blob of type t from the repository.
// It may use all of buf[:cap(buf)] as scratch space.
func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) ([]byte, error) {
debug.Log("load %v with id %v (buf len %v, cap %d)", t, id, len(buf), cap(buf))
// lookup packs
blobs := r.idx.Lookup(restic.BlobHandle{ID: id, Type: t})
if len(blobs) == 0 {
debug.Log("id %v not found in index", id)
return nil, errors.Errorf("id %v not found in repository", id)
}
// try cached pack files first
sortCachedPacksFirst(r.cache, blobs)
buf, err := r.loadBlob(ctx, blobs, buf)
if err != nil {
if r.cache != nil {
for _, blob := range blobs {
h := backend.Handle{Type: restic.PackFile, Name: blob.PackID.String(), IsMetadata: blob.Type.IsMetadata()}
// ignore errors as there's not much we can do here
_ = r.cache.Forget(h)
}
}
buf, err = r.loadBlob(ctx, blobs, buf)
}
return buf, err
}
func (r *Repository) loadBlob(ctx context.Context, blobs []restic.PackedBlob, buf []byte) ([]byte, error) {
var lastError error
for _, blob := range blobs {
debug.Log("blob %v found: %v", blob.BlobHandle, blob)
// load blob from pack
h := backend.Handle{Type: restic.PackFile, Name: blob.PackID.String(), IsMetadata: blob.Type.IsMetadata()}
switch {
case cap(buf) < int(blob.Length):
buf = make([]byte, blob.Length)
case len(buf) != int(blob.Length):
buf = buf[:blob.Length]
}
_, err := backend.ReadAt(ctx, r.be, h, int64(blob.Offset), buf)
if err != nil {
debug.Log("error loading blob %v: %v", blob, err)
lastError = err
continue
}
it := newPackBlobIterator(blob.PackID, newByteReader(buf), blob.Offset, []restic.Blob{blob.Blob}, r.key, r.getZstdDecoder())
pbv, err := it.Next()
if err == nil {
err = pbv.Err
}
if err != nil {
debug.Log("error decoding blob %v: %v", blob, err)
lastError = err
continue
}
plaintext := pbv.Plaintext
if len(plaintext) > cap(buf) {
return plaintext, nil
}
// move decrypted data to the start of the buffer
buf = buf[:len(plaintext)]
copy(buf, plaintext)
return buf, nil
}
if lastError != nil {
return nil, lastError
}
return nil, errors.Errorf("loading %v from %v packs failed", blobs[0].BlobHandle, len(blobs))
}
func (r *Repository) getZstdEncoder() *zstd.Encoder {
r.allocEnc.Do(func() {
var level zstd.EncoderLevel
switch r.opts.Compression {
case CompressionFastest:
level = zstd.SpeedFastest
case CompressionBetter:
level = zstd.SpeedBetterCompression
case CompressionMax:
level = zstd.SpeedBestCompression
default:
level = zstd.SpeedDefault
}
opts := []zstd.EOption{
// Set the compression level configured.
zstd.WithEncoderLevel(level),
// Disable CRC, we have enough checks in place, makes the
// compressed data four bytes shorter.
zstd.WithEncoderCRC(false),
// Set a window of 512kbyte, so we have good lookbehind for usual
// blob sizes.
zstd.WithWindowSize(512 * 1024),
}
enc, err := zstd.NewWriter(nil, opts...)
if err != nil {
panic(err)
}
r.enc = enc
})
return r.enc
}
func (r *Repository) getZstdDecoder() *zstd.Decoder {
r.allocDec.Do(func() {
opts := []zstd.DOption{
// Use all available cores.
zstd.WithDecoderConcurrency(0),
// Limit the maximum decompressed memory. Set to a very high,
// conservative value.
zstd.WithDecoderMaxMemory(16 * 1024 * 1024 * 1024),
}
dec, err := zstd.NewReader(nil, opts...)
if err != nil {
panic(err)
}
r.dec = dec
})
return r.dec
}
// saveAndEncrypt encrypts data and stores it to the backend as type t. If data
// is small enough, it will be packed together with other small blobs. The
// caller must ensure that the id matches the data. Returned is the size the data
// occupies in the repo (compressed or not, including the encryption overhead).
func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) (size int, err error) {
debug.Log("save id %v (%v, %d bytes)", id, t, len(data))
uncompressedLength := 0
if r.cfg.Version > 1 {
// we have a repo v2, so compression is available. if the user opts to
// not compress, we won't compress any data, but everything else is
// compressed.
if r.opts.Compression != CompressionOff || t != restic.DataBlob {
uncompressedLength = len(data)
data = r.getZstdEncoder().EncodeAll(data, nil)
}
}
nonce := crypto.NewRandomNonce()
ciphertext := make([]byte, 0, crypto.CiphertextLength(len(data)))
ciphertext = append(ciphertext, nonce...)
// encrypt blob
ciphertext = r.key.Seal(ciphertext, nonce, data, nil)
if err := r.verifyCiphertext(ciphertext, uncompressedLength, id); err != nil {
//nolint:revive,staticcheck // ignore linter warnings about error message spelling
return 0, fmt.Errorf("Detected data corruption while saving blob %v: %w\nCorrupted blobs are either caused by hardware issues or software bugs. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting.", id, err)
}
// find suitable packer and add blob
var pm *packerManager
switch t {
case restic.TreeBlob:
pm = r.treePM
case restic.DataBlob:
pm = r.dataPM
default:
panic(fmt.Sprintf("invalid type: %v", t))
}
return pm.SaveBlob(ctx, t, id, ciphertext, uncompressedLength)
}
func (r *Repository) verifyCiphertext(buf []byte, uncompressedLength int, id restic.ID) error {
if r.opts.NoExtraVerify {
return nil
}
nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():]
plaintext, err := r.key.Open(nil, nonce, ciphertext, nil)
if err != nil {
return fmt.Errorf("decryption failed: %w", err)
}
if uncompressedLength != 0 {
// DecodeAll will allocate a slice if it is not large enough since it
// knows the decompressed size (because we're using EncodeAll)
plaintext, err = r.getZstdDecoder().DecodeAll(plaintext, nil)
if err != nil {
return fmt.Errorf("decompression failed: %w", err)
}
}
if !restic.Hash(plaintext).Equal(id) {
return errors.New("hash mismatch")
}
return nil
}
func (r *Repository) compressUnpacked(p []byte) ([]byte, error) {
// compression is only available starting from version 2
if r.cfg.Version < 2 {
return p, nil
}
// version byte
out := []byte{2}
out = r.getZstdEncoder().EncodeAll(p, out)
return out, nil
}
func (r *Repository) decompressUnpacked(p []byte) ([]byte, error) {
// compression is only available starting from version 2
if r.cfg.Version < 2 {
return p, nil
}
if len(p) == 0 {
// too short for version header
return p, nil
}
if p[0] == '[' || p[0] == '{' {
// probably raw JSON
return p, nil
}
// version
if p[0] != 2 {
return nil, errors.New("not supported encoding format")
}
return r.getZstdDecoder().DecodeAll(p[1:], nil)
}
// SaveUnpacked encrypts data and stores it in the backend. Returned is the
// storage hash.
func (r *Repository) SaveUnpacked(ctx context.Context, t restic.WriteableFileType, buf []byte) (id restic.ID, err error) {
return r.saveUnpacked(ctx, t.ToFileType(), buf)
}
func (r *internalRepository) SaveUnpacked(ctx context.Context, t restic.FileType, buf []byte) (id restic.ID, err error) {
return r.Repository.saveUnpacked(ctx, t, buf)
}
func (r *Repository) saveUnpacked(ctx context.Context, t restic.FileType, buf []byte) (id restic.ID, err error) {
p := buf
if t != restic.ConfigFile {
p, err = r.compressUnpacked(p)
if err != nil {
return restic.ID{}, err
}
}
ciphertext := crypto.NewBlobBuffer(len(p))
ciphertext = ciphertext[:0]
nonce := crypto.NewRandomNonce()
ciphertext = append(ciphertext, nonce...)
ciphertext = r.key.Seal(ciphertext, nonce, p, nil)
if err := r.verifyUnpacked(ciphertext, t, buf); err != nil {
//nolint:revive,staticcheck // ignore linter warnings about error message spelling
return restic.ID{}, fmt.Errorf("Detected data corruption while saving file of type %v: %w\nCorrupted data is either caused by hardware issues or software bugs. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting.", t, err)
}
if t == restic.ConfigFile {
id = restic.ID{}
} else {
id = restic.Hash(ciphertext)
}
h := backend.Handle{Type: t, Name: id.String()}
err = r.be.Save(ctx, h, backend.NewByteReader(ciphertext, r.be.Hasher()))
if err != nil {
debug.Log("error saving blob %v: %v", h, err)
return restic.ID{}, err
}
debug.Log("blob %v saved", h)
return id, nil
}
func (r *Repository) verifyUnpacked(buf []byte, t restic.FileType, expected []byte) error {
if r.opts.NoExtraVerify {
return nil
}
nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():]
plaintext, err := r.key.Open(nil, nonce, ciphertext, nil)
if err != nil {
return fmt.Errorf("decryption failed: %w", err)
}
if t != restic.ConfigFile {
plaintext, err = r.decompressUnpacked(plaintext)
if err != nil {
return fmt.Errorf("decompression failed: %w", err)
}
}
if !bytes.Equal(plaintext, expected) {
return errors.New("data mismatch")
}
return nil
}
func (r *Repository) RemoveUnpacked(ctx context.Context, t restic.WriteableFileType, id restic.ID) error {
return r.removeUnpacked(ctx, t.ToFileType(), id)
}
func (r *internalRepository) RemoveUnpacked(ctx context.Context, t restic.FileType, id restic.ID) error {
return r.Repository.removeUnpacked(ctx, t, id)
}
func (r *Repository) removeUnpacked(ctx context.Context, t restic.FileType, id restic.ID) error {
return r.be.Remove(ctx, backend.Handle{Type: t, Name: id.String()})
}
func (r *Repository) WithBlobUploader(ctx context.Context, fn func(ctx context.Context, uploader restic.BlobSaverWithAsync) error) error {
wg, ctx := errgroup.WithContext(ctx)
// pack uploader + wg.Go below + blob saver (CPU bound)
wg.SetLimit(2 + runtime.GOMAXPROCS(0))
r.mainWg = wg
r.startPackUploader(ctx, wg)
// blob savers are spawned on demand, use a wait group to keep track of them
r.blobSaver = &sync.WaitGroup{}
wg.Go(func() error {
if err := fn(ctx, &blobSaverRepo{repo: r}); err != nil {
return err
}
if err := r.flush(ctx); err != nil {
return fmt.Errorf("error flushing repository: %w", err)
}
return nil
})
return wg.Wait()
}
func (r *Repository) startPackUploader(ctx context.Context, wg *errgroup.Group) {
if r.packerWg != nil {
panic("uploader already started")
}
innerWg, ctx := errgroup.WithContext(ctx)
r.packerWg = innerWg
r.uploader = newPackerUploader(ctx, innerWg, r, r.Connections())
r.treePM = newPackerManager(r.key, restic.TreeBlob, r.PackSize(), r.packerCount, r.uploader.QueuePacker)
r.dataPM = newPackerManager(r.key, restic.DataBlob, r.PackSize(), r.packerCount, r.uploader.QueuePacker)
wg.Go(func() error {
return innerWg.Wait()
})
}
type blobSaverRepo struct {
repo *Repository
}
func (r *blobSaverRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, size int, err error) {
return r.repo.saveBlob(ctx, t, buf, id, storeDuplicate)
}
func (r *blobSaverRepo) SaveBlobAsync(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool, cb func(newID restic.ID, known bool, size int, err error)) {
r.repo.saveBlobAsync(ctx, t, buf, id, storeDuplicate, cb)
}
// Flush saves all remaining packs and the index
func (r *Repository) flush(ctx context.Context) error {
r.flushBlobSaver()
r.mainWg = nil
if err := r.flushPackUploader(ctx); err != nil {
return err
}
return r.idx.Flush(ctx, &internalRepository{r})
}
func (r *Repository) flushBlobSaver() {
if r.blobSaver == nil {
return
}
r.blobSaver.Wait()
r.blobSaver = nil
}
// flushPackUploader saves all remaining packs.
func (r *Repository) flushPackUploader(ctx context.Context) error {
if r.packerWg == nil {
return nil
}
err := r.treePM.Flush(ctx)
if err != nil {
return err
}
err = r.dataPM.Flush(ctx)
if err != nil {
return err
}
r.uploader.TriggerShutdown()
err = r.packerWg.Wait()
r.treePM = nil
r.dataPM = nil
r.uploader = nil
r.packerWg = nil
return err
}
func (r *Repository) Connections() uint {
return r.be.Properties().Connections
}
func (r *Repository) LookupBlob(tpe restic.BlobType, id restic.ID) []restic.PackedBlob {
return r.idx.Lookup(restic.BlobHandle{Type: tpe, ID: id})
}
// LookupBlobSize returns the size of blob id. Blobs that are pending upload are also taken into account.
func (r *Repository) LookupBlobSize(tpe restic.BlobType, id restic.ID) (uint, bool) {
return r.idx.LookupSize(restic.BlobHandle{Type: tpe, ID: id})
}
// ListBlobs runs fn on all blobs known to the index. When the context is cancelled,
// the index iteration returns immediately with ctx.Err(). This blocks any modification of the index.
func (r *Repository) ListBlobs(ctx context.Context, fn func(restic.PackedBlob)) error {
for blob := range r.idx.Values() {
if ctx.Err() != nil {
return ctx.Err()
}
fn(blob)
}
return nil
}
func (r *Repository) ListPacksFromIndex(ctx context.Context, packs restic.IDSet) <-chan restic.PackBlobs {
return r.idx.ListPacks(ctx, packs)
}
func (r *Repository) clearIndex() {
r.idx = index.NewMasterIndex()
}
// LoadIndex loads all index files from the backend in parallel and stores them
func (r *Repository) LoadIndex(ctx context.Context, p restic.TerminalCounterFactory) error {
return r.loadIndexWithCallback(ctx, p, nil)
}
// loadIndexWithCallback loads all index files from the backend in parallel and stores them
func (r *Repository) loadIndexWithCallback(ctx context.Context, p restic.TerminalCounterFactory, cb func(id restic.ID, idx *index.Index, err error) error) error {
debug.Log("Loading index")
// reset in-memory index before loading it from the repository
r.clearIndex()
var bar *progress.Counter
if p != nil {
bar = p.NewCounterTerminalOnly("index files loaded")
}
err := r.idx.Load(ctx, r, bar, cb)
if err != nil {
return err
}
// Trigger GC to reset garbage collection threshold
runtime.GC()
if r.cfg.Version < 2 {
// sanity check
ctx, cancel := context.WithCancel(ctx)
defer cancel()
invalidIndex := false
for blob := range r.idx.Values() {
if ctx.Err() != nil {
return ctx.Err()
}
if blob.IsCompressed() {
invalidIndex = true
}
}
if invalidIndex {
return errors.New("index uses feature not supported by repository version 1")
}
}
if ctx.Err() != nil {
return ctx.Err()
}
// remove index files from the cache which have been removed in the repo
return r.prepareCache()
}
// createIndexFromPacks creates a new index by reading all given pack files (with sizes).
// The index is added to the MasterIndex but not marked as finalized.
// Returned is the list of pack files which could not be read.
func (r *Repository) createIndexFromPacks(ctx context.Context, packsize map[restic.ID]int64, p *progress.Counter) (invalid restic.IDs, err error) {
var m sync.Mutex
debug.Log("Loading index from pack files")
// track spawned goroutines using wg, create a new context which is
// cancelled as soon as an error occurs.
wg, wgCtx := errgroup.WithContext(ctx)
type FileInfo struct {
restic.ID
Size int64
}
ch := make(chan FileInfo)
// send list of pack files through ch, which is closed afterwards
wg.Go(func() error {
defer close(ch)
for id, size := range packsize {
select {
case <-wgCtx.Done():
return wgCtx.Err()
case ch <- FileInfo{id, size}:
}
}
return nil
})
// a worker receives a pack ID from ch, reads the pack contents, and adds them to idx
worker := func() error {
for fi := range ch {
entries, _, err := r.ListPack(wgCtx, fi.ID, fi.Size)
if err != nil {
debug.Log("unable to list pack file %v", fi.ID.Str())
m.Lock()
invalid = append(invalid, fi.ID)
m.Unlock()
}
if err := r.idx.StorePack(wgCtx, fi.ID, entries, &internalRepository{r}); err != nil {
return err
}
p.Add(1)
}
return nil
}
// decoding the pack header is usually quite fast, thus we are primarily IO-bound
workerCount := int(r.Connections())
// run workers on ch
for i := 0; i < workerCount; i++ {
wg.Go(worker)
}
err = wg.Wait()
if err != nil {
return invalid, err
}
// flush the index to the repository
err = r.flush(ctx)
if err != nil {
return invalid, err
}
return invalid, nil
}
func (r *Repository) NewAssociatedBlobSet() restic.AssociatedBlobSet {
return &associatedBlobSet{*index.NewAssociatedSet[struct{}](r.idx)}
}
// associatedBlobSet is a wrapper around index.AssociatedSet to implement the restic.AssociatedBlobSet interface.
type associatedBlobSet struct {
index.AssociatedSet[struct{}]
}
func (s *associatedBlobSet) Intersect(other restic.AssociatedBlobSet) restic.AssociatedBlobSet {
return &associatedBlobSet{*s.AssociatedSet.Intersect(other)}
}
func (s *associatedBlobSet) Sub(other restic.AssociatedBlobSet) restic.AssociatedBlobSet {
return &associatedBlobSet{*s.AssociatedSet.Sub(other)}
}
// prepareCache initializes the local cache. indexIDs is the list of IDs of
// index files still present in the repo.
func (r *Repository) prepareCache() error {
if r.cache == nil {
return nil
}
packs := r.idx.Packs(restic.NewIDSet())
// clear old packs
return r.cache.Clear(restic.PackFile, packs)
}
// SearchKey finds a key with the supplied password, afterwards the config is
// read and parsed. It tries at most maxKeys key files in the repo.
func (r *Repository) SearchKey(ctx context.Context, password string, maxKeys int, keyHint string) error {
key, err := SearchKey(ctx, r, password, maxKeys, keyHint)
if err != nil {
return err
}
oldKey := r.key
oldKeyID := r.keyID
r.key = key.master
r.keyID = key.ID()
cfg, err := restic.LoadConfig(ctx, r)
if err != nil {
r.key = oldKey
r.keyID = oldKeyID
if err == crypto.ErrUnauthenticated {
return fmt.Errorf("config or key %v is damaged: %w", key.ID(), err)
}
return fmt.Errorf("config cannot be loaded: %w", err)
}
r.setConfig(cfg)
return nil
}
// Init creates a new master key with the supplied password, initializes and
// saves the repository config.
func (r *Repository) Init(ctx context.Context, version uint, password string, chunkerPolynomial *chunker.Pol) error {
if version > restic.MaxRepoVersion {
return fmt.Errorf("repository version %v too high", version)
}
if version < restic.MinRepoVersion {
return fmt.Errorf("repository version %v too low", version)
}
_, err := r.be.Stat(ctx, backend.Handle{Type: restic.ConfigFile})
if err != nil && !r.be.IsNotExist(err) {
return err
}
if err == nil {
return errors.New("repository master key and config already initialized")
}
// double check to make sure that a repository is not accidentally reinitialized
// if the backend somehow fails to stat the config file. An initialized repository
// must always contain at least one key file.
if err := r.List(ctx, restic.KeyFile, func(_ restic.ID, _ int64) error {
return errors.New("repository already contains keys")
}); err != nil {
return err
}
// Also check for snapshots to detect repositories with a misconfigured retention
// policy that deletes files older than x days. For such repositories usually the
// config and key files are removed first and therefore the check would not detect
// the old repository.
if err := r.List(ctx, restic.SnapshotFile, func(_ restic.ID, _ int64) error {
return errors.New("repository already contains snapshots")
}); err != nil {
return err
}
cfg, err := restic.CreateConfig(version)
if err != nil {
return err
}
if chunkerPolynomial != nil {
cfg.ChunkerPolynomial = *chunkerPolynomial
}
return r.init(ctx, password, cfg)
}
// init creates a new master key with the supplied password and uses it to save
// the config into the repo.
func (r *Repository) init(ctx context.Context, password string, cfg restic.Config) error {
key, err := createMasterKey(ctx, r, password)
if err != nil {
return err
}
r.key = key.master
r.keyID = key.ID()
r.setConfig(cfg)
return restic.SaveConfig(ctx, &internalRepository{r}, cfg)
}
// Key returns the current master key.
func (r *Repository) Key() *crypto.Key {
return r.key
}
// KeyID returns the id of the current key in the backend.
func (r *Repository) KeyID() restic.ID {
return r.keyID
}
// List runs fn for all files of type t in the repo.
func (r *Repository) List(ctx context.Context, t restic.FileType, fn func(restic.ID, int64) error) error {
return r.be.List(ctx, t, func(fi backend.FileInfo) error {
id, err := restic.ParseID(fi.Name)
if err != nil {
debug.Log("unable to parse %v as an ID", fi.Name)
return nil
}
return fn(id, fi.Size)
})
}
// ListPack returns the list of blobs saved in the pack id and the length of
// the pack header.
func (r *Repository) ListPack(ctx context.Context, id restic.ID, size int64) ([]restic.Blob, uint32, error) {
h := backend.Handle{Type: restic.PackFile, Name: id.String()}
entries, hdrSize, err := pack.List(r.Key(), backend.ReaderAt(ctx, r.be, h), size)
if err != nil {
if r.cache != nil {
// ignore error as there is not much we can do here
_ = r.cache.Forget(h)
}
// retry on error
entries, hdrSize, err = pack.List(r.Key(), backend.ReaderAt(ctx, r.be, h), size)
}
return entries, hdrSize, err
}
// Delete calls backend.Delete() if implemented, and returns an error
// otherwise.
func (r *Repository) Delete(ctx context.Context) error {
return r.be.Delete(ctx)
}
// Close closes the repository by closing the backend.
func (r *Repository) Close() error {
return r.be.Close()
}
// saveBlob saves a blob of type t into the repository.
// It takes care that no duplicates are saved; this can be overridden
// by setting storeDuplicate to true.
// If id is the null id, it will be computed and returned.
// Also returns whether the blob was already known before.
// If the blob was not known before, it returns the number of bytes the blob
// occupies in the repo (compressed or not, including encryption overhead).
func (r *Repository) saveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, size int, err error) {
if int64(len(buf)) > math.MaxUint32 {
return restic.ID{}, false, 0, fmt.Errorf("blob is larger than 4GB")
}
// compute plaintext hash if not already set
if id.IsNull() {
// Special case the hash calculation for all zero chunks. This is especially
// useful for sparse files containing large all zero regions. For these we can
// process chunks as fast as we can read them from disk.
if len(buf) == chunker.MinSize && restic.ZeroPrefixLen(buf) == chunker.MinSize {
newID = ZeroChunk()
} else {
newID = restic.Hash(buf)
}
} else {
newID = id
}
// first try to add to pending blobs; if not successful, this blob is already known
known = !r.idx.AddPending(restic.BlobHandle{ID: newID, Type: t}, uint(len(buf)))
// only save when needed or explicitly told
if !known || storeDuplicate {
size, err = r.saveAndEncrypt(ctx, t, buf, newID)
}
return newID, known, size, err
}
func (r *Repository) saveBlobAsync(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool, cb func(newID restic.ID, known bool, size int, err error)) {
r.mainWg.Go(func() error {
if ctx.Err() != nil {
// fail fast if the context is cancelled
cb(restic.ID{}, false, 0, ctx.Err())
return ctx.Err()
}
newID, known, size, err := r.saveBlob(ctx, t, buf, id, storeDuplicate)
cb(newID, known, size, err)
return err
})
}
type backendLoadFn func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error
type loadBlobFn func(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) ([]byte, error)
// Skip sections with more than 1MB of unused blobs
const maxUnusedRange = 1 * 1024 * 1024
// LoadBlobsFromPack loads the listed blobs from the specified pack file. Each plaintext blob, or an
// error if decryption failed or the blob hash does not match, is passed to the handleBlobFn callback.
// handleBlobFn is called at most once for each blob. If the callback returns an error,
// then LoadBlobsFromPack will abort and not retry it. The buf passed to the callback is only valid within
// this specific call. The callback must not keep a reference to buf.
func (r *Repository) LoadBlobsFromPack(ctx context.Context, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
return streamPack(ctx, r.be.Load, r.LoadBlob, r.getZstdDecoder(), r.key, packID, blobs, handleBlobFn)
}
func streamPack(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn, dec *zstd.Decoder, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
if len(blobs) == 0 {
// nothing to do
return nil
}
sort.Slice(blobs, func(i, j int) bool {
return blobs[i].Offset < blobs[j].Offset
})
lowerIdx := 0
lastPos := blobs[0].Offset
const maxChunkSize = 2 * DefaultPackSize
for i := 0; i < len(blobs); i++ {
if blobs[i].Offset < lastPos {
// don't wait for streamPackPart to fail
return errors.Errorf("overlapping blobs in pack %v", packID)
}
chunkSizeAfter := (blobs[i].Offset + blobs[i].Length) - blobs[lowerIdx].Offset
split := false
// split if the chunk would become larger than maxChunkSize. Oversized chunks are
// handled by the requirement that the chunk contains at least one blob (i > lowerIdx)
if i > lowerIdx && chunkSizeAfter >= maxChunkSize {
split = true
}
// skip too large gaps as a new request is typically much cheaper than data transfers
if blobs[i].Offset-lastPos > maxUnusedRange {
split = true
}
if split {
// load everything up to the skipped file section
err := streamPackPart(ctx, beLoad, loadBlobFn, dec, key, packID, blobs[lowerIdx:i], handleBlobFn)
if err != nil {
return err
}
lowerIdx = i
}
lastPos = blobs[i].Offset + blobs[i].Length
}
// load remainder
return streamPackPart(ctx, beLoad, loadBlobFn, dec, key, packID, blobs[lowerIdx:], handleBlobFn)
}
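// A worked example of the splitting rules above (illustrative, not from the
// original source): with maxUnusedRange = 1 MiB, blobs at offsets 0 (length
// 100 KiB), 200 KiB (length 100 KiB) and 4 MiB (length 100 KiB) are fetched in
// two parts. The first two blobs share one request, downloading the 100 KiB
// gap between them as well; the third blob starts a new part because the gap
// from 300 KiB to 4 MiB exceeds maxUnusedRange.
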
func streamPackPart(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn, dec *zstd.Decoder, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
h := backend.Handle{Type: restic.PackFile, Name: packID.String(), IsMetadata: blobs[0].Type.IsMetadata()}
dataStart := blobs[0].Offset
dataEnd := blobs[len(blobs)-1].Offset + blobs[len(blobs)-1].Length
debug.Log("streaming pack %v (%d to %d bytes), blobs: %v", packID, dataStart, dataEnd, len(blobs))
data := make([]byte, int(dataEnd-dataStart))
err := beLoad(ctx, h, int(dataEnd-dataStart), int64(dataStart), func(rd io.Reader) error {
_, cerr := io.ReadFull(rd, data)
return cerr
})
// prevent callbacks after cancellation
if ctx.Err() != nil {
return ctx.Err()
}
if err != nil {
// the context is only still valid if handleBlobFn never returned an error
if loadBlobFn != nil {
// [remainder of file truncated in the source dump]

// internal/repository/repository_test.go

package repository_test
import (
"bytes"
"context"
"crypto/sha256"
"fmt"
"io"
"math/rand"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/cache"
"github.com/restic/restic/internal/backend/local"
"github.com/restic/restic/internal/backend/mem"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/repository/index"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20}
var rnd = rand.New(rand.NewSource(time.Now().UnixNano()))
func TestSave(t *testing.T) {
repository.TestAllVersions(t, testSavePassID)
repository.TestAllVersions(t, testSaveCalculateID)
}
func testSavePassID(t *testing.T, version uint) {
testSave(t, version, false)
}
func testSaveCalculateID(t *testing.T, version uint) {
testSave(t, version, true)
}
func testSave(t *testing.T, version uint, calculateID bool) {
repo, _, _ := repository.TestRepositoryWithVersion(t, version)
for _, size := range testSizes {
data := make([]byte, size)
_, err := io.ReadFull(rnd, data)
rtest.OK(t, err)
id := restic.Hash(data)
rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
// save
inputID := restic.ID{}
if !calculateID {
inputID = id
}
sid, _, _, err := uploader.SaveBlob(ctx, restic.DataBlob, data, inputID, false)
rtest.OK(t, err)
rtest.Equals(t, id, sid)
return nil
}))
// read back
buf, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, nil)
rtest.OK(t, err)
rtest.Equals(t, size, len(buf))
rtest.Assert(t, len(buf) == len(data),
"number of bytes read back does not match: expected %d, got %d",
len(data), len(buf))
rtest.Assert(t, bytes.Equal(buf, data),
"data does not match: expected %02x, got %02x",
data, buf)
}
}
func TestSavePackMerging(t *testing.T) {
t.Run("75%", func(t *testing.T) {
testSavePackMerging(t, 75, 1)
})
t.Run("150%", func(t *testing.T) {
testSavePackMerging(t, 175, 2)
})
t.Run("250%", func(t *testing.T) {
testSavePackMerging(t, 275, 3)
})
}
func testSavePackMerging(t *testing.T, targetPercentage int, expectedPacks int) {
repo, _ := repository.TestRepositoryWithBackend(t, nil, 0, repository.Options{
// minimum pack size to speed up test
PackSize: repository.MinPackSize,
})
var ids restic.IDs
rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
// add blobs with size targetPercentage / 100 * repo.PackSize to the repository
blobSize := repository.MinPackSize / 100
for range targetPercentage {
data := make([]byte, blobSize)
_, err := io.ReadFull(rnd, data)
rtest.OK(t, err)
sid, _, _, err := uploader.SaveBlob(ctx, restic.DataBlob, data, restic.ID{}, false)
rtest.OK(t, err)
ids = append(ids, sid)
}
return nil
}))
// check that all blobs are readable
for _, id := range ids {
_, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, nil)
rtest.OK(t, err)
}
// check for correct number of pack files
packs := 0
rtest.OK(t, repo.List(context.TODO(), restic.PackFile, func(id restic.ID, _ int64) error {
packs++
return nil
}))
rtest.Equals(t, expectedPacks, packs, "unexpected number of pack files")
repository.TestCheckRepo(t, repo)
}
func BenchmarkSaveAndEncrypt(t *testing.B) {
repository.BenchmarkAllVersions(t, benchmarkSaveAndEncrypt)
}
func benchmarkSaveAndEncrypt(t *testing.B, version uint) {
repo, _, _ := repository.TestRepositoryWithVersion(t, version)
size := 4 << 20 // 4MiB
data := make([]byte, size)
_, err := io.ReadFull(rnd, data)
rtest.OK(t, err)
id := restic.ID(sha256.Sum256(data))
t.ReportAllocs()
t.ResetTimer()
t.SetBytes(int64(size))
_ = repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
for i := 0; i < t.N; i++ {
_, _, _, err = uploader.SaveBlob(ctx, restic.DataBlob, data, id, true)
rtest.OK(t, err)
}
return nil
})
}
func TestLoadBlob(t *testing.T) {
repository.TestAllVersions(t, testLoadBlob)
}
func testLoadBlob(t *testing.T, version uint) {
repo, _, _ := repository.TestRepositoryWithVersion(t, version)
length := 1000000
buf := crypto.NewBlobBuffer(length)
_, err := io.ReadFull(rnd, buf)
rtest.OK(t, err)
var id restic.ID
rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
var err error
id, _, _, err = uploader.SaveBlob(ctx, restic.DataBlob, buf, restic.ID{}, false)
return err
}))
base := crypto.CiphertextLength(length)
for _, testlength := range []int{0, base - 20, base - 1, base, base + 7, base + 15, base + 1000} {
buf = make([]byte, 0, testlength)
buf, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
if err != nil {
t.Errorf("LoadBlob() returned an error for buffer size %v: %v", testlength, err)
continue
}
if len(buf) != length {
t.Errorf("LoadBlob() returned the wrong number of bytes: want %v, got %v", length, len(buf))
continue
}
}
}
func TestLoadBlobBroken(t *testing.T) {
be := mem.New()
repo, _ := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{})
buf := rtest.Random(42, 1000)
var id restic.ID
rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
var err error
id, _, _, err = uploader.SaveBlob(ctx, restic.TreeBlob, buf, restic.ID{}, false)
return err
}))
// setup cache after saving the blob to make sure that the damageOnceBackend damages the cached data
c := cache.TestNewCache(t)
repo.UseCache(c, t.Logf)
data, err := repo.LoadBlob(context.TODO(), restic.TreeBlob, id, nil)
rtest.OK(t, err)
rtest.Assert(t, bytes.Equal(buf, data), "data mismatch")
pack := repo.LookupBlob(restic.TreeBlob, id)[0].PackID
rtest.Assert(t, c.Has(backend.Handle{Type: restic.PackFile, Name: pack.String()}), "expected tree pack to be cached")
}
func BenchmarkLoadBlob(b *testing.B) {
repository.BenchmarkAllVersions(b, benchmarkLoadBlob)
}
func benchmarkLoadBlob(b *testing.B, version uint) {
repo, _, _ := repository.TestRepositoryWithVersion(b, version)
length := 1000000
buf := crypto.NewBlobBuffer(length)
_, err := io.ReadFull(rnd, buf)
rtest.OK(b, err)
var id restic.ID
rtest.OK(b, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
var err error
id, _, _, err = uploader.SaveBlob(ctx, restic.DataBlob, buf, restic.ID{}, false)
return err
}))
b.ResetTimer()
b.SetBytes(int64(length))
for i := 0; i < b.N; i++ {
var err error
buf, err = repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
// Checking the SHA-256 with restic.Hash can make up 38% of the time
// spent in this loop, so pause the timer.
b.StopTimer()
rtest.OK(b, err)
if len(buf) != length {
b.Errorf("wanted %d bytes, got %d", length, len(buf))
}
id2 := restic.Hash(buf)
if !id.Equal(id2) {
b.Errorf("wrong data returned, wanted %v, got %v", id.Str(), id2.Str())
}
b.StartTimer()
}
}
func BenchmarkLoadUnpacked(b *testing.B) {
repository.BenchmarkAllVersions(b, benchmarkLoadUnpacked)
}
func benchmarkLoadUnpacked(b *testing.B, version uint) {
repo, _, _ := repository.TestRepositoryWithVersion(b, version)
length := 1000000
buf := crypto.NewBlobBuffer(length)
_, err := io.ReadFull(rnd, buf)
rtest.OK(b, err)
dataID := restic.Hash(buf)
storageID, err := repo.SaveUnpacked(context.TODO(), restic.WriteableSnapshotFile, buf)
rtest.OK(b, err)
b.ResetTimer()
b.SetBytes(int64(length))
for i := 0; i < b.N; i++ {
data, err := repo.LoadUnpacked(context.TODO(), restic.SnapshotFile, storageID)
rtest.OK(b, err)
// See comment in BenchmarkLoadBlob.
b.StopTimer()
if len(data) != length {
b.Errorf("wanted %d bytes, got %d", length, len(data))
}
id2 := restic.Hash(data)
if !dataID.Equal(id2) {
b.Errorf("wrong data returned, wanted %v, got %v", storageID.Str(), id2.Str())
}
b.StartTimer()
}
}
var repoFixture = filepath.Join("testdata", "test-repo.tar.gz")
func TestRepositoryLoadIndex(t *testing.T) {
repo, _, cleanup := repository.TestFromFixture(t, repoFixture)
defer cleanup()
rtest.OK(t, repo.LoadIndex(context.TODO(), nil))
}
// loadIndex loads the index id from backend and returns it.
func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (*index.Index, error) {
buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)
if err != nil {
return nil, err
}
return index.DecodeIndex(buf, id)
}
func TestRepositoryLoadUnpackedBroken(t *testing.T) {
repo, _, be := repository.TestRepositoryWithVersion(t, 0)
data := rtest.Random(23, 12345)
id := restic.Hash(data)
h := backend.Handle{Type: restic.IndexFile, Name: id.String()}
// damage buffer
data[0] ^= 0xff
// store broken file
err := be.Save(context.TODO(), h, backend.NewByteReader(data, be.Hasher()))
rtest.OK(t, err)
_, err = repo.LoadUnpacked(context.TODO(), restic.IndexFile, id)
rtest.Assert(t, errors.Is(err, restic.ErrInvalidData), "unexpected error: %v", err)
}
type damageOnceBackend struct {
backend.Backend
m sync.Map
}
func (be *damageOnceBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
// don't break the config file as we can't retry it
if h.Type == restic.ConfigFile {
return be.Backend.Load(ctx, h, length, offset, fn)
}
h.IsMetadata = false
_, isRetry := be.m.LoadOrStore(h, true)
if !isRetry {
// return broken data on the first try
offset++
}
return be.Backend.Load(ctx, h, length, offset, fn)
}
func TestRepositoryLoadUnpackedRetryBroken(t *testing.T) {
repodir, cleanup := rtest.Env(t, repoFixture)
defer cleanup()
be, err := local.Open(context.TODO(), local.Config{Path: repodir, Connections: 2}, t.Logf)
rtest.OK(t, err)
repo := repository.TestOpenBackend(t, &damageOnceBackend{Backend: be})
rtest.OK(t, repo.LoadIndex(context.TODO(), nil))
}
// saveRandomDataBlobs generates random data blobs and saves them to the repository.
func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax int) {
rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
for i := 0; i < num; i++ {
size := rand.Int() % sizeMax
buf := make([]byte, size)
_, err := io.ReadFull(rnd, buf)
rtest.OK(t, err)
_, _, _, err = uploader.SaveBlob(ctx, restic.DataBlob, buf, restic.ID{}, false)
rtest.OK(t, err)
}
return nil
}))
}
func TestRepositoryIncrementalIndex(t *testing.T) {
repository.TestAllVersions(t, testRepositoryIncrementalIndex)
}
func testRepositoryIncrementalIndex(t *testing.T, version uint) {
repo, _, _ := repository.TestRepositoryWithVersion(t, version)
index.Full = func(*index.Index) bool { return true }
// add a few rounds of packs
for j := 0; j < 5; j++ {
// add some packs and write index
saveRandomDataBlobs(t, repo, 20, 1<<15)
}
packEntries := make(map[restic.ID]map[restic.ID]struct{})
err := repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error {
idx, err := loadIndex(context.TODO(), repo, id)
rtest.OK(t, err)
for pb := range idx.Values() {
if _, ok := packEntries[pb.PackID]; !ok {
packEntries[pb.PackID] = make(map[restic.ID]struct{})
}
packEntries[pb.PackID][id] = struct{}{}
}
return nil
})
if err != nil {
t.Fatal(err)
}
for packID, ids := range packEntries {
if len(ids) > 1 {
t.Errorf("pack %v listed in %d indexes\n", packID, len(ids))
}
}
}
func TestInvalidCompression(t *testing.T) {
var comp repository.CompressionMode
err := comp.Set("nope")
rtest.Assert(t, err != nil, "missing error")
_, err = repository.New(nil, repository.Options{Compression: comp})
rtest.Assert(t, err != nil, "missing error")
}
func TestListPack(t *testing.T) {
be := mem.New()
repo, _ := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{})
buf := rtest.Random(42, 1000)
var id restic.ID
rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
var err error
id, _, _, err = uploader.SaveBlob(ctx, restic.TreeBlob, buf, restic.ID{}, false)
return err
}))
// setup cache after saving the blob to make sure that the damageOnceBackend damages the cached data
c := cache.TestNewCache(t)
repo.UseCache(c, t.Logf)
// Forcibly cache pack file
packID := repo.LookupBlob(restic.TreeBlob, id)[0].PackID
rtest.OK(t, be.Load(context.TODO(), backend.Handle{Type: restic.PackFile, IsMetadata: true, Name: packID.String()}, 0, 0, func(rd io.Reader) error { return nil }))
// Get size to list pack
var size int64
rtest.OK(t, repo.List(context.TODO(), restic.PackFile, func(id restic.ID, sz int64) error {
if id == packID {
size = sz
}
return nil
}))
blobs, _, err := repo.ListPack(context.TODO(), packID, size)
rtest.OK(t, err)
rtest.Assert(t, len(blobs) == 1 && blobs[0].ID == id, "unexpected blobs in pack: %v", blobs)
rtest.Assert(t, !c.Has(backend.Handle{Type: restic.PackFile, Name: packID.String()}), "tree pack should no longer be cached as ListPack does not set IsMetadata in the backend.Handle")
}
func TestNoDoubleInit(t *testing.T) {
r, _, be := repository.TestRepositoryWithVersion(t, restic.StableRepoVersion)
repo, err := repository.New(be, repository.Options{})
rtest.OK(t, err)
pol := r.Config().ChunkerPolynomial
err = repo.Init(context.TODO(), r.Config().Version, rtest.TestPassword, &pol)
rtest.Assert(t, strings.Contains(err.Error(), "repository master key and config already initialized"), "expected config exist error, got %q", err)
// must also prevent init if only keys exist
rtest.OK(t, be.Remove(context.TODO(), backend.Handle{Type: backend.ConfigFile}))
err = repo.Init(context.TODO(), r.Config().Version, rtest.TestPassword, &pol)
rtest.Assert(t, strings.Contains(err.Error(), "repository already contains keys"), "expected already contains keys error, got %q", err)
// must also prevent init if a snapshot exists and keys were deleted
var data [32]byte
hash := restic.Hash(data[:])
rtest.OK(t, be.Save(context.TODO(), backend.Handle{Type: backend.SnapshotFile, Name: hash.String()}, backend.NewByteReader(data[:], be.Hasher())))
rtest.OK(t, be.List(context.TODO(), restic.KeyFile, func(fi backend.FileInfo) error {
return be.Remove(context.TODO(), backend.Handle{Type: restic.KeyFile, Name: fi.Name})
}))
err = repo.Init(context.TODO(), r.Config().Version, rtest.TestPassword, &pol)
rtest.Assert(t, strings.Contains(err.Error(), "repository already contains snapshots"), "expected already contains snapshots error, got %q", err)
}
func TestSaveBlobAsync(t *testing.T) {
repo, _, _ := repository.TestRepositoryWithVersion(t, 2)
ctx := context.Background()
type result struct {
id restic.ID
known bool
size int
err error
}
numCalls := 10
results := make([]result, numCalls)
var resultsMutex sync.Mutex
err := repo.WithBlobUploader(ctx, func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
var wg sync.WaitGroup
wg.Add(numCalls)
for i := 0; i < numCalls; i++ {
// Use unique data for each call
testData := []byte(fmt.Sprintf("test blob data %d", i))
uploader.SaveBlobAsync(ctx, restic.DataBlob, testData, restic.ID{}, false,
func(newID restic.ID, known bool, size int, err error) {
defer wg.Done()
resultsMutex.Lock()
results[i] = result{newID, known, size, err}
resultsMutex.Unlock()
})
}
wg.Wait()
return nil
})
rtest.OK(t, err)
for i, result := range results {
testData := []byte(fmt.Sprintf("test blob data %d", i))
expectedID := restic.Hash(testData)
rtest.Assert(t, result.err == nil, "result %d: unexpected error %v", i, result.err)
rtest.Assert(t, result.id.Equal(expectedID), "result %d: expected ID %v, got %v", i, expectedID, result.id)
rtest.Assert(t, !result.known, "result %d: expected unknown blob", i)
}
}
func TestSaveBlobAsyncErrorHandling(t *testing.T) {
repo, _, _ := repository.TestRepositoryWithVersion(t, 2)
ctx, cancel := context.WithCancel(context.Background())
var callbackCalled atomic.Bool
err := repo.WithBlobUploader(ctx, func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
cancel()
// Callback must be called even if the context is canceled
uploader.SaveBlobAsync(ctx, restic.DataBlob, []byte("test blob data"), restic.ID{}, false,
func(newID restic.ID, known bool, size int, err error) {
callbackCalled.Store(true)
})
return nil
})
rtest.Assert(t, errors.Is(err, context.Canceled), "expected context canceled error, got %v", err)
rtest.Assert(t, callbackCalled.Load(), "callback was not called")
}

// internal/repository/doc.go

// Package repository implements a restic repository on top of a backend. The
// following lists the abstractions used by this package. More information can
// be found in the restic design document.
//
// # File
//
// A file is a named handle for some data saved in the backend. For the local
// backend, this corresponds to actual files saved to disk. Usually, the SHA256
// hash of the content is used for a file's name (hexadecimal, in lower-case
// ASCII characters). An exception is the file `config`. Most files are
// encrypted before being saved in a backend. This means that the name is the
// hash of the ciphertext.
//
// # Blob
//
// A blob is a number of bytes that has a type (data or tree). Blobs are
// identified by an ID, which is the SHA256 hash of the blob's contents. One or
// more blobs are bundled together in a Pack and then saved to the backend.
// Blobs are always encrypted before being bundled in a Pack.
//
// # Pack
//
// A Pack is a File in the backend that contains one or more (encrypted) blobs,
// followed by a header at the end of the Pack. The header is encrypted and
// contains the ID, type, length and offset for each blob contained in the
// Pack.
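//
// As a rough sketch (not normative; see the design document for the
// authoritative format), a pack file is laid out as follows, with the header
// length stored at the very end of the file:
//
//	[blob 1][blob 2]...[blob n][encrypted header][header length]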
package repository

// internal/repository/prune_test.go

package repository_test
import (
"context"
"fmt"
"math"
"math/rand"
"testing"
"time"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/repository/pack"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui/progress"
)
func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) {
seed := time.Now().UnixNano()
random := rand.New(rand.NewSource(seed))
t.Logf("rand initialized with seed %d", seed)
repo, _, be := repository.TestRepositoryWithVersion(t, 0)
createRandomBlobs(t, random, repo, 4, 0.5, true)
createRandomBlobs(t, random, repo, 5, 0.5, true)
keep, _ := selectBlobs(t, random, repo, 0.5)
rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
// duplicate a few blobs to exercise those code paths
for blob := range keep {
buf, err := repo.LoadBlob(ctx, blob.Type, blob.ID, nil)
rtest.OK(t, err)
_, _, _, err = uploader.SaveBlob(ctx, blob.Type, buf, blob.ID, true)
rtest.OK(t, err)
}
return nil
}))
plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository, usedBlobs restic.FindBlobSet) error {
for blob := range keep {
usedBlobs.Insert(blob)
}
return nil
}, &progress.NoopPrinter{})
rtest.OK(t, err)
rtest.OK(t, plan.Execute(context.TODO(), &progress.NoopPrinter{}))
repo = repository.TestOpenBackend(t, be)
repository.TestCheckRepo(t, repo)
if errOnUnused {
existing := listBlobs(repo)
rtest.Assert(t, existing.Equals(keep), "unexpected blobs, wanted %v got %v", keep, existing)
}
}
func TestPrune(t *testing.T) {
for _, test := range []struct {
name string
opts repository.PruneOptions
errOnUnused bool
}{
{
name: "0",
opts: repository.PruneOptions{
MaxRepackBytes: math.MaxUint64,
MaxUnusedBytes: func(used uint64) (unused uint64) { return 0 },
},
errOnUnused: true,
},
{
name: "50",
opts: repository.PruneOptions{
MaxRepackBytes: math.MaxUint64,
MaxUnusedBytes: func(used uint64) (unused uint64) { return used / 2 },
},
},
{
name: "unlimited",
opts: repository.PruneOptions{
MaxRepackBytes: math.MaxUint64,
MaxUnusedBytes: func(used uint64) (unused uint64) { return math.MaxUint64 },
},
},
{
name: "cachableonly",
opts: repository.PruneOptions{
MaxRepackBytes: math.MaxUint64,
MaxUnusedBytes: func(used uint64) (unused uint64) { return used / 20 },
RepackCacheableOnly: true,
},
},
{
name: "small",
opts: repository.PruneOptions{
MaxRepackBytes: math.MaxUint64,
MaxUnusedBytes: func(used uint64) (unused uint64) { return math.MaxUint64 },
RepackSmall: true,
},
errOnUnused: true,
},
} {
t.Run(test.name, func(t *testing.T) {
testPrune(t, test.opts, test.errOnUnused)
})
t.Run(test.name+"-recovery", func(t *testing.T) {
opts := test.opts
opts.UnsafeRecovery = true
// unsafeNoSpaceRecovery does not repack partially used pack files
testPrune(t, opts, false)
})
}
}
/*
1.) create repository with packsize of 2M.
2.) create enough data for 11 packfiles (31 packs)
3.) run a repository.PlanPrune(...) with a packsize of 16M (current default).
4.) run plan.Execute(...), extract plan.Stats() and check.
5.) Check that all blobs are contained in the new packfiles.
6.) The result should be fewer packfiles than before.
*/
func TestPruneSmall(t *testing.T) {
seed := time.Now().UnixNano()
random := rand.New(rand.NewSource(seed))
t.Logf("rand initialized with seed %d", seed)
be := repository.TestBackend(t)
repo, _ := repository.TestRepositoryWithBackend(t, be, 0, repository.Options{PackSize: repository.MinPackSize})
const blobSize = 1000 * 1000
const numBlobsCreated = 55
keep := restic.NewBlobSet()
rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
// we need a minimum of 11 packfiles; each packfile will be about 5 MB long
for i := 0; i < numBlobsCreated; i++ {
buf := make([]byte, blobSize)
random.Read(buf)
id, _, _, err := uploader.SaveBlob(ctx, restic.DataBlob, buf, restic.ID{}, false)
rtest.OK(t, err)
keep.Insert(restic.BlobHandle{Type: restic.DataBlob, ID: id})
}
return nil
}))
// gather number of packfiles
repoPacks, err := pack.Size(context.TODO(), repo, false)
rtest.OK(t, err)
lenPackfilesBefore := len(repoPacks)
rtest.OK(t, repo.Close())
// and reopen repository with default packsize
repo = repository.TestOpenBackend(t, be)
rtest.OK(t, repo.LoadIndex(context.TODO(), nil))
opts := repository.PruneOptions{
MaxRepackBytes: math.MaxUint64,
MaxUnusedBytes: func(used uint64) (unused uint64) { return blobSize / 4 },
SmallPackBytes: 5 * 1024 * 1024,
RepackSmall: true,
}
plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository, usedBlobs restic.FindBlobSet) error {
for blob := range keep {
usedBlobs.Insert(blob)
}
return nil
}, &progress.NoopPrinter{})
rtest.OK(t, err)
rtest.OK(t, plan.Execute(context.TODO(), &progress.NoopPrinter{}))
stats := plan.Stats()
rtest.Equals(t, stats.Size.Used/blobSize, uint64(numBlobsCreated), fmt.Sprintf("total size of blobs should be %d but is %d",
numBlobsCreated, stats.Size.Used/blobSize))
rtest.Equals(t, stats.Blobs.Used, stats.Blobs.Repack, "the number of blobs should be identical after a repack")
// reopen the repository
repo = repository.TestOpenBackend(t, be)
repository.TestCheckRepo(t, repo)
// load all blobs
for blob := range keep {
_, err := repo.LoadBlob(context.TODO(), blob.Type, blob.ID, nil)
rtest.OK(t, err)
}
repoPacks, err = pack.Size(context.TODO(), repo, false)
rtest.OK(t, err)
lenPackfilesAfter := len(repoPacks)
rtest.Equals(t, lenPackfilesBefore > lenPackfilesAfter, true,
fmt.Sprintf("the number of packfiles before the repack (%d) should be larger than after (%d)", lenPackfilesBefore, lenPackfilesAfter))
}

// internal/repository/repair_pack.go

package repository
import (
"context"
"errors"
"io"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
)
func RepairPacks(ctx context.Context, repo *Repository, ids restic.IDSet, printer progress.Printer) error {
printer.P("salvaging intact data from specified pack files")
bar := printer.NewCounter("pack files")
bar.SetMax(uint64(len(ids)))
defer bar.Done()
err := repo.WithBlobUploader(ctx, func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
// examine all data the indexes have for the pack file
for b := range repo.ListPacksFromIndex(ctx, ids) {
blobs := b.Blobs
if len(blobs) == 0 {
printer.E("no blobs found for pack %v", b.PackID)
bar.Add(1)
continue
}
err := repo.LoadBlobsFromPack(ctx, b.PackID, blobs, func(blob restic.BlobHandle, buf []byte, err error) error {
if err != nil {
printer.E("failed to load blob %v: %v", blob.ID, err)
return nil
}
id, _, _, err := uploader.SaveBlob(ctx, blob.Type, buf, restic.ID{}, true)
if !id.Equal(blob.ID) {
panic("pack id mismatch during upload")
}
return err
})
// ignore truncated file parts
if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
return err
}
bar.Add(1)
}
return nil
})
if err != nil {
return err
}
bar.Done()
// remove salvaged packs from index
err = rewriteIndexFiles(ctx, repo, ids, nil, nil, printer)
if err != nil {
return err
}
// cleanup
printer.P("removing salvaged pack files")
// if we fail to delete the damaged pack files, then prune will remove them later on
bar = printer.NewCounter("files deleted")
_ = restic.ParallelRemove(ctx, &internalRepository{repo}, ids, restic.PackFile, nil, bar)
bar.Done()
return nil
}
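
// A hedged invocation sketch (not part of the original file); the set of
// damaged pack IDs would typically come from a repository check:
//
//	err := RepairPacks(ctx, repo, restic.NewIDSet(damagedPackID), printer)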

// internal/repository/key.go

package repository
import (
"context"
"encoding/json"
"fmt"
"os"
"os/user"
"time"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/debug"
)
var (
// ErrNoKeyFound is returned when no key for the repository could be decrypted.
ErrNoKeyFound = errors.New("wrong password or no key found")
// ErrMaxKeysReached is returned when the maximum number of keys was checked and no key could be found.
ErrMaxKeysReached = errors.New("maximum number of keys reached")
)
// Key represents an encrypted master key for a repository.
type Key struct {
Created time.Time `json:"created"`
Username string `json:"username"`
Hostname string `json:"hostname"`
KDF string `json:"kdf"`
N int `json:"N"`
R int `json:"r"`
P int `json:"p"`
Salt []byte `json:"salt"`
Data []byte `json:"data"`
user *crypto.Key
master *crypto.Key
id restic.ID
}
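
// For illustration only (field values invented, not from the original
// source), a serialized key file roughly looks like this; Salt and Data are
// base64 encoded by encoding/json:
//
//	{
//	  "created": "2024-01-01T00:00:00Z",
//	  "username": "user",
//	  "hostname": "host",
//	  "kdf": "scrypt",
//	  "N": 32768,
//	  "r": 8,
//	  "p": 5,
//	  "salt": "...",
//	  "data": "..."
//	}
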
// params tracks the parameters used for the KDF. If not set, it will be
// calibrated on the first run of AddKey().
var params *crypto.Params
const (
// KDFTimeout specifies the maximum runtime for the KDF.
KDFTimeout = 500 * time.Millisecond
// KDFMemory limits the memory the KDF is allowed to use.
KDFMemory = 60
)
// createMasterKey creates a new master key in the given backend and encrypts
// it with the password.
func createMasterKey(ctx context.Context, s *Repository, password string) (*Key, error) {
return AddKey(ctx, s, password, "", "", nil)
}
// OpenKey tries to decrypt the key specified by id with the given password.
func OpenKey(ctx context.Context, s *Repository, id restic.ID, password string) (*Key, error) {
k, err := LoadKey(ctx, s, id)
if err != nil {
debug.Log("LoadKey(%v) returned error %v", id.String(), err)
return nil, err
}
// check KDF
if k.KDF != "scrypt" {
return nil, errors.New("only supported KDF is scrypt()")
}
// derive user key
params := crypto.Params{
N: k.N,
R: k.R,
P: k.P,
}
k.user, err = crypto.KDF(params, k.Salt, password)
if err != nil {
return nil, errors.Wrap(err, "crypto.KDF")
}
// decrypt master keys
nonce, ciphertext := k.Data[:k.user.NonceSize()], k.Data[k.user.NonceSize():]
buf, err := k.user.Open(nil, nonce, ciphertext, nil)
if err != nil {
return nil, err
}
// restore json
k.master = &crypto.Key{}
err = json.Unmarshal(buf, k.master)
if err != nil {
debug.Log("Unmarshal() returned error %v", err)
return nil, errors.Wrap(err, "Unmarshal")
}
k.id = id
if !k.Valid() {
return nil, errors.New("Invalid key for repository")
}
return k, nil
}
// SearchKey tries to decrypt at most maxKeys keys in the backend with the
// given password. If none could be found, ErrNoKeyFound is returned. When
// maxKeys is reached, ErrMaxKeysReached is returned. When setting maxKeys to
// zero, all keys in the repo are checked.
func SearchKey(ctx context.Context, s *Repository, password string, maxKeys int, keyHint string) (k *Key, err error) {
checked := 0
if len(keyHint) > 0 {
id, err := restic.Find(ctx, s, restic.KeyFile, keyHint)
if err == nil {
key, err := OpenKey(ctx, s, id, password)
if err == nil {
debug.Log("successfully opened hinted key %v", id)
return key, nil
}
debug.Log("could not open hinted key %v", id)
} else {
debug.Log("Could not find hinted key %v", keyHint)
}
}
listCtx, cancel := context.WithCancel(ctx)
defer cancel()
// try at most maxKeys keys in repo
err = s.List(listCtx, restic.KeyFile, func(id restic.ID, _ int64) error {
checked++
if maxKeys > 0 && checked > maxKeys {
return ErrMaxKeysReached
}
debug.Log("trying key %q", id.String())
key, err := OpenKey(ctx, s, id, password)
if err != nil {
debug.Log("key %v returned error %v", id.String(), err)
// ErrUnauthenticated means the password is wrong, try the next key
if errors.Is(err, crypto.ErrUnauthenticated) {
return nil
}
return err
}
debug.Log("successfully opened key %v", id.String())
k = key
cancel()
return nil
})
if err == context.Canceled {
err = nil
}
if err != nil {
return nil, err
}
if k == nil {
return nil, ErrNoKeyFound
}
return k, nil
}
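
// An illustrative usage sketch (not part of the original file); s stands in
// for an open *Repository and 20 is an arbitrary key limit:
//
//	key, err := SearchKey(ctx, s, password, 20, "")
//	if errors.Is(err, ErrNoKeyFound) {
//		// wrong password, or the repository has no matching key
//	}
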
// LoadKey loads a key from the backend.
func LoadKey(ctx context.Context, s *Repository, id restic.ID) (k *Key, err error) {
data, err := s.LoadRaw(ctx, restic.KeyFile, id)
if err != nil {
return nil, err
}
k = &Key{}
err = json.Unmarshal(data, k)
if err != nil {
return nil, errors.Wrap(err, "Unmarshal")
}
return k, nil
}
// AddKey adds a new key to an already existing repository.
func AddKey(ctx context.Context, s *Repository, password, username, hostname string, template *crypto.Key) (*Key, error) {
// make sure we have valid KDF parameters
if params == nil {
p, err := crypto.Calibrate(KDFTimeout, KDFMemory)
if err != nil {
return nil, errors.Wrap(err, "Calibrate")
}
params = &p
debug.Log("calibrated KDF parameters are %v", p)
}
// fill meta data about key
newkey := &Key{
Created: time.Now(),
Username: username,
Hostname: hostname,
KDF: "scrypt",
N: params.N,
R: params.R,
P: params.P,
}
if newkey.Hostname == "" {
newkey.Hostname, _ = os.Hostname()
}
if newkey.Username == "" {
usr, err := user.Current()
if err == nil {
newkey.Username = usr.Username
}
}
// generate random salt
var err error
newkey.Salt, err = crypto.NewSalt()
if err != nil {
panic("unable to read enough random bytes for salt: " + err.Error())
}
// call KDF to derive user key
newkey.user, err = crypto.KDF(*params, newkey.Salt, password)
if err != nil {
return nil, err
}
if template == nil {
// generate new random master keys
newkey.master = crypto.NewRandomKey()
} else {
// copy master keys from old key
newkey.master = template
}
// encrypt master keys (as json) with user key
buf, err := json.Marshal(newkey.master)
if err != nil {
return nil, errors.Wrap(err, "Marshal")
}
nonce := crypto.NewRandomNonce()
ciphertext := make([]byte, 0, crypto.CiphertextLength(len(buf)))
ciphertext = append(ciphertext, nonce...)
ciphertext = newkey.user.Seal(ciphertext, nonce, buf, nil)
newkey.Data = ciphertext
// dump as json
buf, err = json.Marshal(newkey)
if err != nil {
return nil, errors.Wrap(err, "Marshal")
}
id := restic.Hash(buf)
// store in repository and return
h := backend.Handle{
Type: restic.KeyFile,
Name: id.String(),
}
err = s.be.Save(ctx, h, backend.NewByteReader(buf, s.be.Hasher()))
if err != nil {
return nil, err
}
newkey.id = id
return newkey, nil
}
func RemoveKey(ctx context.Context, repo *Repository, id restic.ID) error {
if id == repo.KeyID() {
return errors.New("refusing to remove key currently used to access repository")
}
h := backend.Handle{Type: restic.KeyFile, Name: id.String()}
return repo.be.Remove(ctx, h)
}
func (k *Key) String() string {
if k == nil {
return "<Key nil>"
}
return fmt.Sprintf("<Key of %s@%s, created on %s>", k.Username, k.Hostname, k.Created)
}
// ID returns an identifier for the key.
func (k Key) ID() restic.ID {
return k.id
}
// Valid tests whether the MAC and encryption keys are valid (i.e. not zero)
func (k *Key) Valid() bool {
return k.user.Valid() && k.master.Valid()
}

// internal/repository/prune.go

package repository
import (
"context"
"fmt"
"math"
"sort"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository/index"
"github.com/restic/restic/internal/repository/pack"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
)
var ErrIndexIncomplete = errors.Fatal("index is not complete")
var ErrPacksMissing = errors.Fatal("packs from index missing in repo")
var ErrSizeNotMatching = errors.Fatal("pack size does not match calculated size from index")
// PruneOptions collects all options for the cleanup command.
type PruneOptions struct {
DryRun bool
UnsafeRecovery bool
MaxUnusedBytes func(used uint64) (unused uint64) // calculates the number of unused bytes after repacking, according to MaxUnused
MaxRepackBytes uint64
SmallPackBytes uint64
RepackCacheableOnly bool
RepackSmall bool
RepackUncompressed bool
}
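
// An illustrative configuration sketch (not from the original source),
// mirroring values used by the tests: tolerate up to 5% unused space and set
// no limit on the amount of repacked data:
//
//	opts := PruneOptions{
//		MaxRepackBytes: math.MaxUint64,
//		MaxUnusedBytes: func(used uint64) uint64 { return used / 20 },
//	}
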
type PruneStats struct {
Blobs struct {
Used uint
Duplicate uint
Unused uint
Remove uint
Repack uint
Repackrm uint
}
Size struct {
Used uint64
Duplicate uint64
Unused uint64
Remove uint64
Repack uint64
Repackrm uint64
Unref uint64
Uncompressed uint64
}
Packs struct {
Used uint
Unused uint
PartlyUsed uint
Unref uint
Keep uint
Repack uint
Remove uint
}
}
type PrunePlan struct {
removePacksFirst restic.IDSet // packs to remove first (unreferenced packs)
repackPacks restic.IDSet // packs to repack
keepBlobs *index.AssociatedSet[uint8] // blobs to keep during repacking
removePacks restic.IDSet // packs to remove
ignorePacks restic.IDSet // packs to ignore when rebuilding the index
repo *Repository
stats PruneStats
opts PruneOptions
}
type packInfo struct {
usedBlobs uint
unusedBlobs uint
duplicateBlobs uint
usedSize uint64
unusedSize uint64
tpe restic.BlobType
uncompressed bool
}
type packInfoWithID struct {
ID restic.ID
packInfo
mustCompress bool
}
// PlanPrune selects which files to rewrite and which to delete and which blobs to keep.
// Also some summary statistics are returned.
func PlanPrune(ctx context.Context, opts PruneOptions, repo *Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository, usedBlobs restic.FindBlobSet) error, printer progress.Printer) (*PrunePlan, error) {
var stats PruneStats
if opts.UnsafeRecovery {
// prevent repacking data to make sure users cannot get stuck.
opts.MaxRepackBytes = 0
}
if repo.Connections() < 2 {
return nil, fmt.Errorf("prune requires a backend connection limit of at least two")
}
if repo.Config().Version < 2 && opts.RepackUncompressed {
return nil, fmt.Errorf("compression requires at least repository format version 2")
}
if opts.SmallPackBytes > uint64(repo.PackSize()) {
return nil, fmt.Errorf("repack-smaller-than exceeds repository packsize")
}
usedBlobs := index.NewAssociatedSet[uint8](repo.idx)
err := getUsedBlobs(ctx, repo, usedBlobs)
if err != nil {
return nil, err
}
printer.P("searching used packs...\n")
keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo, usedBlobs, &stats, printer)
if err != nil {
return nil, err
}
printer.P("collecting packs for deletion and repacking\n")
plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, printer)
if err != nil {
return nil, err
}
if len(plan.repackPacks) != 0 {
// when repacking, we do not want to keep blobs which are
// already contained in kept packs, so delete them from keepBlobs
err := repo.ListBlobs(ctx, func(blob restic.PackedBlob) {
if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) {
return
}
keepBlobs.Delete(blob.BlobHandle)
})
if err != nil {
return nil, err
}
} else {
// keepBlobs is only needed if packs are repacked
keepBlobs = nil
}
plan.keepBlobs = keepBlobs
plan.repo = repo
plan.stats = stats
plan.opts = opts
return &plan, nil
}
func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs *index.AssociatedSet[uint8], stats *PruneStats, printer progress.Printer) (*index.AssociatedSet[uint8], map[restic.ID]packInfo, error) {
// iterate over all blobs in index to find out which blobs are duplicates
// The counter in usedBlobs describes how many instances of the blob exist in the repository index
// Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist
err := idx.ListBlobs(ctx, func(blob restic.PackedBlob) {
bh := blob.BlobHandle
count, ok := usedBlobs.Get(bh)
if ok {
if count < math.MaxUint8 {
// don't overflow, but saturate count at 255
// this can lead to a non-optimal pack selection, but won't cause
// problems otherwise
count++
}
usedBlobs.Set(bh, count)
}
})
if err != nil {
return nil, nil, err
}
// Check if all used blobs have been found in index
missingBlobs := restic.NewBlobSet()
for bh, count := range usedBlobs.All() {
if count == 0 {
// blob does not exist in any pack files
missingBlobs.Insert(bh)
}
}
if len(missingBlobs) != 0 {
printer.E("%v not found in the index\n\n"+
"Integrity check failed: Data seems to be missing.\n"+
"Will not start prune to prevent (additional) data loss!\n"+
"Please report this error (along with the output of the 'prune' run) at\n"+
"https://github.com/restic/restic/issues/new/choose\n", missingBlobs)
return nil, nil, ErrIndexIncomplete
}
indexPack := make(map[restic.ID]packInfo)
// save computed pack header size
sz, err := pack.Size(ctx, idx, true)
if err != nil {
return nil, nil, err
}
for pid, hdrSize := range sz {
// initialize tpe with NumBlobTypes to indicate it's not set
indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)}
}
hasDuplicates := false
// iterate over all blobs in index to generate packInfo
err = idx.ListBlobs(ctx, func(blob restic.PackedBlob) {
ip := indexPack[blob.PackID]
// Set blob type if not yet set
if ip.tpe == restic.NumBlobTypes {
ip.tpe = blob.Type
}
// mark mixed packs with "Invalid blob type"
if ip.tpe != blob.Type {
ip.tpe = restic.InvalidBlob
}
bh := blob.BlobHandle
size := uint64(blob.Length)
dupCount, _ := usedBlobs.Get(bh)
switch {
case dupCount >= 2:
hasDuplicates = true
// mark as unused for now, we will later on select one copy
ip.unusedSize += size
ip.unusedBlobs++
ip.duplicateBlobs++
// count as duplicate, will later on change one copy to be counted as used
stats.Size.Duplicate += size
stats.Blobs.Duplicate++
case dupCount == 1: // used blob, not duplicate
ip.usedSize += size
ip.usedBlobs++
stats.Size.Used += size
stats.Blobs.Used++
default: // unused blob
ip.unusedSize += size
ip.unusedBlobs++
stats.Size.Unused += size
stats.Blobs.Unused++
}
if !blob.IsCompressed() {
ip.uncompressed = true
}
// update indexPack
indexPack[blob.PackID] = ip
})
if err != nil {
return nil, nil, err
}
// if duplicate blobs exist, those will be set to either "used" or "unused":
// - mark only one occurrence of duplicate blobs as used
// - if there are already some used blobs in a pack, possibly mark duplicates in this pack as "used"
// - if a pack only consists of duplicates (which by definition are used blobs), mark it as "used". This
// ensures that already rewritten packs are kept.
// - if there are no used blobs in a pack, possibly mark duplicates as "unused"
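//
// A worked example of the bookkeeping below (illustrative, not from the
// original source): a blob stored three times starts with count == 3. If none
// of its packs contains used blobs, the first two occurrences remain unused
// and the count drops 3 -> 2 -> 0 (zero marks "last occurrence remaining"),
// so the third occurrence is finally marked as used.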
if hasDuplicates {
// iterate again over all blobs in index (this is pretty cheap, all in-mem)
err = idx.ListBlobs(ctx, func(blob restic.PackedBlob) {
bh := blob.BlobHandle
count, ok := usedBlobs.Get(bh)
// skip non-duplicate, aka. normal blobs
// count == 0 is used to mark that this was a duplicate blob with only a single occurrence remaining
if !ok || count == 1 {
return
}
ip := indexPack[blob.PackID]
size := uint64(blob.Length)
switch {
case ip.usedBlobs > 0, ip.duplicateBlobs == ip.unusedBlobs, count == 0:
// other used blobs in pack, only duplicate blobs or "last" occurrence -> transition to used
// a pack file created by an interrupted prune run will consist of only duplicate blobs
// thus select such already repacked pack files
ip.usedSize += size
ip.usedBlobs++
ip.unusedSize -= size
ip.unusedBlobs--
// same for the global statistics
stats.Size.Used += size
stats.Blobs.Used++
stats.Size.Duplicate -= size
stats.Blobs.Duplicate--
// let other occurrences remain marked as unused
usedBlobs.Set(bh, 1)
default:
// remain unused and decrease counter
count--
if count == 1 {
// setting count to 1 would lead to forgetting that this blob had duplicates
// thus use the special value zero. This will select the last instance of the blob for keeping.
count = 0
}
usedBlobs.Set(bh, count)
}
// update indexPack
indexPack[blob.PackID] = ip
})
if err != nil {
return nil, nil, err
}
}
// Sanity check. If no duplicates exist, all blobs have value 1. After handling
// duplicates, this also applies to duplicates.
for _, count := range usedBlobs.All() {
if count != 1 {
panic("internal error during blob selection")
}
}
return usedBlobs, indexPack, nil
}
func decidePackAction(ctx context.Context, opts PruneOptions, repo *Repository, indexPack map[restic.ID]packInfo, stats *PruneStats, printer progress.Printer) (PrunePlan, error) {
removePacksFirst := restic.NewIDSet()
removePacks := restic.NewIDSet()
repackPacks := restic.NewIDSet()
var repackCandidates []packInfoWithID
var repackSmallCandidates []packInfoWithID
repoVersion := repo.Config().Version
// only repack very small files by default
targetPackSize := repo.PackSize() / 25
if opts.SmallPackBytes > 0 {
targetPackSize = uint(opts.SmallPackBytes)
} else if opts.RepackSmall {
// consider files with at least 80% of the target size as large enough
targetPackSize = repo.PackSize() / 5 * 4
}
// loop over all packs and decide what to do
bar := printer.NewCounter("packs processed")
bar.SetMax(uint64(len(indexPack)))
err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error {
p, ok := indexPack[id]
if !ok {
// Pack was not referenced in index and is not used => immediately remove!
printer.V("will remove pack %v as it is unused and not indexed\n", id.Str())
removePacksFirst.Insert(id)
stats.Size.Unref += uint64(packSize)
return nil
}
if p.unusedSize+p.usedSize != uint64(packSize) && p.usedBlobs != 0 {
// Pack size does not fit and pack is needed => error
// If the pack is not needed, this is no error, the pack can
// and will be simply removed, see below.
printer.E("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n",
id.Str(), p.unusedSize+p.usedSize, packSize)
return ErrSizeNotMatching
}
// statistics
switch {
case p.usedBlobs == 0:
stats.Packs.Unused++
case p.unusedBlobs == 0:
stats.Packs.Used++
default:
stats.Packs.PartlyUsed++
}
if p.uncompressed {
stats.Size.Uncompressed += p.unusedSize + p.usedSize
}
mustCompress := false
if repoVersion >= 2 {
// repo v2: always repack tree blobs if uncompressed
// compress data blobs if requested
mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed
}
// decide what to do
switch {
case p.usedBlobs == 0:
// All blobs in pack are no longer used => remove pack!
removePacks.Insert(id)
stats.Blobs.Remove += p.unusedBlobs
stats.Size.Remove += p.unusedSize
case opts.RepackCacheableOnly && p.tpe == restic.DataBlob:
// if this is a data pack and --repack-cacheable-only is set => keep pack!
stats.Packs.Keep++
case p.unusedBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress:
if packSize >= int64(targetPackSize) {
// All blobs in pack are used and not mixed => keep pack!
stats.Packs.Keep++
} else {
repackSmallCandidates = append(repackSmallCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress})
}
default:
// all other packs are candidates for repacking
repackCandidates = append(repackCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress})
}
delete(indexPack, id)
bar.Add(1)
return nil
})
bar.Done()
if err != nil {
return PrunePlan{}, err
}
// At this point indexPack contains only missing packs!
// missing packs that are not needed can be ignored
ignorePacks := restic.NewIDSet()
for id, p := range indexPack {
if p.usedBlobs == 0 {
ignorePacks.Insert(id)
stats.Blobs.Remove += p.unusedBlobs
stats.Size.Remove += p.unusedSize
delete(indexPack, id)
}
}
if len(indexPack) != 0 {
printer.E("The index references %d needed pack files which are missing from the repository:\n", len(indexPack))
for id := range indexPack {
printer.E(" %v\n", id)
}
return PrunePlan{}, ErrPacksMissing
}
if len(ignorePacks) != 0 {
printer.E("Missing but unneeded pack files are referenced in the index, will be repaired\n")
for id := range ignorePacks {
printer.E("will forget missing pack file %v\n", id)
}
}
if len(repackSmallCandidates) < 10 {
// too few small files to be worth the trouble, this also prevents endlessly repacking
// if there is just a single pack file below the target size
stats.Packs.Keep += uint(len(repackSmallCandidates))
} else {
repackCandidates = append(repackCandidates, repackSmallCandidates...)
}
// Sort repackCandidates such that packs with highest ratio unused/used space are picked first.
// This is equivalent to sorting by unused / total space.
// Instead of unused[i] / used[i] > unused[j] / used[j] we use
// unused[i] * used[j] > unused[j] * used[i], as the product of two uint32
// values always fits into a uint64.
// Moreover packs containing trees and too short packs are sorted to the beginning
sort.Slice(repackCandidates, func(i, j int) bool {
pi := repackCandidates[i].packInfo
pj := repackCandidates[j].packInfo
switch {
case pi.tpe != restic.DataBlob && pj.tpe == restic.DataBlob:
return true
case pj.tpe != restic.DataBlob && pi.tpe == restic.DataBlob:
return false
case pi.unusedSize+pi.usedSize < uint64(targetPackSize) && pj.unusedSize+pj.usedSize >= uint64(targetPackSize):
return true
case pj.unusedSize+pj.usedSize < uint64(targetPackSize) && pi.unusedSize+pi.usedSize >= uint64(targetPackSize):
return false
}
return pi.unusedSize*pj.usedSize > pj.unusedSize*pi.usedSize
})
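// A numeric example of the comparison above (illustrative, not from the
// original source): pack i with 30 unused / 10 used bytes sorts before pack j
// with 50 unused / 50 used bytes, since 30*50 = 1500 > 50*10 = 500, matching
// the higher unused ratio of 3.0 vs 1.0.
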
repack := func(id restic.ID, p packInfo) {
repackPacks.Insert(id)
stats.Blobs.Repack += p.unusedBlobs + p.usedBlobs
stats.Size.Repack += p.unusedSize + p.usedSize
stats.Blobs.Repackrm += p.unusedBlobs
stats.Size.Repackrm += p.unusedSize
if p.uncompressed {
stats.Size.Uncompressed -= p.unusedSize + p.usedSize
}
}
// calculate limit for number of unused bytes in the repo after repacking
maxUnusedSizeAfter := opts.MaxUnusedBytes(stats.Size.Used)
for _, p := range repackCandidates {
remainingUnusedSize := stats.Size.Duplicate + stats.Size.Unused - stats.Size.Remove - stats.Size.Repackrm
reachedUnusedSizeAfter := remainingUnusedSize < maxUnusedSizeAfter
reachedRepackSize := stats.Size.Repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes
packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize)
switch {
case reachedRepackSize:
stats.Packs.Keep++
case p.tpe != restic.DataBlob, p.mustCompress:
// repacking non-data packs / uncompressed-trees is only limited by repackSize
repack(p.ID, p.packInfo)
case reachedUnusedSizeAfter && packIsLargeEnough:
// for all other packs stop repacking if tolerated unused size is reached.
stats.Packs.Keep++
default:
repack(p.ID, p.packInfo)
}
}
stats.Packs.Unref = uint(len(removePacksFirst))
stats.Packs.Repack = uint(len(repackPacks))
stats.Packs.Remove = uint(len(removePacks))
if repo.Config().Version < 2 {
// compression not supported for repository format version 1
stats.Size.Uncompressed = 0
}
return PrunePlan{removePacksFirst: removePacksFirst,
removePacks: removePacks,
repackPacks: repackPacks,
ignorePacks: ignorePacks,
}, nil
}
func (plan *PrunePlan) Stats() PruneStats {
return plan.stats
}
// Execute does the actual pruning:
// - remove unreferenced packs first
// - repack given pack files while keeping the given blobs
// - rebuild the index while ignoring all files that will be deleted
// - delete the files
// plan.removePacks and plan.ignorePacks are modified in this function.
func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) error {
if plan.opts.DryRun {
printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n")
if len(plan.removePacksFirst) > 0 {
printer.V("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst)
}
printer.V("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks)
printer.V("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks)
// Always quit here if DryRun was set!
return nil
}
repo := plan.repo
// make sure the plan can only be used once
plan.repo = nil
// unreferenced packs can be safely deleted first
if len(plan.removePacksFirst) != 0 {
printer.P("deleting unreferenced packs\n")
_ = deleteFiles(ctx, true, &internalRepository{repo}, plan.removePacksFirst, restic.PackFile, printer)
// forget unused data
plan.removePacksFirst = nil
}
if ctx.Err() != nil {
return ctx.Err()
}
if len(plan.repackPacks) != 0 {
printer.P("repacking packs\n")
bar := printer.NewCounter("packs repacked")
err := repo.WithBlobUploader(ctx, func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
return CopyBlobs(ctx, repo, repo, uploader, plan.repackPacks, plan.keepBlobs, bar, printer.P)
})
if err != nil {
return errors.Fatalf("%s", err)
}
// Also remove repacked packs
plan.removePacks.Merge(plan.repackPacks)
// forget unused data
plan.repackPacks = nil
if plan.keepBlobs.Len() != 0 {
printer.E("%v was not repacked\n\n"+
"Integrity check failed.\n"+
"Please report this error (along with the output of the 'prune' run) at\n"+
"https://github.com/restic/restic/issues/new/choose\n", plan.keepBlobs)
return errors.Fatal("internal error: blobs were not repacked")
}
// allow GC of the blob set
plan.keepBlobs = nil
}
if len(plan.ignorePacks) == 0 {
plan.ignorePacks = plan.removePacks
} else {
plan.ignorePacks.Merge(plan.removePacks)
}
if plan.opts.UnsafeRecovery {
printer.P("deleting index files\n")
indexFiles := repo.idx.IDs()
err := deleteFiles(ctx, false, &internalRepository{repo}, indexFiles, restic.IndexFile, printer)
if err != nil {
return errors.Fatalf("%s", err)
}
} else if len(plan.ignorePacks) != 0 {
err := rewriteIndexFiles(ctx, repo, plan.ignorePacks, nil, nil, printer)
if err != nil {
return errors.Fatalf("%s", err)
}
}
if len(plan.removePacks) != 0 {
printer.P("removing %d old packs\n", len(plan.removePacks))
_ = deleteFiles(ctx, true, &internalRepository{repo}, plan.removePacks, restic.PackFile, printer)
}
if ctx.Err() != nil {
return ctx.Err()
}
if plan.opts.UnsafeRecovery {
err := repo.idx.SaveFallback(ctx, &internalRepository{repo}, plan.ignorePacks, printer.NewCounter("packs processed"))
if err != nil {
return errors.Fatalf("%s", err)
}
}
// drop outdated in-memory index
repo.clearIndex()
printer.P("done\n")
return nil
}
// deleteFiles deletes the given fileList of fileType in parallel
// if ignoreError=true, it will print a warning if there was an error, else it will abort.
func deleteFiles(ctx context.Context, ignoreError bool, repo restic.RemoverUnpacked[restic.FileType], fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error {
bar := printer.NewCounter("files deleted")
defer bar.Done()
return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error {
if err != nil {
printer.E("unable to remove %v/%v from the repository\n", fileType, id)
if !ignoreError {
return err
}
}
printer.VV("removed %v/%v\n", fileType, id)
return nil
}, bar)
}

// internal/repository/raw.go

package repository
import (
"bytes"
"context"
"fmt"
"io"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/restic"
)
// LoadRaw reads all data stored in the backend for the file with id and filetype t.
// If the backend returns data that does not match the id, then the buffer is returned
// along with an error that is a restic.ErrInvalidData error.
func (r *Repository) LoadRaw(ctx context.Context, t restic.FileType, id restic.ID) (buf []byte, err error) {
h := backend.Handle{Type: t, Name: id.String()}
buf, err = loadRaw(ctx, r.be, h)
// retry loading damaged data only once. If a file fails to download correctly
// the second time, then it is likely corrupted at the backend.
if h.Type != backend.ConfigFile && id != restic.Hash(buf) {
if r.cache != nil {
// Cleanup cache to make sure it's not the cached copy that is broken.
// Ignore error as there's not much we can do in that case.
_ = r.cache.Forget(h)
}
buf, err = loadRaw(ctx, r.be, h)
if err == nil && id != restic.Hash(buf) {
// Return corrupted data to the caller if it is still broken the second time to
// let the caller decide what to do with the data.
return buf, fmt.Errorf("LoadRaw(%v): %w", h, restic.ErrInvalidData)
}
}
if err != nil {
return nil, err
}
return buf, nil
}
func loadRaw(ctx context.Context, be backend.Backend, h backend.Handle) (buf []byte, err error) {
err = be.Load(ctx, h, 0, 0, func(rd io.Reader) error {
wr := new(bytes.Buffer)
_, cerr := io.Copy(wr, rd)
if cerr != nil {
return cerr
}
		buf = wr.Bytes()
		return nil
})
return buf, err
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/repair_pack_test.go | internal/repository/repair_pack_test.go | package repository_test
import (
"context"
"math/rand"
"testing"
"time"
"github.com/restic/restic/internal/backend"
backendtest "github.com/restic/restic/internal/backend/test"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui/progress"
)
func listBlobs(repo restic.Repository) restic.BlobSet {
blobs := restic.NewBlobSet()
_ = repo.ListBlobs(context.TODO(), func(pb restic.PackedBlob) {
blobs.Insert(pb.BlobHandle)
})
return blobs
}
func replaceFile(t *testing.T, be backend.Backend, h backend.Handle, damage func([]byte) []byte) {
buf, err := backendtest.LoadAll(context.TODO(), be, h)
rtest.OK(t, err)
buf = damage(buf)
rtest.OK(t, be.Remove(context.TODO(), h))
rtest.OK(t, be.Save(context.TODO(), h, backend.NewByteReader(buf, be.Hasher())))
}
func TestRepairBrokenPack(t *testing.T) {
repository.TestAllVersions(t, testRepairBrokenPack)
}
func testRepairBrokenPack(t *testing.T, version uint) {
tests := []struct {
name string
damage func(t *testing.T, random *rand.Rand, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet)
}{
{
"valid pack",
func(t *testing.T, random *rand.Rand, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
return packsBefore, restic.NewBlobSet()
},
},
{
"broken pack",
func(t *testing.T, random *rand.Rand, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
wrongBlob := createRandomWrongBlob(t, random, repo)
damagedPacks := findPacksForBlobs(t, repo, restic.NewBlobSet(wrongBlob))
return damagedPacks, restic.NewBlobSet(wrongBlob)
},
},
{
"partially broken pack",
func(t *testing.T, random *rand.Rand, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
// damage one of the pack files
damagedID := packsBefore.List()[0]
replaceFile(t, be, backend.Handle{Type: backend.PackFile, Name: damagedID.String()},
func(buf []byte) []byte {
buf[0] ^= 0xff
return buf
})
// find blob that starts at offset 0
var damagedBlob restic.BlobHandle
for blobs := range repo.ListPacksFromIndex(context.TODO(), restic.NewIDSet(damagedID)) {
for _, blob := range blobs.Blobs {
if blob.Offset == 0 {
damagedBlob = blob.BlobHandle
}
}
}
return restic.NewIDSet(damagedID), restic.NewBlobSet(damagedBlob)
},
}, {
"truncated pack",
func(t *testing.T, random *rand.Rand, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
// damage one of the pack files
damagedID := packsBefore.List()[0]
replaceFile(t, be, backend.Handle{Type: backend.PackFile, Name: damagedID.String()},
func(buf []byte) []byte {
buf = buf[0:10]
return buf
})
// all blobs in the file are broken
damagedBlobs := restic.NewBlobSet()
for blobs := range repo.ListPacksFromIndex(context.TODO(), restic.NewIDSet(damagedID)) {
for _, blob := range blobs.Blobs {
damagedBlobs.Insert(blob.BlobHandle)
}
}
return restic.NewIDSet(damagedID), damagedBlobs
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
// disable verification to allow adding corrupted blobs to the repository
repo, be := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true})
seed := time.Now().UnixNano()
random := rand.New(rand.NewSource(seed))
t.Logf("rand seed is %v", seed)
createRandomBlobs(t, random, repo, 5, 0.7, true)
packsBefore := listPacks(t, repo)
blobsBefore := listBlobs(repo)
toRepair, damagedBlobs := test.damage(t, random, repo, be, packsBefore)
rtest.OK(t, repository.RepairPacks(context.TODO(), repo, toRepair, &progress.NoopPrinter{}))
// reload index
rtest.OK(t, repo.LoadIndex(context.TODO(), nil))
packsAfter := listPacks(t, repo)
blobsAfter := listBlobs(repo)
rtest.Assert(t, len(packsAfter.Intersect(toRepair)) == 0, "some damaged packs were not removed")
rtest.Assert(t, len(packsBefore.Sub(toRepair).Sub(packsAfter)) == 0, "not-damaged packs were removed")
rtest.Assert(t, blobsBefore.Sub(damagedBlobs).Equals(blobsAfter), "diverging blob lists")
})
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/repack.go | internal/repository/repack.go | package repository
import (
"context"
"sync"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/feature"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
"golang.org/x/sync/errgroup"
)
type repackBlobSet interface {
Has(bh restic.BlobHandle) bool
Delete(bh restic.BlobHandle)
Len() int
}
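// LogFunc is a printf-style callback used by CopyBlobs to report progress and
// warnings; passing nil disables logging.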
type LogFunc func(msg string, args ...interface{})
// CopyBlobs takes a list of packs together with a list of blobs contained in
// these packs. Each pack is loaded and the blobs listed in keepBlobs are saved
// into new packs via dstUploader. Afterwards, the now-obsolete packs can be
// removed by the caller.
//
// The map keepBlobs is modified by CopyBlobs; it is used to keep track of
// which blobs have been processed.
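//
// Illustrative call sketch, mirroring the prune code (repo, packs, keepBlobs
// and bar are assumed to be set up by the caller):
//
//	err := repo.WithBlobUploader(ctx, func(ctx context.Context, up restic.BlobSaverWithAsync) error {
//		return CopyBlobs(ctx, repo, repo, up, packs, keepBlobs, bar, nil)
//	})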
func CopyBlobs(
ctx context.Context,
repo restic.Repository,
dstRepo restic.Repository,
dstUploader restic.BlobSaverWithAsync,
packs restic.IDSet,
keepBlobs repackBlobSet,
p *progress.Counter,
logf LogFunc,
) error {
debug.Log("repacking %d packs while keeping %d blobs", len(packs), keepBlobs.Len())
if logf == nil {
logf = func(_ string, _ ...interface{}) {}
}
p.SetMax(uint64(len(packs)))
defer p.Done()
if repo == dstRepo && dstRepo.Connections() < 2 {
return errors.New("repack step requires a backend connection limit of at least two")
}
return repack(ctx, repo, dstRepo, dstUploader, packs, keepBlobs, p, logf)
}
func repack(
ctx context.Context,
repo restic.Repository,
dstRepo restic.Repository,
uploader restic.BlobSaverWithAsync,
packs restic.IDSet,
keepBlobs repackBlobSet,
p *progress.Counter,
logf LogFunc,
) error {
wg, wgCtx := errgroup.WithContext(ctx)
if feature.Flag.Enabled(feature.S3Restore) {
job, err := repo.StartWarmup(ctx, packs)
if err != nil {
return err
}
if job.HandleCount() != 0 {
logf("warming up %d packs from cold storage, this may take a while...", job.HandleCount())
if err := job.Wait(ctx); err != nil {
return err
}
}
}
var keepMutex sync.Mutex
downloadQueue := make(chan restic.PackBlobs)
wg.Go(func() error {
defer close(downloadQueue)
for pbs := range repo.ListPacksFromIndex(wgCtx, packs) {
var packBlobs []restic.Blob
keepMutex.Lock()
// filter out unnecessary blobs
for _, entry := range pbs.Blobs {
h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
if keepBlobs.Has(h) {
packBlobs = append(packBlobs, entry)
}
}
keepMutex.Unlock()
select {
case downloadQueue <- restic.PackBlobs{PackID: pbs.PackID, Blobs: packBlobs}:
case <-wgCtx.Done():
return wgCtx.Err()
}
}
return wgCtx.Err()
})
worker := func() error {
for t := range downloadQueue {
err := repo.LoadBlobsFromPack(wgCtx, t.PackID, t.Blobs, func(blob restic.BlobHandle, buf []byte, err error) error {
if err != nil {
// a required blob couldn't be retrieved
return err
}
keepMutex.Lock()
// recheck whether some other worker was faster
shouldKeep := keepBlobs.Has(blob)
if shouldKeep {
keepBlobs.Delete(blob)
}
keepMutex.Unlock()
if !shouldKeep {
return nil
}
				// We do want to store duplicates of already saved blobs here,
				// hence storeDuplicate is set to true.
_, _, _, err = uploader.SaveBlob(wgCtx, blob.Type, buf, blob.ID, true)
if err != nil {
return err
}
debug.Log(" saved blob %v", blob.ID)
return nil
})
if err != nil {
return err
}
p.Add(1)
}
return nil
}
	// as packs are streamed, the concurrency is limited by IO;
	// reduce by one to ensure that uploading is always possible
repackWorkerCount := int(repo.Connections() - 1)
if repo != dstRepo {
// no need to share the upload and download connections for different repositories
repackWorkerCount = int(repo.Connections())
}
for i := 0; i < repackWorkerCount; i++ {
wg.Go(worker)
}
return wg.Wait()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/index/associated_data.go | internal/repository/index/associated_data.go | package index
import (
"iter"
"slices"
"sort"
"github.com/restic/restic/internal/restic"
)
type associatedSetSub[T any] struct {
value []T
isSet []bool
}
// AssociatedSet is a memory-efficient implementation of a BlobSet that can
// store a small data item for each BlobHandle. It relies on a special property
// of our MasterIndex implementation. A BlobHandle can be permanently identified
// using an offset that never changes as MasterIndex entries cannot be modified (only added).
//
// The AssociatedSet thus can use an array with the size of the MasterIndex to store
// its data. Access to an individual entry is possible by looking up the BlobHandle's
// offset from the MasterIndex.
//
// BlobHandles that are not part of the MasterIndex can be stored by placing them in
// an overflow set that is expected to be empty in the normal case.
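//
// Illustrative sketch (mi is an existing *MasterIndex, bh a restic.BlobHandle):
//
//	set := NewAssociatedSet[uint8](mi)
//	set.Set(bh, 7)
//	if v, ok := set.Get(bh); ok {
//		_ = v // 7
//	}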
type AssociatedSet[T any] struct {
byType [restic.NumBlobTypes]associatedSetSub[T]
overflow map[restic.BlobHandle]T
idx *MasterIndex
}
func NewAssociatedSet[T any](mi *MasterIndex) *AssociatedSet[T] {
a := AssociatedSet[T]{
overflow: make(map[restic.BlobHandle]T),
idx: mi,
}
for typ := range a.byType {
if typ == 0 {
continue
}
// index starts counting at 1
count := mi.stableLen(restic.BlobType(typ)) + 1
a.byType[typ].value = make([]T, count)
a.byType[typ].isSet = make([]bool, count)
}
return &a
}
func (a *AssociatedSet[T]) Get(bh restic.BlobHandle) (T, bool) {
if val, ok := a.overflow[bh]; ok {
return val, true
}
idx := a.idx.blobIndex(bh)
bt := &a.byType[bh.Type]
if idx >= len(bt.value) || idx == -1 {
var zero T
return zero, false
}
has := bt.isSet[idx]
if has {
return bt.value[idx], has
}
var zero T
return zero, false
}
func (a *AssociatedSet[T]) Has(bh restic.BlobHandle) bool {
_, ok := a.Get(bh)
return ok
}
func (a *AssociatedSet[T]) Set(bh restic.BlobHandle, val T) {
if _, ok := a.overflow[bh]; ok {
a.overflow[bh] = val
return
}
idx := a.idx.blobIndex(bh)
bt := &a.byType[bh.Type]
if idx >= len(bt.value) || idx == -1 {
a.overflow[bh] = val
} else {
bt.value[idx] = val
bt.isSet[idx] = true
}
}
func (a *AssociatedSet[T]) Insert(bh restic.BlobHandle) {
var zero T
a.Set(bh, zero)
}
func (a *AssociatedSet[T]) Delete(bh restic.BlobHandle) {
if _, ok := a.overflow[bh]; ok {
delete(a.overflow, bh)
return
}
idx := a.idx.blobIndex(bh)
bt := &a.byType[bh.Type]
if idx < len(bt.value) && idx != -1 {
bt.isSet[idx] = false
}
}
type haser interface {
Has(bh restic.BlobHandle) bool
}
// Intersect returns a new set containing the handles that are present in both sets.
func (a *AssociatedSet[T]) Intersect(other haser) *AssociatedSet[T] {
result := NewAssociatedSet[T](a.idx)
// Determining the smaller set already requires iterating over all keys
// and thus provides no performance benefit.
for bh := range a.Keys() {
if other.Has(bh) {
			// preserve the associated value
val, _ := a.Get(bh)
result.Set(bh, val)
}
}
return result
}
// Sub returns a new set containing all handles that are present in a but not in
// other.
func (a *AssociatedSet[T]) Sub(other haser) *AssociatedSet[T] {
result := NewAssociatedSet[T](a.idx)
for bh := range a.Keys() {
if !other.Has(bh) {
val, _ := a.Get(bh)
result.Set(bh, val)
}
}
return result
}
func (a *AssociatedSet[T]) Len() int {
count := 0
for range a.All() {
count++
}
return count
}
func (a *AssociatedSet[T]) All() iter.Seq2[restic.BlobHandle, T] {
return func(yield func(restic.BlobHandle, T) bool) {
for k, v := range a.overflow {
if !yield(k, v) {
return
}
}
for pb := range a.idx.Values() {
if _, ok := a.overflow[pb.BlobHandle]; ok {
// already reported via overflow set
continue
}
val, known := a.Get(pb.BlobHandle)
if known {
if !yield(pb.BlobHandle, val) {
return
}
}
}
}
}
func (a *AssociatedSet[T]) Keys() iter.Seq[restic.BlobHandle] {
return func(yield func(restic.BlobHandle) bool) {
for bh := range a.All() {
if !yield(bh) {
return
}
}
}
}
func (a *AssociatedSet[T]) String() string {
list := restic.BlobHandles(slices.Collect(a.Keys()))
sort.Sort(list)
str := list.String()
if len(str) < 2 {
return "{}"
}
return "{" + str[1:len(str)-1] + "}"
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/index/index_parallel_test.go | internal/repository/index/index_parallel_test.go | package index_test
import (
"context"
"path/filepath"
"testing"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/repository/index"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
var repoFixture = filepath.Join("..", "testdata", "test-repo.tar.gz")
func TestRepositoryForAllIndexes(t *testing.T) {
repo, _, cleanup := repository.TestFromFixture(t, repoFixture)
defer cleanup()
expectedIndexIDs := restic.NewIDSet()
rtest.OK(t, repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error {
expectedIndexIDs.Insert(id)
return nil
}))
// check that all expected indexes are loaded without errors
indexIDs := restic.NewIDSet()
var indexErr error
rtest.OK(t, index.ForAllIndexes(context.TODO(), repo, repo, func(id restic.ID, index *index.Index, err error) error {
if err != nil {
indexErr = err
}
indexIDs.Insert(id)
return nil
}))
rtest.OK(t, indexErr)
rtest.Equals(t, expectedIndexIDs, indexIDs)
	// must fail with the returned error
iterErr := errors.New("error to pass upwards")
err := index.ForAllIndexes(context.TODO(), repo, repo, func(id restic.ID, index *index.Index, err error) error {
return iterErr
})
rtest.Equals(t, iterErr, err)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/index/index.go | internal/repository/index/index.go | package index
import (
"bytes"
"context"
"encoding/json"
"io"
"iter"
"math"
"sync"
"time"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository/pack"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/debug"
)
// In large repositories, millions of blobs are stored in the repository
// and restic needs to store an index entry for each blob in memory for
// most operations.
// Hence the index data structure defined here is one of the main contributions
// to the total memory requirements of restic.
//
// We store the index entries in indexMaps. In these maps, entries take 56
// bytes each, plus 8/4 = 2 bytes of unused pointers on average, not counting
// malloc and header struct overhead and ignoring duplicates (those are only
// present in edge cases and are also removed by prune runs).
//
// In the index entries, we need to reference the packID. As one pack may
// contain many blobs, the packIDs are saved in a separate array and only the
// index within this array is saved in the indexEntry.
//
// We assume on average a minimum of 8 blobs per pack; BP=8.
// (Note that for large files there should be 3 blobs per pack as the average chunk
// size is 1.5 MB and the minimum pack size is 4 MB)
//
// We have the following sizes:
// indexEntry: 56 bytes (on amd64)
// each packID: 32 bytes
//
// To save N index entries, we therefore need:
// N * (56 + 2) bytes + N * 32 bytes / BP = N * 62 bytes,
// i.e., fewer than 64 bytes per blob in an index.
// Index holds lookup tables for id -> pack.
type Index struct {
m sync.RWMutex
byType [restic.NumBlobTypes]indexMap
packs restic.IDs
final bool // set to true for all indexes read from the backend ("finalized")
ids restic.IDs // set to the IDs of the contained finalized indexes
created time.Time
}
// NewIndex returns a new index.
func NewIndex() *Index {
return &Index{
created: time.Now(),
}
}
// addToPacks saves the given pack ID and returns its index within idx.packs.
// This indirection allows the pack IDs to be garbage collected easily later on.
func (idx *Index) addToPacks(id restic.ID) int {
idx.packs = append(idx.packs, id)
return len(idx.packs) - 1
}
func (idx *Index) store(packIndex int, blob restic.Blob) {
// assert that offset and length fit into uint32!
if blob.Offset > math.MaxUint32 || blob.Length > math.MaxUint32 || blob.UncompressedLength > math.MaxUint32 {
panic("offset or length does not fit in uint32. You have packs > 4GB!")
}
m := &idx.byType[blob.Type]
m.add(blob.ID, packIndex, uint32(blob.Offset), uint32(blob.Length), uint32(blob.UncompressedLength))
}
// Final returns true iff the index is already written to the repository, i.e.
// it is finalized.
func (idx *Index) Final() bool {
idx.m.RLock()
defer idx.m.RUnlock()
return idx.final
}
const (
indexMaxBlobs = 50000
indexMaxAge = 10 * time.Minute
)
// Full returns true iff the index is "full enough" to be saved as a preliminary index.
var Full = func(idx *Index) bool {
idx.m.RLock()
defer idx.m.RUnlock()
debug.Log("checking whether index %p is full", idx)
var blobs uint
for typ := range idx.byType {
blobs += idx.byType[typ].len()
}
age := time.Since(idx.created)
switch {
case age >= indexMaxAge:
debug.Log("index %p is old enough", idx, age)
return true
case blobs >= indexMaxBlobs:
debug.Log("index %p has %d blobs", idx, blobs)
return true
}
debug.Log("index %p only has %d blobs and is too young (%v)", idx, blobs, age)
return false
}
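// Oversized returns true iff the index contains at least
// indexMaxBlobs+pack.MaxHeaderEntries blobs, i.e. it has grown noticeably
// beyond the size at which Full reports it as complete.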
var Oversized = func(idx *Index) bool {
idx.m.RLock()
defer idx.m.RUnlock()
var blobs uint
for typ := range idx.byType {
blobs += idx.byType[typ].len()
}
return blobs >= indexMaxBlobs+pack.MaxHeaderEntries
}
// StorePack remembers the IDs of all blobs of a given pack
// in the index. It panics if the index has already been finalized.
func (idx *Index) StorePack(id restic.ID, blobs []restic.Blob) {
idx.m.Lock()
defer idx.m.Unlock()
if idx.final {
panic("store new item in finalized index")
}
debug.Log("%v", blobs)
packIndex := idx.addToPacks(id)
for _, blob := range blobs {
idx.store(packIndex, blob)
}
}
func (idx *Index) toPackedBlob(e *indexEntry, t restic.BlobType) restic.PackedBlob {
return restic.PackedBlob{
Blob: restic.Blob{
BlobHandle: restic.BlobHandle{
ID: e.id,
Type: t},
Length: uint(e.length),
Offset: uint(e.offset),
UncompressedLength: uint(e.uncompressedLength),
},
PackID: idx.packs[e.packIndex],
}
}
// Lookup queries the index for the blob ID and returns all entries including
// duplicates. It appends the found entries to pbs and returns the result.
func (idx *Index) Lookup(bh restic.BlobHandle, pbs []restic.PackedBlob) []restic.PackedBlob {
idx.m.RLock()
defer idx.m.RUnlock()
for e := range idx.byType[bh.Type].valuesWithID(bh.ID) {
pbs = append(pbs, idx.toPackedBlob(e, bh.Type))
}
return pbs
}
// Has returns true iff the id is listed in the index.
func (idx *Index) Has(bh restic.BlobHandle) bool {
idx.m.RLock()
defer idx.m.RUnlock()
return idx.byType[bh.Type].get(bh.ID) != nil
}
// LookupSize returns the length of the plaintext content of the blob with the
// given id.
func (idx *Index) LookupSize(bh restic.BlobHandle) (plaintextLength uint, found bool) {
idx.m.RLock()
defer idx.m.RUnlock()
e := idx.byType[bh.Type].get(bh.ID)
if e == nil {
return 0, false
}
if e.uncompressedLength != 0 {
return uint(e.uncompressedLength), true
}
return uint(crypto.PlaintextLength(int(e.length))), true
}
// Values returns an iterator over all blobs known to the index. Iterating
// blocks any modification of the index.
func (idx *Index) Values() iter.Seq[restic.PackedBlob] {
return func(yield func(restic.PackedBlob) bool) {
idx.m.RLock()
defer idx.m.RUnlock()
for typ := range idx.byType {
m := &idx.byType[typ]
for e := range m.values() {
if !yield(idx.toPackedBlob(e, restic.BlobType(typ))) {
return
}
}
}
}
}
type EachByPackResult struct {
PackID restic.ID
Blobs []restic.Blob
}
// EachByPack returns a channel that yields all blobs known to the index
// grouped by packID, but ignoring blobs with a packID in packBlacklist for
// finalized indexes.
// This filtering is used when rebuilding the index, where we need to ignore packs
// from the finalized index which have been re-read into a non-finalized index.
// When the context is cancelled, the background goroutine terminates. This
// blocks any modification of the index.
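//
// Illustrative sketch (ctx and idx assumed):
//
//	for res := range idx.EachByPack(ctx, restic.NewIDSet()) {
//		_ = res.PackID // the blobs of this pack are in res.Blobs
//	}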
func (idx *Index) EachByPack(ctx context.Context, packBlacklist restic.IDSet) <-chan EachByPackResult {
idx.m.RLock()
ch := make(chan EachByPackResult)
go func() {
defer idx.m.RUnlock()
defer close(ch)
byPack := make(map[restic.ID][restic.NumBlobTypes][]*indexEntry)
for typ := range idx.byType {
m := &idx.byType[typ]
for e := range m.values() {
packID := idx.packs[e.packIndex]
if !idx.final || !packBlacklist.Has(packID) {
v := byPack[packID]
v[typ] = append(v[typ], e)
byPack[packID] = v
}
}
}
for packID, packByType := range byPack {
var result EachByPackResult
result.PackID = packID
for typ, p := range packByType {
for _, e := range p {
result.Blobs = append(result.Blobs, idx.toPackedBlob(e, restic.BlobType(typ)).Blob)
}
}
// allow GC once entry is no longer necessary
delete(byPack, packID)
select {
case <-ctx.Done():
return
case ch <- result:
}
}
}()
return ch
}
// Packs returns all packs in this index
func (idx *Index) Packs() restic.IDSet {
idx.m.RLock()
defer idx.m.RUnlock()
packs := restic.NewIDSet()
for _, packID := range idx.packs {
packs.Insert(packID)
}
return packs
}
type packJSON struct {
ID restic.ID `json:"id"`
Blobs []blobJSON `json:"blobs"`
}
type blobJSON struct {
ID restic.ID `json:"id"`
Type restic.BlobType `json:"type"`
Offset uint `json:"offset"`
Length uint `json:"length"`
UncompressedLength uint `json:"uncompressed_length,omitempty"`
}
// generatePackList returns a list of packs.
func (idx *Index) generatePackList() ([]packJSON, error) {
list := make([]packJSON, 0, len(idx.packs))
	packs := make(map[restic.ID]int, len(idx.packs)) // Maps pack ID to index in list.
for typ := range idx.byType {
m := &idx.byType[typ]
for e := range m.values() {
packID := idx.packs[e.packIndex]
if packID.IsNull() {
panic("null pack id")
}
i, ok := packs[packID]
if !ok {
i = len(list)
list = append(list, packJSON{ID: packID})
packs[packID] = i
}
p := &list[i]
// add blob
p.Blobs = append(p.Blobs, blobJSON{
ID: e.id,
Type: restic.BlobType(typ),
Offset: uint(e.offset),
Length: uint(e.length),
UncompressedLength: uint(e.uncompressedLength),
})
}
}
return list, nil
}
type jsonIndex struct {
// removed: Supersedes restic.IDs `json:"supersedes,omitempty"`
Packs []packJSON `json:"packs"`
}
// Encode writes the JSON serialization of the index to the writer w.
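//
// Illustrative sketch:
//
//	var buf bytes.Buffer
//	if err := idx.Encode(&buf); err != nil {
//		// handle error
//	}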
func (idx *Index) Encode(w io.Writer) error {
debug.Log("encoding index")
idx.m.RLock()
defer idx.m.RUnlock()
list, err := idx.generatePackList()
if err != nil {
return err
}
enc := json.NewEncoder(w)
idxJSON := jsonIndex{
Packs: list,
}
return enc.Encode(idxJSON)
}
// SaveIndex saves an index in the repository.
func (idx *Index) SaveIndex(ctx context.Context, repo restic.SaverUnpacked[restic.FileType]) (restic.ID, error) {
buf := bytes.NewBuffer(nil)
err := idx.Encode(buf)
if err != nil {
return restic.ID{}, err
}
id, err := repo.SaveUnpacked(ctx, restic.IndexFile, buf.Bytes())
ierr := idx.SetID(id)
if ierr != nil {
// logic bug
panic(ierr)
}
return id, err
}
// Finalize sets the index to final.
func (idx *Index) Finalize() {
debug.Log("finalizing index")
idx.m.Lock()
defer idx.m.Unlock()
idx.final = true
}
// IDs returns the IDs of the index, if available. If the index is not yet
// finalized, an error is returned.
func (idx *Index) IDs() (restic.IDs, error) {
idx.m.RLock()
defer idx.m.RUnlock()
if !idx.final {
return nil, errors.New("index not finalized")
}
return idx.ids, nil
}
// SetID sets the ID the index has been written to. This requires that
// Finalize() has been called before, otherwise an error is returned.
func (idx *Index) SetID(id restic.ID) error {
idx.m.Lock()
defer idx.m.Unlock()
if !idx.final {
return errors.New("index is not final")
}
if len(idx.ids) > 0 {
return errors.New("ID already set")
}
debug.Log("ID set to %v", id)
idx.ids = append(idx.ids, id)
return nil
}
// Dump writes the pretty-printed JSON representation of the index to w.
func (idx *Index) Dump(w io.Writer) error {
debug.Log("dumping index")
idx.m.RLock()
defer idx.m.RUnlock()
list, err := idx.generatePackList()
if err != nil {
return err
}
outer := jsonIndex{
Packs: list,
}
buf, err := json.MarshalIndent(outer, "", " ")
if err != nil {
return err
}
_, err = w.Write(append(buf, '\n'))
if err != nil {
return errors.Wrap(err, "Write")
}
debug.Log("done")
return nil
}
// merge() merges indexes, i.e. idx.merge(idx2) merges the contents of idx2 into idx.
// During merging exact duplicates are removed; idx2 is not changed by this method.
func (idx *Index) merge(idx2 *Index) error {
idx.m.Lock()
defer idx.m.Unlock()
idx2.m.Lock()
defer idx2.m.Unlock()
if !idx2.final {
return errors.New("index to merge is not final")
}
packlen := len(idx.packs)
// first append packs as they might be accessed when looking for duplicates below
idx.packs = append(idx.packs, idx2.packs...)
// copy all index entries of idx2 to idx
for typ := range idx2.byType {
m2 := &idx2.byType[typ]
m := &idx.byType[typ]
// helper func to test if identical entry is contained in idx
hasIdenticalEntry := func(e2 *indexEntry) (found bool) {
for e := range m.valuesWithID(e2.id) {
b := idx.toPackedBlob(e, restic.BlobType(typ))
b2 := idx2.toPackedBlob(e2, restic.BlobType(typ))
if b == b2 {
found = true
break
}
}
return found
}
for e2 := range m2.values() {
if !hasIdenticalEntry(e2) {
				// packIndex needs to be shifted as idx2.packs was appended to idx.packs, see above
m.add(e2.id, e2.packIndex+packlen, e2.offset, e2.length, e2.uncompressedLength)
}
}
}
idx.ids = append(idx.ids, idx2.ids...)
return nil
}
// DecodeIndex deserializes an index from buf.
func DecodeIndex(buf []byte, id restic.ID) (idx *Index, err error) {
debug.Log("Start decoding index")
idxJSON := &jsonIndex{}
err = json.Unmarshal(buf, idxJSON)
if err != nil {
debug.Log("Error %v", err)
return nil, errors.Wrap(err, "DecodeIndex")
}
idx = NewIndex()
for _, p := range idxJSON.Packs {
packID := idx.addToPacks(p.ID)
for _, blob := range p.Blobs {
idx.store(packID, restic.Blob{
BlobHandle: restic.BlobHandle{
Type: blob.Type,
ID: blob.ID},
Offset: blob.Offset,
Length: blob.Length,
UncompressedLength: blob.UncompressedLength,
})
}
}
idx.ids = append(idx.ids, id)
idx.final = true
debug.Log("done")
return idx, nil
}
func (idx *Index) BlobIndex(bh restic.BlobHandle) int {
idx.m.RLock()
defer idx.m.RUnlock()
return idx.byType[bh.Type].firstIndex(bh.ID)
}
func (idx *Index) Len(t restic.BlobType) uint {
idx.m.RLock()
defer idx.m.RUnlock()
return idx.byType[t].len()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/index/indexmap_test.go | internal/repository/index/indexmap_test.go | package index
import (
"math/rand"
"testing"
"time"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
func TestIndexMapBasic(t *testing.T) {
t.Parallel()
var (
id restic.ID
m indexMap
r = rand.New(rand.NewSource(98765))
)
for i := 1; i <= 400; i++ {
r.Read(id[:])
rtest.Assert(t, m.get(id) == nil, "%v retrieved but not added", id)
m.add(id, 0, 0, 0, 0)
rtest.Assert(t, m.get(id) != nil, "%v added but not retrieved", id)
rtest.Equals(t, uint(i), m.len())
}
}
func TestIndexMapForeach(t *testing.T) {
t.Parallel()
const N = 10
var m indexMap
// Don't crash on empty map.
//nolint:revive // ignore empty iteration
for range m.values() {
// empty iteration
}
for i := 0; i < N; i++ {
var id restic.ID
id[0] = byte(i)
m.add(id, i, uint32(i), uint32(i), uint32(i/2))
}
seen := make(map[int]struct{})
for e := range m.values() {
i := int(e.id[0])
rtest.Assert(t, i < N, "unknown id %v in indexMap", e.id)
rtest.Equals(t, i, e.packIndex)
rtest.Equals(t, i, int(e.length))
rtest.Equals(t, i, int(e.offset))
rtest.Equals(t, i/2, int(e.uncompressedLength))
seen[i] = struct{}{}
}
rtest.Equals(t, N, len(seen))
ncalls := 0
for range m.values() {
ncalls++
break
}
rtest.Equals(t, 1, ncalls)
}
func TestIndexMapForeachWithID(t *testing.T) {
t.Parallel()
const ndups = 3
var (
id restic.ID
m indexMap
r = rand.New(rand.NewSource(1234321))
)
r.Read(id[:])
// No result (and no crash) for empty map.
n := 0
for range m.valuesWithID(id) {
n++
}
rtest.Equals(t, 0, n)
// Test insertion and retrieval of duplicates.
for i := 0; i < ndups; i++ {
m.add(id, i, 0, 0, 0)
}
for i := 0; i < 100; i++ {
var otherid restic.ID
r.Read(otherid[:])
m.add(otherid, -1, 0, 0, 0)
}
n = 0
var packs [ndups]bool
for e := range m.valuesWithID(id) {
packs[e.packIndex] = true
n++
}
rtest.Equals(t, ndups, n)
for i := range packs {
rtest.Assert(t, packs[i], "duplicate from pack %d not retrieved", i)
}
}
func TestHashedArrayTree(t *testing.T) {
hat := newHAT()
const testSize = 1024
for i := uint(0); i < testSize; i++ {
rtest.Assert(t, hat.Size() == i, "expected hat size %v got %v", i, hat.Size())
e, idx := hat.Alloc()
rtest.Assert(t, idx == i, "expected entry at idx %v got %v", i, idx)
e.length = uint32(i)
}
for i := uint(0); i < testSize; i++ {
e := hat.Ref(i)
rtest.Assert(t, e.length == uint32(i), "expected entry to contain %v got %v", uint32(i), e.length)
}
}
func BenchmarkIndexMapHash(b *testing.B) {
var m indexMap
m.add(restic.ID{}, 0, 0, 0, 0) // Trigger lazy initialization.
ids := make([]restic.ID, 128) // 4 KiB.
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for i := range ids {
r.Read(ids[i][:])
}
b.ReportAllocs()
b.SetBytes(int64(len(restic.ID{}) * len(ids)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
for _, id := range ids {
m.hash(id)
}
}
}
func TestIndexMapFirstIndex(t *testing.T) {
t.Parallel()
var (
id restic.ID
m indexMap
r = rand.New(rand.NewSource(98765))
fi = make(map[restic.ID]int)
)
for i := 1; i <= 400; i++ {
r.Read(id[:])
rtest.Equals(t, -1, m.firstIndex(id), "wrong firstIndex for nonexistent id")
m.add(id, 0, 0, 0, 0)
idx := m.firstIndex(id)
rtest.Equals(t, i, idx, "unexpected index for id")
fi[id] = idx
}
	// iterate over the blobs; as this is a hash map, the order is effectively random
for id, idx := range fi {
rtest.Equals(t, idx, m.firstIndex(id), "wrong index returned")
}
}
func TestIndexMapFirstIndexDuplicates(t *testing.T) {
t.Parallel()
var (
id restic.ID
m indexMap
r = rand.New(rand.NewSource(98765))
)
r.Read(id[:])
for i := 1; i <= 10; i++ {
m.add(id, 0, 0, 0, 0)
}
idx := m.firstIndex(id)
rtest.Equals(t, 1, idx, "unexpected index for id")
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/index/index_test.go | internal/repository/index/index_test.go | package index_test
import (
"bytes"
"context"
"fmt"
"math/rand"
"sync"
"testing"
"github.com/restic/restic/internal/repository/index"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
func TestIndexSerialize(t *testing.T) {
tests := []restic.PackedBlob{}
idx := index.NewIndex()
// create 50 packs with 20 blobs each
for i := 0; i < 50; i++ {
packID := restic.NewRandomID()
var blobs []restic.Blob
pos := uint(0)
for j := 0; j < 20; j++ {
length := uint(i*100 + j)
uncompressedLength := uint(0)
if i >= 25 {
// test a mix of compressed and uncompressed packs
uncompressedLength = 2 * length
}
pb := restic.PackedBlob{
Blob: restic.Blob{
BlobHandle: restic.NewRandomBlobHandle(),
Offset: pos,
Length: length,
UncompressedLength: uncompressedLength,
},
PackID: packID,
}
blobs = append(blobs, pb.Blob)
tests = append(tests, pb)
pos += length
}
idx.StorePack(packID, blobs)
}
wr := bytes.NewBuffer(nil)
err := idx.Encode(wr)
rtest.OK(t, err)
idx2ID := restic.NewRandomID()
idx2, err := index.DecodeIndex(wr.Bytes(), idx2ID)
rtest.OK(t, err)
rtest.Assert(t, idx2 != nil, "nil returned for decoded index")
indexID, err := idx2.IDs()
rtest.OK(t, err)
rtest.Equals(t, indexID, restic.IDs{idx2ID})
wr2 := bytes.NewBuffer(nil)
err = idx2.Encode(wr2)
rtest.OK(t, err)
for _, testBlob := range tests {
list := idx.Lookup(testBlob.BlobHandle, nil)
if len(list) != 1 {
t.Errorf("expected one result for blob %v, got %v: %v", testBlob.ID.Str(), len(list), list)
}
result := list[0]
rtest.Equals(t, testBlob, result)
list2 := idx2.Lookup(testBlob.BlobHandle, nil)
if len(list2) != 1 {
t.Errorf("expected one result for blob %v, got %v: %v", testBlob.ID.Str(), len(list2), list2)
}
result2 := list2[0]
rtest.Equals(t, testBlob, result2)
}
// add more blobs to idx
newtests := []restic.PackedBlob{}
for i := 0; i < 10; i++ {
packID := restic.NewRandomID()
var blobs []restic.Blob
pos := uint(0)
for j := 0; j < 10; j++ {
length := uint(i*100 + j)
pb := restic.PackedBlob{
Blob: restic.Blob{
BlobHandle: restic.NewRandomBlobHandle(),
Offset: pos,
Length: length,
},
PackID: packID,
}
blobs = append(blobs, pb.Blob)
newtests = append(newtests, pb)
pos += length
}
idx.StorePack(packID, blobs)
}
// finalize; serialize idx, unserialize to idx3
idx.Finalize()
wr3 := bytes.NewBuffer(nil)
err = idx.Encode(wr3)
rtest.OK(t, err)
rtest.Assert(t, idx.Final(),
"index not final after encoding")
id := restic.NewRandomID()
rtest.OK(t, idx.SetID(id))
ids, err := idx.IDs()
rtest.OK(t, err)
rtest.Equals(t, restic.IDs{id}, ids)
idx3, err := index.DecodeIndex(wr3.Bytes(), id)
rtest.OK(t, err)
rtest.Assert(t, idx3 != nil, "nil returned for decoded index")
rtest.Assert(t, idx3.Final(), "decoded index is not final")
// all new blobs must be in the index
for _, testBlob := range newtests {
list := idx3.Lookup(testBlob.BlobHandle, nil)
if len(list) != 1 {
t.Errorf("expected one result for blob %v, got %v: %v", testBlob.ID.Str(), len(list), list)
}
blob := list[0]
rtest.Equals(t, testBlob, blob)
}
}
func TestIndexSize(t *testing.T) {
idx := index.NewIndex()
packs := 200
blobCount := 100
for i := 0; i < packs; i++ {
packID := restic.NewRandomID()
var blobs []restic.Blob
pos := uint(0)
for j := 0; j < blobCount; j++ {
length := uint(i*100 + j)
blobs = append(blobs, restic.Blob{
BlobHandle: restic.NewRandomBlobHandle(),
Offset: pos,
Length: length,
})
pos += length
}
idx.StorePack(packID, blobs)
}
wr := bytes.NewBuffer(nil)
err := idx.Encode(wr)
rtest.OK(t, err)
rtest.Equals(t, uint(packs*blobCount), idx.Len(restic.DataBlob))
rtest.Equals(t, uint(0), idx.Len(restic.TreeBlob))
t.Logf("Index file size for %d blobs in %d packs is %d", blobCount*packs, packs, wr.Len())
}
// example index serialization from doc/Design.rst
var docExampleV1 = []byte(`
{
"supersedes": [
"ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452"
],
"packs": [
{
"id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
"blobs": [
{
"id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
"type": "data",
"offset": 0,
"length": 38
},{
"id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
"type": "tree",
"offset": 38,
"length": 112
},
{
"id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
"type": "data",
"offset": 150,
"length": 123
}
]
}
]
}
`)
var docExampleV2 = []byte(`
{
"supersedes": [
"ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452"
],
"packs": [
{
"id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
"blobs": [
{
"id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
"type": "data",
"offset": 0,
"length": 38
},
{
"id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
"type": "tree",
"offset": 38,
"length": 112,
"uncompressed_length": 511
},
{
"id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
"type": "data",
"offset": 150,
"length": 123,
"uncompressed_length": 234
}
]
}
]
}
`)
var exampleTests = []struct {
id, packID restic.ID
tpe restic.BlobType
offset, length uint
uncompressedLength uint
}{
{
restic.TestParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"),
restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
restic.DataBlob, 0, 38, 0,
}, {
restic.TestParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"),
restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
restic.TreeBlob, 38, 112, 511,
}, {
restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"),
restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
restic.DataBlob, 150, 123, 234,
},
}
var exampleLookupTest = struct {
packID restic.ID
blobs map[restic.ID]restic.BlobType
}{
restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
map[restic.ID]restic.BlobType{
restic.TestParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): restic.DataBlob,
restic.TestParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): restic.TreeBlob,
restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): restic.DataBlob,
},
}
func TestIndexUnserialize(t *testing.T) {
for _, task := range []struct {
idxBytes []byte
version int
}{
{docExampleV1, 1},
{docExampleV2, 2},
} {
idx, err := index.DecodeIndex(task.idxBytes, restic.NewRandomID())
rtest.OK(t, err)
for _, test := range exampleTests {
list := idx.Lookup(restic.BlobHandle{ID: test.id, Type: test.tpe}, nil)
if len(list) != 1 {
t.Errorf("expected one result for blob %v, got %v: %v", test.id.Str(), len(list), list)
}
blob := list[0]
t.Logf("looking for blob %v/%v, got %v", test.tpe, test.id.Str(), blob)
rtest.Equals(t, test.packID, blob.PackID)
rtest.Equals(t, test.tpe, blob.Type)
rtest.Equals(t, test.offset, blob.Offset)
rtest.Equals(t, test.length, blob.Length)
switch task.version {
case 1:
rtest.Equals(t, uint(0), blob.UncompressedLength)
case 2:
rtest.Equals(t, test.uncompressedLength, blob.UncompressedLength)
default:
t.Fatal("Invalid index version")
}
}
blobs := listPack(t, idx, exampleLookupTest.packID)
if len(blobs) != len(exampleLookupTest.blobs) {
t.Fatalf("expected %d blobs in pack, got %d", len(exampleLookupTest.blobs), len(blobs))
}
for _, blob := range blobs {
b, ok := exampleLookupTest.blobs[blob.ID]
if !ok {
t.Errorf("unexpected blob %v found", blob.ID.Str())
}
if blob.Type != b {
t.Errorf("unexpected type for blob %v: want %v, got %v", blob.ID.Str(), b, blob.Type)
}
}
}
}
func listPack(t testing.TB, idx *index.Index, id restic.ID) (pbs []restic.PackedBlob) {
for pb := range idx.Values() {
if pb.PackID.Equal(id) {
pbs = append(pbs, pb)
}
}
return pbs
}
var (
benchmarkIndexJSON []byte
benchmarkIndexJSONOnce sync.Once
)
func initBenchmarkIndexJSON() {
idx, _ := createRandomIndex(rand.New(rand.NewSource(0)), 200000)
var buf bytes.Buffer
err := idx.Encode(&buf)
if err != nil {
panic(err)
}
benchmarkIndexJSON = buf.Bytes()
}
func BenchmarkDecodeIndex(b *testing.B) {
benchmarkIndexJSONOnce.Do(initBenchmarkIndexJSON)
id := restic.NewRandomID()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := index.DecodeIndex(benchmarkIndexJSON, id)
rtest.OK(b, err)
}
}
func BenchmarkDecodeIndexParallel(b *testing.B) {
benchmarkIndexJSONOnce.Do(initBenchmarkIndexJSON)
id := restic.NewRandomID()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_, err := index.DecodeIndex(benchmarkIndexJSON, id)
rtest.OK(b, err)
}
})
}
func BenchmarkEncodeIndex(b *testing.B) {
for _, n := range []int{100, 1000, 10000} {
idx, _ := createRandomIndex(rand.New(rand.NewSource(0)), n)
b.Run(fmt.Sprint(n), func(b *testing.B) {
buf := new(bytes.Buffer)
err := idx.Encode(buf)
rtest.OK(b, err)
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
buf.Reset()
_ = idx.Encode(buf)
}
})
}
}
func TestIndexPacks(t *testing.T) {
idx := index.NewIndex()
packs := restic.NewIDSet()
for i := 0; i < 20; i++ {
packID := restic.NewRandomID()
idx.StorePack(packID, []restic.Blob{
{
BlobHandle: restic.NewRandomBlobHandle(),
Offset: 0,
Length: 23,
},
})
packs.Insert(packID)
}
idxPacks := idx.Packs()
rtest.Assert(t, packs.Equals(idxPacks), "packs in index do not match packs added to index")
}
const maxPackSize = 16 * 1024 * 1024
// NewRandomTestID generates an (insecure) random ID, similar to restic.NewRandomID.
func NewRandomTestID(rng *rand.Rand) restic.ID {
id := restic.ID{}
rng.Read(id[:])
return id
}
func createRandomIndex(rng *rand.Rand, packfiles int) (idx *index.Index, lookupBh restic.BlobHandle) {
idx = index.NewIndex()
// create index with given number of pack files
for i := 0; i < packfiles; i++ {
packID := NewRandomTestID(rng)
var blobs []restic.Blob
offset := 0
for offset < maxPackSize {
size := 2000 + rng.Intn(4*1024*1024)
id := NewRandomTestID(rng)
blobs = append(blobs, restic.Blob{
BlobHandle: restic.BlobHandle{
Type: restic.DataBlob,
ID: id,
},
Length: uint(size),
UncompressedLength: uint(2 * size),
Offset: uint(offset),
})
offset += size
}
idx.StorePack(packID, blobs)
if i == 0 {
lookupBh = restic.BlobHandle{
Type: restic.DataBlob,
ID: blobs[rng.Intn(len(blobs))].ID,
}
}
}
return idx, lookupBh
}
func BenchmarkIndexHasUnknown(b *testing.B) {
idx, _ := createRandomIndex(rand.New(rand.NewSource(0)), 200000)
lookupBh := restic.NewRandomBlobHandle()
b.ResetTimer()
for i := 0; i < b.N; i++ {
idx.Has(lookupBh)
}
}
func BenchmarkIndexHasKnown(b *testing.B) {
idx, lookupBh := createRandomIndex(rand.New(rand.NewSource(0)), 200000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
idx.Has(lookupBh)
}
}
func BenchmarkIndexAlloc(b *testing.B) {
rng := rand.New(rand.NewSource(0))
b.ReportAllocs()
for i := 0; i < b.N; i++ {
createRandomIndex(rng, 200000)
}
}
func BenchmarkIndexAllocParallel(b *testing.B) {
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
rng := rand.New(rand.NewSource(0))
for pb.Next() {
createRandomIndex(rng, 200000)
}
})
}
func TestIndexHas(t *testing.T) {
tests := []restic.PackedBlob{}
idx := index.NewIndex()
// create 50 packs with 20 blobs each
for i := 0; i < 50; i++ {
packID := restic.NewRandomID()
var blobs []restic.Blob
pos := uint(0)
for j := 0; j < 20; j++ {
length := uint(i*100 + j)
uncompressedLength := uint(0)
if i >= 25 {
// test a mix of compressed and uncompressed packs
uncompressedLength = 2 * length
}
pb := restic.PackedBlob{
Blob: restic.Blob{
BlobHandle: restic.NewRandomBlobHandle(),
Offset: pos,
Length: length,
UncompressedLength: uncompressedLength,
},
PackID: packID,
}
blobs = append(blobs, pb.Blob)
tests = append(tests, pb)
pos += length
}
idx.StorePack(packID, blobs)
}
for _, testBlob := range tests {
rtest.Assert(t, idx.Has(testBlob.BlobHandle), "Index reports not having data blob added to it")
}
rtest.Assert(t, !idx.Has(restic.NewRandomBlobHandle()), "Index reports having a data blob not added to it")
rtest.Assert(t, !idx.Has(restic.BlobHandle{ID: tests[0].ID, Type: restic.TreeBlob}), "Index reports having a tree blob added to it with the same id as a data blob")
}
func TestMixedEachByPack(t *testing.T) {
idx := index.NewIndex()
expected := make(map[restic.ID]int)
// create 50 packs with 2 blobs each
for i := 0; i < 50; i++ {
packID := restic.NewRandomID()
expected[packID] = 1
blobs := []restic.Blob{
{
BlobHandle: restic.BlobHandle{Type: restic.DataBlob, ID: restic.NewRandomID()},
Offset: 0,
Length: 42,
},
{
BlobHandle: restic.BlobHandle{Type: restic.TreeBlob, ID: restic.NewRandomID()},
Offset: 42,
Length: 43,
},
}
idx.StorePack(packID, blobs)
}
reported := make(map[restic.ID]int)
for bp := range idx.EachByPack(context.TODO(), restic.NewIDSet()) {
reported[bp.PackID]++
rtest.Equals(t, 2, len(bp.Blobs)) // correct blob count
if bp.Blobs[0].Offset > bp.Blobs[1].Offset {
bp.Blobs[1], bp.Blobs[0] = bp.Blobs[0], bp.Blobs[1]
}
b0 := bp.Blobs[0]
rtest.Assert(t, b0.Type == restic.DataBlob && b0.Offset == 0 && b0.Length == 42, "wrong blob", b0)
b1 := bp.Blobs[1]
rtest.Assert(t, b1.Type == restic.TreeBlob && b1.Offset == 42 && b1.Length == 43, "wrong blob", b1)
}
rtest.Equals(t, expected, reported)
}
func TestEachByPackIgnores(t *testing.T) {
idx := index.NewIndex()
ignores := restic.NewIDSet()
expected := make(map[restic.ID]int)
// create 50 packs with one blob each
for i := 0; i < 50; i++ {
packID := restic.NewRandomID()
if i < 3 {
ignores.Insert(packID)
} else {
expected[packID] = 1
}
blobs := []restic.Blob{
{
BlobHandle: restic.BlobHandle{Type: restic.DataBlob, ID: restic.NewRandomID()},
Offset: 0,
Length: 42,
},
}
idx.StorePack(packID, blobs)
}
idx.Finalize()
reported := make(map[restic.ID]int)
for bp := range idx.EachByPack(context.TODO(), ignores) {
reported[bp.PackID]++
rtest.Equals(t, 1, len(bp.Blobs)) // correct blob count
b0 := bp.Blobs[0]
rtest.Assert(t, b0.Type == restic.DataBlob && b0.Offset == 0 && b0.Length == 42, "wrong blob", b0)
}
rtest.Equals(t, expected, reported)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/index/index_internal_test.go | internal/repository/index/index_internal_test.go | package index
import (
"testing"
"github.com/restic/restic/internal/repository/pack"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
func TestIndexOversized(t *testing.T) {
idx := NewIndex()
// Add blobs up to indexMaxBlobs + pack.MaxHeaderEntries - 1
packID := idx.addToPacks(restic.NewRandomID())
for i := uint(0); i < indexMaxBlobs+pack.MaxHeaderEntries-1; i++ {
idx.store(packID, restic.Blob{
BlobHandle: restic.BlobHandle{
Type: restic.DataBlob,
ID: restic.NewRandomID(),
},
Length: 100,
Offset: uint(i) * 100,
})
}
rtest.Assert(t, !Oversized(idx), "index should not be considered oversized")
// Add one more blob to exceed the limit
idx.store(packID, restic.Blob{
BlobHandle: restic.BlobHandle{
Type: restic.DataBlob,
ID: restic.NewRandomID(),
},
Length: 100,
Offset: uint(indexMaxBlobs+pack.MaxHeaderEntries) * 100,
})
rtest.Assert(t, Oversized(idx), "index should be considered oversized")
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/index/indexmap.go | internal/repository/index/indexmap.go | package index
import (
"hash/maphash"
"iter"
"github.com/restic/restic/internal/restic"
)
// An indexMap is a chained hash table that maps blob IDs to indexEntries.
// It allows storing multiple entries with the same key.
//
// indexMap uses some optimizations that are not compatible with supporting
// deletions.
//
// The buckets in this hash table contain only pointers, rather than inlined
// key-value pairs like the standard Go map. This way, only a pointer array
// needs to be resized when the table grows, preventing memory usage spikes.
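//
// Illustrative sketch (id is a restic.ID):
//
//	var m indexMap
//	m.add(id, 0, 0, 100, 0) // pack index 0, offset 0, length 100, uncompressed 0
//	if e := m.get(id); e != nil {
//		_ = e.length // 100
//	}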
type indexMap struct {
// The number of buckets is always a power of two and never zero.
buckets []uint
numentries uint
mh maphash.Hash
blockList hashedArrayTree
}
const (
growthFactor = 2 // Must be a power of 2.
maxLoad = 4 // Max. number of entries per bucket.
)
// add inserts an indexEntry for the given arguments into the map,
// using id as the key.
func (m *indexMap) add(id restic.ID, packIdx int, offset, length uint32, uncompressedLength uint32) {
switch {
case m.numentries == 0: // Lazy initialization.
m.init()
case m.numentries >= maxLoad*uint(len(m.buckets)):
m.grow()
}
h := m.hash(id)
e, idx := m.newEntry()
e.id = id
e.next = m.buckets[h] // Prepend to existing chain.
e.packIndex = packIdx
e.offset = offset
e.length = length
e.uncompressedLength = uncompressedLength
m.buckets[h] = idx
m.numentries++
}
// values returns an iterator over all entries in the map.
func (m *indexMap) values() iter.Seq[*indexEntry] {
return func(yield func(*indexEntry) bool) {
blockCount := m.blockList.Size()
for i := uint(1); i < blockCount; i++ {
if !yield(m.resolve(i)) {
return
}
}
}
}
// valuesWithID returns an iterator over all entries with the given id.
func (m *indexMap) valuesWithID(id restic.ID) iter.Seq[*indexEntry] {
return func(yield func(*indexEntry) bool) {
if len(m.buckets) == 0 {
return
}
h := m.hash(id)
ei := m.buckets[h]
for ei != 0 {
e := m.resolve(ei)
ei = e.next
if e.id != id {
continue
}
if !yield(e) {
return
}
}
}
}
// get returns the first entry for the given id.
func (m *indexMap) get(id restic.ID) *indexEntry {
if len(m.buckets) == 0 {
return nil
}
h := m.hash(id)
ei := m.buckets[h]
for ei != 0 {
e := m.resolve(ei)
if e.id == id {
return e
}
ei = e.next
}
return nil
}
// firstIndex returns the index of the first entry for ID id.
// This index is guaranteed to never change.
func (m *indexMap) firstIndex(id restic.ID) int {
if len(m.buckets) == 0 {
return -1
}
idx := -1
h := m.hash(id)
ei := m.buckets[h]
for ei != 0 {
e := m.resolve(ei)
cur := ei
ei = e.next
if e.id != id {
continue
}
if int(cur) < idx || idx == -1 {
// casting from uint to int is unproblematic as we'd run out of memory
// before this can result in an overflow.
idx = int(cur)
}
}
return idx
}
func (m *indexMap) grow() {
m.buckets = make([]uint, growthFactor*len(m.buckets))
blockCount := m.blockList.Size()
for i := uint(1); i < blockCount; i++ {
e := m.resolve(i)
h := m.hash(e.id)
e.next = m.buckets[h]
m.buckets[h] = i
}
}
func (m *indexMap) hash(id restic.ID) uint {
// We use maphash to prevent backups of specially crafted inputs
// from degrading performance.
// While SHA-256 should be collision-resistant, for hash table indices
// we use only a few bits of it and finding collisions for those is
// much easier than breaking the whole algorithm.
mh := maphash.Hash{}
mh.SetSeed(m.mh.Seed())
_, _ = mh.Write(id[:])
h := uint(mh.Sum64())
return h & uint(len(m.buckets)-1)
}
func (m *indexMap) init() {
const initialBuckets = 64
m.buckets = make([]uint, initialBuckets)
	// the first entry in blockList serves as a null sentinel
m.blockList = *newHAT()
m.newEntry()
}
func (m *indexMap) len() uint { return m.numentries }
func (m *indexMap) newEntry() (*indexEntry, uint) {
return m.blockList.Alloc()
}
func (m *indexMap) resolve(idx uint) *indexEntry {
return m.blockList.Ref(idx)
}
type indexEntry struct {
id restic.ID
next uint
packIndex int // Position in containing Index's packs field.
offset uint32
length uint32
uncompressedLength uint32
}
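// hashedArrayTree is a growable array of indexEntry values organized as a list
// of blocks. Alloc appends a fresh entry and returns its position, Ref resolves
// a position back to the entry. Positions stay stable across growth, whereas
// pointers may be invalidated, as growing pairwise-merges the existing blocks.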
type hashedArrayTree struct {
mask uint
maskShift uint
blockSize uint
size uint
blockList [][]indexEntry
}
func newHAT() *hashedArrayTree {
// start with a small block size
blockSizePower := uint(2)
blockSize := uint(1 << blockSizePower)
return &hashedArrayTree{
mask: blockSize - 1,
maskShift: blockSizePower,
blockSize: blockSize,
size: 0,
blockList: make([][]indexEntry, blockSize),
}
}
func (h *hashedArrayTree) Alloc() (*indexEntry, uint) {
h.grow()
size := h.size
idx, subIdx := h.index(size)
h.size++
return &h.blockList[idx][subIdx], size
}
func (h *hashedArrayTree) index(pos uint) (idx uint, subIdx uint) {
subIdx = pos & h.mask
idx = pos >> h.maskShift
return
}
func (h *hashedArrayTree) Ref(pos uint) *indexEntry {
if pos >= h.size {
panic("array index out of bounds")
}
idx, subIdx := h.index(pos)
return &h.blockList[idx][subIdx]
}
func (h *hashedArrayTree) Size() uint {
return h.size
}
func (h *hashedArrayTree) grow() {
idx, subIdx := h.index(h.size)
if int(idx) == len(h.blockList) {
// blockList is too short -> double list and block size
h.blockSize *= 2
h.mask = h.mask*2 + 1
h.maskShift++
idx = idx / 2
oldBlocks := h.blockList
h.blockList = make([][]indexEntry, h.blockSize)
// pairwise merging of blocks
for i := 0; i < len(oldBlocks); i += 2 {
block := make([]indexEntry, 0, h.blockSize)
block = append(block, oldBlocks[i]...)
block = append(block, oldBlocks[i+1]...)
h.blockList[i/2] = block
// allow GC
oldBlocks[i] = nil
oldBlocks[i+1] = nil
}
}
if subIdx == 0 {
// new index entry batch
h.blockList[idx] = make([]indexEntry, h.blockSize)
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/index/index_parallel.go | internal/repository/index/index_parallel.go | package index
import (
"context"
"runtime"
"sync"
"github.com/restic/restic/internal/restic"
)
// ForAllIndexes loads all index files in parallel and calls the given callback.
// It is guaranteed that the callback is not run concurrently. If the callback
// returns an error, this function is cancelled and also returns that error.
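//
// Illustrative sketch, following the pattern used in the tests (repo implements
// both Lister and LoaderUnpacked):
//
//	err := ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *Index, err error) error {
//		if err != nil {
//			return err
//		}
//		// use idx here; invocations are serialized
//		return nil
//	})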
func ForAllIndexes(ctx context.Context, lister restic.Lister, repo restic.LoaderUnpacked,
fn func(id restic.ID, index *Index, err error) error) error {
	// decoding an index can take quite some time, so this can be both CPU- and IO-bound;
	// as the whole index is kept in memory anyway, a few extra workers don't matter
workerCount := repo.Connections() + uint(runtime.GOMAXPROCS(0))
var m sync.Mutex
return restic.ParallelList(ctx, lister, restic.IndexFile, workerCount, func(ctx context.Context, id restic.ID, _ int64) error {
var err error
var idx *Index
buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)
if err == nil {
idx, err = DecodeIndex(buf, id)
}
m.Lock()
defer m.Unlock()
return fn(id, idx, err)
})
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/index/testing.go | internal/repository/index/testing.go | package index
import (
"testing"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
)
func TestMergeIndex(t testing.TB, mi *MasterIndex) ([]*Index, int, restic.IDSet) {
finalIndexes := mi.finalizeNotFinalIndexes()
ids := restic.NewIDSet()
for _, idx := range finalIndexes {
id := restic.NewRandomID()
ids.Insert(id)
test.OK(t, idx.SetID(id))
}
test.OK(t, mi.MergeFinalIndexes())
return finalIndexes, len(mi.idx), ids
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/index/master_index.go | internal/repository/index/master_index.go | package index
import (
"context"
"fmt"
"iter"
"runtime"
"sync"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
"golang.org/x/sync/errgroup"
)
// MasterIndex is a collection of indexes and IDs of chunks that are in the process of being saved.
type MasterIndex struct {
idx []*Index
pendingBlobs map[restic.BlobHandle]uint
idxMutex sync.RWMutex
}
// NewMasterIndex creates a new master index.
func NewMasterIndex() *MasterIndex {
mi := &MasterIndex{pendingBlobs: make(map[restic.BlobHandle]uint)}
mi.clear()
return mi
}
func (mi *MasterIndex) clear() {
// Always add an empty final index, such that MergeFinalIndexes can merge into this.
mi.idx = []*Index{NewIndex()}
mi.idx[0].Finalize()
}
// Lookup queries all known Indexes for the ID and returns all matches.
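//
// Illustrative sketch (bh is an assumed restic.BlobHandle):
//
//	for _, pb := range mi.Lookup(bh) {
//		_ = pb.PackID // pack containing this copy of the blob
//	}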
func (mi *MasterIndex) Lookup(bh restic.BlobHandle) (pbs []restic.PackedBlob) {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
for _, idx := range mi.idx {
pbs = idx.Lookup(bh, pbs)
}
return pbs
}
// LookupSize queries all known Indexes for the ID and returns the first match.
// Also returns true if the ID is pending.
func (mi *MasterIndex) LookupSize(bh restic.BlobHandle) (uint, bool) {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
// also return true if blob is pending
if size, ok := mi.pendingBlobs[bh]; ok {
return size, true
}
for _, idx := range mi.idx {
if size, found := idx.LookupSize(bh); found {
return size, found
}
}
return 0, false
}
// AddPending adds a given blob to the list of pending blobs.
// Before doing so it checks whether the blob is already known.
// It returns true if adding was successful and false if the blob
// was already known.
func (mi *MasterIndex) AddPending(bh restic.BlobHandle, size uint) bool {
mi.idxMutex.Lock()
defer mi.idxMutex.Unlock()
// Check if blob is pending or in index
if _, ok := mi.pendingBlobs[bh]; ok {
return false
}
for _, idx := range mi.idx {
if idx.Has(bh) {
return false
}
}
// really not known -> insert
mi.pendingBlobs[bh] = size
return true
}
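// Illustrative sketch (not part of the original file): the deduplication
// pattern a caller can build on AddPending - upload a blob only if it is
// reported as unknown. The upload callback is a hypothetical stand-in.
func saveBlobOnce(mi *MasterIndex, bh restic.BlobHandle, size uint, upload func() error) error {
	if !mi.AddPending(bh, size) {
		// already stored, or another worker is currently uploading it
		return nil
	}
	return upload()
}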
// IDs returns the IDs of all final indexes contained in the MasterIndex.
func (mi *MasterIndex) IDs() restic.IDSet {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
ids := restic.NewIDSet()
for _, idx := range mi.idx {
if !idx.Final() {
continue
}
indexIDs, err := idx.IDs()
if err != nil {
debug.Log("not using index, IDs() returned error %v", err)
continue
}
for _, id := range indexIDs {
ids.Insert(id)
}
}
return ids
}
// Packs returns all packs that are covered by the index.
// If packBlacklist is given, those packs are only contained in the
// resulting IDSet if they are contained in a non-final (newly written) index.
func (mi *MasterIndex) Packs(packBlacklist restic.IDSet) restic.IDSet {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
packs := restic.NewIDSet()
for _, idx := range mi.idx {
idxPacks := idx.Packs()
if idx.final && len(packBlacklist) > 0 {
idxPacks = idxPacks.Sub(packBlacklist)
}
packs.Merge(idxPacks)
}
return packs
}
// Insert adds a new index to the MasterIndex.
func (mi *MasterIndex) Insert(idx *Index) {
mi.idxMutex.Lock()
defer mi.idxMutex.Unlock()
mi.idx = append(mi.idx, idx)
}
// StorePack remembers the id and blobs of the given pack in the index.
func (mi *MasterIndex) StorePack(ctx context.Context, id restic.ID, blobs []restic.Blob, r restic.SaverUnpacked[restic.FileType]) error {
mi.storePack(id, blobs)
return mi.saveFullIndex(ctx, r)
}
func (mi *MasterIndex) storePack(id restic.ID, blobs []restic.Blob) {
mi.idxMutex.Lock()
defer mi.idxMutex.Unlock()
// delete blobs from pending
for _, blob := range blobs {
delete(mi.pendingBlobs, restic.BlobHandle{Type: blob.Type, ID: blob.ID})
}
for _, idx := range mi.idx {
if !idx.Final() {
idx.StorePack(id, blobs)
return
}
}
newIdx := NewIndex()
newIdx.StorePack(id, blobs)
mi.idx = append(mi.idx, newIdx)
}
// finalizeNotFinalIndexes finalizes all indexes that
// have not yet been saved and returns that list
func (mi *MasterIndex) finalizeNotFinalIndexes() []*Index {
mi.idxMutex.Lock()
defer mi.idxMutex.Unlock()
var list []*Index
for _, idx := range mi.idx {
if !idx.Final() {
idx.Finalize()
list = append(list, idx)
}
}
debug.Log("return %d indexes", len(list))
return list
}
// finalizeFullIndexes finalizes all indexes that are full and returns that list.
func (mi *MasterIndex) finalizeFullIndexes() []*Index {
mi.idxMutex.Lock()
defer mi.idxMutex.Unlock()
var list []*Index
debug.Log("checking %d indexes", len(mi.idx))
for _, idx := range mi.idx {
if idx.Final() {
continue
}
if Full(idx) {
debug.Log("index %p is full", idx)
idx.Finalize()
list = append(list, idx)
} else {
debug.Log("index %p not full", idx)
}
}
debug.Log("return %d indexes", len(list))
return list
}
// Values returns an iterator over all blobs known to the index. This blocks any
// modification of the index.
func (mi *MasterIndex) Values() iter.Seq[restic.PackedBlob] {
return func(yield func(restic.PackedBlob) bool) {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
for _, idx := range mi.idx {
for pb := range idx.Values() {
if !yield(pb) {
return
}
}
}
}
}
// MergeFinalIndexes merges all final indexes together.
// After calling, there will be only one big final index in MasterIndex
// containing all final index contents.
// Indexes that are not final are left untouched.
// This merging may only be done after all index files are loaded, as
// removal of superseded index contents is only possible for unmerged indexes.
func (mi *MasterIndex) MergeFinalIndexes() error {
mi.idxMutex.Lock()
defer mi.idxMutex.Unlock()
// The first index is always final and the one to merge into
newIdx := mi.idx[:1]
for i := 1; i < len(mi.idx); i++ {
idx := mi.idx[i]
// clear the reference in the MasterIndex as it may become stale
mi.idx[i] = nil
// do not merge indexes that have no id set
ids, _ := idx.IDs()
if !idx.Final() || len(ids) == 0 {
newIdx = append(newIdx, idx)
} else {
err := mi.idx[0].merge(idx)
if err != nil {
return fmt.Errorf("MergeFinalIndexes: %w", err)
}
}
}
mi.idx = newIdx
return nil
}
func (mi *MasterIndex) Load(ctx context.Context, r restic.ListerLoaderUnpacked, p *progress.Counter, cb func(id restic.ID, idx *Index, err error) error) error {
indexList, err := restic.MemorizeList(ctx, r, restic.IndexFile)
if err != nil {
return err
}
if p != nil {
var numIndexFiles uint64
err := indexList.List(ctx, restic.IndexFile, func(_ restic.ID, _ int64) error {
numIndexFiles++
return nil
})
if err != nil {
return err
}
p.SetMax(numIndexFiles)
defer p.Done()
}
err = ForAllIndexes(ctx, indexList, r, func(id restic.ID, idx *Index, err error) error {
if p != nil {
p.Add(1)
}
if cb != nil {
err = cb(id, idx, err)
}
if err != nil {
return err
}
// special case to allow check to ignore index loading errors
if idx == nil {
return nil
}
mi.Insert(idx)
return nil
})
if err != nil {
return err
}
return mi.MergeFinalIndexes()
}
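// Illustrative sketch (not part of the original file): the typical sequence
// for bringing up a MasterIndex from an already-opened repository, without
// progress reporting or a per-index callback.
func loadMasterIndex(ctx context.Context, repo restic.ListerLoaderUnpacked) (*MasterIndex, error) {
	mi := NewMasterIndex()
	if err := mi.Load(ctx, repo, nil, nil); err != nil {
		return nil, err
	}
	return mi, nil
}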
type MasterIndexRewriteOpts struct {
SaveProgress *progress.Counter
DeleteProgress func() *progress.Counter
DeleteReport func(id restic.ID, err error)
}
// Rewrite removes packs whose ID is in excludePacks from all known indexes.
// It also removes the rewritten index files and those listed in extraObsolete.
// If oldIndexes is not nil, then only the indexes in this set are processed.
// This is used by repair index to only rewrite and delete the old indexes.
//
// Must not be called concurrently with any other MasterIndex operation.
func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked[restic.FileType], excludePacks restic.IDSet, oldIndexes restic.IDSet, extraObsolete restic.IDs, opts MasterIndexRewriteOpts) error {
for _, idx := range mi.idx {
if !idx.Final() {
panic("internal error - index must be saved before calling MasterIndex.Rewrite")
}
}
var indexes restic.IDSet
if oldIndexes != nil {
// repair index adds new index entries for already existing pack files
// only remove the old (possibly broken) entries by only processing old indexes
indexes = oldIndexes
} else {
indexes = mi.IDs()
}
p := opts.SaveProgress
p.SetMax(uint64(len(indexes)))
// reset state which is not necessary for Rewrite and just consumes a lot of memory
// the index state would be invalid after Rewrite completes anyway
mi.clear()
runtime.GC()
// copy excludePacks to prevent unintended side effects
excludePacks = excludePacks.Clone()
if excludePacks == nil {
excludePacks = restic.NewIDSet()
}
debug.Log("start rebuilding index of %d indexes, excludePacks: %v", len(indexes), excludePacks)
wg, wgCtx := errgroup.WithContext(ctx)
idxCh := make(chan restic.ID)
wg.Go(func() error {
defer close(idxCh)
for id := range indexes {
select {
case idxCh <- id:
case <-wgCtx.Done():
return wgCtx.Err()
}
}
return nil
})
var rewriteWg sync.WaitGroup
type rewriteTask struct {
idx *Index
}
rewriteCh := make(chan rewriteTask)
loader := func() error {
defer rewriteWg.Done()
for id := range idxCh {
buf, err := repo.LoadUnpacked(wgCtx, restic.IndexFile, id)
if err != nil {
return fmt.Errorf("LoadUnpacked(%v): %w", id.Str(), err)
}
idx, err := DecodeIndex(buf, id)
if err != nil {
return err
}
select {
case rewriteCh <- rewriteTask{idx}:
case <-wgCtx.Done():
return wgCtx.Err()
}
}
return nil
}
// loading an index can take quite some time; the index files are probably
// already cached at this point, so this is most likely CPU-bound
loaderCount := runtime.GOMAXPROCS(0)
// run workers on ch
for i := 0; i < loaderCount; i++ {
rewriteWg.Add(1)
wg.Go(loader)
}
wg.Go(func() error {
rewriteWg.Wait()
close(rewriteCh)
return nil
})
obsolete := restic.NewIDSet(extraObsolete...)
saveCh := make(chan *Index)
wg.Go(func() error {
defer close(saveCh)
newIndex := NewIndex()
for task := range rewriteCh {
// always rewrite indexes that include a pack that must be removed or that are not full
if len(task.idx.Packs().Intersect(excludePacks)) == 0 && Full(task.idx) && !Oversized(task.idx) {
// make sure that each pack is only stored exactly once in the index
excludePacks.Merge(task.idx.Packs())
// index is already up to date
p.Add(1)
continue
}
ids, err := task.idx.IDs()
if err != nil || len(ids) != 1 {
panic("internal error, index has no ID")
}
obsolete.Merge(restic.NewIDSet(ids...))
for pbs := range task.idx.EachByPack(wgCtx, excludePacks) {
newIndex.StorePack(pbs.PackID, pbs.Blobs)
if Full(newIndex) {
select {
case saveCh <- newIndex:
case <-wgCtx.Done():
return wgCtx.Err()
}
newIndex = NewIndex()
}
}
if wgCtx.Err() != nil {
return wgCtx.Err()
}
// make sure that each pack is only stored exactly once in the index
excludePacks.Merge(task.idx.Packs())
p.Add(1)
}
select {
case saveCh <- newIndex:
case <-wgCtx.Done():
}
return nil
})
var savers errgroup.Group
// encoding an index can take quite some time, so this can be CPU- or IO-bound
// do not add repo.Connections() here as the loader goroutines already use those connections.
savers.SetLimit(runtime.GOMAXPROCS(0))
for idx := range saveCh {
savers.Go(func() error {
idx.Finalize()
if len(idx.packs) == 0 {
return nil
}
_, err := idx.SaveIndex(wgCtx, repo)
return err
})
}
wg.Go(savers.Wait)
err := wg.Wait()
p.Done()
if err != nil {
return fmt.Errorf("failed to rewrite indexes: %w", err)
}
p = nil
if opts.DeleteProgress != nil {
p = opts.DeleteProgress()
}
defer p.Done()
return restic.ParallelRemove(ctx, repo, obsolete, restic.IndexFile, func(id restic.ID, err error) error {
if opts.DeleteReport != nil {
opts.DeleteReport(id, err)
}
return err
}, p)
}
// SaveFallback saves all known indexes to index files, leaving out any
// packs whose ID is contained in excludePacks.
// It is only intended for use by prune with the UnsafeRecovery option.
//
// Must not be called concurrently with any other MasterIndex operation.
func (mi *MasterIndex) SaveFallback(ctx context.Context, repo restic.SaverRemoverUnpacked[restic.FileType], excludePacks restic.IDSet, p *progress.Counter) error {
p.SetMax(uint64(len(mi.Packs(excludePacks))))
mi.idxMutex.Lock()
defer mi.idxMutex.Unlock()
debug.Log("start rebuilding index of %d indexes, excludePacks: %v", len(mi.idx), excludePacks)
obsolete := restic.NewIDSet()
wg, wgCtx := errgroup.WithContext(ctx)
// keep concurrency bounded as we're on a fallback path
wg.SetLimit(1 + int(repo.Connections()))
ch := make(chan *Index)
wg.Go(func() error {
defer close(ch)
newIndex := NewIndex()
for _, idx := range mi.idx {
if idx.Final() {
ids, err := idx.IDs()
if err != nil {
panic("internal error - finalized index without ID")
}
debug.Log("adding index ids %v to supersedes field", ids)
obsolete.Merge(restic.NewIDSet(ids...))
}
for pbs := range idx.EachByPack(wgCtx, excludePacks) {
newIndex.StorePack(pbs.PackID, pbs.Blobs)
p.Add(1)
if Full(newIndex) {
select {
case ch <- newIndex:
case <-wgCtx.Done():
return wgCtx.Err()
}
newIndex = NewIndex()
}
}
if wgCtx.Err() != nil {
return wgCtx.Err()
}
}
select {
case ch <- newIndex:
case <-wgCtx.Done():
}
return nil
})
for idx := range ch {
wg.Go(func() error {
idx.Finalize()
_, err := idx.SaveIndex(wgCtx, repo)
return err
})
}
err := wg.Wait()
p.Done()
// the index no longer matches the stored state
mi.clear()
return err
}
// saveIndex saves the given indexes in the backend.
func (mi *MasterIndex) saveIndex(ctx context.Context, r restic.SaverUnpacked[restic.FileType], indexes ...*Index) error {
for i, idx := range indexes {
debug.Log("Saving index %d", i)
sid, err := idx.SaveIndex(ctx, r)
if err != nil {
return err
}
debug.Log("Saved index %d as %v", i, sid)
}
return mi.MergeFinalIndexes()
}
// Flush saves all new indexes in the backend.
func (mi *MasterIndex) Flush(ctx context.Context, r restic.SaverUnpacked[restic.FileType]) error {
return mi.saveIndex(ctx, r, mi.finalizeNotFinalIndexes()...)
}
// saveFullIndex saves all full indexes in the backend.
func (mi *MasterIndex) saveFullIndex(ctx context.Context, r restic.SaverUnpacked[restic.FileType]) error {
return mi.saveIndex(ctx, r, mi.finalizeFullIndexes()...)
}
// ListPacks returns the blobs of the specified pack files grouped by pack file.
func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan restic.PackBlobs {
out := make(chan restic.PackBlobs)
go func() {
defer close(out)
// only resort a part of the index to keep the memory overhead bounded
for i := byte(0); i < 16; i++ {
packBlob := make(map[restic.ID][]restic.Blob)
for pack := range packs {
if pack[0]&0xf == i {
packBlob[pack] = nil
}
}
if len(packBlob) == 0 {
continue
}
for pb := range mi.Values() {
if ctx.Err() != nil {
return
}
if packs.Has(pb.PackID) && pb.PackID[0]&0xf == i {
packBlob[pb.PackID] = append(packBlob[pb.PackID], pb.Blob)
}
}
// pass on packs
for packID, pbs := range packBlob {
// allow GC
packBlob[packID] = nil
select {
case out <- restic.PackBlobs{PackID: packID, Blobs: pbs}:
case <-ctx.Done():
return
}
}
}
}()
return out
}
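// Illustrative sketch (not part of the original file): draining the channel
// returned by ListPacks to count how many blobs each requested pack contains.
func countBlobsPerPack(ctx context.Context, mi *MasterIndex, packs restic.IDSet) map[restic.ID]int {
	counts := make(map[restic.ID]int)
	for pbs := range mi.ListPacks(ctx, packs) {
		// each PackBlobs value groups the blobs of a single pack file
		counts[pbs.PackID] = len(pbs.Blobs)
	}
	return counts
}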
// Only for use by AssociatedSet
func (mi *MasterIndex) blobIndex(h restic.BlobHandle) int {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
// other indexes are ignored as their ids can change when merged into the main index
return mi.idx[0].BlobIndex(h)
}
// Only for use by AssociatedSet
func (mi *MasterIndex) stableLen(t restic.BlobType) uint {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
// other indexes are ignored as their ids can change when merged into the main index
return mi.idx[0].Len(t)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/index/master_index_test.go | internal/repository/index/master_index_test.go | package index_test
import (
"context"
"fmt"
"math/rand"
"runtime"
"testing"
"time"
"github.com/restic/restic/internal/checker"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/repository/index"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
func TestMasterIndex(t *testing.T) {
bhInIdx1 := restic.NewRandomBlobHandle()
bhInIdx2 := restic.NewRandomBlobHandle()
bhInIdx12 := restic.BlobHandle{ID: restic.NewRandomID(), Type: restic.TreeBlob}
blob1 := restic.PackedBlob{
PackID: restic.NewRandomID(),
Blob: restic.Blob{
BlobHandle: bhInIdx1,
Length: uint(crypto.CiphertextLength(10)),
Offset: 0,
},
}
blob2 := restic.PackedBlob{
PackID: restic.NewRandomID(),
Blob: restic.Blob{
BlobHandle: bhInIdx2,
Length: uint(crypto.CiphertextLength(100)),
Offset: 10,
UncompressedLength: 200,
},
}
blob12a := restic.PackedBlob{
PackID: restic.NewRandomID(),
Blob: restic.Blob{
BlobHandle: bhInIdx12,
Length: uint(crypto.CiphertextLength(123)),
Offset: 110,
UncompressedLength: 80,
},
}
blob12b := restic.PackedBlob{
PackID: restic.NewRandomID(),
Blob: restic.Blob{
BlobHandle: bhInIdx12,
Length: uint(crypto.CiphertextLength(123)),
Offset: 50,
UncompressedLength: 80,
},
}
idx1 := index.NewIndex()
idx1.StorePack(blob1.PackID, []restic.Blob{blob1.Blob})
idx1.StorePack(blob12a.PackID, []restic.Blob{blob12a.Blob})
idx2 := index.NewIndex()
idx2.StorePack(blob2.PackID, []restic.Blob{blob2.Blob})
idx2.StorePack(blob12b.PackID, []restic.Blob{blob12b.Blob})
mIdx := index.NewMasterIndex()
mIdx.Insert(idx1)
mIdx.Insert(idx2)
// test bhInIdx1
blobs := mIdx.Lookup(bhInIdx1)
rtest.Equals(t, []restic.PackedBlob{blob1}, blobs)
size, found := mIdx.LookupSize(bhInIdx1)
rtest.Equals(t, true, found)
rtest.Equals(t, uint(10), size)
// test bhInIdx2
blobs = mIdx.Lookup(bhInIdx2)
rtest.Equals(t, []restic.PackedBlob{blob2}, blobs)
size, found = mIdx.LookupSize(bhInIdx2)
rtest.Equals(t, true, found)
rtest.Equals(t, uint(200), size)
// test bhInIdx12
blobs = mIdx.Lookup(bhInIdx12)
rtest.Equals(t, 2, len(blobs))
// test Lookup result for blob12a
found = false
if blobs[0] == blob12a || blobs[1] == blob12a {
found = true
}
rtest.Assert(t, found, "blob12a not found in result")
// test Lookup result for blob12b
found = false
if blobs[0] == blob12b || blobs[1] == blob12b {
found = true
}
rtest.Assert(t, found, "blob12b not found in result")
size, found = mIdx.LookupSize(bhInIdx12)
rtest.Equals(t, true, found)
rtest.Equals(t, uint(80), size)
// test not in index
blobs = mIdx.Lookup(restic.NewRandomBlobHandle())
rtest.Assert(t, blobs == nil, "Expected no blobs when fetching with a random id")
_, found = mIdx.LookupSize(restic.NewRandomBlobHandle())
rtest.Assert(t, !found, "Expected no blobs when fetching with a random id")
}
func TestMasterIndexAddPending(t *testing.T) {
mIdx := index.NewMasterIndex()
// Test AddPending: successfully add a new blob
bhPending := restic.NewRandomBlobHandle()
added := mIdx.AddPending(bhPending, 100)
rtest.Equals(t, true, added)
// Test AddPending: try to add the same blob again (should return false)
added = mIdx.AddPending(bhPending, 200)
rtest.Equals(t, false, added)
// Test AddPending: try to add a blob that's already in an index (should return false)
bhInIndex := restic.NewRandomBlobHandle()
idx := index.NewIndex()
idx.StorePack(restic.NewRandomID(), []restic.Blob{{
BlobHandle: bhInIndex,
Length: uint(crypto.CiphertextLength(50)),
Offset: 0,
UncompressedLength: 50,
}})
mIdx.Insert(idx)
added = mIdx.AddPending(bhInIndex, 100)
rtest.Equals(t, false, added)
// Test LookupSize: returns pending blob size when blob is pending
size, found := mIdx.LookupSize(bhPending)
rtest.Equals(t, true, found)
rtest.Equals(t, uint(100), size)
}
// noopSaver is a no-op implementation of SaverUnpacked for testing.
type noopSaver struct{}
func (n *noopSaver) Connections() uint {
return 2
}
func (n *noopSaver) SaveUnpacked(_ context.Context, _ restic.FileType, buf []byte) (restic.ID, error) {
return restic.Hash(buf), nil
}
func TestMasterIndexStorePackRemovesPending(t *testing.T) {
mIdx := index.NewMasterIndex()
// Add a blob as pending
bhPending := restic.NewRandomBlobHandle()
added := mIdx.AddPending(bhPending, 75)
rtest.Equals(t, true, added)
// Store the blob in a pack
packID := restic.NewRandomID()
blob := restic.Blob{
BlobHandle: bhPending,
Length: uint(crypto.CiphertextLength(75)),
Offset: 0,
UncompressedLength: 75,
}
saver := &noopSaver{}
err := mIdx.StorePack(context.Background(), packID, []restic.Blob{blob}, saver)
rtest.OK(t, err)
// Verify it is still found
size, found := mIdx.LookupSize(bhPending)
rtest.Equals(t, true, found)
rtest.Equals(t, uint(75), size)
// Verify the blob can be found via Lookup from the index
blobs := mIdx.Lookup(bhPending)
rtest.Assert(t, len(blobs) > 0, "blob should be found in index after StorePack")
rtest.Equals(t, packID, blobs[0].PackID)
rtest.Equals(t, bhPending, blobs[0].BlobHandle)
// Test that adding the same blob as pending again fails (it's now in index)
added = mIdx.AddPending(bhPending, 100)
rtest.Equals(t, false, added)
}
func TestMasterMergeFinalIndexes(t *testing.T) {
bhInIdx1 := restic.NewRandomBlobHandle()
bhInIdx2 := restic.NewRandomBlobHandle()
blob1 := restic.PackedBlob{
PackID: restic.NewRandomID(),
Blob: restic.Blob{
BlobHandle: bhInIdx1,
Length: 10,
Offset: 0,
},
}
blob2 := restic.PackedBlob{
PackID: restic.NewRandomID(),
Blob: restic.Blob{
BlobHandle: bhInIdx2,
Length: 100,
Offset: 10,
UncompressedLength: 200,
},
}
idx1 := index.NewIndex()
idx1.StorePack(blob1.PackID, []restic.Blob{blob1.Blob})
idx2 := index.NewIndex()
idx2.StorePack(blob2.PackID, []restic.Blob{blob2.Blob})
mIdx := index.NewMasterIndex()
mIdx.Insert(idx1)
mIdx.Insert(idx2)
rtest.Equals(t, restic.NewIDSet(), mIdx.IDs())
finalIndexes, idxCount, ids := index.TestMergeIndex(t, mIdx)
rtest.Equals(t, []*index.Index{idx1, idx2}, finalIndexes)
rtest.Equals(t, 1, idxCount)
rtest.Equals(t, ids, mIdx.IDs())
blobCount := 0
for range mIdx.Values() {
blobCount++
}
rtest.Equals(t, 2, blobCount)
blobs := mIdx.Lookup(bhInIdx1)
rtest.Equals(t, []restic.PackedBlob{blob1}, blobs)
blobs = mIdx.Lookup(bhInIdx2)
rtest.Equals(t, []restic.PackedBlob{blob2}, blobs)
blobs = mIdx.Lookup(restic.NewRandomBlobHandle())
rtest.Assert(t, blobs == nil, "Expected no blobs when fetching with a random id")
// merge another index containing identical blobs
idx3 := index.NewIndex()
idx3.StorePack(blob1.PackID, []restic.Blob{blob1.Blob})
idx3.StorePack(blob2.PackID, []restic.Blob{blob2.Blob})
mIdx.Insert(idx3)
finalIndexes, idxCount, newIDs := index.TestMergeIndex(t, mIdx)
rtest.Equals(t, []*index.Index{idx3}, finalIndexes)
rtest.Equals(t, 1, idxCount)
ids.Merge(newIDs)
rtest.Equals(t, ids, mIdx.IDs())
// Index should have same entries as before!
blobs = mIdx.Lookup(bhInIdx1)
rtest.Equals(t, []restic.PackedBlob{blob1}, blobs)
blobs = mIdx.Lookup(bhInIdx2)
rtest.Equals(t, []restic.PackedBlob{blob2}, blobs)
blobCount = 0
for range mIdx.Values() {
blobCount++
}
rtest.Equals(t, 2, blobCount)
}
func createRandomMasterIndex(t testing.TB, rng *rand.Rand, num, size int) (*index.MasterIndex, restic.BlobHandle) {
mIdx := index.NewMasterIndex()
for i := 0; i < num-1; i++ {
idx, _ := createRandomIndex(rng, size)
mIdx.Insert(idx)
}
idx1, lookupBh := createRandomIndex(rng, size)
mIdx.Insert(idx1)
index.TestMergeIndex(t, mIdx)
return mIdx, lookupBh
}
func BenchmarkMasterIndexAlloc(b *testing.B) {
rng := rand.New(rand.NewSource(0))
b.ReportAllocs()
for i := 0; i < b.N; i++ {
createRandomMasterIndex(b, rng, 10000, 5)
}
}
func BenchmarkMasterIndexLookupSingleIndex(b *testing.B) {
mIdx, lookupBh := createRandomMasterIndex(b, rand.New(rand.NewSource(0)), 1, 200000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
mIdx.Lookup(lookupBh)
}
}
func BenchmarkMasterIndexLookupMultipleIndex(b *testing.B) {
mIdx, lookupBh := createRandomMasterIndex(b, rand.New(rand.NewSource(0)), 100, 10000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
mIdx.Lookup(lookupBh)
}
}
func BenchmarkMasterIndexLookupSingleIndexUnknown(b *testing.B) {
lookupBh := restic.NewRandomBlobHandle()
mIdx, _ := createRandomMasterIndex(b, rand.New(rand.NewSource(0)), 1, 200000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
mIdx.Lookup(lookupBh)
}
}
func BenchmarkMasterIndexLookupMultipleIndexUnknown(b *testing.B) {
lookupBh := restic.NewRandomBlobHandle()
mIdx, _ := createRandomMasterIndex(b, rand.New(rand.NewSource(0)), 100, 10000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
mIdx.Lookup(lookupBh)
}
}
func BenchmarkMasterIndexLookupParallel(b *testing.B) {
for _, numindices := range []int{25, 50, 100} {
var lookupBh restic.BlobHandle
b.StopTimer()
rng := rand.New(rand.NewSource(0))
mIdx, lookupBh := createRandomMasterIndex(b, rng, numindices, 10000)
b.StartTimer()
name := fmt.Sprintf("known,indices=%d", numindices)
b.Run(name, func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
mIdx.Lookup(lookupBh)
}
})
})
lookupBh = restic.NewRandomBlobHandle()
name = fmt.Sprintf("unknown,indices=%d", numindices)
b.Run(name, func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
mIdx.Lookup(lookupBh)
}
})
})
}
}
func BenchmarkMasterIndexLookupBlobSize(b *testing.B) {
rng := rand.New(rand.NewSource(0))
mIdx, lookupBh := createRandomMasterIndex(b, rand.New(rng), 5, 200000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
mIdx.LookupSize(lookupBh)
}
}
func BenchmarkMasterIndexEach(b *testing.B) {
rng := rand.New(rand.NewSource(0))
mIdx, _ := createRandomMasterIndex(b, rand.New(rng), 5, 200000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
entries := 0
for range mIdx.Values() {
entries++
}
}
}
func BenchmarkMasterIndexGC(b *testing.B) {
mIdx, _ := createRandomMasterIndex(b, rand.New(rand.NewSource(0)), 100, 10000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
runtime.GC()
}
runtime.KeepAlive(mIdx)
}
var (
snapshotTime = time.Unix(1470492820, 207401672)
depth = 3
)
func createFilledRepo(t testing.TB, snapshots int, version uint) (*repository.Repository, restic.Unpacked[restic.FileType]) {
repo, unpacked, _ := repository.TestRepositoryWithVersion(t, version)
for i := 0; i < snapshots; i++ {
data.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(i)*time.Second), depth)
}
return repo, unpacked
}
func TestIndexSave(t *testing.T) {
repository.TestAllVersions(t, testIndexSave)
}
func testIndexSave(t *testing.T, version uint) {
for _, test := range []struct {
name string
saver func(idx *index.MasterIndex, repo restic.Unpacked[restic.FileType]) error
}{
{"rewrite no-op", func(idx *index.MasterIndex, repo restic.Unpacked[restic.FileType]) error {
return idx.Rewrite(context.TODO(), repo, nil, nil, nil, index.MasterIndexRewriteOpts{})
}},
{"rewrite skip-all", func(idx *index.MasterIndex, repo restic.Unpacked[restic.FileType]) error {
return idx.Rewrite(context.TODO(), repo, nil, restic.NewIDSet(), nil, index.MasterIndexRewriteOpts{})
}},
{"SaveFallback", func(idx *index.MasterIndex, repo restic.Unpacked[restic.FileType]) error {
err := restic.ParallelRemove(context.TODO(), repo, idx.IDs(), restic.IndexFile, nil, nil)
if err != nil {
return err
}
return idx.SaveFallback(context.TODO(), repo, restic.NewIDSet(), nil)
}},
} {
t.Run(test.name, func(t *testing.T) {
repo, unpacked := createFilledRepo(t, 3, version)
idx := index.NewMasterIndex()
rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil))
blobs := make(map[restic.PackedBlob]struct{})
for pb := range idx.Values() {
blobs[pb] = struct{}{}
}
rtest.OK(t, test.saver(idx, unpacked))
idx = index.NewMasterIndex()
rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil))
for pb := range idx.Values() {
if _, ok := blobs[pb]; ok {
delete(blobs, pb)
} else {
t.Fatalf("unexpected blobs %v", pb)
}
}
rtest.Equals(t, 0, len(blobs), "saved index is missing blobs")
checker.TestCheckRepo(t, repo)
})
}
}
func TestIndexSavePartial(t *testing.T) {
repository.TestAllVersions(t, testIndexSavePartial)
}
func testIndexSavePartial(t *testing.T, version uint) {
repo, unpacked := createFilledRepo(t, 3, version)
// capture blob list before adding fourth snapshot
idx := index.NewMasterIndex()
rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil))
blobs := make(map[restic.PackedBlob]struct{})
for pb := range idx.Values() {
blobs[pb] = struct{}{}
}
// add+remove new snapshot and track its pack files
packsBefore := listPacks(t, repo)
sn := data.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(4)*time.Second), depth)
rtest.OK(t, repo.RemoveUnpacked(context.TODO(), restic.WriteableSnapshotFile, *sn.ID()))
packsAfter := listPacks(t, repo)
newPacks := packsAfter.Sub(packsBefore)
// rewrite index and remove pack files of new snapshot
idx = index.NewMasterIndex()
rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil))
rtest.OK(t, idx.Rewrite(context.TODO(), unpacked, newPacks, nil, nil, index.MasterIndexRewriteOpts{}))
// check blobs
idx = index.NewMasterIndex()
rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil))
for pb := range idx.Values() {
if _, ok := blobs[pb]; ok {
delete(blobs, pb)
} else {
t.Fatalf("unexpected blobs %v", pb)
}
}
rtest.Equals(t, 0, len(blobs), "saved index is missing blobs")
// remove pack files to make check happy
rtest.OK(t, restic.ParallelRemove(context.TODO(), unpacked, newPacks, restic.PackFile, nil, nil))
checker.TestCheckRepo(t, repo)
}
func listPacks(t testing.TB, repo restic.Lister) restic.IDSet {
s := restic.NewIDSet()
rtest.OK(t, repo.List(context.TODO(), restic.PackFile, func(id restic.ID, _ int64) error {
s.Insert(id)
return nil
}))
return s
}
func TestRewriteOversizedIndex(t *testing.T) {
repo, unpacked, _ := repository.TestRepositoryWithVersion(t, 2)
const fullIndexCount = 1000
// replace index size checks for testing
originalIndexFull := index.Full
originalIndexOversized := index.Oversized
defer func() {
index.Full = originalIndexFull
index.Oversized = originalIndexOversized
}()
index.Full = func(idx *index.Index) bool {
return idx.Len(restic.DataBlob) > fullIndexCount
}
index.Oversized = func(idx *index.Index) bool {
return idx.Len(restic.DataBlob) > 2*fullIndexCount
}
var blobs []restic.Blob
// build oversized index
idx := index.NewIndex()
numPacks := 5
for p := 0; p < numPacks; p++ {
packID := restic.NewRandomID()
packBlobs := make([]restic.Blob, 0, fullIndexCount)
for i := 0; i < fullIndexCount; i++ {
blob := restic.Blob{
BlobHandle: restic.BlobHandle{
Type: restic.DataBlob,
ID: restic.NewRandomID(),
},
Length: 100,
Offset: uint(i * 100),
}
packBlobs = append(packBlobs, blob)
blobs = append(blobs, blob)
}
idx.StorePack(packID, packBlobs)
}
idx.Finalize()
_, err := idx.SaveIndex(context.Background(), unpacked)
rtest.OK(t, err)
// construct master index for the oversized index
mi := index.NewMasterIndex()
rtest.OK(t, mi.Load(context.Background(), repo, nil, nil))
// rewrite the index
rtest.OK(t, mi.Rewrite(context.Background(), unpacked, nil, nil, nil, index.MasterIndexRewriteOpts{}))
// load the rewritten indexes
mi2 := index.NewMasterIndex()
rtest.OK(t, mi2.Load(context.Background(), repo, nil, nil))
// verify that blobs are still in the index
for _, blob := range blobs {
_, found := mi2.LookupSize(blob.BlobHandle)
rtest.Assert(t, found, "blob %v missing after rewrite", blob.ID)
}
// check that multiple indexes were created
ids := mi2.IDs()
rtest.Assert(t, len(ids) > 1, "oversized index was not split into multiple indexes")
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/index/associated_data_test.go | internal/repository/index/associated_data_test.go | package index
import (
"context"
"slices"
"testing"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
)
type noopSaver struct{}
func (n *noopSaver) Connections() uint {
return 2
}
func (n *noopSaver) SaveUnpacked(_ context.Context, _ restic.FileType, buf []byte) (restic.ID, error) {
return restic.Hash(buf), nil
}
func makeFakePackedBlob() (restic.BlobHandle, restic.PackedBlob) {
bh := restic.NewRandomBlobHandle()
blob := restic.PackedBlob{
PackID: restic.NewRandomID(),
Blob: restic.Blob{
BlobHandle: bh,
Length: uint(crypto.CiphertextLength(10)),
Offset: 0,
},
}
return bh, blob
}
func list(bs *AssociatedSet[uint8]) restic.BlobHandles {
return restic.BlobHandles(slices.Collect(bs.Keys()))
}
func TestAssociatedSet(t *testing.T) {
bh, blob := makeFakePackedBlob()
mi := NewMasterIndex()
test.OK(t, mi.StorePack(context.TODO(), blob.PackID, []restic.Blob{blob.Blob}, &noopSaver{}))
test.OK(t, mi.Flush(context.TODO(), &noopSaver{}))
bs := NewAssociatedSet[uint8](mi)
test.Equals(t, bs.Len(), 0)
test.Equals(t, list(bs), restic.BlobHandles(nil))
// check non existent
test.Equals(t, bs.Has(bh), false)
_, ok := bs.Get(bh)
test.Equals(t, false, ok)
// test insert
bs.Insert(bh)
test.Equals(t, bs.Has(bh), true)
test.Equals(t, bs.Len(), 1)
test.Equals(t, list(bs), restic.BlobHandles{bh})
test.Equals(t, 0, len(bs.overflow))
// test set
bs.Set(bh, 42)
test.Equals(t, bs.Has(bh), true)
test.Equals(t, bs.Len(), 1)
val, ok := bs.Get(bh)
test.Equals(t, true, ok)
test.Equals(t, uint8(42), val)
s := bs.String()
test.Assert(t, len(s) > 10, "invalid string: %v", s)
// test remove
bs.Delete(bh)
test.Equals(t, bs.Len(), 0)
test.Equals(t, bs.Has(bh), false)
test.Equals(t, list(bs), restic.BlobHandles(nil))
test.Equals(t, "{}", bs.String())
// test set
bs.Set(bh, 43)
test.Equals(t, bs.Has(bh), true)
test.Equals(t, bs.Len(), 1)
val, ok = bs.Get(bh)
test.Equals(t, true, ok)
test.Equals(t, uint8(43), val)
test.Equals(t, 0, len(bs.overflow))
// test update
bs.Set(bh, 44)
val, ok = bs.Get(bh)
test.Equals(t, true, ok)
test.Equals(t, uint8(44), val)
test.Equals(t, 0, len(bs.overflow))
// test overflow blob
of := restic.NewRandomBlobHandle()
test.Equals(t, false, bs.Has(of))
// set
bs.Set(of, 7)
test.Equals(t, 1, len(bs.overflow))
test.Equals(t, bs.Len(), 2)
// get
val, ok = bs.Get(of)
test.Equals(t, true, ok)
test.Equals(t, uint8(7), val)
test.Equals(t, list(bs), restic.BlobHandles{of, bh})
// update
bs.Set(of, 8)
val, ok = bs.Get(of)
test.Equals(t, true, ok)
test.Equals(t, uint8(8), val)
test.Equals(t, 1, len(bs.overflow))
// delete
bs.Delete(of)
test.Equals(t, bs.Len(), 1)
test.Equals(t, bs.Has(of), false)
test.Equals(t, list(bs), restic.BlobHandles{bh})
test.Equals(t, 0, len(bs.overflow))
}
func TestAssociatedSetWithExtendedIndex(t *testing.T) {
_, blob := makeFakePackedBlob()
mi := NewMasterIndex()
test.OK(t, mi.StorePack(context.TODO(), blob.PackID, []restic.Blob{blob.Blob}, &noopSaver{}))
test.OK(t, mi.Flush(context.TODO(), &noopSaver{}))
bs := NewAssociatedSet[uint8](mi)
// add new blobs to index after building the set
of, blob2 := makeFakePackedBlob()
test.OK(t, mi.StorePack(context.TODO(), blob2.PackID, []restic.Blob{blob2.Blob}, &noopSaver{}))
test.OK(t, mi.Flush(context.TODO(), &noopSaver{}))
// non-existent
test.Equals(t, false, bs.Has(of))
// set
bs.Set(of, 5)
test.Equals(t, 1, len(bs.overflow))
test.Equals(t, bs.Len(), 1)
// get
val, ok := bs.Get(of)
test.Equals(t, true, ok)
test.Equals(t, uint8(5), val)
test.Equals(t, list(bs), restic.BlobHandles{of})
// update
bs.Set(of, 8)
val, ok = bs.Get(of)
test.Equals(t, true, ok)
test.Equals(t, uint8(8), val)
test.Equals(t, 1, len(bs.overflow))
// delete
bs.Delete(of)
test.Equals(t, bs.Len(), 0)
test.Equals(t, bs.Has(of), false)
test.Equals(t, list(bs), restic.BlobHandles(nil))
test.Equals(t, 0, len(bs.overflow))
}
func TestAssociatedSetIntersectAndSub(t *testing.T) {
mi := NewMasterIndex()
saver := &noopSaver{}
bh1, blob1 := makeFakePackedBlob()
bh2, blob2 := makeFakePackedBlob()
bh3, blob3 := makeFakePackedBlob()
bh4, blob4 := makeFakePackedBlob()
test.OK(t, mi.StorePack(context.TODO(), blob1.PackID, []restic.Blob{blob1.Blob}, saver))
test.OK(t, mi.StorePack(context.TODO(), blob2.PackID, []restic.Blob{blob2.Blob}, saver))
test.OK(t, mi.StorePack(context.TODO(), blob3.PackID, []restic.Blob{blob3.Blob}, saver))
test.OK(t, mi.StorePack(context.TODO(), blob4.PackID, []restic.Blob{blob4.Blob}, saver))
test.OK(t, mi.Flush(context.TODO(), saver))
t.Run("Intersect", func(t *testing.T) {
bs1, bs2 := NewAssociatedSet[uint8](mi), NewAssociatedSet[uint8](mi)
test.Equals(t, bs1.Intersect(bs2).Len(), 0)
bs1, bs2 = NewAssociatedSet[uint8](mi), NewAssociatedSet[uint8](mi)
bs1.Set(bh1, 10)
bs2.Set(bh2, 20)
test.Equals(t, bs1.Intersect(bs2).Len(), 0)
bs1, bs2 = NewAssociatedSet[uint8](mi), NewAssociatedSet[uint8](mi)
bs1.Set(bh3, 40)
bs2.Set(bh3, 50)
bs2.Set(bh4, 60)
result := bs1.Intersect(bs2)
test.Equals(t, result.Len(), 1)
val, _ := result.Get(bh3)
test.Equals(t, uint8(40), val)
bs1, bs2 = NewAssociatedSet[uint8](mi), NewAssociatedSet[uint8](mi)
bs1.Set(bh3, 40)
bs1.Set(bh4, 70)
bs2.Set(bh3, 50)
bs2.Set(bh4, 60)
result = bs1.Intersect(bs2)
test.Equals(t, result.Len(), 2)
val, _ = result.Get(bh3)
test.Equals(t, uint8(40), val)
val, _ = result.Get(bh4)
test.Equals(t, uint8(70), val)
})
t.Run("Sub", func(t *testing.T) {
bs1, bs2 := NewAssociatedSet[uint8](mi), NewAssociatedSet[uint8](mi)
test.Equals(t, bs1.Sub(bs2).Len(), 0)
bs1, bs2 = NewAssociatedSet[uint8](mi), NewAssociatedSet[uint8](mi)
bs1.Set(bh1, 10)
bs1.Set(bh2, 20)
bs2.Set(bh3, 30)
result := bs1.Sub(bs2)
test.Equals(t, result.Len(), 2)
val, _ := result.Get(bh1)
test.Equals(t, uint8(10), val)
val, _ = result.Get(bh2)
test.Equals(t, uint8(20), val)
bs1, bs2 = NewAssociatedSet[uint8](mi), NewAssociatedSet[uint8](mi)
bs1.Set(bh1, 10)
bs1.Set(bh2, 20)
bs1.Set(bh3, 40)
bs2.Set(bh2, 50)
result = bs1.Sub(bs2)
test.Equals(t, result.Len(), 2)
test.Assert(t, result.Has(bh1) && result.Has(bh3) && !result.Has(bh2), "only bh1 and bh3 should be in result")
bs1, bs2 = NewAssociatedSet[uint8](mi), NewAssociatedSet[uint8](mi)
bs1.Set(bh1, 60)
bs2.Set(bh1, 70)
bs2.Set(bh2, 80)
test.Equals(t, bs1.Sub(bs2).Len(), 0)
})
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/pack/pack_test.go | internal/repository/pack/pack_test.go | package pack_test
import (
"bytes"
"context"
"crypto/rand"
"crypto/sha256"
"encoding/json"
"io"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/mem"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/repository/pack"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
var testLens = []int{23, 31650, 25860, 10928, 13769, 19862, 5211, 127, 13690, 30231}
type Buf struct {
data []byte
id restic.ID
}
func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) {
bufs := createBuffers(t, lengths)
// pack blobs
var buf bytes.Buffer
p := pack.NewPacker(k, &buf)
for _, b := range bufs {
_, err := p.Add(restic.TreeBlob, b.id, b.data, 2*len(b.data))
rtest.OK(t, err)
}
err := p.Finalize()
rtest.OK(t, err)
return bufs, buf.Bytes(), p.Size()
}
func createBuffers(t testing.TB, lengths []int) []Buf {
bufs := []Buf{}
for _, l := range lengths {
b := make([]byte, l)
_, err := io.ReadFull(rand.Reader, b)
rtest.OK(t, err)
h := sha256.Sum256(b)
bufs = append(bufs, Buf{data: b, id: h})
}
return bufs
}
func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReaderAt, packSize uint) {
written := 0
for _, buf := range bufs {
written += len(buf.data)
}
// read and parse it again
entries, hdrSize, err := pack.List(k, rd, int64(packSize))
rtest.OK(t, err)
rtest.Equals(t, len(entries), len(bufs))
// check the header size calculation for consistency
headerSize := pack.CalculateHeaderSize(entries)
written += headerSize
// check length
rtest.Equals(t, uint(written), packSize)
rtest.Equals(t, headerSize, int(hdrSize))
var buf []byte
for i, b := range bufs {
e := entries[i]
rtest.Equals(t, b.id, e.ID)
if len(buf) < int(e.Length) {
buf = make([]byte, int(e.Length))
}
buf = buf[:int(e.Length)]
n, err := rd.ReadAt(buf, int64(e.Offset))
rtest.OK(t, err)
buf = buf[:n]
rtest.Assert(t, bytes.Equal(b.data, buf),
"data for blob %v doesn't match", i)
}
}
func TestCreatePack(t *testing.T) {
// create random keys
k := crypto.NewRandomKey()
bufs, packData, packSize := newPack(t, k, testLens)
rtest.Equals(t, uint(len(packData)), packSize)
verifyBlobs(t, bufs, k, bytes.NewReader(packData), packSize)
}
var blobTypeJSON = []struct {
t restic.BlobType
res string
}{
{restic.DataBlob, `"data"`},
{restic.TreeBlob, `"tree"`},
}
func TestBlobTypeJSON(t *testing.T) {
for _, test := range blobTypeJSON {
// test serialize
buf, err := json.Marshal(test.t)
rtest.OK(t, err)
rtest.Equals(t, test.res, string(buf))
// test unserialize
var v restic.BlobType
err = json.Unmarshal([]byte(test.res), &v)
rtest.OK(t, err)
rtest.Equals(t, test.t, v)
}
}
func TestUnpackReadSeeker(t *testing.T) {
// create random keys
k := crypto.NewRandomKey()
bufs, packData, packSize := newPack(t, k, testLens)
b := mem.New()
id := restic.Hash(packData)
handle := backend.Handle{Type: backend.PackFile, Name: id.String()}
rtest.OK(t, b.Save(context.TODO(), handle, backend.NewByteReader(packData, b.Hasher())))
verifyBlobs(t, bufs, k, backend.ReaderAt(context.TODO(), b, handle), packSize)
}
func TestShortPack(t *testing.T) {
k := crypto.NewRandomKey()
bufs, packData, packSize := newPack(t, k, []int{23})
b := mem.New()
id := restic.Hash(packData)
handle := backend.Handle{Type: backend.PackFile, Name: id.String()}
rtest.OK(t, b.Save(context.TODO(), handle, backend.NewByteReader(packData, b.Hasher())))
verifyBlobs(t, bufs, k, backend.ReaderAt(context.TODO(), b, handle), packSize)
}
func TestPackMerge(t *testing.T) {
k := crypto.NewRandomKey()
bufs := createBuffers(t, []int{1000, 5000, 2000, 3000, 4000, 1500})
splitAt := 3
// Fill packers
var buf1 bytes.Buffer
packer1 := pack.NewPacker(k, &buf1)
for _, b := range bufs[:splitAt] {
_, err := packer1.Add(restic.TreeBlob, b.id, b.data, 2*len(b.data))
rtest.OK(t, err)
}
var buf2 bytes.Buffer
packer2 := pack.NewPacker(k, &buf2)
for _, b := range bufs[splitAt:] {
_, err := packer2.Add(restic.DataBlob, b.id, b.data, 2*len(b.data))
rtest.OK(t, err)
}
err := packer1.Merge(packer2, &buf2)
rtest.OK(t, err)
err = packer1.Finalize()
rtest.OK(t, err)
// Verify all blobs are present in the merged pack
verifyBlobs(t, bufs, k, bytes.NewReader(buf1.Bytes()), packer1.Size())
rtest.Equals(t, len(bufs), packer1.Count())
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/pack/pack_internal_test.go | internal/repository/pack/pack_internal_test.go | package pack
import (
"bytes"
"encoding/binary"
"io"
"strings"
"testing"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
func TestParseHeaderEntry(t *testing.T) {
h := headerEntry{
Type: 0, // Blob
Length: 100,
}
for i := range h.ID {
h.ID[i] = byte(i)
}
buf := new(bytes.Buffer)
_ = binary.Write(buf, binary.LittleEndian, &h)
b, size, err := parseHeaderEntry(buf.Bytes())
rtest.OK(t, err)
rtest.Equals(t, restic.DataBlob, b.Type)
rtest.Equals(t, plainEntrySize, size)
t.Logf("%v %v", h.ID, b.ID)
rtest.Equals(t, h.ID[:], b.ID[:])
rtest.Equals(t, uint(h.Length), b.Length)
rtest.Equals(t, uint(0), b.UncompressedLength)
c := compressedHeaderEntry{
Type: 2, // compressed Blob
Length: 100,
UncompressedLength: 200,
}
for i := range c.ID {
c.ID[i] = byte(i)
}
buf = new(bytes.Buffer)
_ = binary.Write(buf, binary.LittleEndian, &c)
b, size, err = parseHeaderEntry(buf.Bytes())
rtest.OK(t, err)
rtest.Equals(t, restic.DataBlob, b.Type)
rtest.Equals(t, entrySize, size)
t.Logf("%v %v", c.ID, b.ID)
rtest.Equals(t, c.ID[:], b.ID[:])
rtest.Equals(t, uint(c.Length), b.Length)
rtest.Equals(t, uint(c.UncompressedLength), b.UncompressedLength)
}
func TestParseHeaderEntryErrors(t *testing.T) {
h := headerEntry{
Type: 0, // Blob
Length: 100,
}
for i := range h.ID {
h.ID[i] = byte(i)
}
h.Type = 0xae
buf := new(bytes.Buffer)
_ = binary.Write(buf, binary.LittleEndian, &h)
_, _, err := parseHeaderEntry(buf.Bytes())
rtest.Assert(t, err != nil, "no error for invalid type")
h.Type = 0
buf.Reset()
_ = binary.Write(buf, binary.LittleEndian, &h)
_, _, err = parseHeaderEntry(buf.Bytes()[:plainEntrySize-1])
rtest.Assert(t, err != nil, "no error for short input")
}
type countingReaderAt struct {
delegate io.ReaderAt
invocationCount int
}
func (rd *countingReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
rd.invocationCount++
return rd.delegate.ReadAt(p, off)
}
func TestReadHeaderEagerLoad(t *testing.T) {
testReadHeader := func(dataSize, entryCount, expectedReadInvocationCount int) {
expectedHeader := rtest.Random(0, entryCount*int(entrySize)+crypto.Extension)
buf := &bytes.Buffer{}
buf.Write(rtest.Random(0, dataSize)) // pack blobs data
buf.Write(expectedHeader) // pack header
rtest.OK(t, binary.Write(buf, binary.LittleEndian, uint32(len(expectedHeader)))) // pack header length
rd := &countingReaderAt{delegate: bytes.NewReader(buf.Bytes())}
header, err := readHeader(rd, int64(buf.Len()))
rtest.OK(t, err)
rtest.Equals(t, expectedHeader, header)
rtest.Equals(t, expectedReadInvocationCount, rd.invocationCount)
}
// basic
testReadHeader(100, 1, 1)
// header entries == eager entries
testReadHeader(100, eagerEntries-1, 1)
testReadHeader(100, eagerEntries, 1)
testReadHeader(100, eagerEntries+1, 2)
// file size == eager header load size
eagerLoadSize := int((eagerEntries * entrySize) + crypto.Extension)
headerSize := int(1*entrySize) + crypto.Extension
dataSize := eagerLoadSize - headerSize - binary.Size(uint32(0))
testReadHeader(dataSize-1, 1, 1)
testReadHeader(dataSize, 1, 1)
testReadHeader(dataSize+1, 1, 1)
testReadHeader(dataSize+2, 1, 1)
testReadHeader(dataSize+3, 1, 1)
testReadHeader(dataSize+4, 1, 1)
}
func TestReadRecords(t *testing.T) {
testReadRecords := func(dataSize, entryCount, totalRecords int) {
totalHeader := rtest.Random(0, totalRecords*int(entrySize)+crypto.Extension)
bufSize := entryCount*int(entrySize) + crypto.Extension
off := len(totalHeader) - bufSize
if off < 0 {
off = 0
}
expectedHeader := totalHeader[off:]
buf := &bytes.Buffer{}
buf.Write(rtest.Random(0, dataSize)) // pack blobs data
buf.Write(totalHeader) // pack header
rtest.OK(t, binary.Write(buf, binary.LittleEndian, uint32(len(totalHeader)))) // pack header length
rd := bytes.NewReader(buf.Bytes())
header, count, err := readRecords(rd, int64(rd.Len()), bufSize+4)
rtest.OK(t, err)
rtest.Equals(t, len(totalHeader)+4, count)
rtest.Equals(t, expectedHeader, header)
}
// basic
testReadRecords(100, 1, 1)
testReadRecords(100, 0, 1)
testReadRecords(100, 1, 0)
// header entries ~ eager entries
testReadRecords(100, eagerEntries, eagerEntries-1)
testReadRecords(100, eagerEntries, eagerEntries)
testReadRecords(100, eagerEntries, eagerEntries+1)
// file size == eager header load size
eagerLoadSize := int((eagerEntries * entrySize) + crypto.Extension)
headerSize := int(1*entrySize) + crypto.Extension
dataSize := eagerLoadSize - headerSize - binary.Size(uint32(0))
testReadRecords(dataSize-1, 1, 1)
testReadRecords(dataSize, 1, 1)
testReadRecords(dataSize+1, 1, 1)
testReadRecords(dataSize+2, 1, 1)
testReadRecords(dataSize+3, 1, 1)
testReadRecords(dataSize+4, 1, 1)
for i := 0; i < 2; i++ {
for j := 0; j < 2; j++ {
testReadRecords(dataSize, i, j)
}
}
}
func TestUnpackedVerification(t *testing.T) {
// create random keys
k := crypto.NewRandomKey()
blobs := []restic.Blob{
{
BlobHandle: restic.NewRandomBlobHandle(),
Length: 42,
Offset: 0,
UncompressedLength: 2 * 42,
},
}
type DamageType string
const (
damageData DamageType = "data"
damageCiphertext DamageType = "ciphertext"
damageLength DamageType = "length"
)
for _, test := range []struct {
damage DamageType
msg string
}{
{"", ""},
{damageData, "pack header entry mismatch"},
{damageCiphertext, "ciphertext verification failed"},
{damageLength, "header decoding failed"},
} {
header, err := makeHeader(blobs)
rtest.OK(t, err)
if test.damage == damageData {
header[8] ^= 0x42
}
encryptedHeader := make([]byte, 0, crypto.CiphertextLength(len(header)))
nonce := crypto.NewRandomNonce()
encryptedHeader = append(encryptedHeader, nonce...)
encryptedHeader = k.Seal(encryptedHeader, nonce, header, nil)
encryptedHeader = binary.LittleEndian.AppendUint32(encryptedHeader, uint32(len(encryptedHeader)))
if test.damage == damageCiphertext {
encryptedHeader[8] ^= 0x42
}
if test.damage == damageLength {
encryptedHeader[len(encryptedHeader)-1] ^= 0x42
}
err = verifyHeader(k, encryptedHeader, blobs)
if test.msg == "" {
rtest.Assert(t, err == nil, "expected no error, got %v", err)
} else {
rtest.Assert(t, strings.Contains(err.Error(), test.msg), "expected error to contain %q, got %q", test.msg, err)
}
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/pack/pack.go | internal/repository/pack/pack.go | package pack
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"io"
"sync"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/crypto"
)
// Packer is used to create a new Pack.
type Packer struct {
blobs []restic.Blob
bytes uint
k *crypto.Key
wr io.Writer
m sync.Mutex
}
// NewPacker returns a new Packer that can be used to pack blobs together.
func NewPacker(k *crypto.Key, wr io.Writer) *Packer {
return &Packer{k: k, wr: wr}
}
// Add saves the given data as a new blob to the packer. Returned is the
// number of bytes written to the pack plus the pack header entry size.
func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte, uncompressedLength int) (int, error) {
p.m.Lock()
defer p.m.Unlock()
c := restic.Blob{BlobHandle: restic.BlobHandle{Type: t, ID: id}}
n, err := p.wr.Write(data)
c.Length = uint(n)
c.Offset = p.bytes
c.UncompressedLength = uint(uncompressedLength)
p.bytes += uint(n)
p.blobs = append(p.blobs, c)
n += CalculateEntrySize(c)
return n, errors.Wrap(err, "Write")
}
var entrySize = uint(binary.Size(restic.BlobType(0)) + 2*headerLengthSize + len(restic.ID{}))
var plainEntrySize = uint(binary.Size(restic.BlobType(0)) + headerLengthSize + len(restic.ID{}))
// headerEntry describes the format of header entries. It serves only as
// documentation.
type headerEntry struct {
Type uint8
Length uint32
ID restic.ID
}
// compressedHeaderEntry describes the format of header entries for compressed blobs.
// It serves only as documentation.
type compressedHeaderEntry struct {
Type uint8
Length uint32
UncompressedLength uint32
ID restic.ID
}
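// On-disk layout sketch derived from the two structs above (integers are
// little-endian, see makeHeader below):
//
//	plain entry:      [Type:1][Length:4][ID:32]                       = 37 bytes
//	compressed entry: [Type:1][Length:4][UncompressedLength:4][ID:32] = 41 bytes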
// Finalize writes the header for all added blobs and finalizes the pack.
func (p *Packer) Finalize() error {
p.m.Lock()
defer p.m.Unlock()
header, err := makeHeader(p.blobs)
if err != nil {
return err
}
encryptedHeader := make([]byte, 0, crypto.CiphertextLength(len(header)))
nonce := crypto.NewRandomNonce()
encryptedHeader = append(encryptedHeader, nonce...)
encryptedHeader = p.k.Seal(encryptedHeader, nonce, header, nil)
encryptedHeader = binary.LittleEndian.AppendUint32(encryptedHeader, uint32(len(encryptedHeader)))
if err := verifyHeader(p.k, encryptedHeader, p.blobs); err != nil {
//nolint:revive,staticcheck // ignore linter warnings about error message spelling
return fmt.Errorf("Detected data corruption while writing pack-file header: %w\nCorrupted data is either caused by hardware issues or software bugs. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting.", err)
}
// append the header
n, err := p.wr.Write(encryptedHeader)
if err != nil {
return errors.Wrap(err, "Write")
}
if n != len(encryptedHeader) {
return errors.New("wrong number of bytes written")
}
p.bytes += uint(len(encryptedHeader))
return nil
}
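// Sketch of the resulting pack file layout, following the steps above:
//
//	[blob 0]...[blob n][nonce][sealed header][MAC][header length: uint32 LE]
//
// The trailing length field counts the nonce, the sealed header and the MAC.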
func verifyHeader(k *crypto.Key, header []byte, expected []restic.Blob) error {
// do not offer a way to skip the pack header verification, as pack headers are usually small enough
// to not result in a significant performance impact
decoded, hdrSize, err := List(k, bytes.NewReader(header), int64(len(header)))
if err != nil {
return fmt.Errorf("header decoding failed: %w", err)
}
if hdrSize != uint32(len(header)) {
return fmt.Errorf("unexpected header size %v instead of %v", hdrSize, len(header))
}
if len(decoded) != len(expected) {
return fmt.Errorf("pack header size mismatch")
}
for i := 0; i < len(decoded); i++ {
if decoded[i] != expected[i] {
return fmt.Errorf("pack header entry mismatch got %v instead of %v", decoded[i], expected[i])
}
}
return nil
}
// HeaderOverhead returns an estimate of the number of bytes written by a call to Finalize.
func (p *Packer) HeaderOverhead() int {
return crypto.CiphertextLength(0) + binary.Size(uint32(0))
}
// makeHeader constructs the header for the given blobs.
func makeHeader(blobs []restic.Blob) ([]byte, error) {
buf := make([]byte, 0, len(blobs)*int(entrySize))
for _, b := range blobs {
switch {
case b.Type == restic.DataBlob && b.UncompressedLength == 0:
buf = append(buf, 0)
case b.Type == restic.TreeBlob && b.UncompressedLength == 0:
buf = append(buf, 1)
case b.Type == restic.DataBlob && b.UncompressedLength != 0:
buf = append(buf, 2)
case b.Type == restic.TreeBlob && b.UncompressedLength != 0:
buf = append(buf, 3)
default:
return nil, errors.Errorf("invalid blob type %v", b.Type)
}
var lenLE [4]byte
binary.LittleEndian.PutUint32(lenLE[:], uint32(b.Length))
buf = append(buf, lenLE[:]...)
if b.UncompressedLength != 0 {
binary.LittleEndian.PutUint32(lenLE[:], uint32(b.UncompressedLength))
buf = append(buf, lenLE[:]...)
}
buf = append(buf, b.ID[:]...)
}
return buf, nil
}
// Merge merges another packer into the current packer. Both packers must not be
// finalized yet.
func (p *Packer) Merge(other *Packer, otherData io.Reader) error {
other.m.Lock()
defer other.m.Unlock()
for _, blob := range other.blobs {
data := make([]byte, blob.Length)
_, err := io.ReadFull(otherData, data)
if err != nil {
return err
}
if _, err := p.Add(blob.Type, blob.ID, data, int(blob.UncompressedLength)); err != nil {
return err
}
}
return nil
}
// Size returns the number of bytes written so far.
func (p *Packer) Size() uint {
p.m.Lock()
defer p.m.Unlock()
return p.bytes
}
// Count returns the number of blobs in this packer.
func (p *Packer) Count() int {
p.m.Lock()
defer p.m.Unlock()
return len(p.blobs)
}
// HeaderFull returns true if the pack header is full.
func (p *Packer) HeaderFull() bool {
p.m.Lock()
defer p.m.Unlock()
return headerSize+uint(len(p.blobs)+1)*entrySize > MaxHeaderSize
}
// Blobs returns the slice of blobs that have been written.
func (p *Packer) Blobs() []restic.Blob {
p.m.Lock()
defer p.m.Unlock()
return p.blobs
}
func (p *Packer) String() string {
return fmt.Sprintf("<Packer %d blobs, %d bytes>", len(p.blobs), p.bytes)
}
var (
// we require at least one entry in the header, and one blob for a pack file
minFileSize = plainEntrySize + crypto.Extension + uint(headerLengthSize)
)
const (
// size of the header-length field at the end of the file; it is a uint32
headerLengthSize = 4
// headerSize is the header's constant overhead (independent of #entries)
headerSize = headerLengthSize + crypto.Extension
// MaxHeaderSize is the max size of header including header-length field
MaxHeaderSize = 16*1024*1024 + headerLengthSize
// number of header entries to download as part of header-length request
eagerEntries = 15
)
var (
// MaxHeaderEntries is the number of entries a pack file can contain at most
MaxHeaderEntries = (MaxHeaderSize - headerSize) / entrySize
)
// readRecords reads up to bufsize bytes from the end of the underlying ReaderAt,
// returning the raw header, the total number of bytes occupied by the header
// (including the length field), and any error.
// If the header contains fewer than bufsize bytes, the returned buffer is
// truncated to the start of the pack header.
func readRecords(rd io.ReaderAt, size int64, bufsize int) ([]byte, int, error) {
if bufsize > int(size) {
bufsize = int(size)
}
b := make([]byte, bufsize)
off := size - int64(bufsize)
if _, err := rd.ReadAt(b, off); err != nil {
return nil, 0, err
}
hlen := binary.LittleEndian.Uint32(b[len(b)-headerLengthSize:])
b = b[:len(b)-headerLengthSize]
debug.Log("header length: %v", hlen)
var err error
switch {
case hlen == 0:
err = InvalidFileError{Message: "header length is zero"}
case hlen < crypto.Extension:
err = InvalidFileError{Message: "header length is too short"}
case int64(hlen) > size-int64(headerLengthSize):
err = InvalidFileError{Message: "header is larger than file"}
case int64(hlen) > MaxHeaderSize-int64(headerLengthSize):
err = InvalidFileError{Message: "header is larger than MaxHeaderSize"}
}
if err != nil {
return nil, 0, errors.Wrap(err, "readHeader")
}
total := int(hlen + headerLengthSize)
if total < bufsize {
// truncate to the beginning of the pack header
b = b[len(b)-int(hlen):]
}
return b, total, nil
}
// readHeader reads the header at the end of rd. size is the length of the
// whole data accessible in rd.
func readHeader(rd io.ReaderAt, size int64) ([]byte, error) {
debug.Log("size: %v", size)
if size < int64(minFileSize) {
err := InvalidFileError{Message: "file is too short"}
return nil, errors.Wrap(err, "readHeader")
}
// assuming an extra request is significantly slower than downloading extra bytes,
// eagerly download eagerEntries header entries as part of the header-length request.
// only make a second request if the actual number of entries is greater than eagerEntries
eagerSize := eagerEntries*int(entrySize) + headerSize
b, c, err := readRecords(rd, size, eagerSize)
if err != nil {
return nil, err
}
if c <= eagerSize {
// eager read sufficed, return what we got
return b, nil
}
b, _, err = readRecords(rd, size, c)
if err != nil {
return nil, err
}
return b, nil
}
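// Worked example for the eager read above, assuming crypto.Extension is 32
// bytes: eagerSize = 15*41 + 4 + 32 = 651, so a single ReadAt of the last 651
// bytes covers the length field plus up to 15 compressed-format header
// entries; only larger headers need a second ReadAt.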
// InvalidFileError is returned when a file is found that is not a pack file.
type InvalidFileError struct {
Message string
}
func (e InvalidFileError) Error() string {
return e.Message
}
// List returns the list of entries found in a pack file and the length of the
// header (including the header-length field and crypto overhead)
func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, hdrSize uint32, err error) {
buf, err := readHeader(rd, size)
if err != nil {
return nil, 0, err
}
if len(buf) < crypto.CiphertextLength(0) {
return nil, 0, errors.New("invalid header, too short")
}
hdrSize = headerLengthSize + uint32(len(buf))
nonce, buf := buf[:k.NonceSize()], buf[k.NonceSize():]
buf, err = k.Open(buf[:0], nonce, buf, nil)
if err != nil {
return nil, 0, err
}
	// might over-allocate a bit if all blobs use the larger entrySize, but only by a few percent
entries = make([]restic.Blob, 0, uint(len(buf))/plainEntrySize)
pos := uint(0)
for len(buf) > 0 {
entry, headerSize, err := parseHeaderEntry(buf)
if err != nil {
return nil, 0, err
}
entry.Offset = pos
entries = append(entries, entry)
pos += entry.Length
buf = buf[headerSize:]
}
return entries, hdrSize, nil
}
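// Illustrative sketch, not part of the original file: how List is typically
// driven. The caller supplies the decryption key, an io.ReaderAt over the
// pack file and its total size; the names used here are placeholders.
func exampleListEntries(k *crypto.Key, rd io.ReaderAt, size int64) error {
	entries, hdrSize, err := List(k, rd, size)
	if err != nil {
		return err
	}
	debug.Log("pack header occupies %d bytes", hdrSize)
	for _, e := range entries {
		// each entry records where the (encrypted) blob lives in the pack
		debug.Log("blob %v (%v), %d bytes at offset %d", e.ID, e.Type, e.Length, e.Offset)
	}
	return nil
}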
func parseHeaderEntry(p []byte) (b restic.Blob, size uint, err error) {
l := uint(len(p))
size = plainEntrySize
if l < plainEntrySize {
err = errors.Errorf("parseHeaderEntry: buffer of size %d too short", len(p))
return b, size, err
}
tpe := p[0]
switch tpe {
case 0, 2:
b.Type = restic.DataBlob
case 1, 3:
b.Type = restic.TreeBlob
default:
return b, size, errors.Errorf("invalid type %d", tpe)
}
b.Length = uint(binary.LittleEndian.Uint32(p[1:5]))
p = p[5:]
if tpe == 2 || tpe == 3 {
size = entrySize
if l < entrySize {
err = errors.Errorf("parseHeaderEntry: buffer of size %d too short", len(p))
return b, size, err
}
b.UncompressedLength = uint(binary.LittleEndian.Uint32(p[0:4]))
p = p[4:]
}
copy(b.ID[:], p[:])
return b, size, nil
}
func CalculateEntrySize(blob restic.Blob) int {
if blob.UncompressedLength != 0 {
return int(entrySize)
}
return int(plainEntrySize)
}
func CalculateHeaderSize(blobs []restic.Blob) int {
size := headerSize
for _, blob := range blobs {
size += CalculateEntrySize(blob)
}
return size
}
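// Worked example (editor's note, follows directly from the definitions above):
// a pack holding one plain and one compressed blob needs a header of
// headerSize + plainEntrySize + entrySize bytes, i.e. the fixed overhead of
// the length field and crypto extension plus one entry per blob.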
// Size returns the size of all packs computed by index information.
// If onlyHdr is set to true, only the size of the header is returned
// Note that this function only gives correct sizes, if there are no
// duplicates in the index.
func Size(ctx context.Context, mi restic.ListBlobser, onlyHdr bool) (map[restic.ID]int64, error) {
packSize := make(map[restic.ID]int64)
err := mi.ListBlobs(ctx, func(blob restic.PackedBlob) {
size, ok := packSize[blob.PackID]
if !ok {
size = headerSize
}
if !onlyHdr {
size += int64(blob.Length)
}
packSize[blob.PackID] = size + int64(CalculateEntrySize(blob.Blob))
})
return packSize, err
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/pack/doc.go | internal/repository/pack/doc.go | // Package pack provides functions for combining and parsing pack files.
package pack
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/hashing/writer_test.go | internal/repository/hashing/writer_test.go | package hashing
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"io"
"testing"
)
func TestWriter(t *testing.T) {
tests := []int{5, 23, 2<<18 + 23, 1 << 20}
for _, size := range tests {
data := make([]byte, size)
_, err := io.ReadFull(rand.Reader, data)
if err != nil {
t.Fatalf("ReadFull: %v", err)
}
expectedHash := sha256.Sum256(data)
wr := NewWriter(io.Discard, sha256.New())
n, err := io.Copy(wr, bytes.NewReader(data))
if err != nil {
t.Fatal(err)
}
if n != int64(size) {
t.Errorf("Writer: invalid number of bytes written: got %d, expected %d",
n, size)
}
resultingHash := wr.Sum(nil)
if !bytes.Equal(expectedHash[:], resultingHash) {
t.Errorf("Writer: hashes do not match: expected %02x, got %02x",
expectedHash, resultingHash)
}
}
}
func BenchmarkWriter(b *testing.B) {
buf := make([]byte, 1<<22)
_, err := io.ReadFull(rand.Reader, buf)
if err != nil {
b.Fatal(err)
}
expectedHash := sha256.Sum256(buf)
b.SetBytes(int64(len(buf)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
wr := NewWriter(io.Discard, sha256.New())
n, err := io.Copy(wr, bytes.NewReader(buf))
if err != nil {
b.Fatal(err)
}
if n != int64(len(buf)) {
b.Errorf("Writer: invalid number of bytes written: got %d, expected %d",
n, len(buf))
}
resultingHash := wr.Sum(nil)
if !bytes.Equal(expectedHash[:], resultingHash) {
b.Errorf("Writer: hashes do not match: expected %02x, got %02x",
expectedHash, resultingHash)
}
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/hashing/writer.go | internal/repository/hashing/writer.go | package hashing
import (
"hash"
"io"
)
// Writer transparently hashes all data while writing it to the underlying writer.
type Writer struct {
w io.Writer
h hash.Hash
}
// NewWriter wraps the writer w and feeds all data written to the hash h.
func NewWriter(w io.Writer, h hash.Hash) *Writer {
return &Writer{
h: h,
w: w,
}
}
// Write wraps the write method of the underlying writer and also hashes all data.
func (h *Writer) Write(p []byte) (int, error) {
	// write the data to the underlying writer
n, err := h.w.Write(p)
// according to the interface documentation, Write() on a hash.Hash never
// returns an error.
_, hashErr := h.h.Write(p[:n])
if hashErr != nil {
panic(hashErr)
}
return n, err
}
// Sum returns the hash of all data written so far.
func (h *Writer) Sum(d []byte) []byte {
return h.h.Sum(d)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/hashing/reader.go | internal/repository/hashing/reader.go | package hashing
import (
"hash"
"io"
)
// A Reader hashes all data read from the underlying reader.
type Reader struct {
r io.Reader
h hash.Hash
}
// NewReader returns a new Reader that uses the hash h.
func NewReader(r io.Reader, h hash.Hash) *Reader {
return &Reader{r: r, h: h}
}
func (h *Reader) Read(p []byte) (int, error) {
n, err := h.r.Read(p)
_, _ = h.h.Write(p[:n]) // Never returns an error.
return n, err
}
// Sum returns the hash of the data read so far.
func (h *Reader) Sum(d []byte) []byte {
return h.h.Sum(d)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/hashing/reader_test.go | internal/repository/hashing/reader_test.go | package hashing
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"io"
"testing"
)
func TestReader(t *testing.T) {
tests := []int{5, 23, 2<<18 + 23, 1 << 20}
for _, size := range tests {
data := make([]byte, size)
_, err := io.ReadFull(rand.Reader, data)
if err != nil {
t.Fatalf("ReadFull: %v", err)
}
expectedHash := sha256.Sum256(data)
rd := NewReader(bytes.NewReader(data), sha256.New())
n, err := io.Copy(io.Discard, rd)
if err != nil {
t.Fatal(err)
}
if n != int64(size) {
t.Errorf("Reader: invalid number of bytes written: got %d, expected %d",
n, size)
}
resultingHash := rd.Sum(nil)
if !bytes.Equal(expectedHash[:], resultingHash) {
t.Errorf("Reader: hashes do not match: expected %02x, got %02x",
expectedHash, resultingHash)
}
}
}
func BenchmarkReader(b *testing.B) {
buf := make([]byte, 1<<22)
_, err := io.ReadFull(rand.Reader, buf)
if err != nil {
b.Fatal(err)
}
expectedHash := sha256.Sum256(buf)
b.SetBytes(int64(len(buf)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
rd := NewReader(bytes.NewReader(buf), sha256.New())
n, err := io.Copy(io.Discard, rd)
if err != nil {
b.Fatal(err)
}
if n != int64(len(buf)) {
b.Errorf("Reader: invalid number of bytes written: got %d, expected %d",
n, len(buf))
}
resultingHash := rd.Sum(nil)
if !bytes.Equal(expectedHash[:], resultingHash) {
b.Errorf("Reader: hashes do not match: expected %02x, got %02x",
expectedHash, resultingHash)
}
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/watchdog_roundtriper_test.go | internal/backend/watchdog_roundtriper_test.go | package backend
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/http/httptest"
"testing"
"time"
rtest "github.com/restic/restic/internal/test"
)
func TestRead(t *testing.T) {
data := []byte("abcdef")
var ctr int
kick := func() {
ctr++
}
var closed bool
onClose := func() {
closed = true
}
isTimeout := func(err error) bool {
return false
}
wd := newWatchdogReadCloser(io.NopCloser(bytes.NewReader(data)), 1, kick, onClose, isTimeout)
out, err := io.ReadAll(wd)
rtest.OK(t, err)
rtest.Equals(t, data, out, "data mismatch")
// the EOF read also triggers the kick function
rtest.Equals(t, len(data)*2+2, ctr, "unexpected number of kick calls")
rtest.Equals(t, false, closed, "close function called too early")
rtest.OK(t, wd.Close())
rtest.Equals(t, true, closed, "close function not called")
}
func TestRoundtrip(t *testing.T) {
t.Parallel()
// at the higher delay values, it takes longer to transmit the request/response body
// than the roundTripper timeout
for _, delay := range []int{0, 1, 10, 20} {
t.Run(fmt.Sprintf("%v", delay), func(t *testing.T) {
msg := []byte("ping-pong-data")
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
data, err := io.ReadAll(r.Body)
if err != nil {
w.WriteHeader(500)
return
}
w.WriteHeader(200)
// slowly send the reply
for len(data) >= 2 {
_, _ = w.Write(data[:2])
w.(http.Flusher).Flush()
data = data[2:]
time.Sleep(time.Duration(delay) * time.Millisecond)
}
_, _ = w.Write(data)
}))
defer srv.Close()
rt := newWatchdogRoundtripper(http.DefaultTransport, 100*time.Millisecond, 2)
req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(newSlowReader(bytes.NewReader(msg), time.Duration(delay)*time.Millisecond)))
rtest.OK(t, err)
resp, err := rt.RoundTrip(req)
rtest.OK(t, err)
rtest.Equals(t, 200, resp.StatusCode, "unexpected status code")
response, err := io.ReadAll(resp.Body)
rtest.OK(t, err)
rtest.Equals(t, msg, response, "unexpected response")
rtest.OK(t, resp.Body.Close())
})
}
}
func TestCanceledRoundtrip(t *testing.T) {
rt := newWatchdogRoundtripper(http.DefaultTransport, time.Second, 2)
ctx, cancel := context.WithCancel(context.Background())
cancel()
req, err := http.NewRequestWithContext(ctx, "GET", "http://some.random.url.dfdgsfg", nil)
rtest.OK(t, err)
resp, err := rt.RoundTrip(req)
rtest.Equals(t, context.Canceled, err)
// make linter happy
if resp != nil {
rtest.OK(t, resp.Body.Close())
}
}
type slowReader struct {
data io.Reader
delay time.Duration
}
func newSlowReader(data io.Reader, delay time.Duration) *slowReader {
return &slowReader{
data: data,
delay: delay,
}
}
func (s *slowReader) Read(p []byte) (n int, err error) {
time.Sleep(s.delay)
return s.data.Read(p)
}
func TestUploadTimeout(t *testing.T) {
t.Parallel()
msg := []byte("ping")
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := io.ReadAll(r.Body)
if err != nil {
w.WriteHeader(500)
return
}
t.Error("upload should have been canceled")
}))
defer srv.Close()
rt := newWatchdogRoundtripper(http.DefaultTransport, 10*time.Millisecond, 1024)
req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(newSlowReader(bytes.NewReader(msg), 100*time.Millisecond)))
rtest.OK(t, err)
resp, err := rt.RoundTrip(req)
rtest.Equals(t, errRequestTimeout, err)
// make linter happy
if resp != nil {
rtest.OK(t, resp.Body.Close())
}
}
func TestProcessingTimeout(t *testing.T) {
t.Parallel()
msg := []byte("ping")
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := io.ReadAll(r.Body)
if err != nil {
w.WriteHeader(500)
return
}
time.Sleep(100 * time.Millisecond)
w.WriteHeader(200)
}))
defer srv.Close()
rt := newWatchdogRoundtripper(http.DefaultTransport, 10*time.Millisecond, 1024)
req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(bytes.NewReader(msg)))
rtest.OK(t, err)
resp, err := rt.RoundTrip(req)
rtest.Equals(t, errRequestTimeout, err)
// make linter happy
if resp != nil {
rtest.OK(t, resp.Body.Close())
}
}
func TestDownloadTimeout(t *testing.T) {
t.Parallel()
msg := []byte("ping")
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
data, err := io.ReadAll(r.Body)
if err != nil {
w.WriteHeader(500)
return
}
w.WriteHeader(200)
_, _ = w.Write(data[:2])
w.(http.Flusher).Flush()
data = data[2:]
time.Sleep(100 * time.Millisecond)
_, _ = w.Write(data)
}))
defer srv.Close()
rt := newWatchdogRoundtripper(http.DefaultTransport, 25*time.Millisecond, 1024)
req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(bytes.NewReader(msg)))
rtest.OK(t, err)
resp, err := rt.RoundTrip(req)
rtest.OK(t, err)
rtest.Equals(t, 200, resp.StatusCode, "unexpected status code")
_, err = io.ReadAll(resp.Body)
rtest.Equals(t, errRequestTimeout, err, "response download not canceled")
rtest.OK(t, resp.Body.Close())
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/http_transport.go | internal/backend/http_transport.go | package backend
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"net"
"net/http"
"os"
"strings"
"time"
"github.com/peterbourgon/unixtransport"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/feature"
"golang.org/x/net/http2"
)
// TransportOptions collects various options which can be set for an HTTP based
// transport.
type TransportOptions struct {
// contains filenames of PEM encoded root certificates to trust
RootCertFilenames []string
// contains the name of a file containing the TLS client certificate and private key in PEM format
TLSClientCertKeyFilename string
// Skip TLS certificate verification
InsecureTLS bool
// Specify Custom User-Agent for the http Client
HTTPUserAgent string
// Timeout after which to retry stuck requests
StuckRequestTimeout time.Duration
}
// readPEMCertKey reads a file and returns the PEM encoded certificate and key
// blocks.
func readPEMCertKey(filename string) (certs []byte, key []byte, err error) {
data, err := os.ReadFile(filename)
if err != nil {
return nil, nil, errors.Wrap(err, "ReadFile")
}
var block *pem.Block
for len(data) > 0 {
block, data = pem.Decode(data)
if block == nil {
break
}
switch {
case strings.HasSuffix(block.Type, "CERTIFICATE"):
certs = append(certs, pem.EncodeToMemory(block)...)
case strings.HasSuffix(block.Type, "PRIVATE KEY"):
if key != nil {
return nil, nil, errors.Errorf("error loading TLS cert and key from %v: more than one private key found", filename)
}
key = pem.EncodeToMemory(block)
default:
return nil, nil, errors.Errorf("error loading TLS cert and key from %v: unknown block type %v found", filename, block.Type)
}
}
return certs, key, nil
}
// Transport returns a new http.RoundTripper with default settings applied. If
// custom root certificate filenames are given in opts, each must point to a
// valid PEM file, otherwise the function will return an error.
func Transport(opts TransportOptions) (http.RoundTripper, error) {
// copied from net/http
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
MaxIdleConns: 100,
MaxIdleConnsPerHost: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{},
}
// ensure that http2 connections are closed if they are broken
h2, err := http2.ConfigureTransports(tr)
if err != nil {
panic(err)
}
if feature.Flag.Enabled(feature.BackendErrorRedesign) {
h2.WriteByteTimeout = 120 * time.Second
h2.ReadIdleTimeout = 60 * time.Second
h2.PingTimeout = 60 * time.Second
}
unixtransport.Register(tr)
if opts.InsecureTLS {
tr.TLSClientConfig.InsecureSkipVerify = true
}
if opts.TLSClientCertKeyFilename != "" {
certs, key, err := readPEMCertKey(opts.TLSClientCertKeyFilename)
if err != nil {
return nil, err
}
crt, err := tls.X509KeyPair(certs, key)
if err != nil {
return nil, errors.Errorf("parse TLS client cert or key: %v", err)
}
tr.TLSClientConfig.Certificates = []tls.Certificate{crt}
}
if opts.RootCertFilenames != nil {
pool := x509.NewCertPool()
for _, filename := range opts.RootCertFilenames {
if filename == "" {
return nil, errors.Errorf("empty filename for root certificate supplied")
}
b, err := os.ReadFile(filename)
if err != nil {
return nil, errors.Errorf("unable to read root certificate: %v", err)
}
if ok := pool.AppendCertsFromPEM(b); !ok {
return nil, errors.Errorf("cannot parse root certificate from %q", filename)
}
}
tr.TLSClientConfig.RootCAs = pool
}
rt := http.RoundTripper(tr)
// if the userAgent is set in the Transport Options, wrap the
// http.RoundTripper
if opts.HTTPUserAgent != "" {
rt = newCustomUserAgentRoundTripper(rt, opts.HTTPUserAgent)
}
if feature.Flag.Enabled(feature.BackendErrorRedesign) {
if opts.StuckRequestTimeout == 0 {
opts.StuckRequestTimeout = 5 * time.Minute
}
rt = newWatchdogRoundtripper(rt, opts.StuckRequestTimeout, 128*1024)
}
// wrap in the debug round tripper (if active)
return debug.RoundTripper(rt), nil
}
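// Illustrative sketch, not part of the original file: constructing a round
// tripper that trusts an extra root CA and sends a custom user agent. The
// filename is a placeholder.
func exampleTransport() (http.RoundTripper, error) {
	return Transport(TransportOptions{
		RootCertFilenames: []string{"/etc/restic/ca.pem"},
		HTTPUserAgent:     "restic-example/1.0",
	})
}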
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/readerat.go | internal/backend/readerat.go | package backend
import (
"context"
"io"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
)
type backendReaderAt struct {
ctx context.Context
be Backend
h Handle
}
func (brd backendReaderAt) ReadAt(p []byte, offset int64) (n int, err error) {
return ReadAt(brd.ctx, brd.be, brd.h, offset, p)
}
// ReaderAt returns an io.ReaderAt for a file in the backend. The returned reader
// should not escape the caller function to avoid unexpected interactions with the
// embedded context
func ReaderAt(ctx context.Context, be Backend, h Handle) io.ReaderAt {
return backendReaderAt{ctx: ctx, be: be, h: h}
}
// ReadAt reads from the backend handle h at the given position.
func ReadAt(ctx context.Context, be Backend, h Handle, offset int64, p []byte) (n int, err error) {
debug.Log("ReadAt(%v) at %v, len %v", h, offset, len(p))
err = be.Load(ctx, h, len(p), offset, func(rd io.Reader) (ierr error) {
n, ierr = io.ReadFull(rd, p)
return ierr
})
if err != nil {
return 0, errors.Wrapf(err, "ReadFull(%v)", h)
}
debug.Log("ReadAt(%v) ReadFull returned %v bytes", h, n)
return n, nil
}
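// Illustrative sketch, not part of the original file: random access into a
// backend file. The handle name is a placeholder.
func exampleReadAt(ctx context.Context, be Backend) error {
	h := Handle{Type: PackFile, Name: "aa11"}
	buf := make([]byte, 1024)
	// reads exactly len(buf) bytes starting at offset 0, or fails
	_, err := ReadAt(ctx, be, h, 0, buf)
	return err
}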
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/file.go | internal/backend/file.go | package backend
import (
"fmt"
"github.com/restic/restic/internal/errors"
)
// FileType is the type of a file in the backend.
type FileType uint8
// These are the different data types a backend can store.
const (
PackFile FileType = 1 + iota
KeyFile
LockFile
SnapshotFile
IndexFile
ConfigFile
)
func (t FileType) String() string {
s := "invalid"
switch t {
case PackFile:
// Spelled "data" instead of "pack" for historical reasons.
s = "data"
case KeyFile:
s = "key"
case LockFile:
s = "lock"
case SnapshotFile:
s = "snapshot"
case IndexFile:
s = "index"
case ConfigFile:
s = "config"
}
return s
}
// Handle is used to store and access data in a backend.
type Handle struct {
Type FileType
IsMetadata bool
Name string
}
func (h Handle) String() string {
name := h.Name
if len(name) > 10 {
name = name[:10]
}
return fmt.Sprintf("<%s/%s>", h.Type, name)
}
// Valid returns an error if h is not valid.
func (h Handle) Valid() error {
switch h.Type {
case PackFile:
case KeyFile:
case LockFile:
case SnapshotFile:
case IndexFile:
case ConfigFile:
default:
return errors.Errorf("invalid Type %d", h.Type)
}
if h.Type == ConfigFile {
return nil
}
if h.Name == "" {
return errors.New("invalid Name")
}
return nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/rewind_reader_test.go | internal/backend/rewind_reader_test.go | package backend
import (
"bytes"
"crypto/md5"
"hash"
"io"
"math/rand"
"os"
"path/filepath"
"testing"
"time"
"github.com/restic/restic/internal/test"
)
func TestByteReader(t *testing.T) {
buf := []byte("foobar")
for _, hasher := range []hash.Hash{nil, md5.New()} {
fn := func() RewindReader {
return NewByteReader(buf, hasher)
}
testRewindReader(t, fn, buf)
}
}
func TestFileReader(t *testing.T) {
buf := []byte("foobar")
d := test.TempDir(t)
filename := filepath.Join(d, "file-reader-test")
err := os.WriteFile(filename, buf, 0600)
if err != nil {
t.Fatal(err)
}
f, err := os.Open(filename)
if err != nil {
t.Fatal(err)
}
defer func() {
err := f.Close()
if err != nil {
t.Fatal(err)
}
}()
for _, hasher := range []hash.Hash{nil, md5.New()} {
fn := func() RewindReader {
var hash []byte
if hasher != nil {
// must never fail according to interface
_, err := hasher.Write(buf)
if err != nil {
panic(err)
}
hash = hasher.Sum(nil)
}
rd, err := NewFileReader(f, hash)
if err != nil {
t.Fatal(err)
}
return rd
}
testRewindReader(t, fn, buf)
}
}
func testRewindReader(t *testing.T, fn func() RewindReader, data []byte) {
seed := time.Now().UnixNano()
t.Logf("seed is %d", seed)
rnd := rand.New(rand.NewSource(seed))
type ReaderTestFunc func(t testing.TB, r RewindReader, data []byte)
var tests = []ReaderTestFunc{
func(t testing.TB, rd RewindReader, data []byte) {
if rd.Length() != int64(len(data)) {
t.Fatalf("wrong length returned, want %d, got %d", int64(len(data)), rd.Length())
}
buf := make([]byte, len(data))
_, err := io.ReadFull(rd, buf)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, data) {
t.Fatalf("wrong data returned")
}
if rd.Length() != int64(len(data)) {
t.Fatalf("wrong length returned, want %d, got %d", int64(len(data)), rd.Length())
}
err = rd.Rewind()
if err != nil {
t.Fatal(err)
}
if rd.Length() != int64(len(data)) {
t.Fatalf("wrong length returned, want %d, got %d", int64(len(data)), rd.Length())
}
buf2 := make([]byte, int64(len(data)))
_, err = io.ReadFull(rd, buf2)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf2, data) {
t.Fatalf("wrong data returned")
}
if rd.Length() != int64(len(data)) {
t.Fatalf("wrong length returned, want %d, got %d", int64(len(data)), rd.Length())
}
if rd.Hash() != nil {
hasher := md5.New()
// must never fail according to interface
_, _ = hasher.Write(buf2)
if !bytes.Equal(rd.Hash(), hasher.Sum(nil)) {
t.Fatal("hash does not match data")
}
}
},
func(t testing.TB, rd RewindReader, data []byte) {
// read first bytes
buf := make([]byte, rnd.Intn(len(data)))
_, err := io.ReadFull(rd, buf)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, data[:len(buf)]) {
t.Fatalf("wrong data returned")
}
err = rd.Rewind()
if err != nil {
t.Fatal(err)
}
buf2 := make([]byte, rnd.Intn(len(data)))
_, err = io.ReadFull(rd, buf2)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf2, data[:len(buf2)]) {
t.Fatalf("wrong data returned")
}
// read remainder
buf3 := make([]byte, len(data)-len(buf2))
_, err = io.ReadFull(rd, buf3)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf3, data[len(buf2):]) {
t.Fatalf("wrong data returned")
}
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
rd := fn()
test(t, rd, data)
})
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/file_test.go | internal/backend/file_test.go | package backend
import (
"testing"
rtest "github.com/restic/restic/internal/test"
)
func TestHandleString(t *testing.T) {
rtest.Equals(t, "<data/foobar>", Handle{Type: PackFile, Name: "foobar"}.String())
rtest.Equals(t, "<lock/1>", Handle{Type: LockFile, Name: "1"}.String())
}
func TestHandleValid(t *testing.T) {
var handleTests = []struct {
h Handle
valid bool
}{
{Handle{Name: "foo"}, false},
{Handle{Type: 0}, false},
{Handle{Type: ConfigFile, Name: ""}, true},
{Handle{Type: PackFile, Name: ""}, false},
{Handle{Type: LockFile, Name: "010203040506"}, true},
}
for i, test := range handleTests {
err := test.h.Valid()
if err != nil && test.valid {
t.Errorf("test %v failed: error returned for valid handle: %v", i, err)
}
if !test.valid && err == nil {
t.Errorf("test %v failed: expected error for invalid handle not found", i)
}
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/shell_split.go | internal/backend/shell_split.go | package backend
import (
"unicode"
"github.com/restic/restic/internal/errors"
)
// shellSplitter splits a command string into separated arguments. It supports
// single and double quoted strings.
type shellSplitter struct {
quote rune
lastChar rune
}
func (s *shellSplitter) isSplitChar(c rune) bool {
// only test for quotes if the last char was not a backslash
if s.lastChar != '\\' {
// quote ended
if s.quote != 0 && c == s.quote {
s.quote = 0
return true
}
// quote starts
if s.quote == 0 && (c == '"' || c == '\'') {
s.quote = c
return true
}
}
s.lastChar = c
// within quote
if s.quote != 0 {
return false
}
// outside quote
return c == '\\' || unicode.IsSpace(c)
}
// SplitShellStrings returns the list of shell strings from a shell command string.
func SplitShellStrings(data string) (strs []string, err error) {
s := &shellSplitter{}
// derived from strings.SplitFunc
fieldStart := -1 // Set to -1 when looking for start of field.
for i, r := range data {
if s.isSplitChar(r) {
if fieldStart >= 0 {
strs = append(strs, data[fieldStart:i])
fieldStart = -1
}
} else if fieldStart == -1 {
fieldStart = i
}
}
if fieldStart >= 0 { // Last field might end at EOF.
strs = append(strs, data[fieldStart:])
}
switch s.quote {
case '\'':
return nil, errors.New("single-quoted string not terminated")
case '"':
return nil, errors.New("double-quoted string not terminated")
}
if len(strs) == 0 {
return nil, errors.New("command string is empty")
}
return strs, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/httpuseragent_roundtripper_test.go | internal/backend/httpuseragent_roundtripper_test.go | package backend
import (
"net/http"
"net/http/httptest"
"testing"
)
func TestCustomUserAgentTransport(t *testing.T) {
// Create a mock HTTP handler that checks the User-Agent header
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
userAgent := r.Header.Get("User-Agent")
if userAgent != "TestUserAgent" {
t.Errorf("Expected User-Agent: TestUserAgent, got: %s", userAgent)
}
w.WriteHeader(http.StatusOK)
})
// Create a test server with the mock handler
server := httptest.NewServer(handler)
defer server.Close()
// Create a custom user agent transport
customUserAgent := "TestUserAgent"
transport := &httpUserAgentRoundTripper{
userAgent: customUserAgent,
rt: http.DefaultTransport,
}
// Create an HTTP client with the custom transport
client := &http.Client{
Transport: transport,
}
// Make a request to the test server
resp, err := client.Get(server.URL)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
t.Log("failed to close response body")
}
}()
// Check the response status code
if resp.StatusCode != http.StatusOK {
t.Errorf("Expected status code: %d, got: %d", http.StatusOK, resp.StatusCode)
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/watchdog_roundtriper.go | internal/backend/watchdog_roundtriper.go | package backend
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"sync/atomic"
"time"
)
var errRequestTimeout = fmt.Errorf("request timeout")
// watchdogRoundtripper cancels an http request if an upload or download did not make progress
// within timeout. The time between fully sending the request and receiving a response is also
// limited by this timeout. This ensures that stuck requests are cancelled after some time.
//
// The roundtripper makes the assumption that the upload and download happen continuously. In particular,
// the caller must not make long pauses between individual read requests from the response body.
type watchdogRoundtripper struct {
rt http.RoundTripper
timeout time.Duration
chunkSize int
}
var _ http.RoundTripper = &watchdogRoundtripper{}
func newWatchdogRoundtripper(rt http.RoundTripper, timeout time.Duration, chunkSize int) *watchdogRoundtripper {
return &watchdogRoundtripper{
rt: rt,
timeout: timeout,
chunkSize: chunkSize,
}
}
func (w *watchdogRoundtripper) RoundTrip(req *http.Request) (*http.Response, error) {
timer := time.NewTimer(w.timeout)
ctx, cancel := context.WithCancel(req.Context())
timedOut := &atomic.Bool{}
// cancel context if timer expires
go func() {
defer timer.Stop()
select {
case <-timer.C:
timedOut.Store(true)
cancel()
case <-ctx.Done():
}
}()
kick := func() {
timer.Reset(w.timeout)
}
isTimeout := func(err error) bool {
return timedOut.Load() && errors.Is(err, context.Canceled)
}
req = req.Clone(ctx)
if req.Body != nil {
// kick watchdog timer as long as uploading makes progress
req.Body = newWatchdogReadCloser(req.Body, w.chunkSize, kick, nil, isTimeout)
}
resp, err := w.rt.RoundTrip(req)
if err != nil {
if isTimeout(err) {
err = errRequestTimeout
}
return nil, err
}
// kick watchdog timer as long as downloading makes progress
// cancel context to stop goroutine once response body is closed
resp.Body = newWatchdogReadCloser(resp.Body, w.chunkSize, kick, cancel, isTimeout)
return resp, nil
}
func newWatchdogReadCloser(rc io.ReadCloser, chunkSize int, kick func(), close func(), isTimeout func(err error) bool) *watchdogReadCloser {
return &watchdogReadCloser{
rc: rc,
chunkSize: chunkSize,
kick: kick,
close: close,
isTimeout: isTimeout,
}
}
type watchdogReadCloser struct {
rc io.ReadCloser
chunkSize int
kick func()
close func()
isTimeout func(err error) bool
}
var _ io.ReadCloser = &watchdogReadCloser{}
func (w *watchdogReadCloser) Read(p []byte) (n int, err error) {
w.kick()
// Read is not required to fill the whole passed in byte slice
// Thus, keep things simple and just stay within our chunkSize.
if len(p) > w.chunkSize {
p = p[:w.chunkSize]
}
n, err = w.rc.Read(p)
w.kick()
if err != nil && w.isTimeout(err) {
err = errRequestTimeout
}
return n, err
}
func (w *watchdogReadCloser) Close() error {
if w.close != nil {
w.close()
}
return w.rc.Close()
}
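// Illustrative sketch, not part of the original file: wiring the watchdog
// into an HTTP client so that transfers stalled for more than a minute are
// aborted. The timeout and chunk size are example values.
func exampleWatchdogClient() *http.Client {
	rt := newWatchdogRoundtripper(http.DefaultTransport, time.Minute, 128*1024)
	return &http.Client{Transport: rt}
}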
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/rewind_reader.go | internal/backend/rewind_reader.go | package backend
import (
"bytes"
"hash"
"io"
"github.com/restic/restic/internal/errors"
)
// RewindReader allows resetting the Reader to the beginning of the data.
type RewindReader interface {
io.Reader
// Rewind rewinds the reader so the same data can be read again from the
// start.
Rewind() error
// Length returns the number of bytes that can be read from the Reader
// after calling Rewind.
Length() int64
	// Hash returns a hash of the data if requested by the backend.
Hash() []byte
}
// ByteReader implements a RewindReader for a byte slice.
type ByteReader struct {
*bytes.Reader
Len int64
hash []byte
}
// Rewind restarts the reader from the beginning of the data.
func (b *ByteReader) Rewind() error {
_, err := b.Reader.Seek(0, io.SeekStart)
return err
}
// Length returns the number of bytes that can be read from the reader after
// Rewind is called.
func (b *ByteReader) Length() int64 {
return b.Len
}
// Hash returns a hash of the data if requested by the backend.
func (b *ByteReader) Hash() []byte {
return b.hash
}
// statically ensure that *ByteReader implements RewindReader.
var _ RewindReader = &ByteReader{}
// NewByteReader prepares a ByteReader that can then be used to read buf.
func NewByteReader(buf []byte, hasher hash.Hash) *ByteReader {
var hash []byte
if hasher != nil {
// must never fail according to interface
_, err := hasher.Write(buf)
if err != nil {
panic(err)
}
hash = hasher.Sum(nil)
}
return &ByteReader{
Reader: bytes.NewReader(buf),
Len: int64(len(buf)),
hash: hash,
}
}
// statically ensure that *FileReader implements RewindReader.
var _ RewindReader = &FileReader{}
// FileReader implements a RewindReader for an open file.
type FileReader struct {
io.ReadSeeker
Len int64
hash []byte
}
// Rewind seeks to the beginning of the file.
func (f *FileReader) Rewind() error {
_, err := f.ReadSeeker.Seek(0, io.SeekStart)
return errors.Wrap(err, "Seek")
}
// Length returns the length of the file.
func (f *FileReader) Length() int64 {
return f.Len
}
// Hash returns a hash of the data if requested by the backend.
func (f *FileReader) Hash() []byte {
return f.hash
}
// NewFileReader wraps f in a *FileReader.
func NewFileReader(f io.ReadSeeker, hash []byte) (*FileReader, error) {
pos, err := f.Seek(0, io.SeekEnd)
if err != nil {
return nil, errors.Wrap(err, "Seek")
}
fr := &FileReader{
ReadSeeker: f,
Len: pos,
hash: hash,
}
err = fr.Rewind()
if err != nil {
return nil, err
}
return fr, nil
}
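// Illustrative sketch, not part of the original file: a retry loop that
// reuses a RewindReader between attempts. "upload" stands in for the actual
// backend operation.
func exampleRetry(rd RewindReader, upload func(io.Reader) error) error {
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		// restart from the beginning of the data before each attempt
		if err = rd.Rewind(); err != nil {
			return err
		}
		if err = upload(rd); err == nil {
			return nil
		}
	}
	return err
}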
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/shell_split_test.go | internal/backend/shell_split_test.go | package backend
import (
"reflect"
"testing"
)
func TestShellSplitter(t *testing.T) {
var tests = []struct {
data string
args []string
}{
{
`foo`,
[]string{"foo"},
},
{
`'foo'`,
[]string{"foo"},
},
{
`foo bar baz`,
[]string{"foo", "bar", "baz"},
},
{
`foo 'bar' baz`,
[]string{"foo", "bar", "baz"},
},
{
`'bar box' baz`,
[]string{"bar box", "baz"},
},
{
`"bar 'box'" baz`,
[]string{"bar 'box'", "baz"},
},
{
`'bar "box"' baz`,
[]string{`bar "box"`, "baz"},
},
{
`\"bar box baz`,
[]string{`"bar`, "box", "baz"},
},
{
`"bar/foo/x" "box baz"`,
[]string{"bar/foo/x", "box baz"},
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
args, err := SplitShellStrings(test.data)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(args, test.args) {
t.Fatalf("wrong args returned, want:\n %#v\ngot:\n %#v",
test.args, args)
}
})
}
}
func TestShellSplitterInvalid(t *testing.T) {
var tests = []struct {
data string
err string
}{
{
"foo'",
"single-quoted string not terminated",
},
{
`foo"`,
"double-quoted string not terminated",
},
{
"foo 'bar",
"single-quoted string not terminated",
},
{
`foo "bar`,
"double-quoted string not terminated",
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
args, err := SplitShellStrings(test.data)
if err == nil {
t.Fatalf("expected error not found: %v", test.err)
}
if err.Error() != test.err {
t.Fatalf("expected error not found, want:\n %q\ngot:\n %q", test.err, err.Error())
}
if len(args) > 0 {
t.Fatalf("splitter returned fields from invalid data: %v", args)
}
})
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/backend.go | internal/backend/backend.go | package backend
import (
"context"
"fmt"
"hash"
"io"
)
var ErrNoRepository = fmt.Errorf("repository does not exist")
// Backend is used to store and access data.
//
// Backend operations that return an error will be retried when a Backend is
// wrapped in a RetryBackend. To prevent that from happening, the operations
// should return a github.com/cenkalti/backoff/v4.PermanentError. Errors from
// the context package need not be wrapped, as context cancellation is checked
// separately by the retrying logic.
type Backend interface {
// Properties returns information about the backend
Properties() Properties
// Hasher may return a hash function for calculating a content hash for the backend
Hasher() hash.Hash
// Remove removes a File described by h.
Remove(ctx context.Context, h Handle) error
// Close the backend
Close() error
// Save stores the data from rd under the given handle.
Save(ctx context.Context, h Handle, rd RewindReader) error
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset. If length is larger than zero, only a portion of the file
// is read. If the length is larger than zero and the file is too short to return
// the requested length bytes, then an error MUST be returned that is recognized
// by IsPermanentError().
//
// The function fn may be called multiple times during the same Load invocation
// and therefore must be idempotent.
//
// Implementations are encouraged to use util.DefaultLoad
Load(ctx context.Context, h Handle, length int, offset int64, fn func(rd io.Reader) error) error
// Stat returns information about the File identified by h.
Stat(ctx context.Context, h Handle) (FileInfo, error)
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
//
// The function fn is called exactly once for each file during successful
// execution and at most once in case of an error.
//
// The function fn is called in the same Goroutine that List() is called
// from.
List(ctx context.Context, t FileType, fn func(FileInfo) error) error
// IsNotExist returns true if the error was caused by a non-existing file
// in the backend.
//
// The argument may be a wrapped error. The implementation is responsible
// for unwrapping it.
IsNotExist(err error) bool
// IsPermanentError returns true if the error can very likely not be resolved
// by retrying the operation. Backends should return true if the file is missing,
// the requested range does not (completely) exist in the file or the user is
// not authorized to perform the requested operation.
IsPermanentError(err error) bool
// Delete removes all data in the backend.
Delete(ctx context.Context) error
// Warmup ensures that the specified handles are ready for upcoming reads.
// This is particularly useful for transitioning files from cold to hot
// storage.
//
// The method is non-blocking. WarmupWait can be used to wait for
// completion.
//
// Returns:
// - Handles currently warming up.
// - An error if warmup fails.
Warmup(ctx context.Context, h []Handle) ([]Handle, error)
// WarmupWait waits until all given handles are warm.
WarmupWait(ctx context.Context, h []Handle) error
}
type Properties struct {
// Connections states the maximum number of concurrent backend operations.
Connections uint
// HasAtomicReplace states whether Save() can atomically replace files
HasAtomicReplace bool
// HasFlakyErrors states whether the backend may temporarily return errors
// that are considered as permanent for existing files.
HasFlakyErrors bool
}
type Unwrapper interface {
// Unwrap returns the underlying backend or nil if there is none.
Unwrap() Backend
}
func AsBackend[B Backend](b Backend) B {
for b != nil {
if be, ok := b.(B); ok {
return be
}
if be, ok := b.(Unwrapper); ok {
b = be.Unwrap()
} else {
// not the backend we're looking for
break
}
}
var be B
return be
}
type FreezeBackend interface {
Backend
// Freeze blocks all backend operations except those on lock files
Freeze()
// Unfreeze allows all backend operations to continue
Unfreeze()
}
// FileInfo contains information about a file in the backend.
type FileInfo struct {
Size int64
Name string
}
// ApplyEnvironmenter fills in a backend configuration from the environment
type ApplyEnvironmenter interface {
ApplyEnvironment(prefix string)
}
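// Illustrative sketch, not part of the original file: using AsBackend to
// discover an optional capability anywhere in a chain of wrapped backends.
func exampleFreeze(be Backend) {
	if fb := AsBackend[FreezeBackend](be); fb != nil {
		fb.Freeze()
		defer fb.Unfreeze()
	}
}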
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/backend_test.go | internal/backend/backend_test.go | package backend_test
import (
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/test"
)
type testBackend struct {
backend.Backend
}
func (t *testBackend) Unwrap() backend.Backend {
return nil
}
type otherTestBackend struct {
backend.Backend
}
func (t *otherTestBackend) Unwrap() backend.Backend {
return t.Backend
}
func TestAsBackend(t *testing.T) {
other := otherTestBackend{}
test.Assert(t, backend.AsBackend[*testBackend](other) == nil, "otherTestBackend is not a testBackend backend")
testBe := &testBackend{}
test.Assert(t, backend.AsBackend[*testBackend](testBe) == testBe, "testBackend was not returned")
wrapper := &otherTestBackend{Backend: testBe}
test.Assert(t, backend.AsBackend[*testBackend](wrapper) == testBe, "failed to unwrap testBackend backend")
wrapper.Backend = other
test.Assert(t, backend.AsBackend[*testBackend](wrapper) == nil, "a wrapped otherTestBackend is not a testBackend")
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/doc.go | internal/backend/doc.go | // Package backend provides local and remote storage for restic repositories.
// All backends need to implement the Backend interface. There is a MemBackend,
// which stores all data in a map internally and can be used for testing.
package backend
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/httpuseragent_roundtripper.go | internal/backend/httpuseragent_roundtripper.go | package backend
import "net/http"
// httpUserAgentRoundTripper is a custom http.RoundTripper that modifies the User-Agent header
// of outgoing HTTP requests.
type httpUserAgentRoundTripper struct {
userAgent string
rt http.RoundTripper
}
func newCustomUserAgentRoundTripper(rt http.RoundTripper, userAgent string) *httpUserAgentRoundTripper {
return &httpUserAgentRoundTripper{
rt: rt,
userAgent: userAgent,
}
}
// RoundTrip modifies the User-Agent header of the request and then delegates the request
// to the underlying RoundTripper.
func (c *httpUserAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
req = req.Clone(req.Context())
req.Header.Set("User-Agent", c.userAgent)
return c.rt.RoundTrip(req)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/util/limited_reader.go | internal/backend/util/limited_reader.go | package util
import "io"
// LimitedReadCloser wraps io.LimitedReader and exposes the Close() method.
type LimitedReadCloser struct {
io.Closer
io.LimitedReader
}
// LimitReadCloser returns a new reader that wraps r in an io.LimitedReader, but also
// exposes the Close() method.
func LimitReadCloser(r io.ReadCloser, n int64) *LimitedReadCloser {
return &LimitedReadCloser{Closer: r, LimitedReader: io.LimitedReader{R: r, N: n}}
}
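// Illustrative sketch, not part of the original file: limiting a ranged read
// while keeping the ability to close the underlying file.
func exampleLimit(f io.ReadCloser) io.ReadCloser {
	// at most 4 KiB can be read; Close() still reaches f
	return LimitReadCloser(f, 4096)
}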
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/util/defaults.go | internal/backend/util/defaults.go | package util
import (
"context"
"io"
"github.com/restic/restic/internal/backend"
)
// DefaultLoad implements Backend.Load using lower-level openReader func
func DefaultLoad(ctx context.Context, h backend.Handle, length int, offset int64,
openReader func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error),
fn func(rd io.Reader) error) error {
rd, err := openReader(ctx, h, length, offset)
if err != nil {
return err
}
err = fn(rd)
if err != nil {
_ = rd.Close() // ignore secondary errors closing the reader
return err
}
return rd.Close()
}
// DefaultDelete removes all restic keys in the bucket. It will not remove the bucket itself.
func DefaultDelete(ctx context.Context, be backend.Backend) error {
alltypes := []backend.FileType{
backend.PackFile,
backend.KeyFile,
backend.LockFile,
backend.SnapshotFile,
backend.IndexFile}
for _, t := range alltypes {
err := be.List(ctx, t, func(fi backend.FileInfo) error {
return be.Remove(ctx, backend.Handle{Type: t, Name: fi.Name})
})
if err != nil {
			// propagate List/Remove failures instead of silently succeeding
			return err
}
}
err := be.Remove(ctx, backend.Handle{Type: backend.ConfigFile})
if err != nil && be.IsNotExist(err) {
err = nil
}
return err
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/util/defaults_test.go | internal/backend/util/defaults_test.go | package util_test
import (
"context"
"io"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/util"
"github.com/restic/restic/internal/errors"
rtest "github.com/restic/restic/internal/test"
)
type mockReader struct {
closed bool
}
func (rd *mockReader) Read(_ []byte) (n int, err error) {
return 0, nil
}
func (rd *mockReader) Close() error {
rd.closed = true
return nil
}
func TestDefaultLoad(t *testing.T) {
h := backend.Handle{Name: "id", Type: backend.PackFile}
rd := &mockReader{}
// happy case, assert correct parameters are passed around and content stream is closed
err := util.DefaultLoad(context.TODO(), h, 10, 11, func(ctx context.Context, ih backend.Handle, length int, offset int64) (io.ReadCloser, error) {
rtest.Equals(t, h, ih)
rtest.Equals(t, 10, length)
rtest.Equals(t, int64(11), offset)
return rd, nil
}, func(ird io.Reader) error {
rtest.Equals(t, rd, ird)
return nil
})
rtest.OK(t, err)
rtest.Equals(t, true, rd.closed)
// unhappy case, assert producer errors are handled correctly
err = util.DefaultLoad(context.TODO(), h, 10, 11, func(ctx context.Context, ih backend.Handle, length int, offset int64) (io.ReadCloser, error) {
return nil, errors.Errorf("producer error")
}, func(ird io.Reader) error {
t.Fatalf("unexpected consumer invocation")
return nil
})
rtest.Equals(t, "producer error", err.Error())
// unhappy case, assert consumer errors are handled correctly
rd = &mockReader{}
err = util.DefaultLoad(context.TODO(), h, 10, 11, func(ctx context.Context, ih backend.Handle, length int, offset int64) (io.ReadCloser, error) {
return rd, nil
}, func(ird io.Reader) error {
return errors.Errorf("consumer error")
})
rtest.Equals(t, true, rd.closed)
rtest.Equals(t, "consumer error", err.Error())
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/util/paths.go | internal/backend/util/paths.go | package util
import "os"
type Modes struct {
Dir os.FileMode
File os.FileMode
}
// DefaultModes defines the default permissions to apply to new repository
// files and directories stored on file-based backends.
var DefaultModes = Modes{Dir: 0700, File: 0600}
// DeriveModesFromFileInfo will, given the mode of a regular file, compute
// the mode we should use for new files and directories. If the passed
// error is non-nil, DefaultModes are returned.
func DeriveModesFromFileInfo(fi os.FileInfo, err error) Modes {
m := DefaultModes
if err != nil {
return m
}
if fi.Mode()&0040 != 0 { // Group has read access
m.Dir |= 0070
m.File |= 0060
}
return m
}
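// Illustrative sketch, not part of the original file: a group-readable config
// file (e.g. mode 0640) widens the derived modes to Dir 0770 and File 0660;
// any Stat error falls back to DefaultModes. The path is a placeholder.
func exampleModes() Modes {
	fi, err := os.Stat("/srv/restic-repo/config")
	return DeriveModesFromFileInfo(fi, err)
}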
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/logger/log.go | internal/backend/logger/log.go | package logger
import (
"context"
"io"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
)
type Backend struct {
backend.Backend
}
// statically ensure that Backend implements backend.Backend.
var _ backend.Backend = &Backend{}
func New(be backend.Backend) *Backend {
return &Backend{Backend: be}
}
func (be *Backend) IsNotExist(err error) bool {
isNotExist := be.Backend.IsNotExist(err)
debug.Log("IsNotExist(%T, %#v, %v)", err, err, isNotExist)
return isNotExist
}
// Save adds new Data to the backend.
func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
debug.Log("Save(%v, %v)", h, rd.Length())
err := be.Backend.Save(ctx, h, rd)
debug.Log(" save err %v", err)
return err
}
// Remove deletes a file from the backend.
func (be *Backend) Remove(ctx context.Context, h backend.Handle) error {
debug.Log("Remove(%v)", h)
err := be.Backend.Remove(ctx, h)
debug.Log(" remove err %v", err)
return err
}
func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(io.Reader) error) error {
debug.Log("Load(%v, length %v, offset %v)", h, length, offset)
err := be.Backend.Load(ctx, h, length, offset, fn)
debug.Log(" load err %v", err)
return err
}
func (be *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
debug.Log("Stat(%v)", h)
fi, err := be.Backend.Stat(ctx, h)
debug.Log(" stat err %v", err)
return fi, err
}
func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
debug.Log("List(%v)", t)
err := be.Backend.List(ctx, t, fn)
debug.Log(" list err %v", err)
return err
}
func (be *Backend) Delete(ctx context.Context) error {
debug.Log("Delete()")
err := be.Backend.Delete(ctx)
debug.Log(" delete err %v", err)
return err
}
func (be *Backend) Close() error {
debug.Log("Close()")
err := be.Backend.Close()
debug.Log(" close err %v", err)
return err
}
func (be *Backend) Unwrap() backend.Backend { return be.Backend }
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/sftp/sftp_test.go | internal/backend/sftp/sftp_test.go | package sftp_test
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"github.com/restic/restic/internal/backend/sftp"
"github.com/restic/restic/internal/backend/test"
"github.com/restic/restic/internal/errors"
rtest "github.com/restic/restic/internal/test"
)
func findSFTPServerBinary() string {
for _, dir := range strings.Split(rtest.TestSFTPPath, ":") {
testpath := filepath.Join(dir, "sftp-server")
_, err := os.Stat(testpath)
if !errors.Is(err, os.ErrNotExist) {
return testpath
}
}
return ""
}
var sftpServer = findSFTPServerBinary()
func newTestSuite(t testing.TB) *test.Suite[sftp.Config] {
return &test.Suite[sftp.Config]{
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig: func() (*sftp.Config, error) {
dir := rtest.TempDir(t)
t.Logf("create new backend at %v", dir)
cfg := &sftp.Config{
Path: dir,
Command: fmt.Sprintf("%q -e", sftpServer),
Connections: 5,
}
return cfg, nil
},
Factory: sftp.NewFactory(),
}
}
func TestBackendSFTP(t *testing.T) {
defer func() {
if t.Skipped() {
rtest.SkipDisallowed(t, "restic/backend/sftp.TestBackendSFTP")
}
}()
if sftpServer == "" {
t.Skip("sftp server binary not found")
}
newTestSuite(t).RunTests(t)
}
func BenchmarkBackendSFTP(t *testing.B) {
if sftpServer == "" {
t.Skip("sftp server binary not found")
}
newTestSuite(t).RunBenchmarks(t)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/sftp/sftp.go | internal/backend/sftp/sftp.go | package sftp
import (
"bufio"
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"hash"
"io"
"os"
"os/exec"
"path"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/layout"
"github.com/restic/restic/internal/backend/limiter"
"github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/backend/util"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/feature"
"github.com/restic/restic/internal/terminal"
"github.com/cenkalti/backoff/v4"
"github.com/pkg/sftp"
"golang.org/x/sync/errgroup"
)
// SFTP is a backend in a directory accessed via SFTP.
type SFTP struct {
c *sftp.Client
p string
cmd *exec.Cmd
result <-chan error
posixRename bool
layout.Layout
Config
util.Modes
}
var _ backend.Backend = &SFTP{}
var errTooShort = fmt.Errorf("file is too short")
func NewFactory() location.Factory {
return location.NewLimitedBackendFactory("sftp", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open))
}
func startClient(cfg Config, errorLog func(string, ...interface{})) (*SFTP, error) {
program, args, err := buildSSHCommand(cfg)
if err != nil {
return nil, err
}
debug.Log("start client %v %v", program, args)
// Connect to a remote host and request the sftp subsystem via the 'ssh'
// command. This assumes that passwordless login is correctly configured.
cmd := exec.Command(program, args...)
// prefix the errors with the program name
stderr, err := cmd.StderrPipe()
if err != nil {
return nil, errors.Wrap(err, "cmd.StderrPipe")
}
go func() {
sc := bufio.NewScanner(stderr)
for sc.Scan() {
errorLog("subprocess %v: %v\n", program, sc.Text())
}
}()
// get stdin and stdout
wr, err := cmd.StdinPipe()
if err != nil {
return nil, errors.Wrap(err, "cmd.StdinPipe")
}
rd, err := cmd.StdoutPipe()
if err != nil {
return nil, errors.Wrap(err, "cmd.StdoutPipe")
}
bg, err := terminal.StartForeground(cmd)
if err != nil {
if errors.Is(err, exec.ErrDot) {
return nil, errors.Errorf("cannot implicitly run relative executable %v found in current directory, use -o sftp.command=./<command> to override", cmd.Path)
}
return nil, err
}
// wait in a different goroutine
ch := make(chan error, 1)
go func() {
err := cmd.Wait()
debug.Log("ssh command exited, err %v", err)
for {
ch <- errors.Wrap(err, "ssh command exited")
}
}()
// open the SFTP session
client, err := sftp.NewClientPipe(rd, wr,
// write multiple packets (32kb) in parallel per file
// not strictly necessary as we use ReadFromWithConcurrency
sftp.UseConcurrentWrites(true),
// increase send buffer per file to 4MB
sftp.MaxConcurrentRequestsPerFile(128))
if err != nil {
return nil, errors.Errorf("unable to start the sftp session, error: %v", err)
}
err = bg()
if err != nil {
return nil, errors.Wrap(err, "bg")
}
_, posixRename := client.HasExtension("posix-rename@openssh.com")
return &SFTP{
c: client,
cmd: cmd,
result: ch,
posixRename: posixRename,
Layout: layout.NewDefaultLayout(cfg.Path, path.Join),
}, nil
}
// clientError returns an error if the client has exited. Otherwise, nil is
// returned immediately.
func (r *SFTP) clientError() error {
select {
case err := <-r.result:
debug.Log("client has exited with err %v", err)
return backoff.Permanent(err)
default:
}
return nil
}
// Open opens an sftp backend as described by the config by running
// "ssh" with the appropriate arguments (or cfg.Command, if set).
func Open(_ context.Context, cfg Config, errorLog func(string, ...interface{})) (*SFTP, error) {
debug.Log("open backend with config %#v", cfg)
sftp, err := startClient(cfg, errorLog)
if err != nil {
debug.Log("unable to start program: %v", err)
return nil, err
}
return open(sftp, cfg)
}
func open(sftp *SFTP, cfg Config) (*SFTP, error) {
fi, err := sftp.c.Stat(sftp.Layout.Filename(backend.Handle{Type: backend.ConfigFile}))
m := util.DeriveModesFromFileInfo(fi, err)
debug.Log("using (%03O file, %03O dir) permissions", m.File, m.Dir)
sftp.Config = cfg
sftp.p = cfg.Path
sftp.Modes = m
return sftp, nil
}
func (r *SFTP) mkdirAllDataSubdirs(ctx context.Context, nconn uint) error {
// Run multiple MkdirAll calls concurrently. These involve multiple
// round-trips and we do a lot of them, so this whole operation can be slow
// on high-latency links.
g, _ := errgroup.WithContext(ctx)
// Use errgroup's built-in semaphore, because r.sem is not initialized yet.
g.SetLimit(int(nconn))
for _, d := range r.Paths() {
g.Go(func() error {
// First try Mkdir. For most directories in Paths, this takes one
// round trip, not counting duplicate parent creations caused by
// concurrency. MkdirAll first does Stat, then recursive MkdirAll
// on the parent, so calls typically take three round trips.
if err := r.c.Mkdir(d); err == nil {
return nil
}
return errors.Wrapf(r.c.MkdirAll(d), "MkdirAll %v", d)
})
}
return g.Wait()
}
// IsNotExist returns true if the error is caused by a not existing file.
func (r *SFTP) IsNotExist(err error) bool {
return errors.Is(err, os.ErrNotExist)
}
func (r *SFTP) IsPermanentError(err error) bool {
return r.IsNotExist(err) || errors.Is(err, errTooShort) || errors.Is(err, os.ErrPermission)
}
func buildSSHCommand(cfg Config) (cmd string, args []string, err error) {
if cfg.Command != "" {
args, err := backend.SplitShellStrings(cfg.Command)
if err != nil {
return "", nil, err
}
if cfg.Args != "" {
return "", nil, errors.New("cannot specify both sftp.command and sftp.args options")
}
return args[0], args[1:], nil
}
cmd = "ssh"
host, port := cfg.Host, cfg.Port
args = []string{host}
if port != "" {
args = append(args, "-p", port)
}
if cfg.User != "" {
args = append(args, "-l", cfg.User)
}
if cfg.Args != "" {
a, err := backend.SplitShellStrings(cfg.Args)
if err != nil {
return "", nil, err
}
args = append(args, a...)
}
args = append(args, "-s", "sftp")
return cmd, args, nil
}
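// debugSSHCommand is an illustrative helper (not part of upstream restic): it
// renders the command that buildSSHCommand would run. For a hypothetical
// Config{User: "u", Host: "h", Port: "2222"} it returns
// "ssh [h -p 2222 -l u -s sftp]".
func debugSSHCommand(cfg Config) string {
cmd, args, err := buildSSHCommand(cfg)
if err != nil {
return "invalid sftp config: " + err.Error()
}
return fmt.Sprintf("%s %v", cmd, args)
}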
// Create creates an sftp backend as described by the config by running "ssh"
// with the appropriate arguments (or cfg.Command, if set).
func Create(ctx context.Context, cfg Config, errorLog func(string, ...interface{})) (*SFTP, error) {
sftp, err := startClient(cfg, errorLog)
if err != nil {
debug.Log("unable to start program: %v", err)
return nil, err
}
sftp.Modes = util.DefaultModes
// test if config file already exists
_, err = sftp.c.Lstat(sftp.Layout.Filename(backend.Handle{Type: backend.ConfigFile}))
if err == nil {
return nil, errors.New("config file already exists")
}
// create paths for data and refs
if err = sftp.mkdirAllDataSubdirs(ctx, cfg.Connections); err != nil {
return nil, err
}
// repurpose existing connection
return open(sftp, cfg)
}
func (r *SFTP) Properties() backend.Properties {
return backend.Properties{
Connections: r.Config.Connections,
HasAtomicReplace: r.posixRename,
}
}
// Hasher may return a hash function for calculating a content hash for the backend
func (r *SFTP) Hasher() hash.Hash {
return nil
}
// tempSuffix generates a random string suffix that should be sufficiently long
// to avoid accidental conflicts
func tempSuffix() string {
var nonce [16]byte
_, err := rand.Read(nonce[:])
if err != nil {
panic(err)
}
return hex.EncodeToString(nonce[:])
}
func setFileReadonly(client *sftp.Client, path string, mode os.FileMode) error {
// clear owner/group/other write bits
readonlyMode := mode &^ 0o222
err := client.Chmod(path, readonlyMode)
// if the operation is not supported by the sftp server, ignore the error.
if errors.Is(err, sftp.ErrSSHFxOpUnsupported) {
return nil
}
return err
}
// Save stores data in the backend at the handle.
func (r *SFTP) Save(_ context.Context, h backend.Handle, rd backend.RewindReader) error {
if err := r.clientError(); err != nil {
return err
}
filename := r.Filename(h)
tmpFilename := filename + "-restic-temp-" + tempSuffix()
dirname := r.Dirname(h)
// create new file
f, err := r.c.OpenFile(tmpFilename, os.O_CREATE|os.O_EXCL|os.O_WRONLY)
if r.IsNotExist(err) {
// error is caused by a missing directory, try to create it
mkdirErr := r.c.MkdirAll(dirname)
if mkdirErr != nil {
debug.Log("error creating dir %v: %v", dirname, mkdirErr)
} else {
// try again
f, err = r.c.OpenFile(tmpFilename, os.O_CREATE|os.O_EXCL|os.O_WRONLY)
}
}
if err != nil {
return errors.Wrapf(err, "OpenFile %v", tmpFilename)
}
// pkg/sftp doesn't allow creating with a mode.
// Chmod while the file is still empty.
err = f.Chmod(r.Modes.File)
if err != nil {
return errors.Wrapf(err, "Chmod %v", tmpFilename)
}
defer func() {
if err == nil {
return
}
// Try not to leave a partial file behind.
rmErr := r.c.Remove(f.Name())
if rmErr != nil {
debug.Log("sftp: failed to remove broken file %v: %v",
f.Name(), rmErr)
}
}()
// save data, make sure to use the optimized sftp upload method
wbytes, err := f.ReadFromWithConcurrency(rd, 0)
if err != nil {
_ = f.Close()
err = r.checkNoSpace(dirname, rd.Length(), err)
return errors.Wrapf(err, "Write %v", tmpFilename)
}
// sanity check
if wbytes != rd.Length() {
_ = f.Close()
return errors.Errorf("Write %v: wrote %d bytes instead of the expected %d bytes", tmpFilename, wbytes, rd.Length())
}
err = f.Close()
if err != nil {
return errors.Wrapf(err, "Close %v", tmpFilename)
}
// Prefer POSIX atomic rename if available.
if r.posixRename {
err = r.c.PosixRename(tmpFilename, filename)
} else {
err = r.c.Rename(tmpFilename, filename)
}
if err != nil {
return errors.Wrapf(err, "Rename %v", tmpFilename)
}
// only mark the file read-only once it has its final name
err = setFileReadonly(r.c, filename, r.Modes.File)
if err != nil {
return errors.Errorf("sftp setFileReadonly: %v", err)
}
return nil
}
// checkNoSpace checks if err was likely caused by lack of available space
// on the remote, and if so, makes it permanent.
func (r *SFTP) checkNoSpace(dir string, size int64, origErr error) error {
// The SFTP protocol has a message for ENOSPC,
// but pkg/sftp doesn't export it and OpenSSH's sftp-server
// sends FX_FAILURE instead.
e, ok := origErr.(*sftp.StatusError)
_, hasExt := r.c.HasExtension("statvfs@openssh.com")
if !ok || e.FxCode() != sftp.ErrSSHFxFailure || !hasExt {
return origErr
}
fsinfo, err := r.c.StatVFS(dir)
if err != nil {
debug.Log("sftp: StatVFS returned %v", err)
return origErr
}
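// Frsize*Bavail approximates the bytes available to unprivileged users;
// if the upload doesn't fit (or no inodes are left), report ENOSPC.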
if fsinfo.Favail == 0 || fsinfo.Frsize*fsinfo.Bavail < uint64(size) {
err := errors.New("sftp: no space left on device")
return backoff.Permanent(err)
}
return origErr
}
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (r *SFTP) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
if err := r.clientError(); err != nil {
return err
}
return util.DefaultLoad(ctx, h, length, offset, r.openReader, func(rd io.Reader) error {
if length == 0 || !feature.Flag.Enabled(feature.BackendErrorRedesign) {
return fn(rd)
}
// there is no direct way to efficiently check whether the file is too short
// rd is already a LimitedReader which can be used to track the number of bytes read
err := fn(rd)
// check the underlying reader to be agnostic to however fn() handles the returned error
_, rderr := rd.Read([]byte{0})
if rderr == io.EOF && rd.(*util.LimitedReadCloser).N != 0 {
// file is too short
return fmt.Errorf("%w: %v", errTooShort, err)
}
return err
})
}
func (r *SFTP) openReader(_ context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
f, err := r.c.Open(r.Filename(h))
if err != nil {
return nil, errors.Wrapf(err, "Open %v", r.Filename(h))
}
if offset > 0 {
_, err = f.Seek(offset, 0)
if err != nil {
_ = f.Close()
return nil, errors.Wrapf(err, "Seek %v", r.Filename(h))
}
}
if length > 0 {
// unlimited reads usually use io.Copy which needs WriteTo support at the underlying reader
// limited reads are usually combined with io.ReadFull which reads all required bytes into a buffer in one go
return util.LimitReadCloser(f, int64(length)), nil
}
return f, nil
}
// Stat returns information about a file in the backend.
func (r *SFTP) Stat(_ context.Context, h backend.Handle) (backend.FileInfo, error) {
if err := r.clientError(); err != nil {
return backend.FileInfo{}, err
}
fi, err := r.c.Lstat(r.Filename(h))
if err != nil {
return backend.FileInfo{}, errors.Wrapf(err, "Lstat %v", r.Filename(h))
}
return backend.FileInfo{Size: fi.Size(), Name: h.Name}, nil
}
// Remove removes the content stored at name.
func (r *SFTP) Remove(_ context.Context, h backend.Handle) error {
if err := r.clientError(); err != nil {
return err
}
return errors.Wrapf(r.c.Remove(r.Filename(h)), "Remove %v", r.Filename(h))
}
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (r *SFTP) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
if err := r.clientError(); err != nil {
return err
}
basedir, subdirs := r.Basedir(t)
walker := r.c.Walk(basedir)
for {
ok := walker.Step()
if !ok {
break
}
if walker.Err() != nil {
if r.IsNotExist(walker.Err()) {
debug.Log("ignoring non-existing directory")
return nil
}
return errors.Wrapf(walker.Err(), "Walk %v", basedir)
}
if walker.Path() == basedir {
continue
}
if walker.Stat().IsDir() && !subdirs {
walker.SkipDir()
continue
}
fi := walker.Stat()
if !fi.Mode().IsRegular() {
continue
}
debug.Log("send %v\n", path.Base(walker.Path()))
rfi := backend.FileInfo{
Name: path.Base(walker.Path()),
Size: fi.Size(),
}
if ctx.Err() != nil {
return ctx.Err()
}
err := fn(rfi)
if err != nil {
return err
}
if ctx.Err() != nil {
return ctx.Err()
}
}
return ctx.Err()
}
var closeTimeout = 2 * time.Second
// Close closes the sftp connection and terminates the underlying command.
func (r *SFTP) Close() error {
if r == nil {
return nil
}
err := errors.Wrap(r.c.Close(), "Close")
debug.Log("Close returned error %v", err)
// wait for closeTimeout before killing the process
select {
case err := <-r.result:
return err
case <-time.After(closeTimeout):
}
if err := r.cmd.Process.Kill(); err != nil {
return err
}
// get the error, but ignore it
<-r.result
return nil
}
func (r *SFTP) deleteRecursive(ctx context.Context, name string) error {
entries, err := r.c.ReadDir(name)
if err != nil {
return errors.Wrapf(err, "ReadDir %v", name)
}
for _, fi := range entries {
if ctx.Err() != nil {
return ctx.Err()
}
itemName := path.Join(name, fi.Name())
if fi.IsDir() {
err := r.deleteRecursive(ctx, itemName)
if err != nil {
return err
}
err = r.c.RemoveDirectory(itemName)
if err != nil {
return errors.Wrapf(err, "RemoveDirectory %v", itemName)
}
continue
}
err := r.c.Remove(itemName)
if err != nil {
return errors.Wrapf(err, "Remove %v", itemName)
}
}
return nil
}
// Delete removes all data in the backend.
func (r *SFTP) Delete(ctx context.Context) error {
return r.deleteRecursive(ctx, r.p)
}
// Warmup not implemented
func (r *SFTP) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
return []backend.Handle{}, nil
}
func (r *SFTP) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/sftp/config.go | internal/backend/sftp/config.go | package sftp
import (
"net/url"
"path"
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
)
// Config collects all information required to connect to an sftp server.
type Config struct {
User, Host, Port, Path string
Command string `option:"command" help:"specify command to create sftp connection"`
Args string `option:"args" help:"specify arguments for ssh"`
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
}
// NewConfig returns a new config with default options applied.
func NewConfig() Config {
return Config{
Connections: 5,
}
}
func init() {
options.Register("sftp", Config{})
}
// ParseConfig parses the string s and extracts the sftp config. The
// supported configuration formats are sftp://user@host[:port]/directory
// and sftp:user@host:directory. The directory is cleaned with path.Clean and can
// be an absolute path if it starts with a '/' (e.g.
// sftp://user@host//absolute and sftp:user@host:/absolute).
func ParseConfig(s string) (*Config, error) {
var user, host, port, dir string
switch {
case strings.HasPrefix(s, "sftp://"):
// parse the "sftp://user@host/path" url format
url, err := url.Parse(s)
if err != nil {
return nil, errors.WithStack(err)
}
if url.User != nil {
user = url.User.Username()
}
host = url.Hostname()
port = url.Port()
dir = url.Path
if dir == "" {
return nil, errors.Errorf("invalid backend %q, no directory specified", s)
}
dir = dir[1:]
case strings.HasPrefix(s, "sftp:"):
// parse the sftp:user@host:path format, which means we'll get
// "user@host:path" in s
s = s[5:]
// split user@host and path at the colon
var colon bool
host, dir, colon = strings.Cut(s, ":")
if !colon {
return nil, errors.New("sftp: invalid format, hostname or path not found")
}
// split user and host at the "@"
data := strings.SplitN(host, "@", 3)
if len(data) == 3 {
user = data[0] + "@" + data[1]
host = data[2]
} else if len(data) == 2 {
user = data[0]
host = data[1]
}
default:
return nil, errors.New(`invalid format, does not start with "sftp:"`)
}
p := path.Clean(dir)
if strings.HasPrefix(p, "~") {
return nil, errors.New("sftp path starts with the tilde (~) character, that fails for most sftp servers.\nUse a relative directory, most servers interpret this as relative to the user's home directory")
}
cfg := NewConfig()
cfg.User = user
cfg.Host = host
cfg.Port = port
cfg.Path = p
return &cfg, nil
}
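// Illustrative usage (not part of upstream restic):
//
//	cfg, err := ParseConfig("sftp://user@host:2222//srv/repo")
//	// on success: cfg.User == "user", cfg.Host == "host",
//	// cfg.Port == "2222", cfg.Path == "/srv/repo" (double slash = absolute path)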
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/sftp/config_test.go | internal/backend/sftp/config_test.go | package sftp
import (
"testing"
"github.com/restic/restic/internal/backend/test"
)
var configTests = []test.ConfigTestData[Config]{
// first form, user specified sftp://user@host/dir
{
S: "sftp://user@host/dir/subdir",
Cfg: Config{User: "user", Host: "host", Path: "dir/subdir", Connections: 5},
},
{
S: "sftp://host/dir/subdir",
Cfg: Config{Host: "host", Path: "dir/subdir", Connections: 5},
},
{
S: "sftp://host//dir/subdir",
Cfg: Config{Host: "host", Path: "/dir/subdir", Connections: 5},
},
{
S: "sftp://host:10022//dir/subdir",
Cfg: Config{Host: "host", Port: "10022", Path: "/dir/subdir", Connections: 5},
},
{
S: "sftp://user@host:10022//dir/subdir",
Cfg: Config{User: "user", Host: "host", Port: "10022", Path: "/dir/subdir", Connections: 5},
},
{
S: "sftp://user@host/dir/subdir/../other",
Cfg: Config{User: "user", Host: "host", Path: "dir/other", Connections: 5},
},
{
S: "sftp://user@host/dir///subdir",
Cfg: Config{User: "user", Host: "host", Path: "dir/subdir", Connections: 5},
},
// IPv6 address.
{
S: "sftp://user@[::1]/dir",
Cfg: Config{User: "user", Host: "::1", Path: "dir", Connections: 5},
},
// IPv6 address with port.
{
S: "sftp://user@[::1]:22/dir",
Cfg: Config{User: "user", Host: "::1", Port: "22", Path: "dir", Connections: 5},
},
// second form, user specified sftp:user@host:/dir
{
S: "sftp:user@host:/dir/subdir",
Cfg: Config{User: "user", Host: "host", Path: "/dir/subdir", Connections: 5},
},
{
S: "sftp:user@domain@host:/dir/subdir",
Cfg: Config{User: "user@domain", Host: "host", Path: "/dir/subdir", Connections: 5},
},
{
S: "sftp:host:../dir/subdir",
Cfg: Config{Host: "host", Path: "../dir/subdir", Connections: 5},
},
{
S: "sftp:user@host:dir/subdir:suffix",
Cfg: Config{User: "user", Host: "host", Path: "dir/subdir:suffix", Connections: 5},
},
{
S: "sftp:user@host:dir/subdir/../other",
Cfg: Config{User: "user", Host: "host", Path: "dir/other", Connections: 5},
},
{
S: "sftp:user@host:dir///subdir",
Cfg: Config{User: "user", Host: "host", Path: "dir/subdir", Connections: 5},
},
}
func TestParseConfig(t *testing.T) {
test.ParseConfigTester(t, ParseConfig, configTests)
}
var configTestsInvalid = []string{
"sftp://host:dir",
}
func TestParseConfigInvalid(t *testing.T) {
for i, test := range configTestsInvalid {
_, err := ParseConfig(test)
if err == nil {
t.Errorf("test %d: invalid config %s did not return an error", i, test)
continue
}
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/sftp/sshcmd_test.go | internal/backend/sftp/sshcmd_test.go | package sftp
import (
"reflect"
"testing"
)
var sshcmdTests = []struct {
cfg Config
cmd string
args []string
err string
}{
{
Config{User: "user", Host: "host", Path: "dir/subdir"},
"ssh",
[]string{"host", "-l", "user", "-s", "sftp"},
"",
},
{
Config{Host: "host", Path: "dir/subdir"},
"ssh",
[]string{"host", "-s", "sftp"},
"",
},
{
Config{Host: "host", Port: "10022", Path: "/dir/subdir"},
"ssh",
[]string{"host", "-p", "10022", "-s", "sftp"},
"",
},
{
Config{User: "user", Host: "host", Port: "10022", Path: "/dir/subdir"},
"ssh",
[]string{"host", "-p", "10022", "-l", "user", "-s", "sftp"},
"",
},
{
Config{User: "user", Host: "host", Port: "10022", Path: "/dir/subdir", Args: "-i /path/to/id_rsa"},
"ssh",
[]string{"host", "-p", "10022", "-l", "user", "-i", "/path/to/id_rsa", "-s", "sftp"},
"",
},
{
Config{Command: "ssh something", Args: "-i /path/to/id_rsa"},
"",
nil,
"cannot specify both sftp.command and sftp.args options",
},
{
// IPv6 address.
Config{User: "user", Host: "::1", Path: "dir"},
"ssh",
[]string{"::1", "-l", "user", "-s", "sftp"},
"",
},
{
// IPv6 address with zone and port.
Config{User: "user", Host: "::1%lo0", Port: "22", Path: "dir"},
"ssh",
[]string{"::1%lo0", "-p", "22", "-l", "user", "-s", "sftp"},
"",
},
}
func TestBuildSSHCommand(t *testing.T) {
for i, test := range sshcmdTests {
t.Run("", func(t *testing.T) {
cmd, args, err := buildSSHCommand(test.cfg)
if test.err != "" {
if err.Error() != test.err {
t.Fatalf("expected error %v got %v", test.err, err.Error())
}
} else {
if err != nil {
t.Fatalf("%v in test %d", err, i)
}
}
if cmd != test.cmd {
t.Fatalf("cmd: want %v, got %v", test.cmd, cmd)
}
if !reflect.DeepEqual(test.args, args) {
t.Fatalf("wrong args in test %d, want:\n %v\ngot:\n %v",
i, test.args, args)
}
})
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/sftp/layout_test.go | internal/backend/sftp/layout_test.go | package sftp_test
import (
"context"
"fmt"
"path/filepath"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/sftp"
rtest "github.com/restic/restic/internal/test"
)
func TestLayout(t *testing.T) {
if sftpServer == "" {
t.Skip("sftp server binary not available")
}
path := rtest.TempDir(t)
var tests = []struct {
filename string
failureExpected bool
packfiles map[string]bool
}{
{"repo-layout-default.tar.gz", false, map[string]bool{
"aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false,
"fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false,
"c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false,
}},
}
for _, test := range tests {
t.Run(test.filename, func(t *testing.T) {
rtest.SetupTarTestFixture(t, path, filepath.Join("..", "testdata", test.filename))
repo := filepath.Join(path, "repo")
be, err := sftp.Open(context.TODO(), sftp.Config{
Command: fmt.Sprintf("%q -e", sftpServer),
Path: repo,
Connections: 5,
}, t.Logf)
if err != nil {
t.Fatal(err)
}
if be == nil {
t.Fatalf("Open() returned nil but no error")
}
packs := make(map[string]bool)
err = be.List(context.TODO(), backend.PackFile, func(fi backend.FileInfo) error {
packs[fi.Name] = false
return nil
})
rtest.OK(t, err)
if len(packs) == 0 {
t.Errorf("List() returned zero pack files")
}
for id := range test.packfiles {
if _, ok := packs[id]; !ok {
t.Errorf("packfile with id %v not found", id)
}
packs[id] = true
}
for id, v := range packs {
if !v {
t.Errorf("unexpected id %v found", id)
}
}
if err = be.Close(); err != nil {
t.Errorf("Close() returned error %v", err)
}
rtest.RemoveAll(t, filepath.Join(path, "repo"))
})
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/sftp/doc.go | internal/backend/sftp/doc.go | // Package sftp implements repository storage in a directory on a remote server
// via the sftp protocol.
package sftp
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/cache/file.go | internal/backend/cache/file.go | package cache
import (
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"github.com/pkg/errors"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/util"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
)
func (c *Cache) filename(h backend.Handle) string {
if len(h.Name) < 2 {
panic("Name is empty or too short")
}
subdir := h.Name[:2]
return filepath.Join(c.path, cacheLayoutPaths[h.Type], subdir, h.Name)
}
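// For example (illustrative), a pack file named "aabb..." is cached at
// <cachedir>/data/aa/aabb..., mirroring the repository's fan-out layout.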
func (c *Cache) canBeCached(t backend.FileType) bool {
if c == nil {
return false
}
_, ok := cacheLayoutPaths[t]
return ok
}
// load returns a reader that yields the contents of the file with the
// given handle. rd must be closed after use. If an error is returned, the
// ReadCloser is nil. The bool return value indicates whether the requested
// file exists in the cache. It can be true even when no reader is returned
// because length or offset are out of bounds.
func (c *Cache) load(h backend.Handle, length int, offset int64) (io.ReadCloser, bool, error) {
debug.Log("Load(%v, %v, %v) from cache", h, length, offset)
if !c.canBeCached(h.Type) {
return nil, false, errors.New("cannot be cached")
}
f, err := os.Open(c.filename(h))
if err != nil {
return nil, false, errors.WithStack(err)
}
fi, err := f.Stat()
if err != nil {
_ = f.Close()
return nil, true, errors.WithStack(err)
}
size := fi.Size()
if size <= int64(crypto.CiphertextLength(0)) {
_ = f.Close()
return nil, true, errors.Errorf("cached file %v is truncated", h)
}
if size < offset+int64(length) {
_ = f.Close()
return nil, true, errors.Errorf("cached file %v is too short", h)
}
if offset > 0 {
if _, err = f.Seek(offset, io.SeekStart); err != nil {
_ = f.Close()
return nil, true, err
}
}
if length <= 0 {
return f, true, nil
}
return util.LimitReadCloser(f, int64(length)), true, nil
}
// save saves a file in the cache.
func (c *Cache) save(h backend.Handle, rd io.Reader) error {
debug.Log("Save to cache: %v", h)
if rd == nil {
return errors.New("Save() called with nil reader")
}
if !c.canBeCached(h.Type) {
return errors.New("cannot be cached")
}
finalname := c.filename(h)
dir := filepath.Dir(finalname)
err := os.Mkdir(dir, 0700)
if err != nil && !errors.Is(err, os.ErrExist) {
return err
}
// First save to a temporary location. This allows multiple concurrent
// restics to use a single cache dir.
f, err := os.CreateTemp(dir, "tmp-")
if err != nil {
return err
}
n, err := io.Copy(f, rd)
if err != nil {
_ = f.Close()
_ = os.Remove(f.Name())
return errors.Wrap(err, "Copy")
}
if n <= int64(crypto.CiphertextLength(0)) {
_ = f.Close()
_ = os.Remove(f.Name())
debug.Log("trying to cache truncated file %v, removing", h)
return nil
}
// Close, then rename. Windows doesn't like the reverse order.
if err = f.Close(); err != nil {
_ = os.Remove(f.Name())
return errors.WithStack(err)
}
err = os.Rename(f.Name(), finalname)
if err != nil {
_ = os.Remove(f.Name())
}
if runtime.GOOS == "windows" && errors.Is(err, os.ErrPermission) {
// On Windows, renaming over an existing file is ok
// (os.Rename is MoveFileExW with MOVEFILE_REPLACE_EXISTING
// since Go 1.5), but not when someone else has the file open.
//
// When we get Access denied, we assume that's the case
// and the other process has written the desired contents to f.
err = nil
}
return errors.WithStack(err)
}
func (c *Cache) Forget(h backend.Handle) error {
h.IsMetadata = false
if _, ok := c.forgotten.Load(h); ok {
// Delete a file at most once while restic runs.
// This prevents repeatedly caching and forgetting broken files
return fmt.Errorf("circuit breaker prevents repeated deletion of cached file %v", h)
}
removed, err := c.remove(h)
if removed {
c.forgotten.Store(h, struct{}{})
}
return err
}
// remove deletes a file. When the file is not cached, no error is returned.
func (c *Cache) remove(h backend.Handle) (bool, error) {
if !c.canBeCached(h.Type) {
return false, nil
}
err := os.Remove(c.filename(h))
removed := err == nil
if errors.Is(err, os.ErrNotExist) {
err = nil
}
return removed, err
}
// Clear removes all files of type t from the cache that are not contained in
// the set valid.
func (c *Cache) Clear(t restic.FileType, valid restic.IDSet) error {
debug.Log("Clearing cache for %v: %v valid files", t, len(valid))
if !c.canBeCached(t) {
return nil
}
list, err := c.list(t)
if err != nil {
return err
}
for id := range list {
if valid.Has(id) {
continue
}
// ignore ErrNotExist to gracefully handle multiple processes running Clear() concurrently
if err = os.Remove(c.filename(backend.Handle{Type: t, Name: id.String()})); err != nil && !errors.Is(err, os.ErrNotExist) {
return err
}
}
return nil
}
func isFile(fi os.FileInfo) bool {
return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
}
// list returns a list of all files of type T in the cache.
func (c *Cache) list(t restic.FileType) (restic.IDSet, error) {
if !c.canBeCached(t) {
return nil, errors.New("cannot be cached")
}
list := restic.NewIDSet()
dir := filepath.Join(c.path, cacheLayoutPaths[t])
err := filepath.Walk(dir, func(name string, fi os.FileInfo, err error) error {
if err != nil {
// ignore ErrNotExist to gracefully handle multiple processes clearing the cache
if errors.Is(err, os.ErrNotExist) {
return nil
}
return errors.Wrap(err, "Walk")
}
if !isFile(fi) {
return nil
}
id, err := restic.ParseID(filepath.Base(name))
if err != nil {
return nil
}
list.Insert(id)
return nil
})
return list, err
}
// Has returns true if the file is cached.
func (c *Cache) Has(h backend.Handle) bool {
if !c.canBeCached(h.Type) {
return false
}
_, err := os.Stat(c.filename(h))
return err == nil
}
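// Illustrative round trip through the cache's unexported API (not part of
// upstream restic); handle names need at least two characters because the
// leading two select the subdirectory:
//
//	h := backend.Handle{Type: restic.PackFile, Name: id.String()}
//	_ = c.save(h, bytes.NewReader(buf))
//	rd, inCache, err := c.load(h, 0, 0) // rd must be closed by the caller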
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/cache/dir_test.go | internal/backend/cache/dir_test.go | package cache
import (
"os"
"testing"
rtest "github.com/restic/restic/internal/test"
)
// DefaultDir should honor RESTIC_CACHE_DIR on all platforms.
func TestCacheDirEnv(t *testing.T) {
cachedir := os.Getenv("RESTIC_CACHE_DIR")
if cachedir == "" {
cachedir = "/doesnt/exist"
err := os.Setenv("RESTIC_CACHE_DIR", cachedir)
if err != nil {
t.Fatal(err)
}
defer func() {
err := os.Unsetenv("RESTIC_CACHE_DIR")
if err != nil {
t.Fatal(err)
}
}()
}
dir, err := DefaultDir()
rtest.Equals(t, cachedir, dir)
rtest.OK(t, err)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/cache/cache.go | internal/backend/cache/cache.go | package cache
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
"sync"
"time"
"github.com/pkg/errors"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
)
// Cache manages a local cache.
type Cache struct {
path string
Base string
Created bool
forgotten sync.Map
}
const dirMode = 0700
const fileMode = 0644
func readVersion(dir string) (v uint, err error) {
buf, err := os.ReadFile(filepath.Join(dir, "version"))
if err != nil {
return 0, errors.Wrap(err, "readVersion")
}
ver, err := strconv.ParseUint(string(buf), 10, 32)
if err != nil {
return 0, errors.Wrap(err, "readVersion")
}
return uint(ver), nil
}
const cacheVersion = 1
var cacheLayoutPaths = map[restic.FileType]string{
restic.PackFile: "data",
restic.SnapshotFile: "snapshots",
restic.IndexFile: "index",
}
const cachedirTagSignature = "Signature: 8a477f597d28d172789f06886806bc55\n"
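// The signature above is defined by the Cache Directory Tagging Specification
// (https://bford.info/cachedir/), so other tools can recognize and skip the
// cache directory.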
func writeCachedirTag(dir string) error {
tagfile := filepath.Join(dir, "CACHEDIR.TAG")
f, err := os.OpenFile(tagfile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, fileMode)
if err != nil {
if errors.Is(err, os.ErrExist) {
return nil
}
return errors.WithStack(err)
}
debug.Log("Create CACHEDIR.TAG at %v", dir)
if _, err := f.Write([]byte(cachedirTagSignature)); err != nil {
_ = f.Close()
return errors.WithStack(err)
}
return errors.WithStack(f.Close())
}
// New returns a new cache for the repo ID at basedir. If basedir is the empty
// string, the default cache location (according to the XDG standard) is used.
func New(id string, basedir string) (c *Cache, err error) {
if basedir == "" {
basedir, err = DefaultDir()
if err != nil {
return nil, err
}
}
err = os.MkdirAll(basedir, dirMode)
if err != nil {
return nil, errors.WithStack(err)
}
// create base dir and tag it as a cache directory
if err = writeCachedirTag(basedir); err != nil {
return nil, err
}
cachedir := filepath.Join(basedir, id)
debug.Log("using cache dir %v", cachedir)
created := false
v, err := readVersion(cachedir)
switch {
case err == nil:
if v > cacheVersion {
return nil, errors.New("cache version is newer")
}
// Update the timestamp so that we can detect old cache dirs.
err = updateTimestamp(cachedir)
if err != nil {
return nil, err
}
case errors.Is(err, os.ErrNotExist):
// Create the repo cache dir. The parent exists, so Mkdir suffices.
err := os.Mkdir(cachedir, dirMode)
switch {
case err == nil:
created = true
case errors.Is(err, os.ErrExist):
default:
return nil, errors.WithStack(err)
}
default:
return nil, errors.Wrap(err, "readVersion")
}
if v < cacheVersion {
err = os.WriteFile(filepath.Join(cachedir, "version"), []byte(fmt.Sprintf("%d", cacheVersion)), fileMode)
if err != nil {
return nil, errors.WithStack(err)
}
}
for _, p := range cacheLayoutPaths {
if err = os.MkdirAll(filepath.Join(cachedir, p), dirMode); err != nil {
return nil, errors.WithStack(err)
}
}
c = &Cache{
path: cachedir,
Base: basedir,
Created: created,
}
return c, nil
}
// updateTimestamp sets the modification timestamp (mtime and atime) for the
// directory d to the current time.
func updateTimestamp(d string) error {
t := time.Now()
return os.Chtimes(d, t, t)
}
// MaxCacheAge is the default age (30 days) after which cache directories are considered old.
const MaxCacheAge = 30 * 24 * time.Hour
// cacheDirNameRE matches repo cache dirs (hex repo IDs) and check-cache dirs;
// compiling it once avoids recompiling the pattern on every call.
var cacheDirNameRE = regexp.MustCompile(`^[a-fA-F0-9]{64}$|^restic-check-cache-[0-9]+$`)
func validCacheDirName(s string) bool {
return cacheDirNameRE.MatchString(s)
}
// listCacheDirs returns the list of cache directories.
func listCacheDirs(basedir string) ([]os.FileInfo, error) {
f, err := os.Open(basedir)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
err = nil
}
return nil, err
}
entries, err := f.Readdir(-1)
if err != nil {
return nil, err
}
err = f.Close()
if err != nil {
return nil, err
}
result := make([]os.FileInfo, 0, len(entries))
for _, entry := range entries {
if !entry.IsDir() {
continue
}
if !validCacheDirName(entry.Name()) {
continue
}
result = append(result, entry)
}
return result, nil
}
// All returns a list of cache directories.
func All(basedir string) (dirs []os.FileInfo, err error) {
return listCacheDirs(basedir)
}
// OlderThan returns the list of cache directories older than max.
func OlderThan(basedir string, max time.Duration) ([]os.FileInfo, error) {
entries, err := listCacheDirs(basedir)
if err != nil {
return nil, err
}
var oldCacheDirs []os.FileInfo
for _, fi := range entries {
if !IsOld(fi.ModTime(), max) {
continue
}
oldCacheDirs = append(oldCacheDirs, fi)
}
debug.Log("%d old cache dirs found", len(oldCacheDirs))
return oldCacheDirs, nil
}
// Old returns a list of cache directories last modified more than
// MaxCacheAge (30 days) ago.
func Old(basedir string) ([]os.FileInfo, error) {
return OlderThan(basedir, MaxCacheAge)
}
// IsOld returns true if the timestamp is considered old.
func IsOld(t time.Time, maxAge time.Duration) bool {
oldest := time.Now().Add(-maxAge)
return t.Before(oldest)
}
// Wrap returns a backend with a cache.
func (c *Cache) Wrap(be backend.Backend, errorLog func(string, ...interface{})) backend.Backend {
return newBackend(be, c, errorLog)
}
// BaseDir returns the base directory.
func (c *Cache) BaseDir() string {
return c.Base
}
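// Illustrative usage sketch (not part of upstream restic): open a cache for a
// repository and route backend reads through it:
//
//	c, err := cache.New(repoID.String(), "") // "" picks DefaultDir()
//	if err == nil {
//		be = c.Wrap(be, log.Printf)
//	}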
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/cache/cache_test.go | internal/backend/cache/cache_test.go | package cache
import (
"os"
"path/filepath"
"testing"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
func TestNew(t *testing.T) {
parent := rtest.TempDir(t)
basedir := filepath.Join(parent, "cache")
id := restic.NewRandomID().String()
tagFile := filepath.Join(basedir, "CACHEDIR.TAG")
versionFile := filepath.Join(basedir, id, "version")
const (
stepCreate = iota
stepComplete
stepRmTag
stepRmVersion
stepEnd
)
for step := stepCreate; step < stepEnd; step++ {
switch step {
case stepRmTag:
rtest.OK(t, os.Remove(tagFile))
case stepRmVersion:
rtest.OK(t, os.Remove(versionFile))
}
c, err := New(id, basedir)
rtest.OK(t, err)
rtest.Equals(t, basedir, c.Base)
rtest.Equals(t, step == stepCreate, c.Created)
for _, name := range []string{tagFile, versionFile} {
info, err := os.Lstat(name)
rtest.OK(t, err)
rtest.Assert(t, info.Mode().IsRegular(), "")
}
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/cache/file_test.go | internal/backend/cache/file_test.go | package cache
import (
"bytes"
"fmt"
"io"
"math/rand"
"os"
"runtime"
"testing"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
"golang.org/x/sync/errgroup"
)
func generateRandomFiles(t testing.TB, random *rand.Rand, tpe backend.FileType, c *Cache) restic.IDSet {
ids := restic.NewIDSet()
for i := 0; i < random.Intn(15)+10; i++ {
buf := rtest.Random(random.Int(), 1<<19)
id := restic.Hash(buf)
h := backend.Handle{Type: tpe, Name: id.String()}
if c.Has(h) {
t.Errorf("index %v present before save", id)
}
err := c.save(h, bytes.NewReader(buf))
if err != nil {
t.Fatal(err)
}
ids.Insert(id)
}
return ids
}
// randomID returns a random ID from s.
func randomID(s restic.IDSet) restic.ID {
for id := range s {
return id
}
panic("set is empty")
}
func load(t testing.TB, c *Cache, h backend.Handle) []byte {
rd, inCache, err := c.load(h, 0, 0)
if err != nil {
t.Fatal(err)
}
rtest.Equals(t, true, inCache, "expected inCache flag to be true")
if rd == nil {
t.Fatalf("load() returned nil reader")
}
buf, err := io.ReadAll(rd)
if err != nil {
t.Fatal(err)
}
if err = rd.Close(); err != nil {
t.Fatal(err)
}
return buf
}
func listFiles(t testing.TB, c *Cache, tpe restic.FileType) restic.IDSet {
list, err := c.list(tpe)
if err != nil {
t.Errorf("listing failed: %v", err)
}
return list
}
func clearFiles(t testing.TB, c *Cache, tpe restic.FileType, valid restic.IDSet) {
if err := c.Clear(tpe, valid); err != nil {
t.Error(err)
}
}
func TestFiles(t *testing.T) {
seed := time.Now().Unix()
t.Logf("seed is %v", seed)
random := rand.New(rand.NewSource(seed))
c := TestNewCache(t)
var tests = []restic.FileType{
restic.SnapshotFile,
restic.PackFile,
restic.IndexFile,
}
for _, tpe := range tests {
t.Run(tpe.String(), func(t *testing.T) {
ids := generateRandomFiles(t, random, tpe, c)
id := randomID(ids)
h := backend.Handle{Type: tpe, Name: id.String()}
id2 := restic.Hash(load(t, c, h))
if !id.Equal(id2) {
t.Errorf("wrong data returned, want %v, got %v", id.Str(), id2.Str())
}
if !c.Has(h) {
t.Errorf("cache thinks index %v isn't present", id.Str())
}
list := listFiles(t, c, tpe)
if !ids.Equals(list) {
t.Errorf("wrong list of index IDs returned, want:\n %v\ngot:\n %v", ids, list)
}
clearFiles(t, c, tpe, restic.NewIDSet(id))
list2 := listFiles(t, c, tpe)
ids.Delete(id)
want := restic.NewIDSet(id)
if !list2.Equals(want) {
t.Errorf("ClearIndexes removed indexes, want:\n %v\ngot:\n %v", list2, want)
}
clearFiles(t, c, tpe, restic.NewIDSet())
want = restic.NewIDSet()
list3 := listFiles(t, c, tpe)
if !list3.Equals(want) {
t.Errorf("ClearIndexes returned a wrong list, want:\n %v\ngot:\n %v", want, list3)
}
})
}
}
func TestFileLoad(t *testing.T) {
seed := time.Now().Unix()
t.Logf("seed is %v", seed)
random := rand.New(rand.NewSource(seed))
c := TestNewCache(t)
// save about 5 MiB of data in the cache
data := rtest.Random(random.Int(), 5234142)
id := restic.ID{}
copy(id[:], data)
h := backend.Handle{
Type: restic.PackFile,
Name: id.String(),
}
if err := c.save(h, bytes.NewReader(data)); err != nil {
t.Fatalf("Save() returned error: %v", err)
}
var tests = []struct {
offset int64
length int
}{
{0, 0},
{5, 0},
{32*1024 + 5, 0},
{0, 123},
{0, 64*1024 + 234},
{100, 5234142 - 100},
}
for _, test := range tests {
t.Run(fmt.Sprintf("%v/%v", test.length, test.offset), func(t *testing.T) {
rd, inCache, err := c.load(h, test.length, test.offset)
if err != nil {
t.Fatal(err)
}
rtest.Equals(t, true, inCache, "expected inCache flag to be true")
buf, err := io.ReadAll(rd)
if err != nil {
t.Fatal(err)
}
if err = rd.Close(); err != nil {
t.Fatal(err)
}
o := int(test.offset)
l := test.length
if test.length == 0 {
l = len(data) - o
}
if l > len(data)-o {
l = len(data) - o
}
if len(buf) != l {
t.Fatalf("wrong number of bytes returned: want %d, got %d", l, len(buf))
}
if !bytes.Equal(buf, data[o:o+l]) {
t.Fatalf("wrong data returned, want:\n %02x\ngot:\n %02x", data[o:o+16], buf[:16])
}
})
}
}
// Simulate multiple processes writing to a cache, using goroutines.
//
// The possibility of sharing a cache between multiple concurrent restic
// processes isn't guaranteed in the docs and doesn't always work on Windows, hence the
// check on GOOS. Cache sharing is considered a "nice to have" on POSIX, for now.
//
// The cache first creates a temporary file and then renames it to its final name.
// On Windows renaming internally creates a file handle with a shareMode which
// includes FILE_SHARE_DELETE. The Go runtime opens files without FILE_SHARE_DELETE,
// thus Open(fn) will fail until the file handle used for renaming was closed.
// See https://devblogs.microsoft.com/oldnewthing/20211022-00/?p=105822
// for hints on how to fix this properly.
func TestFileSaveConcurrent(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("may not work due to FILE_SHARE_DELETE issue")
}
seed := time.Now().Unix()
t.Logf("seed is %v", seed)
random := rand.New(rand.NewSource(seed))
const nproc = 40
var (
c = TestNewCache(t)
data = rtest.Random(1, 10000)
g errgroup.Group
id restic.ID
)
random.Read(id[:])
h := backend.Handle{
Type: restic.PackFile,
Name: id.String(),
}
for i := 0; i < nproc/2; i++ {
g.Go(func() error { return c.save(h, bytes.NewReader(data)) })
// Can't use load because only the main goroutine may call t.Fatal.
g.Go(func() error {
// The timing is hard to get right, but the main thing we want to
// ensure is ENOENT or nil error.
time.Sleep(time.Duration(100+rand.Intn(200)) * time.Millisecond)
f, _, err := c.load(h, 0, 0)
t.Logf("Load error: %v", err)
switch {
case err == nil:
case errors.Is(err, os.ErrNotExist):
return nil
default:
return err
}
defer func() { _ = f.Close() }()
read, err := io.ReadAll(f)
if err == nil && !bytes.Equal(read, data) {
err = errors.New("mismatch between Save and Load")
}
return err
})
}
rtest.OK(t, g.Wait())
saved := load(t, c, h)
rtest.Equals(t, data, saved)
}
func TestFileSaveAfterDamage(t *testing.T) {
c := TestNewCache(t)
rtest.OK(t, os.RemoveAll(c.path))
// save a few bytes of data in the cache
data := rtest.Random(123456789, 42)
id := restic.Hash(data)
h := backend.Handle{
Type: restic.PackFile,
Name: id.String(),
}
if err := c.save(h, bytes.NewReader(data)); err == nil {
t.Fatal("Missing error when saving to deleted cache directory")
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/cache/backend.go | internal/backend/cache/backend.go | package cache
import (
"context"
"io"
"sync"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
)
// Backend wraps a restic.Backend and adds a cache.
type Backend struct {
backend.Backend
*Cache
// inProgress contains the handle for all files that are currently
// downloaded. The channel in the value is closed as soon as the download
// is finished.
inProgressMutex sync.Mutex
inProgress map[backend.Handle]chan struct{}
errorLog func(string, ...interface{})
}
// ensure Backend implements backend.Backend
var _ backend.Backend = &Backend{}
func newBackend(be backend.Backend, c *Cache, errorLog func(string, ...interface{})) *Backend {
return &Backend{
Backend: be,
Cache: c,
inProgress: make(map[backend.Handle]chan struct{}),
errorLog: errorLog,
}
}
// Remove deletes a file from the backend and the cache if it has been cached.
func (b *Backend) Remove(ctx context.Context, h backend.Handle) error {
debug.Log("cache Remove(%v)", h)
err := b.Backend.Remove(ctx, h)
if err != nil {
return err
}
_, err = b.Cache.remove(h)
return err
}
func autoCacheTypes(h backend.Handle) bool {
switch h.Type {
case backend.IndexFile, backend.SnapshotFile:
return true
case backend.PackFile:
return h.IsMetadata
}
return false
}
// Save stores a new file in the backend and the cache.
func (b *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
if !autoCacheTypes(h) {
return b.Backend.Save(ctx, h, rd)
}
debug.Log("Save(%v): auto-store in the cache", h)
// make sure the reader is at the start
err := rd.Rewind()
if err != nil {
return err
}
// first, save in the backend
err = b.Backend.Save(ctx, h, rd)
if err != nil {
return err
}
// next, save in the cache
err = rd.Rewind()
if err != nil {
return err
}
err = b.Cache.save(h, rd)
if err != nil {
debug.Log("unable to save %v to cache: %v", h, err)
return err
}
return nil
}
func (b *Backend) cacheFile(ctx context.Context, h backend.Handle) error {
finish := make(chan struct{})
b.inProgressMutex.Lock()
other, alreadyDownloading := b.inProgress[h]
if !alreadyDownloading {
b.inProgress[h] = finish
}
b.inProgressMutex.Unlock()
if alreadyDownloading {
debug.Log("readahead %v is already performed by somebody else, delegating...", h)
<-other
debug.Log("download %v finished", h)
return nil
}
defer func() {
// signal other waiting goroutines that the file may now be cached
close(finish)
// remove the finish channel from the map
b.inProgressMutex.Lock()
delete(b.inProgress, h)
b.inProgressMutex.Unlock()
}()
// test again, maybe the file was cached in the meantime
if !b.Cache.Has(h) {
// nope, it's still not in the cache, pull it from the repo and save it
err := b.Backend.Load(ctx, h, 0, 0, func(rd io.Reader) error {
return b.Cache.save(h, rd)
})
if err != nil {
// try to remove from the cache, ignore errors
_, _ = b.Cache.remove(h)
}
return err
}
return nil
}
// loadFromCache will try to load the file from the cache.
func (b *Backend) loadFromCache(h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) (bool, error) {
rd, inCache, err := b.Cache.load(h, length, offset)
if err != nil {
return inCache, err
}
err = consumer(rd)
if err != nil {
_ = rd.Close() // ignore secondary errors
return true, err
}
return true, rd.Close()
}
// Load loads a file from the cache or the backend.
func (b *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) error {
b.inProgressMutex.Lock()
waitForFinish, inProgress := b.inProgress[h]
b.inProgressMutex.Unlock()
if inProgress {
debug.Log("downloading %v is already in progress, waiting for finish", h)
<-waitForFinish
debug.Log("downloading %v finished", h)
}
// try loading from cache without checking that the handle is actually cached
inCache, err := b.loadFromCache(h, length, offset, consumer)
if inCache {
if err != nil {
debug.Log("error loading %v from cache: %v", h, err)
}
// the caller must explicitly use cache.Forget() to remove the cache entry
return err
}
// if we don't automatically cache this file type, fall back to the backend
if !autoCacheTypes(h) {
debug.Log("Load(%v, %v, %v): delegating to backend", h, length, offset)
return b.Backend.Load(ctx, h, length, offset, consumer)
}
debug.Log("auto-store %v in the cache", h)
err = b.cacheFile(ctx, h)
if err != nil {
return err
}
inCache, err = b.loadFromCache(h, length, offset, consumer)
if inCache {
if err != nil {
debug.Log("error loading %v from cache: %v", h, err)
}
return err
}
debug.Log("error caching %v: %v, falling back to backend", h, err)
return b.Backend.Load(ctx, h, length, offset, consumer)
}
// Stat tests whether the backend has a file. If it does not exist but still
// exists in the cache, it is removed from the cache.
func (b *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
debug.Log("cache Stat(%v)", h)
fi, err := b.Backend.Stat(ctx, h)
if err != nil && b.Backend.IsNotExist(err) {
// try to remove from the cache, ignore errors
_, _ = b.Cache.remove(h)
}
return fi, err
}
// IsNotExist returns true if the error is caused by a non-existing file.
func (b *Backend) IsNotExist(err error) bool {
return b.Backend.IsNotExist(err)
}
func (b *Backend) Unwrap() backend.Backend {
return b.Backend
}
func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(f backend.FileInfo) error) error {
if !b.Cache.canBeCached(t) {
return b.Backend.List(ctx, t, fn)
}
// will contain the IDs of the files that are in the repository
ids := restic.NewIDSet()
// wrap the original function to also add the file to the ids set
wrapFn := func(f backend.FileInfo) error {
id, err := restic.ParseID(f.Name)
if err != nil {
// ignore files with invalid name
return nil
}
ids.Insert(id)
// execute the original function
return fn(f)
}
err := b.Backend.List(ctx, t, wrapFn)
if err != nil {
return err
}
if ctx.Err() != nil {
return ctx.Err()
}
// clear the cache for files that are not in the repo anymore, ignore errors
err = b.Cache.Clear(t, ids)
if err != nil {
b.errorLog("error clearing %s files in cache: %v\n", t.String(), err)
}
return nil
}
// Warmup delegates to wrapped backend.
func (b *Backend) Warmup(ctx context.Context, h []backend.Handle) ([]backend.Handle, error) {
return b.Backend.Warmup(ctx, h)
}
// WarmupWait delegates to wrapped backend.
func (b *Backend) WarmupWait(ctx context.Context, h []backend.Handle) error {
return b.Backend.WarmupWait(ctx, h)
}
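// Load decision order (summary, not part of upstream restic):
//  1. wait for any in-progress download of the same handle
//  2. serve from the cache when the file is present
//  3. for types that aren't auto-cached, delegate to the backend
//  4. otherwise download into the cache and serve from there,
//     falling back to the backend if caching failed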
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/cache/testing.go | internal/backend/cache/testing.go | package cache
import (
"testing"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
)
// TestNewCache returns a cache in a temporary directory which is removed when
// cleanup is called.
func TestNewCache(t testing.TB) *Cache {
dir := test.TempDir(t)
t.Logf("created new cache at %v", dir)
cache, err := New(restic.NewRandomID().String(), dir)
if err != nil {
t.Fatal(err)
}
return cache
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/cache/backend_test.go | internal/backend/cache/backend_test.go | package cache
import (
"bytes"
"context"
"io"
"math/rand"
"strings"
"sync"
"testing"
"time"
"github.com/pkg/errors"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/mem"
backendtest "github.com/restic/restic/internal/backend/test"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
)
func loadAndCompare(t testing.TB, be backend.Backend, h backend.Handle, data []byte) {
buf, err := backendtest.LoadAll(context.TODO(), be, h)
if err != nil {
t.Fatal(err)
}
if len(buf) != len(data) {
t.Fatalf("wrong number of bytes read, want %v, got %v", len(data), len(buf))
}
if !bytes.Equal(buf, data) {
t.Fatalf("wrong data returned, want:\n %02x\ngot:\n %02x", data[:16], buf[:16])
}
}
func save(t testing.TB, be backend.Backend, h backend.Handle, data []byte) {
err := be.Save(context.TODO(), h, backend.NewByteReader(data, be.Hasher()))
if err != nil {
t.Fatal(err)
}
}
func remove(t testing.TB, be backend.Backend, h backend.Handle) {
err := be.Remove(context.TODO(), h)
if err != nil {
t.Fatal(err)
}
}
func randomData(n int) (backend.Handle, []byte) {
data := test.Random(rand.Int(), n)
id := restic.Hash(data)
h := backend.Handle{
Type: backend.IndexFile,
Name: id.String(),
}
return h, data
}
func list(t testing.TB, be backend.Backend, fn func(backend.FileInfo) error) {
err := be.List(context.TODO(), backend.IndexFile, fn)
if err != nil {
t.Fatal(err)
}
}
func TestBackend(t *testing.T) {
be := mem.New()
c := TestNewCache(t)
wbe := c.Wrap(be, t.Logf)
h, data := randomData(5234142)
// save directly in backend
save(t, be, h, data)
if c.Has(h) {
t.Errorf("cache has file too early")
}
// load data via cache
loadAndCompare(t, wbe, h, data)
if !c.Has(h) {
t.Errorf("cache doesn't have file after load")
}
// remove via cache
remove(t, wbe, h)
if c.Has(h) {
t.Errorf("cache has file after remove")
}
// save via cache
save(t, wbe, h, data)
if !c.Has(h) {
t.Errorf("cache doesn't have file after load")
}
// load data directly from backend
loadAndCompare(t, be, h, data)
// load data via cache
loadAndCompare(t, wbe, h, data)
// remove directly
remove(t, be, h)
if !c.Has(h) {
t.Errorf("file not in cache any more")
}
// run stat
_, err := wbe.Stat(context.TODO(), h)
if err == nil {
t.Errorf("expected error for removed file not found, got nil")
}
if !wbe.IsNotExist(err) {
t.Errorf("Stat() returned error that does not match IsNotExist(): %v", err)
}
if c.Has(h) {
t.Errorf("removed file still in cache after stat")
}
}
type loadCountingBackend struct {
backend.Backend
ctr int
}
func (l *loadCountingBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
l.ctr++
return l.Backend.Load(ctx, h, length, offset, fn)
}
func TestOutOfBoundsAccess(t *testing.T) {
be := &loadCountingBackend{Backend: mem.New()}
c := TestNewCache(t)
wbe := c.Wrap(be, t.Logf)
h, data := randomData(50)
save(t, be, h, data)
// load out of bounds
err := wbe.Load(context.TODO(), h, 100, 100, func(rd io.Reader) error {
t.Error("cache returned non-existent file section")
return errors.New("broken")
})
test.Assert(t, strings.Contains(err.Error(), " is too short"), "expected too short error, got %v", err)
test.Equals(t, 1, be.ctr, "expected file to be loaded only once")
// file must nevertheless get cached
if !c.Has(h) {
t.Errorf("cache doesn't have file after load")
}
// start within bounds, but request too large chunk
err = wbe.Load(context.TODO(), h, 100, 0, func(rd io.Reader) error {
t.Error("cache returned non-existent file section")
return errors.New("broken")
})
test.Assert(t, strings.Contains(err.Error(), " is too short"), "expected too short error, got %v", err)
test.Equals(t, 1, be.ctr, "expected file to be loaded only once")
}
func TestForget(t *testing.T) {
be := &loadCountingBackend{Backend: mem.New()}
c := TestNewCache(t)
wbe := c.Wrap(be, t.Logf)
h, data := randomData(50)
save(t, be, h, data)
loadAndCompare(t, wbe, h, data)
test.Equals(t, 1, be.ctr, "expected file to be loaded once")
// must still exist even if load returns an error
exp := errors.New("error")
err := wbe.Load(context.TODO(), h, 0, 0, func(rd io.Reader) error {
return exp
})
test.Equals(t, exp, err, "wrong error")
test.Assert(t, c.Has(h), "missing cache entry")
test.OK(t, c.Forget(h))
test.Assert(t, !c.Has(h), "cache entry should have been removed")
// cache it again
loadAndCompare(t, wbe, h, data)
test.Assert(t, c.Has(h), "missing cache entry")
// forget must delete file only once
err = c.Forget(h)
test.Assert(t, strings.Contains(err.Error(), "circuit breaker prevents repeated deletion of cached file"), "wrong error message %q", err)
test.Assert(t, c.Has(h), "cache entry should still exist")
}
type loadErrorBackend struct {
backend.Backend
loadError error
}
func (be loadErrorBackend) Load(_ context.Context, _ backend.Handle, _ int, _ int64, _ func(rd io.Reader) error) error {
time.Sleep(10 * time.Millisecond)
return be.loadError
}
func TestErrorBackend(t *testing.T) {
be := mem.New()
c := TestNewCache(t)
h, data := randomData(5234142)
// save directly in backend
save(t, be, h, data)
testErr := errors.New("test error")
errBackend := loadErrorBackend{
Backend: be,
loadError: testErr,
}
loadTest := func(wg *sync.WaitGroup, be backend.Backend) {
defer wg.Done()
buf, err := backendtest.LoadAll(context.TODO(), be, h)
if err == testErr {
return
}
if err != nil {
t.Error(err)
return
}
if !bytes.Equal(buf, data) {
t.Errorf("data does not match")
}
time.Sleep(time.Millisecond)
}
wrappedBE := c.Wrap(errBackend, t.Logf)
var wg sync.WaitGroup
for i := 0; i < 5; i++ {
wg.Add(1)
go loadTest(&wg, wrappedBE)
}
wg.Wait()
}
func TestAutomaticCacheClear(t *testing.T) {
be := mem.New()
c := TestNewCache(t)
wbe := c.Wrap(be, t.Logf)
// add two handles h1 and h2
h1, data := randomData(2000)
// save h1 directly to the backend
save(t, be, h1, data)
if c.Has(h1) {
t.Errorf("cache has file1 too early")
}
h2, data2 := randomData(3000)
// save h2 directly to the backend
save(t, be, h2, data2)
if c.Has(h2) {
t.Errorf("cache has file2 too early")
}
loadAndCompare(t, wbe, h1, data)
if !c.Has(h1) {
t.Errorf("cache doesn't have file1 after load")
}
loadAndCompare(t, wbe, h2, data2)
if !c.Has(h2) {
t.Errorf("cache doesn't have file2 after load")
}
// remove h1 directly from the backend
remove(t, be, h1)
if !c.Has(h1) {
t.Errorf("file1 not in cache any more, should be removed from cache only after list")
}
// list all files in the backend
list(t, wbe, func(_ backend.FileInfo) error { return nil })
// h1 should be removed from the cache
if c.Has(h1) {
t.Errorf("cache has file1 after remove")
}
// h2 should still be in the cache
if !c.Has(h2) {
t.Errorf("cache doesn't have file2 after list")
}
}
func TestAutomaticCacheClearInvalidFilename(t *testing.T) {
be := mem.New()
c := TestNewCache(t)
data := test.Random(rand.Int(), 42)
h := backend.Handle{
Type: backend.IndexFile,
Name: "tmp12345",
}
save(t, be, h, data)
wbe := c.Wrap(be, t.Logf)
// list all files in the backend
list(t, wbe, func(_ backend.FileInfo) error { return nil })
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/cache/dir.go | internal/backend/cache/dir.go | package cache
import (
"fmt"
"os"
"path/filepath"
)
// EnvDir returns the value of the RESTIC_CACHE_DIR environment variable.
func EnvDir() string {
return os.Getenv("RESTIC_CACHE_DIR")
}
// DefaultDir returns $RESTIC_CACHE_DIR, or the default cache directory
// for the current OS if that variable is not set.
func DefaultDir() (cachedir string, err error) {
cachedir = EnvDir()
if cachedir != "" {
return cachedir, nil
}
cachedir, err = os.UserCacheDir()
if err != nil {
return "", fmt.Errorf("unable to locate cache directory: %v", err)
}
return filepath.Join(cachedir, "restic"), nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/sema/semaphore.go | internal/backend/sema/semaphore.go | // Package sema implements semaphores.
package sema
import (
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
)
// A semaphore limits access to a restricted resource.
type semaphore struct {
ch chan struct{}
}
// newSemaphore returns a new semaphore with capacity n.
func newSemaphore(n uint) (semaphore, error) {
if n == 0 {
return semaphore{}, errors.New("capacity must be a positive number")
}
return semaphore{
ch: make(chan struct{}, n),
}, nil
}
// GetToken blocks until a token is available.
func (s semaphore) GetToken() {
s.ch <- struct{}{}
debug.Log("acquired token")
}
// ReleaseToken returns a token.
func (s semaphore) ReleaseToken() { <-s.ch }
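// Illustrative usage (not part of upstream restic): callers bracket an
// operation with GetToken/ReleaseToken to bound concurrency:
//
//	sem, _ := newSemaphore(5)
//	sem.GetToken()
//	defer sem.ReleaseToken()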
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/sema/backend.go | internal/backend/sema/backend.go | package sema
import (
"context"
"io"
"sync"
"github.com/cenkalti/backoff/v4"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
)
// make sure that connectionLimitedBackend implements backend.Backend
var _ backend.Backend = &connectionLimitedBackend{}
// connectionLimitedBackend limits the number of concurrent operations.
type connectionLimitedBackend struct {
backend.Backend
sem semaphore
freezeLock sync.Mutex
}
// NewBackend creates a backend that limits the concurrent operations on the underlying backend
func NewBackend(be backend.Backend) backend.Backend {
sem, err := newSemaphore(be.Properties().Connections)
if err != nil {
panic(err)
}
return &connectionLimitedBackend{
Backend: be,
sem: sem,
}
}
// typeDependentLimit acquires a token unless the FileType is a lock file. The returned function
// must be called to release the token.
func (be *connectionLimitedBackend) typeDependentLimit(t backend.FileType) func() {
// allow concurrent lock file operations to ensure that the lock refresh is always possible
if t == backend.LockFile {
return func() {}
}
be.sem.GetToken()
// prevent token usage while the backend is frozen
be.freezeLock.Lock()
defer be.freezeLock.Unlock()
return be.sem.ReleaseToken
}
// Freeze blocks all backend operations except those on lock files
func (be *connectionLimitedBackend) Freeze() {
be.freezeLock.Lock()
}
// Unfreeze allows all backend operations to continue
func (be *connectionLimitedBackend) Unfreeze() {
be.freezeLock.Unlock()
}
// Save adds new Data to the backend.
func (be *connectionLimitedBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
if err := h.Valid(); err != nil {
return backoff.Permanent(err)
}
defer be.typeDependentLimit(h.Type)()
if ctx.Err() != nil {
return ctx.Err()
}
return be.Backend.Save(ctx, h, rd)
}
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (be *connectionLimitedBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
if err := h.Valid(); err != nil {
return backoff.Permanent(err)
}
if offset < 0 {
return backoff.Permanent(errors.New("offset is negative"))
}
if length < 0 {
return backoff.Permanent(errors.Errorf("invalid length %d", length))
}
defer be.typeDependentLimit(h.Type)()
if ctx.Err() != nil {
return ctx.Err()
}
return be.Backend.Load(ctx, h, length, offset, fn)
}
// Stat returns information about a file in the backend.
func (be *connectionLimitedBackend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
if err := h.Valid(); err != nil {
return backend.FileInfo{}, backoff.Permanent(err)
}
defer be.typeDependentLimit(h.Type)()
if ctx.Err() != nil {
return backend.FileInfo{}, ctx.Err()
}
return be.Backend.Stat(ctx, h)
}
// Remove deletes a file from the backend.
func (be *connectionLimitedBackend) Remove(ctx context.Context, h backend.Handle) error {
if err := h.Valid(); err != nil {
return backoff.Permanent(err)
}
defer be.typeDependentLimit(h.Type)()
if ctx.Err() != nil {
return ctx.Err()
}
return be.Backend.Remove(ctx, h)
}
func (be *connectionLimitedBackend) Unwrap() backend.Backend {
return be.Backend
}
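// Editor's note — a minimal illustrative sketch, not part of the original
// source: how Freeze pairs with the limiter above. While frozen, every
// non-lock operation queues inside typeDependentLimit, but lock-file
// operations still pass, so a concurrent lock refresh cannot deadlock.
func exampleFreeze(be backend.Backend) {
fb, ok := be.(backend.FreezeBackend)
if !ok {
return
}
fb.Freeze() // pack/index/etc. operations now block in typeDependentLimit
// ... perform work that must not race with other backend operations ...
fb.Unfreeze() // queued operations resume
}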
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/sema/backend_test.go | internal/backend/sema/backend_test.go | package sema_test
import (
"context"
"io"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/mock"
"github.com/restic/restic/internal/backend/sema"
"github.com/restic/restic/internal/test"
"golang.org/x/sync/errgroup"
)
func TestParameterValidationSave(t *testing.T) {
m := mock.NewBackend()
m.SaveFn = func(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
return nil
}
be := sema.NewBackend(m)
err := be.Save(context.TODO(), backend.Handle{}, nil)
test.Assert(t, err != nil, "Save() with invalid handle did not return an error")
}
func TestParameterValidationLoad(t *testing.T) {
m := mock.NewBackend()
m.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
return io.NopCloser(nil), nil
}
be := sema.NewBackend(m)
nilCb := func(rd io.Reader) error { return nil }
err := be.Load(context.TODO(), backend.Handle{}, 10, 0, nilCb)
test.Assert(t, err != nil, "Load() with invalid handle did not return an error")
h := backend.Handle{Type: backend.PackFile, Name: "foobar"}
err = be.Load(context.TODO(), h, 10, -1, nilCb)
test.Assert(t, err != nil, "Save() with negative offset did not return an error")
err = be.Load(context.TODO(), h, -1, 0, nilCb)
test.Assert(t, err != nil, "Save() with negative length did not return an error")
}
func TestParameterValidationStat(t *testing.T) {
m := mock.NewBackend()
m.StatFn = func(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
return backend.FileInfo{}, nil
}
be := sema.NewBackend(m)
_, err := be.Stat(context.TODO(), backend.Handle{})
test.Assert(t, err != nil, "Stat() with invalid handle did not return an error")
}
func TestParameterValidationRemove(t *testing.T) {
m := mock.NewBackend()
m.RemoveFn = func(ctx context.Context, h backend.Handle) error {
return nil
}
be := sema.NewBackend(m)
err := be.Remove(context.TODO(), backend.Handle{})
test.Assert(t, err != nil, "Remove() with invalid handle did not return an error")
}
func TestUnwrap(t *testing.T) {
m := mock.NewBackend()
be := sema.NewBackend(m)
unwrapper := be.(backend.Unwrapper)
test.Assert(t, unwrapper.Unwrap() == m, "Unwrap() returned wrong backend")
}
func countingBlocker() (func(), func(int) int) {
ctr := int64(0)
blocker := make(chan struct{})
wait := func() {
// count how many goroutines were allowed by the semaphore
atomic.AddInt64(&ctr, 1)
// block until the test can retrieve the counter
<-blocker
}
unblock := func(expected int) int {
// give goroutines enough time to block
var blocked int64
for i := 0; i < 100 && blocked < int64(expected); i++ {
time.Sleep(100 * time.Microsecond)
blocked = atomic.LoadInt64(&ctr)
}
close(blocker)
return int(blocked)
}
return wait, unblock
}
func concurrencyTester(t *testing.T, setup func(m *mock.Backend), handler func(be backend.Backend) func() error, unblock func(int) int, isUnlimited bool) {
expectBlocked := 2
workerCount := expectBlocked + 1
m := mock.NewBackend()
setup(m)
m.PropertiesFn = func() backend.Properties {
return backend.Properties{
Connections: uint(expectBlocked),
HasAtomicReplace: false,
}
}
be := sema.NewBackend(m)
var wg errgroup.Group
for i := 0; i < workerCount; i++ {
wg.Go(handler(be))
}
if isUnlimited {
expectBlocked = workerCount
}
blocked := unblock(expectBlocked)
test.Assert(t, blocked == expectBlocked, "Unexpected number of goroutines blocked: %v", blocked)
test.OK(t, wg.Wait())
}
func TestConcurrencyLimitSave(t *testing.T) {
wait, unblock := countingBlocker()
concurrencyTester(t, func(m *mock.Backend) {
m.SaveFn = func(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
wait()
return nil
}
}, func(be backend.Backend) func() error {
return func() error {
h := backend.Handle{Type: backend.PackFile, Name: "foobar"}
return be.Save(context.TODO(), h, nil)
}
}, unblock, false)
}
func TestConcurrencyLimitLoad(t *testing.T) {
wait, unblock := countingBlocker()
concurrencyTester(t, func(m *mock.Backend) {
m.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
wait()
return io.NopCloser(nil), nil
}
}, func(be backend.Backend) func() error {
return func() error {
h := backend.Handle{Type: backend.PackFile, Name: "foobar"}
nilCb := func(rd io.Reader) error { return nil }
return be.Load(context.TODO(), h, 10, 0, nilCb)
}
}, unblock, false)
}
func TestConcurrencyLimitStat(t *testing.T) {
wait, unblock := countingBlocker()
concurrencyTester(t, func(m *mock.Backend) {
m.StatFn = func(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
wait()
return backend.FileInfo{}, nil
}
}, func(be backend.Backend) func() error {
return func() error {
h := backend.Handle{Type: backend.PackFile, Name: "foobar"}
_, err := be.Stat(context.TODO(), h)
return err
}
}, unblock, false)
}
func TestConcurrencyLimitDelete(t *testing.T) {
wait, unblock := countingBlocker()
concurrencyTester(t, func(m *mock.Backend) {
m.RemoveFn = func(ctx context.Context, h backend.Handle) error {
wait()
return nil
}
}, func(be backend.Backend) func() error {
return func() error {
h := backend.Handle{Type: backend.PackFile, Name: "foobar"}
return be.Remove(context.TODO(), h)
}
}, unblock, false)
}
func TestConcurrencyUnlimitedLockSave(t *testing.T) {
wait, unblock := countingBlocker()
concurrencyTester(t, func(m *mock.Backend) {
m.SaveFn = func(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
wait()
return nil
}
}, func(be backend.Backend) func() error {
return func() error {
h := backend.Handle{Type: backend.LockFile, Name: "foobar"}
return be.Save(context.TODO(), h, nil)
}
}, unblock, true)
}
func TestFreeze(t *testing.T) {
var counter int64
m := mock.NewBackend()
m.SaveFn = func(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
atomic.AddInt64(&counter, 1)
return nil
}
m.PropertiesFn = func() backend.Properties {
return backend.Properties{
Connections: 2,
HasAtomicReplace: false,
}
}
be := sema.NewBackend(m)
fb := be.(backend.FreezeBackend)
// Freeze backend
fb.Freeze()
// Start Save call that should block
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
h := backend.Handle{Type: backend.PackFile, Name: "foobar"}
test.OK(t, be.Save(context.TODO(), h, nil))
}()
// check
time.Sleep(1 * time.Millisecond)
val := atomic.LoadInt64(&counter)
test.Assert(t, val == 0, "save call worked despite frozen backend")
// unfreeze and check that save did complete
fb.Unfreeze()
wg.Wait()
val = atomic.LoadInt64(&counter)
test.Assert(t, val == 1, "save call should have completed")
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/mock/backend.go | internal/backend/mock/backend.go | package mock
import (
"context"
"hash"
"io"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
)
// Backend implements a mock backend.
type Backend struct {
CloseFn func() error
IsNotExistFn func(err error) bool
IsPermanentErrorFn func(err error) bool
SaveFn func(ctx context.Context, h backend.Handle, rd backend.RewindReader) error
OpenReaderFn func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error)
StatFn func(ctx context.Context, h backend.Handle) (backend.FileInfo, error)
ListFn func(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error
RemoveFn func(ctx context.Context, h backend.Handle) error
DeleteFn func(ctx context.Context) error
WarmupFn func(ctx context.Context, h []backend.Handle) ([]backend.Handle, error)
WarmupWaitFn func(ctx context.Context, h []backend.Handle) error
PropertiesFn func() backend.Properties
HasherFn func() hash.Hash
}
// NewBackend returns new mock Backend instance
func NewBackend() *Backend {
be := &Backend{}
return be
}
// Close the backend.
func (m *Backend) Close() error {
if m.CloseFn == nil {
return nil
}
return m.CloseFn()
}
func (m *Backend) Properties() backend.Properties {
if m.PropertiesFn == nil {
return backend.Properties{
Connections: 2,
HasAtomicReplace: false,
}
}
return m.PropertiesFn()
}
// Hasher may return a hash function for calculating a content hash for the backend
func (m *Backend) Hasher() hash.Hash {
if m.HasherFn == nil {
return nil
}
return m.HasherFn()
}
// IsNotExist returns true if the error is caused by a missing file.
func (m *Backend) IsNotExist(err error) bool {
if m.IsNotExistFn == nil {
return false
}
return m.IsNotExistFn(err)
}
func (m *Backend) IsPermanentError(err error) bool {
if m.IsPermanentErrorFn == nil {
return false
}
return m.IsPermanentErrorFn(err)
}
// Save data in the backend.
func (m *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
if m.SaveFn == nil {
return errors.New("not implemented")
}
return m.SaveFn(ctx, h, rd)
}
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (m *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
rd, err := m.openReader(ctx, h, length, offset)
if err != nil {
return err
}
err = fn(rd)
if err != nil {
_ = rd.Close() // ignore secondary errors closing the reader
return err
}
return rd.Close()
}
func (m *Backend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
if m.OpenReaderFn == nil {
return nil, errors.New("not implemented")
}
return m.OpenReaderFn(ctx, h, length, offset)
}
// Stat an object in the backend.
func (m *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
if m.StatFn == nil {
return backend.FileInfo{}, errors.New("not implemented")
}
return m.StatFn(ctx, h)
}
// List items of type t.
func (m *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
if m.ListFn == nil {
return nil
}
return m.ListFn(ctx, t, fn)
}
// Remove data from the backend.
func (m *Backend) Remove(ctx context.Context, h backend.Handle) error {
if m.RemoveFn == nil {
return errors.New("not implemented")
}
return m.RemoveFn(ctx, h)
}
// Delete all data.
func (m *Backend) Delete(ctx context.Context) error {
if m.DeleteFn == nil {
return errors.New("not implemented")
}
return m.DeleteFn(ctx)
}
func (m *Backend) Warmup(ctx context.Context, h []backend.Handle) ([]backend.Handle, error) {
if m.WarmupFn == nil {
return []backend.Handle{}, errors.New("not implemented")
}
return m.WarmupFn(ctx, h)
}
func (m *Backend) WarmupWait(ctx context.Context, h []backend.Handle) error {
if m.WarmupWaitFn == nil {
return errors.New("not implemented")
}
return m.WarmupWaitFn(ctx, h)
}
// Make sure that Backend implements the backend interface.
var _ backend.Backend = &Backend{}
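// Editor's note — a minimal illustrative sketch, not part of the original
// source: configuring a mock. Only the assigned Fn fields are implemented;
// every other operation keeps the default behaviour defined above (an
// error, a nil hash, or an empty result).
func exampleMock() *Backend {
m := NewBackend()
m.StatFn = func(_ context.Context, h backend.Handle) (backend.FileInfo, error) {
return backend.FileInfo{Name: h.Name, Size: 42}, nil
}
return m
}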
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/azure/azure_test.go | internal/backend/azure/azure_test.go | package azure_test
import (
"bytes"
"context"
"fmt"
"io"
"os"
"testing"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/azure"
"github.com/restic/restic/internal/backend/test"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
func newAzureTestSuite() *test.Suite[azure.Config] {
return &test.Suite[azure.Config]{
// do not use excessive data
MinimalData: true,
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig: func() (*azure.Config, error) {
cfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY"))
if err != nil {
return nil, err
}
cfg.ApplyEnvironment("RESTIC_TEST_")
cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
return cfg, nil
},
Factory: azure.NewFactory(),
}
}
func TestBackendAzure(t *testing.T) {
defer func() {
if t.Skipped() {
rtest.SkipDisallowed(t, "restic/backend/azure.TestBackendAzure")
}
}()
vars := []string{
"RESTIC_TEST_AZURE_ACCOUNT_NAME",
"RESTIC_TEST_AZURE_ACCOUNT_KEY",
"RESTIC_TEST_AZURE_REPOSITORY",
}
for _, v := range vars {
if os.Getenv(v) == "" {
t.Skipf("environment variable %v not set", v)
return
}
}
t.Logf("run tests")
newAzureTestSuite().RunTests(t)
}
func BenchmarkBackendAzure(t *testing.B) {
vars := []string{
"RESTIC_TEST_AZURE_ACCOUNT_NAME",
"RESTIC_TEST_AZURE_ACCOUNT_KEY",
"RESTIC_TEST_AZURE_REPOSITORY",
}
for _, v := range vars {
if os.Getenv(v) == "" {
t.Skipf("environment variable %v not set", v)
return
}
}
t.Logf("run tests")
newAzureTestSuite().RunBenchmarks(t)
}
// TestBackendAzureAccountToken tests that a Storage Account SAS/SAT token can authorize.
// This test ensures that a token generated using the storage account
// keys can be used to authorize the azure connection.
// Requires the RESTIC_TEST_AZURE_ACCOUNT_NAME, RESTIC_TEST_AZURE_REPOSITORY, and the
// RESTIC_TEST_AZURE_ACCOUNT_SAS environment variables to be set, otherwise this test
// will be skipped.
func TestBackendAzureAccountToken(t *testing.T) {
vars := []string{
"RESTIC_TEST_AZURE_ACCOUNT_NAME",
"RESTIC_TEST_AZURE_REPOSITORY",
"RESTIC_TEST_AZURE_ACCOUNT_SAS",
}
for _, v := range vars {
if os.Getenv(v) == "" {
t.Skipf("set %v to test SAS/SAT Token Authentication", v)
return
}
}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
cfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY"))
if err != nil {
t.Fatal(err)
}
cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME")
cfg.AccountSAS = options.NewSecretString(os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_SAS"))
tr, err := backend.Transport(backend.TransportOptions{})
if err != nil {
t.Fatal(err)
}
_, err = azure.Create(ctx, *cfg, tr, t.Logf)
if err != nil {
t.Fatal(err)
}
}
// TestBackendAzureContainerToken tests that a container SAS/SAT token can authorize.
// This test ensures that a token generated using a user delegation key
// for the container we are storing data in can be used to authorize the
// azure connection.
// Requires the RESTIC_TEST_AZURE_ACCOUNT_NAME, RESTIC_TEST_AZURE_REPOSITORY, and the
// RESTIC_TEST_AZURE_CONTAINER_SAS environment variables to be set, otherwise this test
// will be skipped.
func TestBackendAzureContainerToken(t *testing.T) {
vars := []string{
"RESTIC_TEST_AZURE_ACCOUNT_NAME",
"RESTIC_TEST_AZURE_REPOSITORY",
"RESTIC_TEST_AZURE_CONTAINER_SAS",
}
for _, v := range vars {
if os.Getenv(v) == "" {
t.Skipf("set %v to test SAS/SAT Token Authentication", v)
return
}
}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
cfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY"))
if err != nil {
t.Fatal(err)
}
cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME")
cfg.AccountSAS = options.NewSecretString(os.Getenv("RESTIC_TEST_AZURE_CONTAINER_SAS"))
tr, err := backend.Transport(backend.TransportOptions{})
if err != nil {
t.Fatal(err)
}
_, err = azure.Create(ctx, *cfg, tr, t.Logf)
if err != nil {
t.Fatal(err)
}
}
func TestUploadLargeFile(t *testing.T) {
if os.Getenv("RESTIC_AZURE_TEST_LARGE_UPLOAD") == "" {
t.Skip("set RESTIC_AZURE_TEST_LARGE_UPLOAD=1 to test large uploads")
return
}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
if os.Getenv("RESTIC_TEST_AZURE_REPOSITORY") == "" {
t.Skipf("environment variables not available")
return
}
cfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY"))
if err != nil {
t.Fatal(err)
}
cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME")
cfg.AccountKey = options.NewSecretString(os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_KEY"))
cfg.Prefix = fmt.Sprintf("test-upload-large-%d", time.Now().UnixNano())
tr, err := backend.Transport(backend.TransportOptions{})
if err != nil {
t.Fatal(err)
}
be, err := azure.Create(ctx, *cfg, tr, t.Logf)
if err != nil {
t.Fatal(err)
}
defer func() {
err := be.Delete(ctx)
if err != nil {
t.Fatal(err)
}
}()
data := rtest.Random(23, 300*1024*1024)
id := restic.Hash(data)
h := backend.Handle{Name: id.String(), Type: backend.PackFile}
t.Logf("hash of %d bytes: %v", len(data), id)
err = be.Save(ctx, h, backend.NewByteReader(data, be.Hasher()))
if err != nil {
t.Fatal(err)
}
defer func() {
err := be.Remove(ctx, h)
if err != nil {
t.Fatal(err)
}
}()
var tests = []struct {
offset, length int
}{
{0, len(data)},
{23, 1024},
{23 + 100*1024, 500},
{888 + 200*1024, 89999},
{888 + 100*1024*1024, 120 * 1024 * 1024},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
want := data[test.offset : test.offset+test.length]
buf := make([]byte, test.length)
err = be.Load(ctx, h, test.length, int64(test.offset), func(rd io.Reader) error {
_, err = io.ReadFull(rd, buf)
return err
})
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, want) {
t.Fatalf("wrong bytes returned")
}
})
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/azure/config.go | internal/backend/azure/config.go | package azure
import (
"os"
"path"
"strconv"
"strings"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
)
// Config contains all configuration necessary to connect to an azure compatible
// server.
type Config struct {
AccountName string
AccountSAS options.SecretString
AccountKey options.SecretString
ForceCliCredential bool
EndpointSuffix string
Container string
Prefix string
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
AccessTier string `option:"access-tier" help:"set the access tier for the blob storage (default: inferred from the storage account defaults)"`
}
// NewConfig returns a new Config with the default values filled in.
func NewConfig() Config {
return Config{
Connections: 5,
}
}
func init() {
options.Register("azure", Config{})
}
// ParseConfig parses the string s and extracts the azure config. The
// configuration format is azure:containerName:/[prefix].
func ParseConfig(s string) (*Config, error) {
if !strings.HasPrefix(s, "azure:") {
return nil, errors.New("azure: invalid format")
}
// strip prefix "azure:"
s = s[6:]
// use the first entry of the path as the container name and the
// remainder as the prefix
container, prefix, colon := strings.Cut(s, ":")
if !colon {
return nil, errors.New("azure: invalid format: bucket name or path not found")
}
prefix = strings.TrimPrefix(path.Clean(prefix), "/")
cfg := NewConfig()
cfg.Container = container
cfg.Prefix = prefix
return &cfg, nil
}
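// Editor's note — a minimal illustrative sketch, not part of the original
// source: per the format above, "azure:foo:/bar/baz" parses to Container
// "foo" and Prefix "bar/baz", with Connections keeping its default of 5.
func exampleParseConfig() (*Config, error) {
return ParseConfig("azure:foo:/bar/baz")
}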
var _ backend.ApplyEnvironmenter = &Config{}
// ApplyEnvironment saves values from the environment to the config.
func (cfg *Config) ApplyEnvironment(prefix string) {
if cfg.AccountName == "" {
cfg.AccountName = os.Getenv(prefix + "AZURE_ACCOUNT_NAME")
}
if cfg.AccountKey.String() == "" {
cfg.AccountKey = options.NewSecretString(os.Getenv(prefix + "AZURE_ACCOUNT_KEY"))
}
if cfg.AccountSAS.String() == "" {
cfg.AccountSAS = options.NewSecretString(os.Getenv(prefix + "AZURE_ACCOUNT_SAS"))
}
var forceCliCred, err = strconv.ParseBool(os.Getenv(prefix + "AZURE_FORCE_CLI_CREDENTIAL"))
if err == nil {
cfg.ForceCliCredential = forceCliCred
}
if cfg.EndpointSuffix == "" {
cfg.EndpointSuffix = os.Getenv(prefix + "AZURE_ENDPOINT_SUFFIX")
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/azure/azure.go | internal/backend/azure/azure.go | package azure
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"fmt"
"hash"
"io"
"net/http"
"path"
"strings"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/layout"
"github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/backend/util"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
azContainer "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)
// Backend stores data on an azure endpoint.
type Backend struct {
cfg Config
container *azContainer.Client
connections uint
prefix string
listMaxItems int
layout.Layout
accessTier blob.AccessTier
}
const singleUploadMaxSize = 256 * 1024 * 1024
const singleBlockMaxSize = 100 * 1024 * 1024
const defaultListMaxItems = 5000
// make sure that *Backend implements backend.Backend
var _ backend.Backend = &Backend{}
func NewFactory() location.Factory {
return location.NewHTTPBackendFactory("azure", ParseConfig, location.NoPassword, Create, Open)
}
func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
debug.Log("open, config %#v", cfg)
var client *azContainer.Client
var err error
var endpointSuffix string
if cfg.EndpointSuffix != "" {
endpointSuffix = cfg.EndpointSuffix
} else {
endpointSuffix = "core.windows.net"
}
if cfg.AccountName == "" {
return nil, errors.Fatalf("unable to open Azure backend: Account name ($AZURE_ACCOUNT_NAME) is empty")
}
url := fmt.Sprintf("https://%s.blob.%s/%s", cfg.AccountName, endpointSuffix, cfg.Container)
opts := &azContainer.ClientOptions{
ClientOptions: azcore.ClientOptions{
Transport: &http.Client{Transport: rt},
},
}
if cfg.AccountKey.String() != "" {
// We have an account key value; construct the container client
// using shared key credentials.
debug.Log(" - using account key")
cred, err := azblob.NewSharedKeyCredential(cfg.AccountName, cfg.AccountKey.Unwrap())
if err != nil {
return nil, errors.Wrap(err, "NewSharedKeyCredential")
}
client, err = azContainer.NewClientWithSharedKeyCredential(url, cred, opts)
if err != nil {
return nil, errors.Wrap(err, "NewClientWithSharedKeyCredential")
}
} else if cfg.AccountSAS.String() != "" {
// Get the client using the SAS token for authentication; this is
// more long-winded than above because the SDK wants a URL for the
// account when using a SAS token, not just the account name.
// We (as per the SDK) assume the default Azure endpoint.
// https://github.com/Azure/azure-storage-blob-go/issues/130
debug.Log(" - using sas token")
sas := cfg.AccountSAS.Unwrap()
// strip query sign prefix
if sas[0] == '?' {
sas = sas[1:]
}
urlWithSAS := fmt.Sprintf("%s?%s", url, sas)
client, err = azContainer.NewClientWithNoCredential(urlWithSAS, opts)
if err != nil {
return nil, errors.Wrap(err, "NewAccountSASClientFromEndpointToken")
}
} else {
var cred azcore.TokenCredential
if cfg.ForceCliCredential {
debug.Log(" - using AzureCLICredential")
cred, err = azidentity.NewAzureCLICredential(nil)
if err != nil {
return nil, errors.Wrap(err, "NewAzureCLICredential")
}
} else {
debug.Log(" - using DefaultAzureCredential")
cred, err = azidentity.NewDefaultAzureCredential(nil)
if err != nil {
return nil, errors.Wrap(err, "NewDefaultAzureCredential")
}
}
client, err = azContainer.NewClient(url, cred, opts)
if err != nil {
return nil, errors.Wrap(err, "NewClient")
}
}
var accessTier blob.AccessTier
// if the access tier is not supported, then we will not set the access tier; during the upload process,
// the value will be inferred from the default configured on the storage account.
for _, tier := range supportedAccessTiers() {
if strings.EqualFold(string(tier), cfg.AccessTier) {
accessTier = tier
debug.Log(" - using access tier %v", accessTier)
break
}
}
be := &Backend{
container: client,
cfg: cfg,
connections: cfg.Connections,
prefix: cfg.Prefix,
Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join),
listMaxItems: defaultListMaxItems,
accessTier: accessTier,
}
return be, nil
}
func supportedAccessTiers() []blob.AccessTier {
return []blob.AccessTier{blob.AccessTierHot, blob.AccessTierCool, blob.AccessTierCold, blob.AccessTierArchive}
}
// Open opens the Azure backend at the specified container.
func Open(_ context.Context, cfg Config, rt http.RoundTripper, _ func(string, ...interface{})) (*Backend, error) {
return open(cfg, rt)
}
// Create opens the Azure backend at the specified container and creates the container if
// it does not exist yet.
func Create(ctx context.Context, cfg Config, rt http.RoundTripper, _ func(string, ...interface{})) (*Backend, error) {
be, err := open(cfg, rt)
if err != nil {
return nil, errors.Wrap(err, "open")
}
_, err = be.container.GetProperties(ctx, &azContainer.GetPropertiesOptions{})
if err != nil && bloberror.HasCode(err, bloberror.ContainerNotFound) {
_, err = be.container.Create(ctx, &azContainer.CreateOptions{})
if err != nil {
return nil, errors.Wrap(err, "container.Create")
}
} else if err != nil && bloberror.HasCode(err, bloberror.AuthorizationFailure) {
// We ignore this authorization failure, as it is related to the type
// of SAS/SAT token, not an actual failure. If the token is invalid, we
// fail later on anyway.
// For details see Issue #4004.
debug.Log("Ignoring AuthorizationFailure when calling GetProperties")
} else if err != nil {
return be, errors.Wrap(err, "container.GetProperties")
}
return be, nil
}
// SetListMaxItems sets the number of list items to load per request.
func (be *Backend) SetListMaxItems(i int) {
be.listMaxItems = i
}
// IsNotExist returns true if the error is caused by a non-existing file.
func (be *Backend) IsNotExist(err error) bool {
return bloberror.HasCode(err, bloberror.BlobNotFound)
}
func (be *Backend) IsPermanentError(err error) bool {
if be.IsNotExist(err) {
return true
}
var aerr *azcore.ResponseError
if errors.As(err, &aerr) {
if aerr.StatusCode == http.StatusRequestedRangeNotSatisfiable || aerr.StatusCode == http.StatusUnauthorized || aerr.StatusCode == http.StatusForbidden {
return true
}
}
return false
}
func (be *Backend) Properties() backend.Properties {
return backend.Properties{
Connections: be.connections,
HasAtomicReplace: true,
}
}
// Hasher may return a hash function for calculating a content hash for the backend
func (be *Backend) Hasher() hash.Hash {
return md5.New()
}
// Path returns the prefix within the container that is used for this backend.
func (be *Backend) Path() string {
return be.prefix
}
// useAccessTier determines whether to apply the configured access tier to a given file.
// For archive access tier, only data files are stored using that class; metadata
// must remain instantly accessible.
func (be *Backend) useAccessTier(h backend.Handle) bool {
notArchiveClass := !strings.EqualFold(be.cfg.AccessTier, "archive")
isDataFile := h.Type == backend.PackFile && !h.IsMetadata
return isDataFile || notArchiveClass
}
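// Editor's note — a minimal illustrative sketch, not part of the original
// source: consequences of the rule above when access-tier is "archive".
func exampleTierDecisions() {
be := &Backend{cfg: Config{AccessTier: "archive"}}
data := backend.Handle{Type: backend.PackFile} // data pack file
meta := backend.Handle{Type: backend.PackFile, IsMetadata: true} // tree pack file
_ = be.useAccessTier(data) // true: archive tier is applied
_ = be.useAccessTier(meta) // false: tier left unset, stays instantly readable
}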
// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
objName := be.Filename(h)
debug.Log("InsertObject(%v, %v)", be.cfg.AccountName, objName)
var accessTier blob.AccessTier
if be.useAccessTier(h) {
accessTier = be.accessTier
}
var err error
fileSize := rd.Length()
// If the file size is less than or equal to the max size for a single blob, use the single blob upload
// otherwise, use the block-based upload
if fileSize <= singleUploadMaxSize {
err = be.saveSingleBlob(ctx, objName, rd, accessTier)
} else {
err = be.saveLarge(ctx, objName, rd, accessTier)
}
return err
}
// saveSingleBlob uploads data using a single Put Blob operation.
// This method is more efficient for smaller files (restic uses it for uploads up to
// singleUploadMaxSize) as it requires only one API call instead of the two calls
// (StageBlock + CommitBlockList) required by the block-based approach.
func (be *Backend) saveSingleBlob(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error {
blockBlobClient := be.container.NewBlockBlobClient(objName)
buf := make([]byte, rd.Length())
_, err := io.ReadFull(rd, buf)
if err != nil {
return errors.Wrap(err, "ReadFull")
}
reader := bytes.NewReader(buf)
opts := &blockblob.UploadOptions{
Tier: &accessTier,
TransactionalValidation: blob.TransferValidationTypeMD5(rd.Hash()),
}
debug.Log("Upload single blob %v with %d bytes", objName, len(buf))
_, err = blockBlobClient.Upload(ctx, streaming.NopCloser(reader), opts)
return errors.Wrap(err, "Upload")
}
func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error {
blockBlobClient := be.container.NewBlockBlobClient(objName)
buf := make([]byte, singleBlockMaxSize)
blocks := []string{}
uploadedBytes := 0
for {
n, err := io.ReadFull(rd, buf)
if err == io.ErrUnexpectedEOF {
err = nil
}
if err == io.EOF {
// end of file reached, no bytes have been read at all
break
}
if err != nil {
return errors.Wrap(err, "ReadFull")
}
buf = buf[:n]
uploadedBytes += n
// upload it as a new "block", use the base64 hash for the ID
h := md5.Sum(buf)
id := base64.StdEncoding.EncodeToString(h[:])
reader := bytes.NewReader(buf)
debug.Log("StageBlock %v with %d bytes", id, len(buf))
_, err = blockBlobClient.StageBlock(ctx, id, streaming.NopCloser(reader), &blockblob.StageBlockOptions{
TransactionalValidation: blob.TransferValidationTypeMD5(h[:]),
})
if err != nil {
return errors.Wrap(err, "StageBlock")
}
blocks = append(blocks, id)
}
// sanity check
if uploadedBytes != int(rd.Length()) {
return errors.Errorf("wrote %d bytes instead of the expected %d bytes", uploadedBytes, rd.Length())
}
_, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{
Tier: &accessTier,
})
debug.Log("uploaded %d parts: %v", len(blocks), blocks)
return errors.Wrap(err, "CommitBlockList")
}
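// Editor's note — a minimal illustrative sketch, not part of the original
// source: block IDs in saveLarge above are the base64-encoded MD5 of the
// block contents; the same ID must be used in StageBlock and in the final
// CommitBlockList.
func exampleBlockID(block []byte) string {
sum := md5.Sum(block)
return base64.StdEncoding.EncodeToString(sum[:])
}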
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
return util.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}
func (be *Backend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
objName := be.Filename(h)
blockBlobClient := be.container.NewBlobClient(objName)
resp, err := blockBlobClient.DownloadStream(ctx, &blob.DownloadStreamOptions{
Range: azblob.HTTPRange{
Offset: offset,
Count: int64(length),
},
})
if err != nil {
return nil, err
}
if length > 0 && (resp.ContentLength == nil || *resp.ContentLength != int64(length)) {
_ = resp.Body.Close()
return nil, &azcore.ResponseError{ErrorCode: "restic-file-too-short", StatusCode: http.StatusRequestedRangeNotSatisfiable}
}
return resp.Body, err
}
// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
objName := be.Filename(h)
blobClient := be.container.NewBlobClient(objName)
props, err := blobClient.GetProperties(ctx, nil)
if err != nil {
return backend.FileInfo{}, errors.Wrap(err, "blob.GetProperties")
}
fi := backend.FileInfo{
Size: *props.ContentLength,
Name: h.Name,
}
return fi, nil
}
// Remove removes the blob with the given name and type.
func (be *Backend) Remove(ctx context.Context, h backend.Handle) error {
objName := be.Filename(h)
blob := be.container.NewBlobClient(objName)
_, err := blob.Delete(ctx, &azblob.DeleteBlobOptions{})
if be.IsNotExist(err) {
return nil
}
return errors.Wrap(err, "client.RemoveObject")
}
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
prefix, _ := be.Basedir(t)
// make sure prefix ends with a slash
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
maxI := int32(be.listMaxItems)
opts := &azContainer.ListBlobsFlatOptions{
MaxResults: &maxI,
Prefix: &prefix,
}
lister := be.container.NewListBlobsFlatPager(opts)
for lister.More() {
resp, err := lister.NextPage(ctx)
if err != nil {
return err
}
debug.Log("got %v objects", len(resp.Segment.BlobItems))
for _, item := range resp.Segment.BlobItems {
m := strings.TrimPrefix(*item.Name, prefix)
if m == "" {
continue
}
fi := backend.FileInfo{
Name: path.Base(m),
Size: *item.Properties.ContentLength,
}
if ctx.Err() != nil {
return ctx.Err()
}
err := fn(fi)
if err != nil {
return err
}
if ctx.Err() != nil {
return ctx.Err()
}
}
}
return ctx.Err()
}
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *Backend) Delete(ctx context.Context) error {
return util.DefaultDelete(ctx, be)
}
// Close does nothing
func (be *Backend) Close() error { return nil }
// Warmup not implemented
func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
return []backend.Handle{}, nil
}
func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/azure/config_test.go | internal/backend/azure/config_test.go | package azure
import (
"testing"
"github.com/restic/restic/internal/backend/test"
)
var configTests = []test.ConfigTestData[Config]{
{S: "azure:container-name:/", Cfg: Config{
Container: "container-name",
Prefix: "",
Connections: 5,
}},
{S: "azure:container-name:/prefix/directory", Cfg: Config{
Container: "container-name",
Prefix: "prefix/directory",
Connections: 5,
}},
{S: "azure:container-name:/prefix/directory/", Cfg: Config{
Container: "container-name",
Prefix: "prefix/directory",
Connections: 5,
}},
}
func TestParseConfig(t *testing.T) {
test.ParseConfigTester(t, ParseConfig, configTests)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/rclone/stdio_conn.go | internal/backend/rclone/stdio_conn.go | package rclone
import (
"net"
"os"
"os/exec"
"sync"
"time"
"github.com/restic/restic/internal/debug"
)
// StdioConn implements a net.Conn via stdin/stdout.
type StdioConn struct {
receive *os.File
send *os.File
cmd *exec.Cmd
closeRecv sync.Once
closeSend sync.Once
}
func (s *StdioConn) Read(p []byte) (int, error) {
n, err := s.receive.Read(p)
return n, err
}
func (s *StdioConn) Write(p []byte) (int, error) {
n, err := s.send.Write(p)
return n, err
}
// Close closes the stream to the child process.
func (s *StdioConn) Close() (err error) {
s.closeSend.Do(func() {
debug.Log("close stdio send connection")
err = s.send.Close()
})
return err
}
// CloseAll closes both streams.
func (s *StdioConn) CloseAll() (err error) {
err = s.Close()
s.closeRecv.Do(func() {
debug.Log("close stdio receive connection")
err2 := s.receive.Close()
if err == nil {
err = err2
}
})
return err
}
// LocalAddr returns a dummy stdio address.
func (s *StdioConn) LocalAddr() net.Addr {
return Addr{}
}
// RemoteAddr returns a dummy stdio address.
func (s *StdioConn) RemoteAddr() net.Addr {
return Addr{}
}
// SetDeadline sets the read/write deadline.
func (s *StdioConn) SetDeadline(t time.Time) error {
err1 := s.receive.SetReadDeadline(t)
err2 := s.send.SetWriteDeadline(t)
if err1 != nil {
return err1
}
return err2
}
// SetReadDeadline sets the read deadline.
func (s *StdioConn) SetReadDeadline(t time.Time) error {
return s.receive.SetReadDeadline(t)
}
// SetWriteDeadline sets the write deadline.
func (s *StdioConn) SetWriteDeadline(t time.Time) error {
return s.send.SetWriteDeadline(t)
}
// make sure StdioConn implements net.Conn
var _ net.Conn = &StdioConn{}
// Addr implements net.Addr for stdin/stdout.
type Addr struct{}
// Network returns the network type as a string.
func (a Addr) Network() string {
return "stdio"
}
func (a Addr) String() string {
return "stdio"
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/rclone/config.go | internal/backend/rclone/config.go | package rclone
import (
"strings"
"time"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
)
// Config contains all configuration necessary to start rclone.
type Config struct {
Program string `option:"program" help:"path to rclone (default: rclone)"`
Args string `option:"args" help:"arguments for running rclone (default: serve restic --stdio --b2-hard-delete)"`
Remote string
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
Timeout time.Duration `option:"timeout" help:"set a timeout limit to wait for rclone to establish a connection (default: 1m)"`
}
var defaultConfig = Config{
Program: "rclone",
Args: "serve restic --stdio --b2-hard-delete",
Connections: 5,
Timeout: time.Minute,
}
func init() {
options.Register("rclone", Config{})
}
// NewConfig returns a new Config with the default values filled in.
func NewConfig() Config {
return defaultConfig
}
// ParseConfig parses the string s and extracts the rclone remote specification.
func ParseConfig(s string) (*Config, error) {
if !strings.HasPrefix(s, "rclone:") {
return nil, errors.New("invalid rclone backend specification")
}
s = s[7:]
cfg := NewConfig()
cfg.Remote = s
return &cfg, nil
}
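// Editor's note — a minimal illustrative sketch, not part of the original
// source: "rclone:local:foo:/bar" keeps everything after the scheme as the
// remote, while program, args, connections and timeout retain defaults.
func exampleParseConfig() (*Config, error) {
return ParseConfig("rclone:local:foo:/bar") // Remote == "local:foo:/bar"
}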
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/rclone/config_test.go | internal/backend/rclone/config_test.go | package rclone
import (
"testing"
"github.com/restic/restic/internal/backend/test"
)
var configTests = []test.ConfigTestData[Config]{
{
S: "rclone:local:foo:/bar",
Cfg: Config{
Remote: "local:foo:/bar",
Program: defaultConfig.Program,
Args: defaultConfig.Args,
Connections: defaultConfig.Connections,
Timeout: defaultConfig.Timeout,
},
},
}
func TestParseConfig(t *testing.T) {
test.ParseConfigTester(t, ParseConfig, configTests)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/rclone/internal_test.go | internal/backend/rclone/internal_test.go | package rclone
import (
"context"
"os/exec"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
rtest "github.com/restic/restic/internal/test"
)
// restic should detect rclone exiting.
func TestRcloneExit(t *testing.T) {
dir := rtest.TempDir(t)
cfg := NewConfig()
cfg.Remote = dir
be, err := Open(context.TODO(), cfg, nil, t.Logf)
var e *exec.Error
if errors.As(err, &e) && e.Err == exec.ErrNotFound {
t.Skipf("program %q not found", e.Name)
return
}
rtest.OK(t, err)
defer func() {
// ignore the error as the test will kill rclone (see below)
_ = be.Close()
}()
err = be.cmd.Process.Kill()
rtest.OK(t, err)
t.Log("killed rclone")
for i := 0; i < 10; i++ {
_, err = be.Stat(context.TODO(), backend.Handle{
Name: "foo",
Type: backend.PackFile,
})
rtest.Assert(t, err != nil, "expected an error")
}
}
// restic should detect rclone startup failures
func TestRcloneFailedStart(t *testing.T) {
cfg := NewConfig()
// exits with exit code 1
cfg.Program = "false"
_, err := Open(context.TODO(), cfg, nil, t.Logf)
var e *exec.ExitError
if !errors.As(err, &e) {
// unexpected error
rtest.OK(t, err)
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/rclone/backend.go | internal/backend/rclone/backend.go | package rclone
import (
"bufio"
"context"
"crypto/tls"
"fmt"
"io"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"strings"
"sync"
"syscall"
"time"
"github.com/cenkalti/backoff/v4"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/limiter"
"github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/backend/rest"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/terminal"
"golang.org/x/net/http2"
)
// Backend is used to access data stored somewhere via rclone.
type Backend struct {
*rest.Backend
tr *http2.Transport
cmd *exec.Cmd
waitCh <-chan struct{}
waitResult error
wg *sync.WaitGroup
conn *StdioConn
}
func NewFactory() location.Factory {
return location.NewLimitedBackendFactory("rclone", ParseConfig, location.NoPassword, Create, Open)
}
// run starts command with args and initializes the StdioConn.
func run(errorLog func(string, ...interface{}), command string, args ...string) (*StdioConn, *sync.WaitGroup, chan struct{}, func() error, error) {
cmd := exec.Command(command, args...)
p, err := cmd.StderrPipe()
if err != nil {
return nil, nil, nil, nil, err
}
var wg sync.WaitGroup
waitCh := make(chan struct{})
// start a goroutine to add a prefix to all messages printed to stderr by rclone
wg.Add(1)
go func() {
defer wg.Done()
defer close(waitCh)
sc := bufio.NewScanner(p)
for sc.Scan() {
errorLog("rclone: %v\n", sc.Text())
}
debug.Log("command has exited, closing waitCh")
}()
r, stdin, err := os.Pipe()
if err != nil {
return nil, nil, nil, nil, err
}
stdout, w, err := os.Pipe()
if err != nil {
// close first pipe and ignore subsequent errors
_ = r.Close()
_ = stdin.Close()
return nil, nil, nil, nil, err
}
cmd.Stdin = r
cmd.Stdout = w
bg, err := terminal.StartForeground(cmd)
// close rclone side of pipes
errR := r.Close()
errW := w.Close()
// return first error
if err == nil {
err = errR
}
if err == nil {
err = errW
}
if err != nil {
if errors.Is(err, exec.ErrDot) {
return nil, nil, nil, nil, errors.Errorf("cannot implicitly run relative executable %v found in current directory, use -o rclone.program=./<program> to override", cmd.Path)
}
return nil, nil, nil, nil, err
}
c := &StdioConn{
receive: stdout,
send: stdin,
cmd: cmd,
}
return c, &wg, waitCh, bg, nil
}
// wrappedConn adds bandwidth limiting capabilities to the StdioConn by
// wrapping the Read/Write methods.
type wrappedConn struct {
*StdioConn
io.Reader
io.Writer
}
func (c *wrappedConn) Read(p []byte) (int, error) {
return c.Reader.Read(p)
}
func (c *wrappedConn) Write(p []byte) (int, error) {
return c.Writer.Write(p)
}
func wrapConn(c *StdioConn, lim limiter.Limiter) *wrappedConn {
wc := &wrappedConn{
StdioConn: c,
Reader: c,
Writer: c,
}
if lim != nil {
wc.Reader = lim.Downstream(c)
wc.Writer = lim.UpstreamWriter(c)
}
return wc
}
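// Editor's note — a minimal illustrative sketch, not part of the original
// source: with a non-nil limiter, reads pass through the downstream limiter
// and writes through the upstream writer, while deadlines and Close still
// reach the embedded StdioConn.
func exampleWrap(c *StdioConn, lim limiter.Limiter) net.Conn {
return wrapConn(c, lim) // *wrappedConn satisfies net.Conn via StdioConn
}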
// New initializes a Backend and starts the process.
func newBackend(ctx context.Context, cfg Config, lim limiter.Limiter, errorLog func(string, ...interface{})) (*Backend, error) {
var (
args []string
err error
)
// build program args, start with the program
if cfg.Program != "" {
a, err := backend.SplitShellStrings(cfg.Program)
if err != nil {
return nil, err
}
args = append(args, a...)
}
// then add the arguments
if cfg.Args != "" {
a, err := backend.SplitShellStrings(cfg.Args)
if err != nil {
return nil, err
}
args = append(args, a...)
}
// finally, add the remote
args = append(args, cfg.Remote)
arg0, args := args[0], args[1:]
debug.Log("running command: %v %v", arg0, args)
stdioConn, wg, waitCh, bg, err := run(errorLog, arg0, args...)
if err != nil {
return nil, err
}
var conn net.Conn = stdioConn
if lim != nil {
conn = wrapConn(stdioConn, lim)
}
dialCount := 0
tr := &http2.Transport{
AllowHTTP: true, // this is not really HTTP, just stdin/stdout
DialTLSContext: func(_ context.Context, network, address string, _ *tls.Config) (net.Conn, error) {
debug.Log("new connection requested, %v %v", network, address)
if dialCount > 0 {
// the connection to the child process is already closed
return nil, backoff.Permanent(errors.New("rclone stdio connection already closed"))
}
dialCount++
return conn, nil
},
}
cmd := stdioConn.cmd
be := &Backend{
tr: tr,
cmd: cmd,
waitCh: waitCh,
conn: stdioConn,
wg: wg,
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
wg.Add(1)
go func() {
defer wg.Done()
<-waitCh
cancel()
// according to the documentation of StderrPipe, Wait() must only be called after reading from the pipe has completed
err := cmd.Wait()
debug.Log("Wait returned %v", err)
be.waitResult = err
// close our side of the pipes to rclone, ignore errors
_ = stdioConn.CloseAll()
}()
// send an HTTP request to the base URL, see if the server is there
client := http.Client{
Transport: debug.RoundTripper(tr),
Timeout: cfg.Timeout,
}
// request a random file which does not exist. we just want to test whether
// rclone is able to accept HTTP requests.
url := fmt.Sprintf("http://localhost/file-%d", rand.Uint64())
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
return nil, err
}
req.Header.Set("Accept", rest.ContentTypeV2)
res, err := client.Do(req)
if err != nil {
// ignore subsequent errors
_ = bg()
_ = cmd.Process.Kill()
// wait for rclone to exit
wg.Wait()
// try to return the program exit code if communication with rclone has failed
if be.waitResult != nil &&
(errors.Is(err, context.Canceled) ||
errors.Is(err, io.ErrUnexpectedEOF) ||
errors.Is(err, syscall.EPIPE) ||
errors.Is(err, os.ErrClosed) ||
// there's unfortunately no better way to check for this error
strings.Contains(err.Error(), "http2: client conn could not be established")) {
err = be.waitResult
}
return nil, fmt.Errorf("error talking HTTP to rclone: %w", err)
}
_ = res.Body.Close()
debug.Log("HTTP status %q returned, moving instance to background", res.Status)
err = bg()
if err != nil {
return nil, fmt.Errorf("error moving process to background: %w", err)
}
return be, nil
}
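// Editor's note — a minimal illustrative sketch, not part of the original
// source: the single-use dialer pattern from newBackend above. The first
// dial hands out the stdio connection; any later dial fails permanently
// instead of spawning a second rclone process. Like dialCount above, this
// sketch is not goroutine-safe.
func exampleSingleUseDialer(conn net.Conn) func(context.Context, string, string, *tls.Config) (net.Conn, error) {
used := false
return func(_ context.Context, _, _ string, _ *tls.Config) (net.Conn, error) {
if used {
return nil, errors.New("stdio connection already handed out")
}
used = true
return conn, nil
}
}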
// Open starts an rclone process with the given config.
func Open(ctx context.Context, cfg Config, lim limiter.Limiter, errorLog func(string, ...interface{})) (*Backend, error) {
be, err := newBackend(ctx, cfg, lim, errorLog)
if err != nil {
return nil, err
}
url, err := url.Parse("http://localhost/")
if err != nil {
return nil, err
}
restConfig := rest.Config{
Connections: cfg.Connections,
URL: url,
}
restBackend, err := rest.Open(ctx, restConfig, debug.RoundTripper(be.tr), errorLog)
if err != nil {
_ = be.Close()
return nil, err
}
be.Backend = restBackend
return be, nil
}
// Create initializes a new restic repo with rclone.
func Create(ctx context.Context, cfg Config, lim limiter.Limiter, errorLog func(string, ...interface{})) (*Backend, error) {
be, err := newBackend(ctx, cfg, lim, errorLog)
if err != nil {
return nil, err
}
debug.Log("new backend created")
url, err := url.Parse("http://localhost/")
if err != nil {
return nil, err
}
restConfig := rest.Config{
Connections: cfg.Connections,
URL: url,
}
restBackend, err := rest.Create(ctx, restConfig, debug.RoundTripper(be.tr), errorLog)
if err != nil {
_ = be.Close()
return nil, err
}
be.Backend = restBackend
return be, nil
}
const waitForExit = 5 * time.Second
// Close terminates the backend.
func (be *Backend) Close() error {
debug.Log("exiting rclone")
be.tr.CloseIdleConnections()
select {
case <-be.waitCh:
debug.Log("rclone exited")
case <-time.After(waitForExit):
debug.Log("timeout, closing file descriptors")
err := be.conn.CloseAll()
if err != nil {
return err
}
}
be.wg.Wait()
debug.Log("wait for rclone returned: %v", be.waitResult)
return be.waitResult
}
func (be *Backend) Properties() backend.Properties {
properties := be.Backend.Properties()
properties.HasFlakyErrors = true
return properties
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/rclone/backend_test.go | internal/backend/rclone/backend_test.go | package rclone_test
import (
"os/exec"
"testing"
"github.com/restic/restic/internal/backend/rclone"
"github.com/restic/restic/internal/backend/test"
rtest "github.com/restic/restic/internal/test"
)
func newTestSuite(t testing.TB) *test.Suite[rclone.Config] {
dir := rtest.TempDir(t)
return &test.Suite[rclone.Config]{
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig: func() (*rclone.Config, error) {
t.Logf("use backend at %v", dir)
cfg := rclone.NewConfig()
cfg.Remote = dir
return &cfg, nil
},
Factory: rclone.NewFactory(),
}
}
func findRclone(t testing.TB) {
// try to find a rclone binary
_, err := exec.LookPath("rclone")
if err != nil {
t.Skip(err)
}
}
func TestBackendRclone(t *testing.T) {
defer func() {
if t.Skipped() {
rtest.SkipDisallowed(t, "restic/backend/rclone.TestBackendRclone")
}
}()
findRclone(t)
newTestSuite(t).RunTests(t)
}
func BenchmarkBackendRclone(t *testing.B) {
findRclone(t)
newTestSuite(t).RunBenchmarks(t)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/test/tests.go | internal/backend/test/tests.go | package test
import (
"bytes"
"context"
"crypto/sha256"
"fmt"
"io"
"math/rand"
"os"
"reflect"
"sort"
"sync"
"testing"
"time"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"golang.org/x/sync/errgroup"
"github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/backend"
)
func seedRand(t testing.TB) *rand.Rand {
seed := time.Now().UnixNano()
random := rand.New(rand.NewSource(seed))
t.Logf("rand initialized with seed %d", seed)
return random
}
func beTest(ctx context.Context, be backend.Backend, h backend.Handle) (bool, error) {
_, err := be.Stat(ctx, h)
if err != nil && be.IsNotExist(err) {
return false, nil
}
return err == nil, err
}
func LoadAll(ctx context.Context, be backend.Backend, h backend.Handle) ([]byte, error) {
var buf []byte
err := be.Load(ctx, h, 0, 0, func(rd io.Reader) error {
var err error
buf, err = io.ReadAll(rd)
return err
})
if err != nil {
return nil, err
}
return buf, nil
}
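// Editor's note — a minimal illustrative sketch, not part of the original
// source: by the backend contract, length == 0 and offset == 0 mean "read
// the whole file", so LoadAll returns the complete contents of h.
func exampleLoadConfig(ctx context.Context, be backend.Backend) ([]byte, error) {
return LoadAll(ctx, be, backend.Handle{Type: backend.ConfigFile})
}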
// TestStripPasswordCall tests that the StripPassword method of a factory can be called without crashing.
// It does not verify whether passwords are removed correctly
func (s *Suite[C]) TestStripPasswordCall(_ *testing.T) {
s.Factory.StripPassword("some random string")
}
// TestCreateWithConfig tests that creating a backend in a location which already
// has a config file fails.
func (s *Suite[C]) TestCreateWithConfig(t *testing.T) {
b := s.open(t)
defer s.close(t, b)
// remove a config if present
cfgHandle := backend.Handle{Type: backend.ConfigFile}
cfgPresent, err := beTest(context.TODO(), b, cfgHandle)
if err != nil {
t.Fatalf("unable to test for config: %+v", err)
}
if cfgPresent {
remove(t, b, cfgHandle)
}
// save a config
store(t, b, backend.ConfigFile, []byte("test config"))
// now create the backend again, this must fail
_, err = s.createOrError(t)
if err == nil {
t.Fatalf("expected error not found for creating a backend with an existing config file")
}
// remove config
err = b.Remove(context.TODO(), backend.Handle{Type: backend.ConfigFile, Name: ""})
if err != nil {
t.Fatalf("unexpected error removing config: %+v", err)
}
}
// TestConfig saves and loads a config from the backend.
func (s *Suite[C]) TestConfig(t *testing.T) {
b := s.open(t)
defer s.close(t, b)
var testString = "Config"
// create config and read it back
_, err := LoadAll(context.TODO(), b, backend.Handle{Type: backend.ConfigFile})
if err == nil {
t.Fatalf("did not get expected error for non-existing config")
}
test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize error from LoadAll(): %v", err)
test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize error from LoadAll(): %v", err)
err = b.Save(context.TODO(), backend.Handle{Type: backend.ConfigFile}, backend.NewByteReader([]byte(testString), b.Hasher()))
if err != nil {
t.Fatalf("Save() error: %+v", err)
}
// try accessing the config with different names, should all return the
// same config
for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} {
h := backend.Handle{Type: backend.ConfigFile, Name: name}
buf, err := LoadAll(context.TODO(), b, h)
if err != nil {
t.Fatalf("unable to read config with name %q: %+v", name, err)
}
if string(buf) != testString {
t.Fatalf("wrong data returned, want %q, got %q", testString, string(buf))
}
}
// remove the config
remove(t, b, backend.Handle{Type: backend.ConfigFile})
}
// TestLoad tests the backend's Load function.
func (s *Suite[C]) TestLoad(t *testing.T) {
random := seedRand(t)
b := s.open(t)
defer s.close(t, b)
err := testLoad(b, backend.Handle{Type: backend.PackFile, Name: "foobar"})
if err == nil {
t.Fatalf("Load() did not return an error for non-existing blob")
}
test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize non-existing blob: %v", err)
test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize non-existing blob: %v", err)
length := random.Intn(1<<24) + 2000
data := test.Random(23, length)
id := restic.Hash(data)
handle := backend.Handle{Type: backend.PackFile, Name: id.String()}
err = b.Save(context.TODO(), handle, backend.NewByteReader(data, b.Hasher()))
if err != nil {
t.Fatalf("Save() error: %+v", err)
}
t.Logf("saved %d bytes as %v", length, handle)
err = b.Load(context.TODO(), handle, 0, 0, func(rd io.Reader) error {
_, err := io.Copy(io.Discard, rd)
if err != nil {
t.Fatal(err)
}
return errors.Errorf("deliberate error")
})
if err == nil {
t.Fatalf("Load() did not propagate consumer error!")
}
if err.Error() != "deliberate error" {
t.Fatalf("Load() did not correctly propagate consumer error!")
}
loadTests := 50
if s.MinimalData {
loadTests = 10
}
for i := 0; i < loadTests; i++ {
l := random.Intn(length + 2000)
o := random.Intn(length + 2000)
d := data
if o < len(d) {
d = d[o:]
} else {
t.Logf("offset == length, skipping test")
continue
}
getlen := l
if l >= len(d) {
if random.Float32() >= 0.5 {
getlen = 0
} else {
getlen = len(d)
}
}
if l > 0 && l < len(d) {
d = d[:l]
}
var buf []byte
err := b.Load(context.TODO(), handle, getlen, int64(o), func(rd io.Reader) (ierr error) {
buf, ierr = io.ReadAll(rd)
return ierr
})
if err != nil {
t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
t.Errorf("Load(%d, %d) returned unexpected error: %+v", l, o, err)
continue
}
if l == 0 && len(buf) != len(d) {
t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
t.Errorf("Load(%d, %d) wrong number of bytes read: want %d, got %d", l, o, len(d), len(buf))
continue
}
if l > 0 && l <= len(d) && len(buf) != l {
t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
t.Errorf("Load(%d, %d) wrong number of bytes read: want %d, got %d", l, o, l, len(buf))
continue
}
if l > len(d) && len(buf) != len(d) {
t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
t.Errorf("Load(%d, %d) wrong number of bytes read for overlong read: want %d, got %d", l, o, l, len(buf))
continue
}
if !bytes.Equal(buf, d) {
t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
t.Errorf("Load(%d, %d) returned wrong bytes", l, o)
continue
}
}
// test error checking for partial and fully out of bounds read
// only test for length > 0 as we currently do not need strict out of bounds handling for length==0
for _, offset := range []int{length - 99, length - 50, length, length + 100} {
err = b.Load(context.TODO(), handle, 100, int64(offset), func(rd io.Reader) (ierr error) {
_, ierr = io.ReadAll(rd)
return ierr
})
test.Assert(t, err != nil, "Load() did not return error on out of bounds read! o %v, l %v, filelength %v", offset, 100, length)
test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize out of range read: %v", err)
test.Assert(t, !b.IsNotExist(err), "IsNotExist() must not recognize out of range read: %v", err)
}
test.OK(t, b.Remove(context.TODO(), handle))
}
type setter interface {
SetListMaxItems(int)
}
// TestList makes sure that the backend implements List() pagination correctly.
func (s *Suite[C]) TestList(t *testing.T) {
random := seedRand(t)
numTestFiles := random.Intn(20) + 20
b := s.open(t)
defer s.close(t, b)
// Check that the backend is empty to start with
var found []string
err := b.List(context.TODO(), backend.PackFile, func(fi backend.FileInfo) error {
found = append(found, fi.Name)
return nil
})
if err != nil {
t.Fatalf("List returned error %v", err)
}
if found != nil {
t.Fatalf("backend not empty at start of test - contains: %v", found)
}
list1 := make(map[restic.ID]int64)
var m sync.Mutex
wg, ctx := errgroup.WithContext(context.TODO())
for i := 0; i < numTestFiles; i++ {
data := test.Random(random.Int(), random.Intn(100)+55)
wg.Go(func() error {
id := restic.Hash(data)
h := backend.Handle{Type: backend.PackFile, Name: id.String()}
err := b.Save(ctx, h, backend.NewByteReader(data, b.Hasher()))
m.Lock()
defer m.Unlock()
list1[id] = int64(len(data))
return err
})
}
err = wg.Wait()
if err != nil {
t.Fatal(err)
}
t.Logf("wrote %v files", len(list1))
var tests = []struct {
maxItems int
}{
{11}, {23}, {numTestFiles}, {numTestFiles + 10}, {numTestFiles + 1123},
}
for _, test := range tests {
t.Run(fmt.Sprintf("max-%v", test.maxItems), func(t *testing.T) {
list2 := make(map[restic.ID]int64)
if s, ok := b.(setter); ok {
t.Logf("setting max list items to %d", test.maxItems)
s.SetListMaxItems(test.maxItems)
}
err := b.List(context.TODO(), backend.PackFile, func(fi backend.FileInfo) error {
id, err := restic.ParseID(fi.Name)
if err != nil {
t.Fatal(err)
}
list2[id] = fi.Size
return nil
})
if err != nil {
t.Fatalf("List returned error %v", err)
}
t.Logf("loaded %v IDs from backend", len(list2))
for id, size := range list1 {
size2, ok := list2[id]
if !ok {
t.Errorf("id %v not returned by List()", id.Str())
}
if size != size2 {
t.Errorf("wrong size for id %v returned: want %v, got %v", id.Str(), size, size2)
}
}
for id := range list2 {
_, ok := list1[id]
if !ok {
t.Errorf("extra id %v returned by List()", id.Str())
}
}
})
}
t.Logf("remove %d files", numTestFiles)
handles := make([]backend.Handle, 0, len(list1))
for id := range list1 {
handles = append(handles, backend.Handle{Type: backend.PackFile, Name: id.String()})
}
err = s.delayedRemove(t, b, handles...)
if err != nil {
t.Fatal(err)
}
}
// TestListCancel tests that the context is respected and the error is returned by List.
func (s *Suite[C]) TestListCancel(t *testing.T) {
numTestFiles := 5
b := s.open(t)
defer s.close(t, b)
testFiles := make([]backend.Handle, 0, numTestFiles)
for i := 0; i < numTestFiles; i++ {
data := []byte(fmt.Sprintf("random test blob %v", i))
id := restic.Hash(data)
h := backend.Handle{Type: backend.PackFile, Name: id.String()}
err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher()))
if err != nil {
t.Fatal(err)
}
testFiles = append(testFiles, h)
}
t.Run("Cancelled", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
cancel()
// pass in a cancelled context
err := b.List(ctx, backend.PackFile, func(fi backend.FileInfo) error {
t.Errorf("got FileInfo %v for cancelled context", fi)
return nil
})
if !errors.Is(err, context.Canceled) {
t.Fatalf("expected error not found, want %v, got %v", context.Canceled, err)
}
})
t.Run("First", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
i := 0
err := b.List(ctx, backend.PackFile, func(fi backend.FileInfo) error {
i++
// cancel the context on the first file
if i == 1 {
cancel()
}
return nil
})
if !errors.Is(err, context.Canceled) {
t.Fatalf("expected error not found, want %v, got %v", context.Canceled, err)
}
if i != 1 {
t.Fatalf("wrong number of files returned by List, want %v, got %v", 1, i)
}
})
t.Run("Last", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
i := 0
err := b.List(ctx, backend.PackFile, func(fi backend.FileInfo) error {
// cancel the context at the last file
i++
if i == numTestFiles {
cancel()
}
return nil
})
if !errors.Is(err, context.Canceled) {
t.Fatalf("expected error not found, want %v, got %v", context.Canceled, err)
}
if i != numTestFiles {
t.Fatalf("wrong number of files returned by List, want %v, got %v", numTestFiles, i)
}
})
t.Run("Timeout", func(t *testing.T) {
// rather large timeout, let's try to get at least one item
timeout := time.Second
ctxTimeout, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
i := 0
// pass in a context with a timeout
err := b.List(ctxTimeout, backend.PackFile, func(fi backend.FileInfo) error {
i++
// wait until the context is cancelled
<-ctxTimeout.Done()
// The cancellation of a context first closes the done channel of the context and
// _afterwards_ propagates the cancellation to child contexts. If the List
// implementation uses a child context, then it may take a moment until that context
// is also cancelled. Thus give the context cancellation a moment to propagate.
time.Sleep(time.Millisecond)
return nil
})
if !errors.Is(err, context.DeadlineExceeded) {
t.Fatalf("expected error not found, want %#v, got %#v", context.DeadlineExceeded, err)
}
if i > 2 {
t.Fatalf("wrong number of files returned by List, want <= 2, got %v", i)
}
})
err := s.delayedRemove(t, b, testFiles...)
if err != nil {
t.Fatal(err)
}
}
type errorCloser struct {
io.ReadSeeker
l int64
t testing.TB
h []byte
}
func (ec errorCloser) Close() error {
ec.t.Error("forbidden method close was called")
return errors.New("forbidden method close was called")
}
func (ec errorCloser) Length() int64 {
return ec.l
}
func (ec errorCloser) Hash() []byte {
return ec.h
}
func (ec errorCloser) Rewind() error {
_, err := ec.ReadSeeker.Seek(0, io.SeekStart)
return err
}
// TestSave tests saving data in the backend.
func (s *Suite[C]) TestSave(t *testing.T) {
random := seedRand(t)
b := s.open(t)
defer s.close(t, b)
var id restic.ID
saveTests := 10
if s.MinimalData {
saveTests = 2
}
for i := 0; i < saveTests; i++ {
length := random.Intn(1<<23) + 200000
data := test.Random(23, length)
id = sha256.Sum256(data)
h := backend.Handle{
Type: backend.PackFile,
Name: id.String(),
}
err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher()))
test.OK(t, err)
buf, err := LoadAll(context.TODO(), b, h)
test.OK(t, err)
if len(buf) != len(data) {
t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf))
}
if !bytes.Equal(buf, data) {
t.Fatalf("data not equal")
}
fi, err := b.Stat(context.TODO(), h)
test.OK(t, err)
if fi.Name != h.Name {
t.Errorf("Stat() returned wrong name, want %q, got %q", h.Name, fi.Name)
}
if fi.Size != int64(len(data)) {
t.Errorf("Stat() returned different size, want %q, got %d", len(data), fi.Size)
}
err = b.Remove(context.TODO(), h)
if err != nil {
t.Fatalf("error removing item: %+v", err)
}
}
// test saving from a tempfile
tmpfile, err := os.CreateTemp("", "restic-backend-save-test-")
if err != nil {
t.Fatal(err)
}
length := random.Intn(1<<23) + 200000
data := test.Random(23, length)
id = sha256.Sum256(data)
if _, err = tmpfile.Write(data); err != nil {
t.Fatal(err)
}
if _, err = tmpfile.Seek(0, io.SeekStart); err != nil {
t.Fatal(err)
}
h := backend.Handle{Type: backend.PackFile, Name: id.String()}
// wrap the tempfile in an errorCloser, so we can detect if the backend
// closes the reader
var beHash []byte
if b.Hasher() != nil {
beHasher := b.Hasher()
// must never fail according to interface
_, err := beHasher.Write(data)
if err != nil {
panic(err)
}
beHash = beHasher.Sum(nil)
}
err = b.Save(context.TODO(), h, errorCloser{
t: t,
l: int64(length),
ReadSeeker: tmpfile,
h: beHash,
})
if err != nil {
t.Fatal(err)
}
err = s.delayedRemove(t, b, h)
if err != nil {
t.Fatalf("error removing item: %+v", err)
}
if err = tmpfile.Close(); err != nil {
t.Fatal(err)
}
if err = os.Remove(tmpfile.Name()); err != nil {
t.Fatal(err)
}
}
type incompleteByteReader struct {
backend.ByteReader
}
func (r *incompleteByteReader) Length() int64 {
return r.ByteReader.Length() + 42
}
// TestSaveError tests that saving fails when the uploaded data is shorter than the announced length.
func (s *Suite[C]) TestSaveError(t *testing.T) {
random := seedRand(t)
b := s.open(t)
defer func() {
// rclone will report an error when closing the backend. We have to ignore it
// otherwise this test will always fail
_ = b.Close()
}()
length := random.Intn(1<<23) + 200000
data := test.Random(24, length)
var id restic.ID
copy(id[:], data)
// test that incomplete uploads fail
h := backend.Handle{Type: backend.PackFile, Name: id.String()}
err := b.Save(context.TODO(), h, &incompleteByteReader{ByteReader: *backend.NewByteReader(data, b.Hasher())})
// try to delete possible leftovers
_ = s.delayedRemove(t, b, h)
if err == nil {
t.Fatal("incomplete upload did not fail")
}
}
type wrongByteReader struct {
backend.ByteReader
}
func (b *wrongByteReader) Hash() []byte {
h := b.ByteReader.Hash()
modHash := make([]byte, len(h))
copy(modHash, h)
// flip a bit in the hash
modHash[0] ^= 0x01
return modHash
}
// TestSaveWrongHash tests that uploads with a wrong hash fail.
func (s *Suite[C]) TestSaveWrongHash(t *testing.T) {
random := seedRand(t)
b := s.open(t)
defer s.close(t, b)
// nothing to do if the backend doesn't support external hashes
if b.Hasher() == nil {
return
}
length := random.Intn(1<<23) + 200000
data := test.Random(25, length)
var id restic.ID
copy(id[:], data)
// test that upload with hash mismatch fails
h := backend.Handle{Type: backend.PackFile, Name: id.String()}
err := b.Save(context.TODO(), h, &wrongByteReader{ByteReader: *backend.NewByteReader(data, b.Hasher())})
exists, err2 := beTest(context.TODO(), b, h)
if err2 != nil {
t.Fatal(err2)
}
_ = s.delayedRemove(t, b, h)
if err == nil {
t.Fatal("upload with wrong hash did not fail")
}
t.Logf("%v", err)
if exists {
t.Fatal("Backend returned an error but stored the file anyways")
}
}
var testStrings = []struct {
id string
data string
}{
{"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", "foobar"},
{"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
{"cc5d46bdb4991c6eae3eb739c9c8a7a46fe9654fab79c47b4fe48383b5b25e1c", "foo/bar"},
{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
}
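// store saves data of the given type to the backend, using the data's hash
// as the file name, and returns the handle.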
func store(t testing.TB, b backend.Backend, tpe backend.FileType, data []byte) backend.Handle {
id := restic.Hash(data)
h := backend.Handle{Name: id.String(), Type: tpe}
err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher()))
test.OK(t, err)
return h
}
// testLoad loads a blob (but discards its contents).
func testLoad(b backend.Backend, h backend.Handle) error {
return b.Load(context.TODO(), h, 0, 0, func(rd io.Reader) (ierr error) {
_, ierr = io.Copy(io.Discard, rd)
return ierr
})
}
func (s *Suite[C]) delayedRemove(t testing.TB, be backend.Backend, handles ...backend.Handle) error {
// Some backends (swift, I'm looking at you) may implement delayed
// removal of data. Let's wait a bit if this happens.
wg, ctx := errgroup.WithContext(context.TODO())
for _, h := range handles {
wg.Go(func() error {
err := be.Remove(ctx, h)
if s.ErrorHandler != nil {
err = s.ErrorHandler(t, be, err)
}
return err
})
}
err := wg.Wait()
if err != nil {
return err
}
start := time.Now()
for _, h := range handles {
attempt := 0
var found bool
var err error
for time.Since(start) <= s.WaitForDelayedRemoval {
found, err = beTest(context.TODO(), be, h)
if s.ErrorHandler != nil {
err = s.ErrorHandler(t, be, err)
}
if err != nil {
return err
}
if !found {
break
}
time.Sleep(2 * time.Second)
attempt++
}
if found {
t.Fatalf("removed blob %v still present after %v (%d attempts)", h, time.Since(start), attempt)
}
}
return nil
}
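// delayedList lists files of type tpe, repeating the listing (up to max
// times, waiting up to maxwait in total) until at least max files are seen.
// This accommodates backends with eventually consistent listings.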
func delayedList(t testing.TB, b backend.Backend, tpe backend.FileType, max int, maxwait time.Duration) restic.IDs {
list := restic.NewIDSet()
start := time.Now()
for i := 0; i < max; i++ {
err := b.List(context.TODO(), tpe, func(fi backend.FileInfo) error {
id := restic.TestParseID(fi.Name)
list.Insert(id)
return nil
})
if err != nil {
t.Fatal(err)
}
if len(list) < max && time.Since(start) < maxwait {
time.Sleep(500 * time.Millisecond)
}
}
return list.List()
}
// TestBackend tests all functions of the backend.
func (s *Suite[C]) TestBackend(t *testing.T) {
for _, tpe := range []backend.FileType{
backend.PackFile, backend.KeyFile, backend.LockFile,
backend.SnapshotFile, backend.IndexFile,
} {
t.Run(tpe.String(), func(t *testing.T) {
t.Parallel()
b := s.open(t)
defer s.close(t, b)
test.Assert(t, !b.IsNotExist(nil), "IsNotExist() recognized nil error")
test.Assert(t, !b.IsPermanentError(nil), "IsPermanentError() recognized nil error")
// detect non-existing files
for _, ts := range testStrings {
id, err := restic.ParseID(ts.id)
test.OK(t, err)
// test if blob is already in repository
h := backend.Handle{Type: tpe, Name: id.String()}
ret, err := beTest(context.TODO(), b, h)
test.OK(t, err)
test.Assert(t, !ret, "id %q was found (but should not have)", ts.id)
// try to stat a not existing blob
_, err = b.Stat(context.TODO(), h)
test.Assert(t, err != nil, "blob data could be extracted before creation")
test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize Stat() error: %v", err)
test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize Stat() error: %v", err)
// try to read not existing blob
err = testLoad(b, h)
test.Assert(t, err != nil, "blob could be read before creation")
test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize Load() error: %v", err)
test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize Load() error: %v", err)
}
// add files
for _, ts := range testStrings {
store(t, b, tpe, []byte(ts.data))
// test Load()
h := backend.Handle{Type: tpe, Name: ts.id}
buf, err := LoadAll(context.TODO(), b, h)
test.OK(t, err)
test.Equals(t, ts.data, string(buf))
// try to read it out with an offset and a length
start := 1
end := len(ts.data) - 2
length := end - start
buf2 := make([]byte, length)
var n int
err = b.Load(context.TODO(), h, len(buf2), int64(start), func(rd io.Reader) (ierr error) {
n, ierr = io.ReadFull(rd, buf2)
return ierr
})
test.OK(t, err)
test.Equals(t, len(buf2), n)
test.Equals(t, ts.data[start:end], string(buf2))
}
// test adding the first file again
ts := testStrings[0]
h := backend.Handle{Type: tpe, Name: ts.id}
// remove and recreate
err := s.delayedRemove(t, b, h)
test.OK(t, err)
// test that the blob is gone
ok, err := beTest(context.TODO(), b, h)
test.OK(t, err)
test.Assert(t, !ok, "removed blob still present")
// create blob
err = b.Save(context.TODO(), h, backend.NewByteReader([]byte(ts.data), b.Hasher()))
test.OK(t, err)
// list items
IDs := restic.IDs{}
for _, ts := range testStrings {
id, err := restic.ParseID(ts.id)
test.OK(t, err)
IDs = append(IDs, id)
}
list := delayedList(t, b, tpe, len(IDs), s.WaitForDelayedRemoval)
if len(IDs) != len(list) {
t.Fatalf("wrong number of IDs returned: want %d, got %d", len(IDs), len(list))
}
sort.Sort(IDs)
sort.Sort(list)
if !reflect.DeepEqual(IDs, list) {
t.Fatalf("lists aren't equal, want:\n %v\n got:\n%v\n", IDs, list)
}
var handles []backend.Handle
for _, ts := range testStrings {
id, err := restic.ParseID(ts.id)
test.OK(t, err)
h := backend.Handle{Type: tpe, Name: id.String()}
found, err := beTest(context.TODO(), b, h)
test.OK(t, err)
test.Assert(t, found, fmt.Sprintf("id %v/%q not found", tpe, id))
handles = append(handles, h)
}
test.OK(t, s.delayedRemove(t, b, handles...))
})
}
}
// TestZZZDelete tests the Delete function. The name ensures that this test is executed last.
func (s *Suite[C]) TestZZZDelete(t *testing.T) {
if !test.TestCleanupTempDirs {
t.Skipf("not removing backend, TestCleanupTempDirs is false")
}
b := s.open(t)
defer s.close(t, b)
err := b.Delete(context.TODO())
if err != nil {
t.Fatalf("error deleting backend: %+v", err)
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/test/config.go | internal/backend/test/config.go | package test
import (
"fmt"
"reflect"
"testing"
)
type ConfigTestData[C comparable] struct {
S string
Cfg C
}
func ParseConfigTester[C comparable](t *testing.T, parser func(s string) (*C, error), tests []ConfigTestData[C]) {
for i, test := range tests {
t.Run(fmt.Sprint(i), func(t *testing.T) {
cfg, err := parser(test.S)
if err != nil {
t.Fatalf("%s failed: %v", test.S, err)
}
if !reflect.DeepEqual(*cfg, test.Cfg) {
t.Fatalf("input: %s\n wrong config, want:\n %#v\ngot:\n %#v",
test.S, test.Cfg, *cfg)
}
})
}
}
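// Example usage (a minimal sketch; the package "foo" with its ParseConfig
// function and Config fields is hypothetical, not an actual restic backend):
//
// func TestFooParseConfig(t *testing.T) {
//     ParseConfigTester(t, foo.ParseConfig, []ConfigTestData[foo.Config]{
//         {S: "foo:bucket/prefix", Cfg: foo.Config{Bucket: "bucket", Prefix: "prefix"}},
//     })
// }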
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/test/suite.go | internal/backend/test/suite.go | package test
import (
"context"
"fmt"
"reflect"
"strings"
"testing"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/test"
)
// Suite implements a test suite for restic backends.
type Suite[C any] struct {
// Config should be used to configure the backend.
Config *C
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig func() (*C, error)
// Factory contains a factory that can be used to create or open a repository for the tests.
Factory location.Factory
// MinimalData instructs the tests to not use excessive data.
MinimalData bool
// WaitForDelayedRemoval is set to a non-zero value to instruct the test
// suite to wait for this amount of time until a file that was removed
// really disappeared.
WaitForDelayedRemoval time.Duration
// ErrorHandler allows ignoring certain errors.
ErrorHandler func(testing.TB, backend.Backend, error) error
}
// RunTests executes all defined tests as subtests of t.
func (s *Suite[C]) RunTests(t *testing.T) {
var err error
s.Config, err = s.NewConfig()
if err != nil {
t.Fatal(err)
}
// test create/open functions first
be := s.create(t)
s.close(t, be)
for _, test := range s.testFuncs(t) {
t.Run(test.Name, test.Fn)
}
if !test.TestCleanupTempDirs {
t.Logf("not cleaning up backend")
return
}
s.cleanup(t)
}
type testFunction struct {
Name string
Fn func(*testing.T)
}
func (s *Suite[C]) testFuncs(t testing.TB) (funcs []testFunction) {
tpe := reflect.TypeOf(s)
v := reflect.ValueOf(s)
for i := 0; i < tpe.NumMethod(); i++ {
methodType := tpe.Method(i)
name := methodType.Name
// discard functions which do not have the right name
if !strings.HasPrefix(name, "Test") {
continue
}
iface := v.Method(i).Interface()
f, ok := iface.(func(*testing.T))
if !ok {
t.Logf("warning: function %v of *Suite has the wrong signature for a test function\nwant: func(*testing.T),\nhave: %T",
name, iface)
continue
}
funcs = append(funcs, testFunction{
Name: name,
Fn: f,
})
}
return funcs
}
type benchmarkFunction struct {
Name string
Fn func(*testing.B)
}
func (s *Suite[C]) benchmarkFuncs(t testing.TB) (funcs []benchmarkFunction) {
tpe := reflect.TypeOf(s)
v := reflect.ValueOf(s)
for i := 0; i < tpe.NumMethod(); i++ {
methodType := tpe.Method(i)
name := methodType.Name
// discard functions which do not have the right name
if !strings.HasPrefix(name, "Benchmark") {
continue
}
iface := v.Method(i).Interface()
f, ok := iface.(func(*testing.B))
if !ok {
t.Logf("warning: function %v of *Suite has the wrong signature for a test function\nwant: func(*testing.T),\nhave: %T",
name, iface)
continue
}
funcs = append(funcs, benchmarkFunction{
Name: name,
Fn: f,
})
}
return funcs
}
// RunBenchmarks executes all defined benchmarks as subtests of b.
func (s *Suite[C]) RunBenchmarks(b *testing.B) {
var err error
s.Config, err = s.NewConfig()
if err != nil {
b.Fatal(err)
}
// test create/open functions first
be := s.create(b)
s.close(b, be)
for _, test := range s.benchmarkFuncs(b) {
b.Run(test.Name, test.Fn)
}
if !test.TestCleanupTempDirs {
b.Logf("not cleaning up backend")
return
}
s.cleanup(b)
}
func (s *Suite[C]) createOrError(t testing.TB) (backend.Backend, error) {
tr, err := backend.Transport(backend.TransportOptions{})
if err != nil {
return nil, fmt.Errorf("cannot create transport for tests: %v", err)
}
be, err := s.Factory.Create(context.TODO(), s.Config, tr, nil, t.Logf)
if err != nil {
return nil, err
}
_, err = be.Stat(context.TODO(), backend.Handle{Type: backend.ConfigFile})
if err != nil && !be.IsNotExist(err) {
return nil, err
}
if err == nil {
return nil, errors.New("config already exists")
}
return be, nil
}
func (s *Suite[C]) create(t testing.TB) backend.Backend {
be, err := s.createOrError(t)
if err != nil {
t.Fatal(err)
}
return be
}
func (s *Suite[C]) open(t testing.TB) backend.Backend {
tr, err := backend.Transport(backend.TransportOptions{})
if err != nil {
t.Fatalf("cannot create transport for tests: %v", err)
}
be, err := s.Factory.Open(context.TODO(), s.Config, tr, nil, func(string, ...interface{}) {})
if err != nil {
t.Fatal(err)
}
return be
}
func (s *Suite[C]) cleanup(t testing.TB) {
be := s.open(t)
if err := be.Delete(context.TODO()); err != nil {
t.Fatal(err)
}
s.close(t, be)
}
func (s *Suite[C]) close(t testing.TB, be backend.Backend) {
err := be.Close()
if err != nil {
t.Fatal(err)
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/test/doc.go | internal/backend/test/doc.go | // Package test contains a test suite with benchmarks for restic backends.
//
// # Overview
//
// For the test suite to work, a few functions need to be implemented to create
// a new config, create a backend, open it, and run cleanup tasks afterwards. The
// Suite struct has fields for each function.
//
// So for a new backend, a Suite needs to be built with callback functions,
// then the methods RunTests() and RunBenchmarks() can be used to run the
// individual tests and benchmarks as subtests/subbenchmarks.
//
// # Example
//
// Assuming a *Suite is returned by newTestSuite(), the tests and benchmarks
// can be run like this:
//
// func newTestSuite(t testing.TB) *test.Suite[Config] {
//     return &test.Suite[Config]{
//         NewConfig: func() (*Config, error) {
//             [...]
//         },
//         [...]
//     }
// }
//
// func TestSuiteBackendMem(t *testing.T) {
// newTestSuite(t).RunTests(t)
// }
//
// func BenchmarkSuiteBackendMem(b *testing.B) {
// newTestSuite(b).RunBenchmarks(b)
// }
//
// The functions are run in alphabetical order.
//
// # Add new tests
//
// A new test or benchmark can be added by implementing a method on *Suite
// with the name starting with "Test" and a single *testing.T parameter for
// test. For benchmarks, the name must start with "Benchmark" and the parameter
// is a *testing.B
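//
// For example, a new test method could look like this (a minimal sketch with
// the body elided; TestMyCheck is a placeholder name, not part of the suite):
//
// func (s *Suite[C]) TestMyCheck(t *testing.T) {
//     b := s.open(t)
//     defer s.close(t, b)
//     [...]
// }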
package test
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/test/benchmarks.go | internal/backend/test/benchmarks.go | package test
import (
"bytes"
"context"
"io"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
)
func saveRandomFile(t testing.TB, be backend.Backend, length int) ([]byte, backend.Handle) {
data := test.Random(23, length)
id := restic.Hash(data)
handle := backend.Handle{Type: backend.PackFile, Name: id.String()}
err := be.Save(context.TODO(), handle, backend.NewByteReader(data, be.Hasher()))
if err != nil {
t.Fatalf("Save() error: %+v", err)
}
return data, handle
}
func remove(t testing.TB, be backend.Backend, h backend.Handle) {
if err := be.Remove(context.TODO(), h); err != nil {
t.Fatalf("Remove() returned error: %v", err)
}
}
// BenchmarkLoadFile benchmarks the Load() method of a backend by
// loading a complete file.
func (s *Suite[C]) BenchmarkLoadFile(t *testing.B) {
be := s.open(t)
defer s.close(t, be)
length := 1<<24 + 2123
data, handle := saveRandomFile(t, be, length)
defer remove(t, be, handle)
buf := make([]byte, length)
t.SetBytes(int64(length))
t.ResetTimer()
for i := 0; i < t.N; i++ {
var n int
err := be.Load(context.TODO(), handle, 0, 0, func(rd io.Reader) (ierr error) {
n, ierr = io.ReadFull(rd, buf)
return ierr
})
t.StopTimer()
switch {
case err != nil:
t.Fatal(err)
case n != length:
t.Fatalf("wrong number of bytes read: want %v, got %v", length, n)
case !bytes.Equal(data, buf):
t.Fatalf("wrong bytes returned")
}
t.StartTimer()
}
}
// BenchmarkLoadPartialFile benchmarks the Load() method of a backend by
// loading the remainder of a file starting at a given offset.
func (s *Suite[C]) BenchmarkLoadPartialFile(t *testing.B) {
be := s.open(t)
defer s.close(t, be)
datalength := 1<<24 + 2123
data, handle := saveRandomFile(t, be, datalength)
defer remove(t, be, handle)
testLength := datalength/4 + 555
buf := make([]byte, testLength)
t.SetBytes(int64(testLength))
t.ResetTimer()
for i := 0; i < t.N; i++ {
var n int
err := be.Load(context.TODO(), handle, testLength, 0, func(rd io.Reader) (ierr error) {
n, ierr = io.ReadFull(rd, buf)
return ierr
})
t.StopTimer()
switch {
case err != nil:
t.Fatal(err)
case n != testLength:
t.Fatalf("wrong number of bytes read: want %v, got %v", testLength, n)
case !bytes.Equal(data[:testLength], buf):
t.Fatalf("wrong bytes returned")
}
t.StartTimer()
}
}
// BenchmarkLoadPartialFileOffset benchmarks the Load() method of a
// backend by loading a number of bytes of a file starting at a given offset.
func (s *Suite[C]) BenchmarkLoadPartialFileOffset(t *testing.B) {
be := s.open(t)
defer s.close(t, be)
datalength := 1<<24 + 2123
data, handle := saveRandomFile(t, be, datalength)
defer remove(t, be, handle)
testLength := datalength/4 + 555
testOffset := 8273
buf := make([]byte, testLength)
t.SetBytes(int64(testLength))
t.ResetTimer()
for i := 0; i < t.N; i++ {
var n int
err := be.Load(context.TODO(), handle, testLength, int64(testOffset), func(rd io.Reader) (ierr error) {
n, ierr = io.ReadFull(rd, buf)
return ierr
})
t.StopTimer()
switch {
case err != nil:
t.Fatal(err)
case n != testLength:
t.Fatalf("wrong number of bytes read: want %v, got %v", testLength, n)
case !bytes.Equal(data[testOffset:testOffset+testLength], buf):
t.Fatalf("wrong bytes returned")
}
t.StartTimer()
}
}
// BenchmarkSave benchmarks the Save() method of a backend.
func (s *Suite[C]) BenchmarkSave(t *testing.B) {
be := s.open(t)
defer s.close(t, be)
length := 1<<24 + 2123
data := test.Random(23, length)
id := restic.Hash(data)
handle := backend.Handle{Type: backend.PackFile, Name: id.String()}
rd := backend.NewByteReader(data, be.Hasher())
t.SetBytes(int64(length))
t.ResetTimer()
for i := 0; i < t.N; i++ {
if err := be.Save(context.TODO(), handle, rd); err != nil {
t.Fatal(err)
}
if err := be.Remove(context.TODO(), handle); err != nil {
t.Fatal(err)
}
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/retry/backend_retry.go | internal/backend/retry/backend_retry.go | package retry
import (
"context"
"errors"
"fmt"
"io"
"sync"
"time"
"github.com/cenkalti/backoff/v4"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/feature"
)
// Backend retries operations on the backend in case of an error with a
// backoff.
type Backend struct {
backend.Backend
MaxElapsedTime time.Duration
Report func(string, error, time.Duration)
Success func(string, int)
failedLoads sync.Map
}
// statically ensure that RetryBackend implements backend.Backend.
var _ backend.Backend = &Backend{}
// New wraps be with a backend that retries operations after a
// backoff. report is called with a description and the error, if one occurred.
// success is called with the number of retries before a successful operation
// (it is not called if the operation succeeded on the first try).
func New(be backend.Backend, maxElapsedTime time.Duration, report func(string, error, time.Duration), success func(string, int)) *Backend {
return &Backend{
Backend: be,
MaxElapsedTime: maxElapsedTime,
Report: report,
Success: success,
}
}
// retryNotifyErrorWithSuccess is an extension of backoff.RetryNotify with notification of success after an error.
// success is NOT notified on the first run of operation (only after an error).
func retryNotifyErrorWithSuccess(operation backoff.Operation, b backoff.BackOffContext, notify backoff.Notify, success func(retries int)) error {
var operationWrapper backoff.Operation
if success == nil {
operationWrapper = operation
} else {
retries := 0
operationWrapper = func() error {
err := operation()
if err != nil {
retries++
} else if retries > 0 {
success(retries)
}
return err
}
}
err := backoff.RetryNotify(operationWrapper, b, notify)
if err != nil && notify != nil && b.Context().Err() == nil {
// log final error, unless the context was canceled
notify(err, -1)
}
return err
}
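// A minimal usage sketch (doRequest is a placeholder for the operation to
// retry, ctx for the caller's context):
//
// err := retryNotifyErrorWithSuccess(
//     func() error { return doRequest() },
//     backoff.WithContext(backoff.NewExponentialBackOff(), ctx),
//     func(err error, d time.Duration) { debug.Log("retry in %v: %v", d, err) },
//     func(retries int) { debug.Log("success after %d retries", retries) },
// )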
func withRetryAtLeastOnce(delegate *backoff.ExponentialBackOff) *retryAtLeastOnce {
return &retryAtLeastOnce{delegate: delegate}
}
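// retryAtLeastOnce ensures that at least one retry is attempted, even if the
// wrapped exponential backoff has already exceeded its MaxElapsedTime when
// the first retry is due.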
type retryAtLeastOnce struct {
delegate *backoff.ExponentialBackOff
numTries uint64
}
func (b *retryAtLeastOnce) NextBackOff() time.Duration {
delay := b.delegate.NextBackOff()
b.numTries++
if b.numTries == 1 && b.delegate.Stop == delay {
return b.delegate.InitialInterval
}
return delay
}
func (b *retryAtLeastOnce) Reset() {
b.numTries = 0
b.delegate.Reset()
}
var fastRetries = false
func (be *Backend) retry(ctx context.Context, msg string, f func() error) error {
// Don't do anything when called with an already cancelled context. There would be
// no retries in that case either, so be consistent and abort always.
// This enforces a strict contract for backend methods: Using a cancelled context
// will prevent any backup repository modifications. This simplifies ensuring that
// a backup repository is not modified any further after a context was cancelled.
// The 'local' backend for example does not provide this guarantee on its own.
if ctx.Err() != nil {
return ctx.Err()
}
bo := backoff.NewExponentialBackOff()
bo.MaxElapsedTime = be.MaxElapsedTime
if feature.Flag.Enabled(feature.BackendErrorRedesign) {
bo.InitialInterval = 1 * time.Second
bo.Multiplier = 2
}
if fastRetries {
// speed up integration tests
bo.InitialInterval = 1 * time.Millisecond
maxElapsedTime := 200 * time.Millisecond
if bo.MaxElapsedTime > maxElapsedTime {
bo.MaxElapsedTime = maxElapsedTime
}
}
var b backoff.BackOff = withRetryAtLeastOnce(bo)
if !feature.Flag.Enabled(feature.BackendErrorRedesign) {
// deprecated behavior
b = backoff.WithMaxRetries(b, 10)
}
permanentErrorAttempts := 1
if be.Backend.Properties().HasFlakyErrors {
permanentErrorAttempts = 5
}
err := retryNotifyErrorWithSuccess(
func() error {
err := f()
// don't retry permanent errors as those very likely cannot be fixed by retrying
// TODO remove IsNotExist(err) special cases when removing the feature flag
if feature.Flag.Enabled(feature.BackendErrorRedesign) && !errors.Is(err, &backoff.PermanentError{}) && be.Backend.IsPermanentError(err) {
permanentErrorAttempts--
}
if permanentErrorAttempts <= 0 {
return backoff.Permanent(err)
}
return err
},
backoff.WithContext(b, ctx),
func(err error, d time.Duration) {
if be.Report != nil {
be.Report(msg, err, d)
}
},
func(retries int) {
if be.Success != nil {
be.Success(msg, retries)
}
},
)
return err
}
// Save stores the data in the backend under the given handle.
func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
return be.retry(ctx, fmt.Sprintf("Save(%v)", h), func() error {
err := rd.Rewind()
if err != nil {
return err
}
err = be.Backend.Save(ctx, h, rd)
if err == nil {
return nil
}
if be.Backend.Properties().HasAtomicReplace {
debug.Log("Save(%v) failed with error: %v", h, err)
// there is no need to remove files from backends which can atomically replace files
// in fact if something goes wrong at the backend side the delete operation might delete the wrong instance of the file
} else {
debug.Log("Save(%v) failed with error, removing file: %v", h, err)
rerr := be.Backend.Remove(ctx, h)
if rerr != nil {
debug.Log("Remove(%v) returned error: %v", h, err)
}
}
// return original error
return err
})
}
// Failed loads expire after an hour
var failedLoadExpiry = time.Hour
// Load passes a reader that yields the contents of the file at h at the
// given offset to consumer. If length is larger than zero, only a portion
// of the file is read.
func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) (err error) {
key := h
key.IsMetadata = false
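// clear the metadata flag so that loads of the same file with and without
// the flag share a single circuit breaker entry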
// Implement the circuit breaker pattern for files that exhausted all retries due to a non-permanent error
if v, ok := be.failedLoads.Load(key); ok {
if time.Since(v.(time.Time)) > failedLoadExpiry {
be.failedLoads.Delete(key)
} else {
// fail immediately if the file was already problematic during the last hour
return fmt.Errorf("circuit breaker open for file %v", h)
}
}
err = be.retry(ctx, fmt.Sprintf("Load(%v, %v, %v)", h, length, offset),
func() error {
return be.Backend.Load(ctx, h, length, offset, consumer)
})
if feature.Flag.Enabled(feature.BackendErrorRedesign) && err != nil && ctx.Err() == nil && !be.IsPermanentError(err) {
// We've exhausted the retries, the file is likely inaccessible. By excluding permanent
// errors, not found or truncated files are not recorded. Also ignore errors if the context
// was canceled.
be.failedLoads.LoadOrStore(key, time.Now())
}
return err
}
// Stat returns information about the File identified by h.
func (be *Backend) Stat(ctx context.Context, h backend.Handle) (fi backend.FileInfo, err error) {
// see the call to `cancel()` below for why this context exists
statCtx, cancel := context.WithCancel(ctx)
defer cancel()
err = be.retry(statCtx, fmt.Sprintf("Stat(%v)", h),
func() error {
var innerError error
fi, innerError = be.Backend.Stat(ctx, h)
if be.Backend.IsNotExist(innerError) {
// stat is only used to check the existence of the config file.
// cancel the context to suppress the final error message if the file is not found.
cancel()
// do not retry if file is not found, as stat is usually used to check whether a file exists
return backoff.Permanent(innerError)
}
return innerError
})
return fi, err
}
// Remove removes a File with type t and name.
func (be *Backend) Remove(ctx context.Context, h backend.Handle) (err error) {
return be.retry(ctx, fmt.Sprintf("Remove(%v)", h), func() error {
return be.Backend.Remove(ctx, h)
})
}
// List runs fn for each file in the backend which has the type t. When an
// error is returned by the underlying backend, the request is retried. When fn
// returns an error, the operation is aborted and the error is returned to the
// caller.
func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
// create a new context that we can cancel when fn returns an error, so
// that listing is aborted
listCtx, cancel := context.WithCancel(ctx)
defer cancel()
listed := make(map[string]struct{}) // remember for which files we already ran fn
var innerErr error // remember when fn returned an error, so we can return that to the caller
err := be.retry(listCtx, fmt.Sprintf("List(%v)", t), func() error {
return be.Backend.List(ctx, t, func(fi backend.FileInfo) error {
if _, ok := listed[fi.Name]; ok {
return nil
}
listed[fi.Name] = struct{}{}
innerErr = fn(fi)
if innerErr != nil {
// if fn returned an error, listing is aborted, so we cancel the context
cancel()
}
return innerErr
})
})
// the error fn returned takes precedence
if innerErr != nil {
return innerErr
}
return err
}
func (be *Backend) Unwrap() backend.Backend {
return be.Backend
}
// Warmup delegates to the wrapped backend.
func (be *Backend) Warmup(ctx context.Context, h []backend.Handle) ([]backend.Handle, error) {
return be.Backend.Warmup(ctx, h)
}
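// WarmupWait delegates to the wrapped backend.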
func (be *Backend) WarmupWait(ctx context.Context, h []backend.Handle) error {
return be.Backend.WarmupWait(ctx, h)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/retry/backend_retry_test.go | internal/backend/retry/backend_retry_test.go | package retry
import (
"bytes"
"context"
"io"
"strings"
"testing"
"time"
"github.com/cenkalti/backoff/v4"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/mock"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
)
func TestBackendSaveRetry(t *testing.T) {
buf := bytes.NewBuffer(nil)
errcount := 0
be := &mock.Backend{
SaveFn: func(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
if errcount == 0 {
errcount++
_, err := io.CopyN(io.Discard, rd, 120)
if err != nil {
return err
}
return errors.New("injected error")
}
_, err := io.Copy(buf, rd)
return err
},
}
TestFastRetries(t)
retryBackend := New(be, 10, nil, nil)
data := test.Random(23, 5*1024*1024+11241)
err := retryBackend.Save(context.TODO(), backend.Handle{}, backend.NewByteReader(data, be.Hasher()))
if err != nil {
t.Fatal(err)
}
if len(data) != buf.Len() {
t.Errorf("wrong number of bytes written: want %d, got %d", len(data), buf.Len())
}
if !bytes.Equal(data, buf.Bytes()) {
t.Errorf("wrong data written to backend")
}
}
func TestBackendSaveRetryAtomic(t *testing.T) {
errcount := 0
calledRemove := false
be := &mock.Backend{
SaveFn: func(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
if errcount == 0 {
errcount++
return errors.New("injected error")
}
return nil
},
RemoveFn: func(ctx context.Context, h backend.Handle) error {
calledRemove = true
return nil
},
PropertiesFn: func() backend.Properties {
return backend.Properties{
Connections: 2,
HasAtomicReplace: true,
}
},
}
TestFastRetries(t)
retryBackend := New(be, 10, nil, nil)
data := test.Random(23, 5*1024*1024+11241)
err := retryBackend.Save(context.TODO(), backend.Handle{}, backend.NewByteReader(data, be.Hasher()))
if err != nil {
t.Fatal(err)
}
if calledRemove {
t.Fatal("remove must not be called")
}
}
func TestBackendListRetry(t *testing.T) {
const (
ID1 = "id1"
ID2 = "id2"
)
retry := 0
be := &mock.Backend{
ListFn: func(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
// fail during first retry, succeed during second
retry++
if retry == 1 {
_ = fn(backend.FileInfo{Name: ID1})
return errors.New("test list error")
}
_ = fn(backend.FileInfo{Name: ID1})
_ = fn(backend.FileInfo{Name: ID2})
return nil
},
}
TestFastRetries(t)
retryBackend := New(be, 10, nil, nil)
var listed []string
err := retryBackend.List(context.TODO(), backend.PackFile, func(fi backend.FileInfo) error {
listed = append(listed, fi.Name)
return nil
})
test.OK(t, err) // assert overall success
test.Equals(t, 2, retry) // assert retried once
test.Equals(t, []string{ID1, ID2}, listed) // assert no duplicate files
}
func TestBackendListRetryErrorFn(t *testing.T) {
var names = []string{"id1", "id2", "foo", "bar"}
be := &mock.Backend{
ListFn: func(ctx context.Context, tpe backend.FileType, fn func(backend.FileInfo) error) error {
t.Logf("List called for %v", tpe)
for _, name := range names {
err := fn(backend.FileInfo{Name: name})
if err != nil {
return err
}
}
return nil
},
}
TestFastRetries(t)
retryBackend := New(be, 10, nil, nil)
var ErrTest = errors.New("test error")
var listed []string
run := 0
err := retryBackend.List(context.TODO(), backend.PackFile, func(fi backend.FileInfo) error {
t.Logf("fn called for %v", fi.Name)
run++
// return an error for the third item in the list
if run == 3 {
t.Log("returning an error")
return ErrTest
}
listed = append(listed, fi.Name)
return nil
})
if err != ErrTest {
t.Fatalf("wrong error returned, want %v, got %v", ErrTest, err)
}
// processing should stop after the error was returned, so run should be 3
if run != 3 {
t.Fatalf("function was called %d times, wanted %v", run, 3)
}
test.Equals(t, []string{"id1", "id2"}, listed)
}
func TestBackendListRetryErrorBackend(t *testing.T) {
var names = []string{"id1", "id2", "foo", "bar"}
var ErrBackendTest = errors.New("test error")
retries := 0
be := &mock.Backend{
ListFn: func(ctx context.Context, tpe backend.FileType, fn func(backend.FileInfo) error) error {
t.Logf("List called for %v, retries %v", tpe, retries)
retries++
for i, name := range names {
if i == 2 {
return ErrBackendTest
}
err := fn(backend.FileInfo{Name: name})
if err != nil {
return err
}
}
return nil
},
}
TestFastRetries(t)
const maxElapsedTime = 10 * time.Millisecond
now := time.Now()
retryBackend := New(be, maxElapsedTime, nil, nil)
var listed []string
err := retryBackend.List(context.TODO(), backend.PackFile, func(fi backend.FileInfo) error {
t.Logf("fn called for %v", fi.Name)
listed = append(listed, fi.Name)
return nil
})
if err != ErrBackendTest {
t.Fatalf("wrong error returned, want %v, got %v", ErrBackendTest, err)
}
duration := time.Since(now)
if duration > 100*time.Millisecond {
t.Fatalf("list retries took %v, expected at most 10ms", duration)
}
test.Equals(t, names[:2], listed)
}
// failingReader returns an error after reading limit number of bytes
type failingReader struct {
data []byte
pos int
limit int
}
func (r failingReader) Read(p []byte) (n int, err error) {
i := 0
for ; i < len(p) && i+r.pos < r.limit; i++ {
p[i] = r.data[r.pos+i]
}
r.pos += i
if r.pos >= r.limit {
return i, errors.Errorf("reader reached limit of %d", r.limit)
}
return i, nil
}
func (r failingReader) Close() error {
return nil
}
// closingReader adapts io.Reader to io.ReadCloser interface
type closingReader struct {
rd io.Reader
}
func (r closingReader) Read(p []byte) (n int, err error) {
return r.rd.Read(p)
}
func (r closingReader) Close() error {
return nil
}
func TestBackendLoadRetry(t *testing.T) {
data := test.Random(23, 1024)
limit := 100
attempt := 0
be := mock.NewBackend()
be.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
// returns failing reader on first invocation, good reader on subsequent invocations
attempt++
if attempt > 1 {
return closingReader{rd: bytes.NewReader(data)}, nil
}
return failingReader{data: data, limit: limit}, nil
}
TestFastRetries(t)
retryBackend := New(be, 10, nil, nil)
var buf []byte
err := retryBackend.Load(context.TODO(), backend.Handle{}, 0, 0, func(rd io.Reader) (err error) {
buf, err = io.ReadAll(rd)
return err
})
test.OK(t, err)
test.Equals(t, data, buf)
test.Equals(t, 2, attempt)
}
func testBackendLoadNotExists(t *testing.T, hasFlakyErrors bool) {
// load should not retry if the error matches IsNotExist
notFound := errors.New("not found")
attempt := 0
expectedAttempts := 1
if hasFlakyErrors {
expectedAttempts = 5
}
be := mock.NewBackend()
be.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
attempt++
if attempt > expectedAttempts {
t.Fail()
return nil, errors.New("must not retry")
}
return nil, notFound
}
be.PropertiesFn = func() backend.Properties {
return backend.Properties{
Connections: 2,
HasFlakyErrors: hasFlakyErrors,
}
}
be.IsPermanentErrorFn = func(err error) bool {
return errors.Is(err, notFound)
}
TestFastRetries(t)
retryBackend := New(be, time.Second, nil, nil)
err := retryBackend.Load(context.TODO(), backend.Handle{}, 0, 0, func(rd io.Reader) (err error) {
return nil
})
test.Assert(t, be.IsPermanentErrorFn(err), "unexpected error %v", err)
test.Equals(t, expectedAttempts, attempt)
}
func TestBackendLoadNotExists(t *testing.T) {
// Without HasFlakyErrors, should fail after 1 attempt
testBackendLoadNotExists(t, false)
}
func TestBackendLoadNotExistsFlakyErrors(t *testing.T) {
// With HasFlakyErrors, should fail after attempt number 5
testBackendLoadNotExists(t, true)
}
func TestBackendLoadCircuitBreaker(t *testing.T) {
// retry should not retry if the error matches IsPermanentError
notFound := errors.New("not found")
otherError := errors.New("something")
attempt := 0
be := mock.NewBackend()
be.IsPermanentErrorFn = func(err error) bool {
return errors.Is(err, notFound)
}
be.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
attempt++
return nil, otherError
}
nilRd := func(rd io.Reader) (err error) {
return nil
}
TestFastRetries(t)
retryBackend := New(be, 2, nil, nil)
// trip the circuit breaker for file "other"
err := retryBackend.Load(context.TODO(), backend.Handle{Name: "other"}, 0, 0, nilRd)
test.Equals(t, otherError, err, "unexpected error")
test.Equals(t, 2, attempt)
attempt = 0
err = retryBackend.Load(context.TODO(), backend.Handle{Name: "other"}, 0, 0, nilRd)
test.Assert(t, strings.Contains(err.Error(), "circuit breaker open for file"), "expected circuit breaker error, got %v")
test.Equals(t, 0, attempt)
// don't trip for permanent errors
be.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
attempt++
return nil, notFound
}
err = retryBackend.Load(context.TODO(), backend.Handle{Name: "notfound"}, 0, 0, nilRd)
test.Equals(t, notFound, err, "expected circuit breaker to only affect other file, got %v")
err = retryBackend.Load(context.TODO(), backend.Handle{Name: "notfound"}, 0, 0, nilRd)
test.Equals(t, notFound, err, "persistent error must not trigger circuit breaker, got %v")
// wait for circuit breaker to expire
time.Sleep(5 * time.Millisecond)
old := failedLoadExpiry
defer func() {
failedLoadExpiry = old
}()
failedLoadExpiry = 3 * time.Millisecond
err = retryBackend.Load(context.TODO(), backend.Handle{Name: "other"}, 0, 0, nilRd)
test.Equals(t, notFound, err, "expected circuit breaker to reset, got %v")
}
func TestBackendLoadCircuitBreakerCancel(t *testing.T) {
cctx, cancel := context.WithCancel(context.Background())
be := mock.NewBackend()
be.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
cancel()
return nil, errors.New("something")
}
nilRd := func(rd io.Reader) (err error) {
return nil
}
TestFastRetries(t)
retryBackend := New(be, 2, nil, nil)
// canceling the context should not trip the circuit breaker
err := retryBackend.Load(cctx, backend.Handle{Name: "other"}, 0, 0, nilRd)
test.Equals(t, context.Canceled, err, "unexpected error")
// reset context and check that the circuit breaker does not return an error
cctx, cancel = context.WithCancel(context.Background())
defer cancel()
err = retryBackend.Load(cctx, backend.Handle{Name: "other"}, 0, 0, nilRd)
test.Equals(t, context.Canceled, err, "unexpected error")
}
func TestBackendStatNotExists(t *testing.T) {
// stat should not retry if the error matches IsNotExist
notFound := errors.New("not found")
attempt := 0
be := mock.NewBackend()
be.StatFn = func(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
attempt++
if attempt > 1 {
t.Fail()
return backend.FileInfo{}, errors.New("must not retry")
}
return backend.FileInfo{}, notFound
}
be.IsNotExistFn = func(err error) bool {
return errors.Is(err, notFound)
}
TestFastRetries(t)
retryBackend := New(be, 10, func(s string, err error, d time.Duration) {
t.Fatalf("unexpected error output %v", s)
}, func(s string, i int) {
t.Fatalf("unexpected log output %v", s)
})
_, err := retryBackend.Stat(context.TODO(), backend.Handle{})
test.Assert(t, be.IsNotExistFn(err), "unexpected error %v", err)
test.Equals(t, 1, attempt)
}
func TestBackendRetryPermanent(t *testing.T) {
// retry should not retry if the error matches IsPermanentError
notFound := errors.New("not found")
attempt := 0
be := mock.NewBackend()
be.IsPermanentErrorFn = func(err error) bool {
return errors.Is(err, notFound)
}
TestFastRetries(t)
retryBackend := New(be, 2, nil, nil)
err := retryBackend.retry(context.TODO(), "test", func() error {
attempt++
return notFound
})
test.Assert(t, be.IsPermanentErrorFn(err), "unexpected error %v", err)
test.Equals(t, 1, attempt)
attempt = 0
err = retryBackend.retry(context.TODO(), "test", func() error {
attempt++
return errors.New("something")
})
test.Assert(t, !be.IsPermanentErrorFn(err), "error unexpectedly considered permanent %v", err)
test.Equals(t, 2, attempt)
}
func assertIsCanceled(t *testing.T, err error) {
test.Assert(t, err == context.Canceled, "got unexpected err %v", err)
}
func TestBackendCanceledContext(t *testing.T) {
// unimplemented mock backend functions return an error by default
// check that we received the expected context canceled error instead
TestFastRetries(t)
retryBackend := New(mock.NewBackend(), 2, nil, nil)
h := backend.Handle{Type: backend.PackFile, Name: restic.NewRandomID().String()}
// create an already canceled context
ctx, cancel := context.WithCancel(context.Background())
cancel()
_, err := retryBackend.Stat(ctx, h)
assertIsCanceled(t, err)
err = retryBackend.Save(ctx, h, backend.NewByteReader([]byte{}, nil))
assertIsCanceled(t, err)
err = retryBackend.Remove(ctx, h)
assertIsCanceled(t, err)
err = retryBackend.Load(ctx, backend.Handle{}, 0, 0, func(rd io.Reader) (err error) {
return nil
})
assertIsCanceled(t, err)
err = retryBackend.List(ctx, backend.PackFile, func(backend.FileInfo) error {
return nil
})
assertIsCanceled(t, err)
// don't test "Delete" as it is not used by normal code
}
func TestNotifyWithSuccessIsNotCalled(t *testing.T) {
operation := func() error {
return nil
}
notify := func(error, time.Duration) {
t.Fatal("Notify should not have been called")
}
success := func(retries int) {
t.Fatal("Success should not have been called")
}
err := retryNotifyErrorWithSuccess(operation, backoff.WithContext(&backoff.ZeroBackOff{}, context.Background()), notify, success)
if err != nil {
t.Fatal("retry should not have returned an error")
}
}
func TestNotifyWithSuccessIsCalled(t *testing.T) {
operationCalled := 0
operation := func() error {
operationCalled++
if operationCalled <= 2 {
return errors.New("expected error in test")
}
return nil
}
notifyCalled := 0
notify := func(error, time.Duration) {
notifyCalled++
}
successCalled := 0
success := func(retries int) {
successCalled++
}
err := retryNotifyErrorWithSuccess(operation, backoff.WithContext(&backoff.ZeroBackOff{}, context.Background()), notify, success)
if err != nil {
t.Fatal("retry should not have returned an error")
}
if notifyCalled != 2 {
t.Fatalf("Notify should have been called 2 times, but was called %d times instead", notifyCalled)
}
if successCalled != 1 {
t.Fatalf("Success should have been called only once, but was called %d times instead", successCalled)
}
}
func TestNotifyWithSuccessFinalError(t *testing.T) {
operation := func() error {
return errors.New("expected error in test")
}
notifyCalled := 0
notify := func(error, time.Duration) {
notifyCalled++
}
successCalled := 0
success := func(retries int) {
successCalled++
}
err := retryNotifyErrorWithSuccess(operation, backoff.WithContext(backoff.WithMaxRetries(&backoff.ZeroBackOff{}, 5), context.Background()), notify, success)
test.Assert(t, err.Error() == "expected error in test", "wrong error message %v", err)
test.Equals(t, 6, notifyCalled, "notify should have been called 6 times")
test.Equals(t, 0, successCalled, "success should not have been called")
}
func TestNotifyWithCancelError(t *testing.T) {
operation := func() error {
return errors.New("expected error in test")
}
notify := func(error, time.Duration) {
t.Error("unexpected call to notify")
}
success := func(retries int) {
t.Error("unexpected call to success")
}
ctx, cancel := context.WithCancel(context.Background())
cancel()
err := retryNotifyErrorWithSuccess(operation, backoff.WithContext(&backoff.ZeroBackOff{}, ctx), notify, success)
test.Assert(t, err == context.Canceled, "wrong error message %v", err)
}
type testClock struct {
Time time.Time
}
func (c *testClock) Now() time.Time {
return c.Time
}
func TestRetryAtLeastOnce(t *testing.T) {
expBackOff := backoff.NewExponentialBackOff()
expBackOff.InitialInterval = 500 * time.Millisecond
expBackOff.RandomizationFactor = 0
expBackOff.MaxElapsedTime = 5 * time.Second
expBackOff.Multiplier = 2 // guarantee numerical stability
clock := &testClock{Time: time.Now()}
expBackOff.Clock = clock
expBackOff.Reset()
retry := withRetryAtLeastOnce(expBackOff)
// expire backoff
clock.Time = clock.Time.Add(10 * time.Second)
delay := retry.NextBackOff()
test.Equals(t, expBackOff.InitialInterval, delay, "must retry at least once")
delay = retry.NextBackOff()
test.Equals(t, expBackOff.Stop, delay, "must not retry more than once")
// test reset behavior
retry.Reset()
test.Equals(t, uint64(0), retry.numTries, "numTries should be reset to 0")
// Verify that after reset, NextBackOff returns the initial interval again
delay = retry.NextBackOff()
test.Equals(t, expBackOff.InitialInterval, delay, "retries must work after reset")
delay = retry.NextBackOff()
test.Equals(t, expBackOff.InitialInterval*time.Duration(expBackOff.Multiplier), delay, "retries must work after reset")
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/retry/testing.go | internal/backend/retry/testing.go | package retry
import "testing"
// TestFastRetries reduces the initial retry delay to 1 millisecond
func TestFastRetries(_ testing.TB) {
fastRetries = true
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/s3/s3_test.go | internal/backend/s3/s3_test.go | package s3_test
import (
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"io"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/backend/s3"
"github.com/restic/restic/internal/backend/test"
"github.com/restic/restic/internal/options"
rtest "github.com/restic/restic/internal/test"
)
func mkdir(t testing.TB, dir string) {
err := os.MkdirAll(dir, 0700)
if err != nil {
t.Fatal(err)
}
}
func runMinio(ctx context.Context, t testing.TB, dir, key, secret string) func() {
mkdir(t, filepath.Join(dir, "config"))
mkdir(t, filepath.Join(dir, "root"))
cmd := exec.CommandContext(ctx, "minio",
"server",
"--address", "127.0.0.1:9000",
"--config-dir", filepath.Join(dir, "config"),
filepath.Join(dir, "root"))
cmd.Env = append(os.Environ(),
"MINIO_ACCESS_KEY="+key,
"MINIO_SECRET_KEY="+secret,
)
cmd.Stderr = os.Stderr
err := cmd.Start()
if err != nil {
t.Fatal(err)
}
// wait until the TCP port is reachable
var success bool
for i := 0; i < 100; i++ {
time.Sleep(200 * time.Millisecond)
c, err := net.Dial("tcp", "localhost:9000")
if err == nil {
success = true
if err := c.Close(); err != nil {
t.Fatal(err)
}
break
}
}
if !success {
t.Fatal("unable to connect to minio server")
return nil
}
return func() {
err = cmd.Process.Kill()
if err != nil {
t.Fatal(err)
}
// ignore errors, we've killed the process
_ = cmd.Wait()
}
}
func newRandomCredentials(t testing.TB) (key, secret string) {
buf := make([]byte, 10)
_, err := io.ReadFull(rand.Reader, buf)
if err != nil {
t.Fatal(err)
}
key = hex.EncodeToString(buf)
_, err = io.ReadFull(rand.Reader, buf)
if err != nil {
t.Fatal(err)
}
secret = hex.EncodeToString(buf)
return key, secret
}
func newMinioTestSuite(t testing.TB) (*test.Suite[s3.Config], func()) {
ctx, cancel := context.WithCancel(context.Background())
tempdir := rtest.TempDir(t)
key, secret := newRandomCredentials(t)
cleanup := runMinio(ctx, t, tempdir, key, secret)
return &test.Suite[s3.Config]{
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig: func() (*s3.Config, error) {
cfg := s3.NewConfig()
cfg.Endpoint = "localhost:9000"
cfg.Bucket = "restictestbucket"
cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
cfg.UseHTTP = true
cfg.KeyID = key
cfg.Secret = options.NewSecretString(secret)
return &cfg, nil
},
Factory: location.NewHTTPBackendFactory("s3", s3.ParseConfig, location.NoPassword, func(ctx context.Context, cfg s3.Config, rt http.RoundTripper, errorLog func(string, ...interface{})) (be backend.Backend, err error) {
for i := 0; i < 50; i++ {
be, err = s3.Create(ctx, cfg, rt, errorLog)
if err != nil {
t.Logf("s3 open: try %d: error %v", i, err)
time.Sleep(500 * time.Millisecond)
continue
}
break
}
return be, err
}, s3.Open),
}, func() {
defer cancel()
defer cleanup()
}
}
func TestBackendMinio(t *testing.T) {
defer func() {
if t.Skipped() {
rtest.SkipDisallowed(t, "restic/backend/s3.TestBackendMinio")
}
}()
// try to find a minio binary
_, err := exec.LookPath("minio")
if err != nil {
t.Skip(err)
return
}
suite, cleanup := newMinioTestSuite(t)
defer cleanup()
suite.RunTests(t)
}
func BenchmarkBackendMinio(t *testing.B) {
// try to find a minio binary
_, err := exec.LookPath("minio")
if err != nil {
t.Skip(err)
return
}
suite, cleanup := newMinioTestSuite(t)
defer cleanup()
suite.RunBenchmarks(t)
}
func newS3TestSuite() *test.Suite[s3.Config] {
return &test.Suite[s3.Config]{
// do not use excessive data
MinimalData: true,
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig: func() (*s3.Config, error) {
cfg, err := s3.ParseConfig(os.Getenv("RESTIC_TEST_S3_REPOSITORY"))
if err != nil {
return nil, err
}
cfg.KeyID = os.Getenv("RESTIC_TEST_S3_KEY")
cfg.Secret = options.NewSecretString(os.Getenv("RESTIC_TEST_S3_SECRET"))
cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
return cfg, nil
},
Factory: s3.NewFactory(),
}
}
func TestBackendS3(t *testing.T) {
defer func() {
if t.Skipped() {
rtest.SkipDisallowed(t, "restic/backend/s3.TestBackendS3")
}
}()
vars := []string{
"RESTIC_TEST_S3_KEY",
"RESTIC_TEST_S3_SECRET",
"RESTIC_TEST_S3_REPOSITORY",
}
for _, v := range vars {
if os.Getenv(v) == "" {
t.Skipf("environment variable %v not set", v)
return
}
}
t.Logf("run tests")
newS3TestSuite().RunTests(t)
}
func BenchmarkBackendS3(t *testing.B) {
vars := []string{
"RESTIC_TEST_S3_KEY",
"RESTIC_TEST_S3_SECRET",
"RESTIC_TEST_S3_REPOSITORY",
}
for _, v := range vars {
if os.Getenv(v) == "" {
t.Skipf("environment variable %v not set", v)
return
}
}
t.Logf("run tests")
newS3TestSuite().RunBenchmarks(t)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
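runMinio above has to poll until the server's TCP port accepts connections before the suite can run. A self-contained sketch of that readiness loop, with the address and timing as illustrative values:

package main

import (
	"fmt"
	"net"
	"time"
)

// waitForTCP polls addr until a connection succeeds or attempts are exhausted.
func waitForTCP(addr string, attempts int, delay time.Duration) error {
	for i := 0; i < attempts; i++ {
		time.Sleep(delay)
		c, err := net.Dial("tcp", addr)
		if err == nil {
			return c.Close()
		}
	}
	return fmt.Errorf("%s not reachable after %d attempts", addr, attempts)
}

func main() {
	// mirrors the 100 x 200ms loop used for the minio server above
	if err := waitForTCP("127.0.0.1:9000", 100, 200*time.Millisecond); err != nil {
		fmt.Println(err)
	}
}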
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/s3/s3.go | internal/backend/s3/s3.go | package s3
import (
"context"
"fmt"
"hash"
"io"
"net/http"
"os"
"path"
"slices"
"strings"
"time"
"github.com/cenkalti/backoff/v4"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/layout"
"github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/backend/util"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/feature"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
// Backend stores data on an S3 endpoint.
type Backend struct {
client *minio.Client
cfg Config
layout.Layout
}
// make sure that *Backend implements backend.Backend
var _ backend.Backend = &Backend{}
var archiveClasses = []string{"GLACIER", "DEEP_ARCHIVE"}
type warmupStatus int
const (
warmupStatusCold warmupStatus = iota
warmupStatusWarmingUp
warmupStatusWarm
warmupStatusLukewarm
)
func NewFactory() location.Factory {
return location.NewHTTPBackendFactory("s3", ParseConfig, location.NoPassword, Create, Open)
}
func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
debug.Log("open, config %#v", cfg)
if cfg.EnableRestore && !feature.Flag.Enabled(feature.S3Restore) {
return nil, fmt.Errorf("feature flag `s3-restore` is required to use `-o s3.enable-restore=true`")
}
if cfg.MaxRetries > 0 {
minio.MaxRetry = int(cfg.MaxRetries)
}
creds, err := getCredentials(cfg, rt)
if err != nil {
return nil, errors.Wrap(err, "s3.getCredentials")
}
options := &minio.Options{
Creds: creds,
Secure: !cfg.UseHTTP,
Region: cfg.Region,
Transport: rt,
}
switch strings.ToLower(cfg.BucketLookup) {
case "", "auto":
options.BucketLookup = minio.BucketLookupAuto
case "dns":
options.BucketLookup = minio.BucketLookupDNS
case "path":
options.BucketLookup = minio.BucketLookupPath
default:
return nil, fmt.Errorf(`bad bucket-lookup style %q must be "auto", "path" or "dns"`, cfg.BucketLookup)
}
client, err := minio.New(cfg.Endpoint, options)
if err != nil {
return nil, errors.Wrap(err, "minio.New")
}
be := &Backend{
client: client,
cfg: cfg,
Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join),
}
return be, nil
}
// getCredentials runs through the various credential types and returns the first one that works.
// Additionally, if the user has specified a role to assume, it assumes that role as well.
func getCredentials(cfg Config, tr http.RoundTripper) (*credentials.Credentials, error) {
if cfg.UnsafeAnonymousAuth {
return credentials.New(&credentials.Static{}), nil
}
// Chains all credential types, in the following order:
// - Static credentials (test only)
// - AWS env vars (i.e. AWS_ACCESS_KEY_ID)
// - Minio env vars (i.e. MINIO_ACCESS_KEY)
// - AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials)
// - Minio creds file (i.e. MINIO_SHARED_CREDENTIALS_FILE or ~/.mc/config.json)
// - IAM profile based credentials. (performs an HTTP
// call to a pre-defined endpoint, only valid inside
// configured ec2 instances)
creds := credentials.NewChainCredentials([]credentials.Provider{
&credentials.Static{ // test only
Value: credentials.Value{
AccessKeyID: cfg.KeyID,
SecretAccessKey: cfg.Secret.Unwrap(),
},
},
&credentials.EnvAWS{},
&credentials.EnvMinio{},
&credentials.FileAWSCredentials{},
&credentials.FileMinioClient{},
&credentials.IAM{},
})
client := &http.Client{Transport: tr}
c, err := creds.GetWithContext(&credentials.CredContext{Client: client})
if err != nil {
return nil, errors.Wrap(err, "creds.Get")
}
if c.SignerType == credentials.SignatureAnonymous {
keyID := os.Getenv("AWS_ACCESS_KEY_ID")
secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
if keyID == "" && secret != "" {
return nil, errors.Fatalf("no credentials found. $AWS_SECRET_ACCESS_KEY is set but $AWS_ACCESS_KEY_ID is empty")
} else if keyID != "" && secret == "" {
return nil, errors.Fatalf("no credentials found. $AWS_ACCESS_KEY_ID is set but $AWS_SECRET_ACCESS_KEY is empty")
}
// Fail if no credentials were found, to prevent repeated attempts to (unsuccessfully) retrieve new credentials.
// The first attempt still has to time out, which slows down restic considerably. Thus, migrate towards forcing
// users to explicitly decide between authenticated and anonymous access.
return nil, fmt.Errorf("no credentials found. Use `-o s3.unsafe-anonymous-auth=true` for anonymous authentication")
}
roleArn := os.Getenv("RESTIC_AWS_ASSUME_ROLE_ARN")
if roleArn != "" {
// use the region provided by the configuration by default
awsRegion := cfg.Region
// allow the region to be overridden if for some reason it is required
if os.Getenv("RESTIC_AWS_ASSUME_ROLE_REGION") != "" {
awsRegion = os.Getenv("RESTIC_AWS_ASSUME_ROLE_REGION")
}
sessionName := os.Getenv("RESTIC_AWS_ASSUME_ROLE_SESSION_NAME")
externalID := os.Getenv("RESTIC_AWS_ASSUME_ROLE_EXTERNAL_ID")
policy := os.Getenv("RESTIC_AWS_ASSUME_ROLE_POLICY")
stsEndpoint := os.Getenv("RESTIC_AWS_ASSUME_ROLE_STS_ENDPOINT")
if stsEndpoint == "" {
if awsRegion != "" {
if strings.HasPrefix(awsRegion, "cn-") {
stsEndpoint = "https://sts." + awsRegion + ".amazonaws.com.cn"
} else {
stsEndpoint = "https://sts." + awsRegion + ".amazonaws.com"
}
} else {
stsEndpoint = "https://sts.amazonaws.com"
}
}
opts := credentials.STSAssumeRoleOptions{
RoleARN: roleArn,
AccessKey: c.AccessKeyID,
SecretKey: c.SecretAccessKey,
SessionToken: c.SessionToken,
RoleSessionName: sessionName,
ExternalID: externalID,
Policy: policy,
Location: awsRegion,
}
creds, err = credentials.NewSTSAssumeRole(stsEndpoint, opts)
if err != nil {
return nil, errors.Wrap(err, "creds.AssumeRole")
}
}
return creds, nil
}
// Open opens the S3 backend at bucket and region. Unlike Create, it does not
// create the bucket if it does not exist yet.
func Open(_ context.Context, cfg Config, rt http.RoundTripper, _ func(string, ...interface{})) (backend.Backend, error) {
return open(cfg, rt)
}
// Create opens the S3 backend at bucket and region and creates the bucket if
// it does not exist yet.
func Create(ctx context.Context, cfg Config, rt http.RoundTripper, _ func(string, ...interface{})) (backend.Backend, error) {
be, err := open(cfg, rt)
if err != nil {
return nil, errors.Wrap(err, "open")
}
found, err := be.client.BucketExists(ctx, cfg.Bucket)
if err != nil && isAccessDenied(err) {
err = nil
found = true
}
if err != nil {
debug.Log("BucketExists(%v) returned err %v", cfg.Bucket, err)
return nil, errors.Wrap(err, "client.BucketExists")
}
if !found {
// create new bucket with default ACL in default region
err = be.client.MakeBucket(ctx, cfg.Bucket, minio.MakeBucketOptions{})
if err != nil {
return nil, errors.Wrap(err, "client.MakeBucket")
}
}
return be, nil
}
// isAccessDenied returns true if the error is caused by Access Denied.
func isAccessDenied(err error) bool {
debug.Log("isAccessDenied(%T, %#v)", err, err)
var e minio.ErrorResponse
return errors.As(err, &e) && e.Code == "AccessDenied"
}
// IsNotExist returns true if the error is caused by a not existing file.
func (be *Backend) IsNotExist(err error) bool {
var e minio.ErrorResponse
return errors.As(err, &e) && e.Code == "NoSuchKey"
}
func (be *Backend) IsPermanentError(err error) bool {
if be.IsNotExist(err) {
return true
}
var merr minio.ErrorResponse
if errors.As(err, &merr) {
if merr.Code == "InvalidRange" || merr.Code == "AccessDenied" {
return true
}
}
return false
}
func (be *Backend) Properties() backend.Properties {
return backend.Properties{
Connections: be.cfg.Connections,
HasAtomicReplace: true,
}
}
// Hasher may return a hash function for calculating a content hash for the backend
func (be *Backend) Hasher() hash.Hash {
return nil
}
// Path returns the path in the bucket that is used for this backend.
func (be *Backend) Path() string {
return be.cfg.Prefix
}
// useStorageClass returns whether the file should be saved in the configured storage class.
// For archive storage classes, only data files are stored using that class; metadata
// must remain instantly accessible.
func (be *Backend) useStorageClass(h backend.Handle) bool {
isDataFile := h.Type == backend.PackFile && !h.IsMetadata
isArchiveClass := slices.Contains(archiveClasses, be.cfg.StorageClass)
return !isArchiveClass || isDataFile
}
// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
objName := be.Filename(h)
opts := minio.PutObjectOptions{
ContentType: "application/octet-stream",
// the only option with the high-level api is to let the library handle the checksum computation
SendContentMd5: true,
// only use multipart uploads for very large files
PartSize: 200 * 1024 * 1024,
}
if be.useStorageClass(h) {
opts.StorageClass = be.cfg.StorageClass
}
info, err := be.client.PutObject(ctx, be.cfg.Bucket, objName, io.NopCloser(rd), rd.Length(), opts)
// sanity check
if err == nil && info.Size != rd.Length() {
return errors.Errorf("wrote %d bytes instead of the expected %d bytes", info.Size, rd.Length())
}
return errors.Wrap(err, "client.PutObject")
}
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
return util.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}
func (be *Backend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
objName := be.Filename(h)
opts := minio.GetObjectOptions{}
var err error
if length > 0 {
err = opts.SetRange(offset, offset+int64(length)-1)
} else if offset > 0 {
err = opts.SetRange(offset, 0)
}
if err != nil {
return nil, errors.Wrap(err, "SetRange")
}
coreClient := minio.Core{Client: be.client}
rd, info, _, err := coreClient.GetObject(ctx, be.cfg.Bucket, objName, opts)
if err != nil {
return nil, err
}
if feature.Flag.Enabled(feature.BackendErrorRedesign) && length > 0 {
if info.Size > 0 && info.Size != int64(length) {
_ = rd.Close()
return nil, minio.ErrorResponse{Code: "InvalidRange", Message: "restic-file-too-short"}
}
}
return rd, err
}
// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h backend.Handle) (bi backend.FileInfo, err error) {
objName := be.Filename(h)
var obj *minio.Object
opts := minio.GetObjectOptions{}
obj, err = be.client.GetObject(ctx, be.cfg.Bucket, objName, opts)
if err != nil {
return backend.FileInfo{}, errors.Wrap(err, "client.GetObject")
}
// make sure that the object is closed properly.
defer func() {
e := obj.Close()
if err == nil {
err = errors.Wrap(e, "Close")
}
}()
fi, err := obj.Stat()
if err != nil {
return backend.FileInfo{}, errors.Wrap(err, "Stat")
}
return backend.FileInfo{Size: fi.Size, Name: h.Name}, nil
}
// Remove removes the blob with the given name and type.
func (be *Backend) Remove(ctx context.Context, h backend.Handle) error {
objName := be.Filename(h)
err := be.client.RemoveObject(ctx, be.cfg.Bucket, objName, minio.RemoveObjectOptions{})
if be.IsNotExist(err) {
err = nil
}
return errors.Wrap(err, "client.RemoveObject")
}
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
prefix, recursive := be.Basedir(t)
// make sure prefix ends with a slash
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
debug.Log("using ListObjectsV1(%v)", be.cfg.ListObjectsV1)
// NB: unfortunately we can't protect this with be.sem.GetToken() here.
// Doing so could lead to a deadlock (gh-1399), as ListObjects()
// starts its own goroutine and returns results via a channel.
listresp := be.client.ListObjects(ctx, be.cfg.Bucket, minio.ListObjectsOptions{
Prefix: prefix,
Recursive: recursive,
UseV1: be.cfg.ListObjectsV1,
})
for obj := range listresp {
if obj.Err != nil {
return obj.Err
}
m := strings.TrimPrefix(obj.Key, prefix)
if m == "" {
continue
}
fi := backend.FileInfo{
Name: path.Base(m),
Size: obj.Size,
}
if ctx.Err() != nil {
return ctx.Err()
}
err := fn(fi)
if err != nil {
return err
}
if ctx.Err() != nil {
return ctx.Err()
}
}
return ctx.Err()
}
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *Backend) Delete(ctx context.Context) error {
return util.DefaultDelete(ctx, be)
}
// Close does nothing
func (be *Backend) Close() error { return nil }
// Warmup transitions handles from cold to hot storage if needed.
func (be *Backend) Warmup(ctx context.Context, handles []backend.Handle) ([]backend.Handle, error) {
handlesWarmingUp := []backend.Handle{}
if be.cfg.EnableRestore {
for _, h := range handles {
filename := be.Filename(h)
isWarmingUp, err := be.requestRestore(ctx, filename)
if err != nil {
return handlesWarmingUp, err
}
if isWarmingUp {
debug.Log("s3 file is being restored: %s", filename)
handlesWarmingUp = append(handlesWarmingUp, h)
}
}
}
return handlesWarmingUp, nil
}
// requestRestore sends a glacier restore request on a given file.
func (be *Backend) requestRestore(ctx context.Context, filename string) (bool, error) {
objectInfo, err := be.client.StatObject(ctx, be.cfg.Bucket, filename, minio.StatObjectOptions{})
if err != nil {
return false, err
}
ws := be.getWarmupStatus(objectInfo)
switch ws {
case warmupStatusWarm:
return false, nil
case warmupStatusWarmingUp:
return true, nil
}
opts := minio.RestoreRequest{}
opts.SetDays(be.cfg.RestoreDays)
opts.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierType(be.cfg.RestoreTier)})
if err := be.client.RestoreObject(ctx, be.cfg.Bucket, filename, "", opts); err != nil {
var e minio.ErrorResponse
if errors.As(err, &e) {
switch e.Code {
case "InvalidObjectState":
return false, nil
case "RestoreAlreadyInProgress":
return true, nil
}
}
return false, err
}
isWarmingUp := ws != warmupStatusLukewarm
return isWarmingUp, nil
}
// getWarmupStatus returns the warmup status of the provided object.
func (be *Backend) getWarmupStatus(objectInfo minio.ObjectInfo) warmupStatus {
// We can't use objectInfo.StorageClass to get the storage class of the
// object because this field is only set during ListObjects operations.
// The response header is the documented way to get the storage class
// for GetObject/StatObject operations.
storageClass := objectInfo.Metadata.Get("X-Amz-Storage-Class")
isArchiveClass := slices.Contains(archiveClasses, storageClass)
if !isArchiveClass {
return warmupStatusWarm
}
restore := objectInfo.Restore
if restore != nil {
if restore.OngoingRestore {
return warmupStatusWarmingUp
}
minExpiryTime := time.Now().Add(time.Duration(be.cfg.RestoreDays) * 24 * time.Hour)
expiryTime := restore.ExpiryTime
if !expiryTime.IsZero() {
if minExpiryTime.Before(expiryTime) {
return warmupStatusWarm
}
return warmupStatusLukewarm
}
}
return warmupStatusCold
}
// WarmupWait waits until all handles are in hot storage.
func (be *Backend) WarmupWait(ctx context.Context, handles []backend.Handle) error {
timeoutCtx, timeoutCtxCancel := context.WithTimeout(ctx, be.cfg.RestoreTimeout)
defer timeoutCtxCancel()
if be.cfg.EnableRestore {
for _, h := range handles {
filename := be.Filename(h)
err := be.waitForRestore(timeoutCtx, filename)
if err != nil {
return err
}
debug.Log("s3 file is restored: %s", filename)
}
}
return nil
}
// waitForRestore waits for a given file to be restored.
func (be *Backend) waitForRestore(ctx context.Context, filename string) error {
for {
var objectInfo minio.ObjectInfo
// Restore requests can last many hours, so the network may fail
// temporarily. We don't need to abort in such an event.
b := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 10)
b = backoff.WithContext(b, ctx)
err := backoff.Retry(
func() (err error) {
objectInfo, err = be.client.StatObject(ctx, be.cfg.Bucket, filename, minio.StatObjectOptions{})
return
},
b,
)
if err != nil {
return err
}
ws := be.getWarmupStatus(objectInfo)
switch ws {
case warmupStatusLukewarm:
fallthrough
case warmupStatusWarm:
return nil
case warmupStatusCold:
return errors.New("waiting on S3 handle that is not warming up")
}
select {
case <-time.After(1 * time.Minute):
case <-ctx.Done():
return ctx.Err()
}
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
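A subtle point in s3.go above is useStorageClass: when an archive class (GLACIER or DEEP_ARCHIVE) is configured, only pack data files receive it, while config, index, and metadata packs keep the default class so they stay instantly readable. A standalone sketch of that decision table, with plain booleans standing in for restic's handle types (an illustration, not restic code):

package main

import (
	"fmt"
	"slices"
)

var archiveClasses = []string{"GLACIER", "DEEP_ARCHIVE"}

// useStorageClass mirrors the rule in s3.go: apply the configured class
// unless it is an archive class and the file is not a data pack.
func useStorageClass(isDataPack, isMetadata bool, storageClass string) bool {
	isDataFile := isDataPack && !isMetadata
	isArchiveClass := slices.Contains(archiveClasses, storageClass)
	return !isArchiveClass || isDataFile
}

func main() {
	fmt.Println(useStorageClass(true, false, "GLACIER"))  // true: data pack goes to GLACIER
	fmt.Println(useStorageClass(true, true, "GLACIER"))   // false: metadata pack keeps the default class
	fmt.Println(useStorageClass(false, false, "GLACIER")) // false: config/index keep the default class
	fmt.Println(useStorageClass(true, false, "STANDARD")) // true: non-archive classes apply everywhere
}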
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/s3/config.go | internal/backend/s3/config.go | package s3
import (
"net/url"
"os"
"path"
"strings"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
)
// Config contains all configuration necessary to connect to an s3 compatible
// server.
type Config struct {
Endpoint string
UseHTTP bool
Bucket string
Prefix string
Layout string `option:"layout" help:"use this backend layout (default: auto-detect) (deprecated)"`
StorageClass string `option:"storage-class" help:"set S3 storage class (STANDARD, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or REDUCED_REDUNDANCY)"`
EnableRestore bool `option:"enable-restore" help:"restore objects from GLACIER or DEEP_ARCHIVE storage classes (default: false, requires \"s3-restore\" feature flag)"`
RestoreDays int `option:"restore-days" help:"lifetime in days of restored object (default: 7)"`
RestoreTimeout time.Duration `option:"restore-timeout" help:"maximum time to wait for object transitions (default: 24h)"`
RestoreTier string `option:"restore-tier" help:"Retrieval tier at which the restore will be processed. (Standard, Bulk or Expedited) (default: Standard)"`
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
MaxRetries uint `option:"retries" help:"set the number of retries attempted"`
Region string `option:"region" help:"set region"`
BucketLookup string `option:"bucket-lookup" help:"bucket lookup style: 'auto', 'dns', or 'path'"`
ListObjectsV1 bool `option:"list-objects-v1" help:"use deprecated V1 api for ListObjects calls"`
UnsafeAnonymousAuth bool `option:"unsafe-anonymous-auth" help:"use anonymous authentication"`
// For testing only
KeyID string
Secret options.SecretString
}
// NewConfig returns a new Config with the default values filled in.
func NewConfig() Config {
return Config{
Connections: 5,
ListObjectsV1: false,
EnableRestore: false,
RestoreDays: 7,
RestoreTimeout: 24 * time.Hour,
RestoreTier: "Standard",
}
}
func init() {
options.Register("s3", Config{})
}
// ParseConfig parses the string s and extracts the s3 config. The two
// supported configuration formats are s3://host/bucketname/prefix and
// s3:host/bucketname/prefix. The host can also be a valid s3 region
// name. If no prefix is given the prefix "restic" will be used.
func ParseConfig(s string) (*Config, error) {
switch {
case strings.HasPrefix(s, "s3:http"):
// assume that a URL has been specified, parse it and
// use the host as the endpoint and the path as the
// bucket name and prefix
url, err := url.Parse(s[3:])
if err != nil {
return nil, errors.WithStack(err)
}
if url.Path == "" {
return nil, errors.New("s3: bucket name not found")
}
bucket, path, _ := strings.Cut(url.Path[1:], "/")
return createConfig(url.Host, bucket, path, url.Scheme == "http")
case strings.HasPrefix(s, "s3://"):
s = s[5:]
case strings.HasPrefix(s, "s3:"):
s = s[3:]
default:
return nil, errors.New("s3: invalid format")
}
// use the first entry of the path as the endpoint and the
// remainder as bucket name and prefix
endpoint, rest, _ := strings.Cut(s, "/")
bucket, prefix, _ := strings.Cut(rest, "/")
return createConfig(endpoint, bucket, prefix, false)
}
func createConfig(endpoint, bucket, prefix string, useHTTP bool) (*Config, error) {
if endpoint == "" {
return nil, errors.New("s3: invalid format, host/region or bucket name not found")
}
if prefix != "" {
prefix = path.Clean(prefix)
}
cfg := NewConfig()
cfg.Endpoint = endpoint
cfg.UseHTTP = useHTTP
cfg.Bucket = bucket
cfg.Prefix = prefix
return &cfg, nil
}
var _ backend.ApplyEnvironmenter = &Config{}
// ApplyEnvironment saves values from the environment to the config.
func (cfg *Config) ApplyEnvironment(prefix string) {
if cfg.Region == "" {
cfg.Region = os.Getenv(prefix + "AWS_DEFAULT_REGION")
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
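A short usage sketch of ParseConfig from the file above; the expected field values match the test table in config_test.go below. The snippet assumes it is compiled inside the restic module, since the s3 package is internal:

package main

import (
	"fmt"

	"github.com/restic/restic/internal/backend/s3"
)

func main() {
	// URL form: the host is the endpoint, the first path element the bucket,
	// the remainder the prefix; an http scheme sets UseHTTP.
	cfg, err := s3.ParseConfig("s3:http://hostname:9999/bucket/prefix/directory")
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Endpoint, cfg.Bucket, cfg.Prefix, cfg.UseHTTP)
	// hostname:9999 bucket prefix/directory true

	// Short form: the first element is the endpoint (or region).
	cfg, err = s3.ParseConfig("s3:eu-central-1/foobar/prefix/directory")
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Endpoint, cfg.Bucket, cfg.Prefix, cfg.UseHTTP)
	// eu-central-1 foobar prefix/directory false
}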
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/s3/config_test.go | internal/backend/s3/config_test.go | package s3
import (
"strings"
"testing"
"time"
"github.com/restic/restic/internal/backend/test"
)
func newTestConfig(cfg Config) Config {
if cfg.Connections == 0 {
cfg.Connections = 5
}
if cfg.RestoreDays == 0 {
cfg.RestoreDays = 7
}
if cfg.RestoreTimeout == 0 {
cfg.RestoreTimeout = 24 * time.Hour
}
if cfg.RestoreTier == "" {
cfg.RestoreTier = "Standard"
}
return cfg
}
var configTests = []test.ConfigTestData[Config]{
{S: "s3://eu-central-1/bucketname", Cfg: newTestConfig(Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "",
})},
{S: "s3://eu-central-1/bucketname/", Cfg: newTestConfig(Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "",
})},
{S: "s3://eu-central-1/bucketname/prefix/directory", Cfg: newTestConfig(Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "prefix/directory",
})},
{S: "s3://eu-central-1/bucketname/prefix/directory/", Cfg: newTestConfig(Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "prefix/directory",
})},
{S: "s3:eu-central-1/foobar", Cfg: newTestConfig(Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "",
})},
{S: "s3:eu-central-1/foobar/", Cfg: newTestConfig(Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "",
})},
{S: "s3:eu-central-1/foobar/prefix/directory", Cfg: newTestConfig(Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "prefix/directory",
})},
{S: "s3:eu-central-1/foobar/prefix/directory/", Cfg: newTestConfig(Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "prefix/directory",
})},
{S: "s3:hostname.foo/foobar", Cfg: newTestConfig(Config{
Endpoint: "hostname.foo",
Bucket: "foobar",
Prefix: "",
})},
{S: "s3:hostname.foo/foobar/prefix/directory", Cfg: newTestConfig(Config{
Endpoint: "hostname.foo",
Bucket: "foobar",
Prefix: "prefix/directory",
})},
{S: "s3:https://hostname/foobar", Cfg: newTestConfig(Config{
Endpoint: "hostname",
Bucket: "foobar",
Prefix: "",
})},
{S: "s3:https://hostname:9999/foobar", Cfg: newTestConfig(Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "",
})},
{S: "s3:https://hostname:9999/foobar/", Cfg: newTestConfig(Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "",
})},
{S: "s3:http://hostname:9999/foobar", Cfg: newTestConfig(Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "",
UseHTTP: true,
})},
{S: "s3:http://hostname:9999/foobar/", Cfg: newTestConfig(Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "",
UseHTTP: true,
})},
{S: "s3:http://hostname:9999/bucket/prefix/directory", Cfg: newTestConfig(Config{
Endpoint: "hostname:9999",
Bucket: "bucket",
Prefix: "prefix/directory",
UseHTTP: true,
})},
{S: "s3:http://hostname:9999/bucket/prefix/directory/", Cfg: newTestConfig(Config{
Endpoint: "hostname:9999",
Bucket: "bucket",
Prefix: "prefix/directory",
UseHTTP: true,
})},
}
func TestParseConfig(t *testing.T) {
test.ParseConfigTester(t, ParseConfig, configTests)
}
func TestParseError(t *testing.T) {
const prefix = "s3: invalid format,"
for _, s := range []string{"", "/", "//", "/bucket/prefix"} {
_, err := ParseConfig("s3://" + s)
if err == nil || !strings.HasPrefix(err.Error(), prefix) {
t.Errorf("expected %q, got %q", prefix, err)
}
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/limiter/static_limiter.go | internal/backend/limiter/static_limiter.go | package limiter
import (
"context"
"io"
"net/http"
"golang.org/x/time/rate"
)
type staticLimiter struct {
upstream *rate.Limiter
downstream *rate.Limiter
}
// Limits represents static upload and download limits.
// For both, zero means unlimited.
type Limits struct {
UploadKb int
DownloadKb int
}
// NewStaticLimiter constructs a Limiter with a fixed (static) upload and
// download rate cap.
func NewStaticLimiter(l Limits) Limiter {
var (
upstreamBucket *rate.Limiter
downstreamBucket *rate.Limiter
)
if l.UploadKb > 0 {
upstreamBucket = rate.NewLimiter(rate.Limit(toByteRate(l.UploadKb)), int(toByteRate(l.UploadKb)))
}
if l.DownloadKb > 0 {
downstreamBucket = rate.NewLimiter(rate.Limit(toByteRate(l.DownloadKb)), int(toByteRate(l.DownloadKb)))
}
return staticLimiter{
upstream: upstreamBucket,
downstream: downstreamBucket,
}
}
func (l staticLimiter) Upstream(r io.Reader) io.Reader {
return l.limitReader(r, l.upstream)
}
func (l staticLimiter) UpstreamWriter(w io.Writer) io.Writer {
return l.limitWriter(w, l.upstream)
}
func (l staticLimiter) Downstream(r io.Reader) io.Reader {
return l.limitReader(r, l.downstream)
}
func (l staticLimiter) DownstreamWriter(w io.Writer) io.Writer {
return l.limitWriter(w, l.downstream)
}
type roundTripper func(*http.Request) (*http.Response, error)
func (rt roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
return rt(req)
}
func (l staticLimiter) roundTripper(rt http.RoundTripper, req *http.Request) (*http.Response, error) {
type readCloser struct {
io.Reader
io.Closer
}
if req.Body != nil {
req.Body = &readCloser{
Reader: l.Upstream(req.Body),
Closer: req.Body,
}
}
res, err := rt.RoundTrip(req)
if res != nil && res.Body != nil {
res.Body = &readCloser{
Reader: l.Downstream(res.Body),
Closer: res.Body,
}
}
return res, err
}
// Transport returns an HTTP transport limited with the limiter l.
func (l staticLimiter) Transport(rt http.RoundTripper) http.RoundTripper {
return roundTripper(func(req *http.Request) (*http.Response, error) {
return l.roundTripper(rt, req)
})
}
func (l staticLimiter) limitReader(r io.Reader, b *rate.Limiter) io.Reader {
if b == nil {
return r
}
return &rateLimitedReader{r, b}
}
type rateLimitedReader struct {
reader io.Reader
bucket *rate.Limiter
}
func (r *rateLimitedReader) Read(p []byte) (int, error) {
n, err := r.reader.Read(p)
if err := consumeTokens(n, r.bucket); err != nil {
return n, err
}
return n, err
}
func (l staticLimiter) limitWriter(w io.Writer, b *rate.Limiter) io.Writer {
if b == nil {
return w
}
return &rateLimitedWriter{w, b}
}
type rateLimitedWriter struct {
writer io.Writer
bucket *rate.Limiter
}
func (w *rateLimitedWriter) Write(buf []byte) (int, error) {
if err := consumeTokens(len(buf), w.bucket); err != nil {
return 0, err
}
return w.writer.Write(buf)
}
func consumeTokens(tokens int, bucket *rate.Limiter) error {
// bucket allows waiting for at most Burst() tokens at once
maxWait := bucket.Burst()
for tokens > maxWait {
if err := bucket.WaitN(context.Background(), maxWait); err != nil {
return err
}
tokens -= maxWait
}
return bucket.WaitN(context.Background(), tokens)
}
func toByteRate(val int) float64 {
return float64(val) * 1024.
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
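The heart of the limiter above is consumeTokens: rate.Limiter.WaitN can wait for at most Burst() tokens in a single call, so larger reads and writes are split into bucket-sized waits. A standalone sketch of the same pattern directly against golang.org/x/time/rate, with the rate and sizes as illustrative values:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// consumeTokens mirrors the loop in static_limiter.go: WaitN rejects
// requests larger than Burst(), so big transfers are taken in chunks.
func consumeTokens(tokens int, bucket *rate.Limiter) error {
	maxWait := bucket.Burst()
	for tokens > maxWait {
		if err := bucket.WaitN(context.Background(), maxWait); err != nil {
			return err
		}
		tokens -= maxWait
	}
	return bucket.WaitN(context.Background(), tokens)
}

func main() {
	// 4 KiB/s with a 1 KiB burst: consuming 8 KiB takes roughly 1.75s,
	// since the initial 1 KiB burst is available immediately.
	bucket := rate.NewLimiter(rate.Limit(4096), 1024)
	start := time.Now()
	if err := consumeTokens(8192, bucket); err != nil {
		panic(err)
	}
	fmt.Printf("consumed 8 KiB in %v\n", time.Since(start).Round(10*time.Millisecond))
}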