repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restorer/fileswriter_windows_test.go | internal/restorer/fileswriter_windows_test.go | package restorer
import "syscall"
// notEmptyDirError returns the Windows-specific error value used to signal
// that a directory cannot be removed because it still contains entries.
func notEmptyDirError() error {
	return syscall.ERROR_DIR_NOT_EMPTY
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restorer/restorer.go | internal/restorer/restorer.go | package restorer
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"sync/atomic"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
restoreui "github.com/restic/restic/internal/ui/restore"
"golang.org/x/sync/errgroup"
)
// Restorer is used to restore a snapshot to a directory.
type Restorer struct {
	repo restic.Repository
	sn   *data.Snapshot
	opts Options
	// fileList maps the snapshot location of each restored file to a flag that
	// is true when only the file's metadata (not its content) was restored.
	fileList map[string]bool
	// Error is invoked for restore errors; returning a non-nil error aborts the restore.
	Error func(location string, err error) error
	Warn  func(message string)
	Info  func(message string)
	// SelectFilter determines whether the item is selectedForRestore or whether a childMayBeSelected.
	// selectedForRestore must not depend on isDir as `removeUnexpectedFiles` always passes false to isDir.
	SelectFilter func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool)
	// XattrSelectFilter decides whether an extended attribute is restored.
	XattrSelectFilter func(xattrName string) (xattrSelectedForRestore bool)
}
// restorerAbortOnAllErrors is the default Error callback: it aborts the
// restore by propagating every error unchanged.
var restorerAbortOnAllErrors = func(_ string, err error) error { return err }
// Options collects the configuration for a restore run.
type Options struct {
	DryRun          bool                // simulate only; do not modify the filesystem
	Sparse          bool                // forwarded to the file restorer to enable sparse restore
	Progress        *restoreui.Progress // progress reporting sink
	Overwrite       OverwriteBehavior   // how to handle files that already exist in the target
	Delete          bool                // delete files in the target that are not part of the snapshot
	OwnershipByName bool                // forwarded to fs.NodeRestoreMetadata when restoring metadata
}
// OverwriteBehavior decides what happens when the restore target already
// contains a file at a location to be restored.
type OverwriteBehavior int
// Constants for different overwrite behavior
const (
	// OverwriteAlways restores every selected item unconditionally.
	OverwriteAlways OverwriteBehavior = iota
	// OverwriteIfChanged is like OverwriteAlways except that it skips restoring the content
	// of files with matching size&mtime. Metadata is always restored.
	OverwriteIfChanged
	// OverwriteIfNewer only overwrites an existing file if the snapshot node is newer.
	OverwriteIfNewer
	// OverwriteNever leaves existing files untouched.
	OverwriteNever
	// OverwriteInvalid is assigned by Set when the given string cannot be parsed.
	OverwriteInvalid
)
// Set implements the method needed for pflag command flag parsing.
// Unknown values set the receiver to OverwriteInvalid and return an error.
func (c *OverwriteBehavior) Set(s string) error {
	lookup := map[string]OverwriteBehavior{
		"always":     OverwriteAlways,
		"if-changed": OverwriteIfChanged,
		"if-newer":   OverwriteIfNewer,
		"never":      OverwriteNever,
	}
	behavior, known := lookup[s]
	if !known {
		*c = OverwriteInvalid
		return fmt.Errorf("invalid overwrite behavior %q, must be one of (always|if-changed|if-newer|never)", s)
	}
	*c = behavior
	return nil
}
// String returns the textual form of the overwrite behavior, or "invalid"
// for values outside the known set.
func (c *OverwriteBehavior) String() string {
	names := map[OverwriteBehavior]string{
		OverwriteAlways:    "always",
		OverwriteIfChanged: "if-changed",
		OverwriteIfNewer:   "if-newer",
		OverwriteNever:     "never",
	}
	if name, ok := names[*c]; ok {
		return name
	}
	return "invalid"
}
// Type returns the name of this flag value type, used by pflag for help output.
func (c *OverwriteBehavior) Type() string {
	return "behavior"
}
// NewRestorer creates a restorer preloaded with the content from the snapshot id.
// Errors abort the restore by default; the select filters accept everything.
func NewRestorer(repo restic.Repository, sn *data.Snapshot, opts Options) *Restorer {
	return &Restorer{
		repo:              repo,
		sn:                sn,
		opts:              opts,
		fileList:          make(map[string]bool),
		Error:             restorerAbortOnAllErrors,
		SelectFilter:      func(string, bool) (bool, bool) { return true, true },
		XattrSelectFilter: func(string) bool { return true },
	}
}
// treeVisitor bundles the callbacks invoked while traversing a snapshot tree.
// Any callback may be nil.
type treeVisitor struct {
	// enterDir is called before descending into a directory.
	enterDir func(node *data.Node, target, location string) error
	// visitNode is called for every non-directory node selected for restore.
	visitNode func(node *data.Node, target, location string) error
	// 'entries' contains all files the snapshot contains for this node. This also includes files
	// ignored by the SelectFilter.
	leaveDir func(node *data.Node, target, location string, entries []string) error
}
// sanitizeError routes err through the configured Error callback. Context
// cancellation errors are permanent and therefore returned unchanged.
func (res *Restorer) sanitizeError(location string, err error) error {
	if err == nil || err == context.Canceled || err == context.DeadlineExceeded {
		// Context errors are permanent.
		return err
	}
	return res.Error(location, err)
}
// traverseTree traverses a tree from the repo and calls treeVisitor.
// target is the path in the file system, location within the snapshot.
func (res *Restorer) traverseTree(ctx context.Context, target string, treeID restic.ID, visitor treeVisitor) error {
	// The snapshot root maps to the path separator as its location.
	location := string(filepath.Separator)
	if visitor.enterDir != nil {
		err := res.sanitizeError(location, visitor.enterDir(nil, target, location))
		if err != nil {
			return err
		}
	}
	childFilenames, hasRestored, err := res.traverseTreeInner(ctx, target, location, treeID, visitor)
	if err != nil {
		return err
	}
	// only invoke leaveDir for the root if something below it was restored
	if hasRestored && visitor.leaveDir != nil {
		err = res.sanitizeError(location, visitor.leaveDir(nil, target, location, childFilenames))
	}
	return err
}
// traverseTreeInner recursively walks the tree with the given id and invokes
// the visitor callbacks. It returns the names of all entries of this tree
// (only collected when Delete is enabled) and whether anything in this
// subtree was selected for restore.
func (res *Restorer) traverseTreeInner(ctx context.Context, target, location string, treeID restic.ID, visitor treeVisitor) (filenames []string, hasRestored bool, err error) {
	debug.Log("%v %v %v", target, location, treeID)
	tree, err := data.LoadTree(ctx, res.repo, treeID)
	if err != nil {
		debug.Log("error loading tree %v: %v", treeID, err)
		return nil, hasRestored, res.sanitizeError(location, err)
	}
	if res.opts.Delete {
		filenames = make([]string, 0, len(tree.Nodes))
	}
	for i, node := range tree.Nodes {
		if ctx.Err() != nil {
			return nil, hasRestored, ctx.Err()
		}
		// allow GC of tree node
		tree.Nodes[i] = nil
		if res.opts.Delete {
			// just track all files included in the tree node to simplify the control flow.
			// tracking too many files does not matter except for a slightly elevated memory usage
			filenames = append(filenames, node.Name)
		}
		// ensure that the node name does not contain anything that refers to a
		// top-level directory.
		nodeName := filepath.Base(filepath.Join(string(filepath.Separator), node.Name))
		if nodeName != node.Name {
			debug.Log("node %q has invalid name %q", node.Name, nodeName)
			err := res.sanitizeError(location, errors.Errorf("invalid child node name %s", node.Name))
			if err != nil {
				return nil, hasRestored, err
			}
			// force disable deletion to prevent unexpected behavior
			res.opts.Delete = false
			continue
		}
		nodeTarget := filepath.Join(target, nodeName)
		nodeLocation := filepath.Join(location, nodeName)
		// a child's target must be strictly below the current target,
		// otherwise a crafted node name could escape the restore destination
		if target == nodeTarget || !fs.HasPathPrefix(target, nodeTarget) {
			debug.Log("target: %v %v", target, nodeTarget)
			debug.Log("node %q has invalid target path %q", node.Name, nodeTarget)
			err := res.sanitizeError(nodeLocation, errors.New("node has invalid path"))
			if err != nil {
				return nil, hasRestored, err
			}
			// force disable deletion to prevent unexpected behavior
			res.opts.Delete = false
			continue
		}
		// sockets cannot be restored
		if node.Type == data.NodeTypeSocket {
			continue
		}
		selectedForRestore, childMayBeSelected := res.SelectFilter(nodeLocation, node.Type == data.NodeTypeDir)
		debug.Log("SelectFilter returned %v %v for %q", selectedForRestore, childMayBeSelected, nodeLocation)
		if selectedForRestore {
			hasRestored = true
		}
		if node.Type == data.NodeTypeDir {
			if node.Subtree == nil {
				return nil, hasRestored, errors.Errorf("Dir without subtree in tree %v", treeID.Str())
			}
			if selectedForRestore && visitor.enterDir != nil {
				err = res.sanitizeError(nodeLocation, visitor.enterDir(node, nodeTarget, nodeLocation))
				if err != nil {
					return nil, hasRestored, err
				}
			}
			// keep track of restored child status
			// so metadata of the current directory are restored on leaveDir
			childHasRestored := false
			var childFilenames []string
			if childMayBeSelected {
				childFilenames, childHasRestored, err = res.traverseTreeInner(ctx, nodeTarget, nodeLocation, *node.Subtree, visitor)
				err = res.sanitizeError(nodeLocation, err)
				if err != nil {
					return nil, hasRestored, err
				}
				// inform the parent directory to restore parent metadata on leaveDir if needed
				if childHasRestored {
					hasRestored = true
				}
			}
			// metadata need to be restore when leaving the directory in both cases
			// selected for restore or any child of any subtree have been restored
			if (selectedForRestore || childHasRestored) && visitor.leaveDir != nil {
				err = res.sanitizeError(nodeLocation, visitor.leaveDir(node, nodeTarget, nodeLocation, childFilenames))
				if err != nil {
					return nil, hasRestored, err
				}
			}
			continue
		}
		if selectedForRestore {
			err = res.sanitizeError(nodeLocation, visitor.visitNode(node, nodeTarget, nodeLocation))
			if err != nil {
				return nil, hasRestored, err
			}
		}
	}
	return filenames, hasRestored, nil
}
// restoreNodeTo removes any preexisting item at target, recreates the node
// (symlink, device, fifo, ...) there and restores its metadata. In dry-run
// mode only progress is reported.
func (res *Restorer) restoreNodeTo(node *data.Node, target, location string) error {
	if !res.opts.DryRun {
		debug.Log("restoreNode %v %v %v", node.Name, target, location)
		// a missing target is fine; everything else is an error
		if err := fs.Remove(target); err != nil && !errors.Is(err, os.ErrNotExist) {
			return errors.Wrap(err, "RemoveNode")
		}
		err := fs.NodeCreateAt(node, target)
		if err != nil {
			debug.Log("node.CreateAt(%s) error %v", target, err)
			return err
		}
	}
	res.opts.Progress.AddProgress(location, restoreui.ActionOtherRestored, 0, 0)
	return res.restoreNodeMetadataTo(node, target, location)
}
// restoreNodeMetadataTo restores the node's metadata onto the item at target
// via fs.NodeRestoreMetadata. It is a no-op in dry-run mode.
func (res *Restorer) restoreNodeMetadataTo(node *data.Node, target, location string) error {
	if res.opts.DryRun {
		return nil
	}
	debug.Log("restoreNodeMetadata %v %v %v", node.Name, target, location)
	err := fs.NodeRestoreMetadata(node, target, res.Warn, res.XattrSelectFilter, res.opts.OwnershipByName)
	if err != nil {
		debug.Log("node.RestoreMetadata(%s) error %v", target, err)
	}
	return err
}
// restoreHardlinkAt creates a hardlink at path pointing to target, replacing
// any preexisting item at path, and then restores the node metadata onto it.
func (res *Restorer) restoreHardlinkAt(node *data.Node, target, path, location string) error {
	if !res.opts.DryRun {
		// a missing path is fine; everything else is an error
		if err := fs.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) {
			return errors.Wrap(err, "RemoveCreateHardlink")
		}
		err := fs.Link(target, path)
		if err != nil {
			return errors.WithStack(err)
		}
	}
	res.opts.Progress.AddProgress(location, restoreui.ActionOtherRestored, 0, 0)
	// TODO investigate if hardlinks have separate metadata on any supported system
	return res.restoreNodeMetadataTo(node, path, location)
}
// ensureDir makes sure target exists and is a directory, removing any
// non-directory item that is in the way. It is a no-op in dry-run mode.
func (res *Restorer) ensureDir(target string) error {
	if res.opts.DryRun {
		return nil
	}
	fi, err := fs.Lstat(target)
	if err != nil && !errors.Is(err, os.ErrNotExist) {
		return fmt.Errorf("failed to check for directory: %w", err)
	}
	if err == nil && !fi.IsDir() {
		// try to cleanup unexpected file
		if err := fs.Remove(target); err != nil {
			return fmt.Errorf("failed to remove stale item: %w", err)
		}
	}
	// create parent dir with default permissions
	// second pass #leaveDir restores dir metadata after visiting/restoring all children
	return fs.MkdirAll(target, 0700)
}
// RestoreTo creates the directories and files in the snapshot below dst.
// Before an item is created, res.Filter is called.
//
// The restore happens in two tree passes: the first creates directories and
// queues file content for the file restorer, the second creates special
// files, hardlinks and restores metadata (directories on leaveDir).
// It returns the number of files whose content was restored.
func (res *Restorer) RestoreTo(ctx context.Context, dst string) (uint64, error) {
	restoredFileCount := uint64(0)
	var err error
	if !filepath.IsAbs(dst) {
		dst, err = filepath.Abs(dst)
		if err != nil {
			return restoredFileCount, errors.Wrap(err, "Abs")
		}
	}
	if !res.opts.DryRun {
		// ensure that the target directory exists and is actually a directory
		// Using ensureDir is too aggressive here as it also removes unexpected files
		if err := fs.MkdirAll(dst, 0700); err != nil {
			return restoredFileCount, fmt.Errorf("cannot create target directory: %w", err)
		}
	}
	idx := NewHardlinkIndex[string]()
	filerestorer := newFileRestorer(dst, res.repo.LoadBlobsFromPack, res.repo.LookupBlob,
		res.repo.Connections(), res.opts.Sparse, res.opts.Delete, res.repo.StartWarmup, res.opts.Progress)
	filerestorer.Error = res.Error
	filerestorer.Info = res.Info
	debug.Log("first pass for %q", dst)
	var buf []byte
	// first tree pass: create directories and collect all files to restore
	err = res.traverseTree(ctx, dst, *res.sn.Tree, treeVisitor{
		enterDir: func(_ *data.Node, target, location string) error {
			debug.Log("first pass, enterDir: mkdir %q, leaveDir should restore metadata", location)
			// the root itself is not counted as a file
			if location != string(filepath.Separator) {
				res.opts.Progress.AddFile(0)
			}
			return res.ensureDir(target)
		},
		visitNode: func(node *data.Node, target, location string) error {
			debug.Log("first pass, visitNode: mkdir %q, leaveDir on second pass should restore metadata", location)
			if err := res.ensureDir(filepath.Dir(target)); err != nil {
				return err
			}
			// non-regular files are handled in the second pass
			if node.Type != data.NodeTypeFile {
				res.opts.Progress.AddFile(0)
				return nil
			}
			if node.Links > 1 {
				if idx.Has(node.Inode, node.DeviceID) {
					// a hardlinked file does not increase the restore size
					res.opts.Progress.AddFile(0)
					return nil
				}
				idx.Add(node.Inode, node.DeviceID, location)
			}
			buf, err = res.withOverwriteCheck(ctx, node, target, location, false, buf, func(updateMetadataOnly bool, matches *fileState) error {
				if updateMetadataOnly {
					res.opts.Progress.AddSkippedFile(location, node.Size)
				} else {
					res.opts.Progress.AddFile(node.Size)
					if !res.opts.DryRun {
						filerestorer.addFile(location, node.Content, int64(node.Size), matches)
					} else {
						action := restoreui.ActionFileUpdated
						if matches == nil {
							action = restoreui.ActionFileRestored
						}
						// immediately mark as completed
						res.opts.Progress.AddProgress(location, action, node.Size, node.Size)
					}
				}
				res.trackFile(location, updateMetadataOnly)
				if !updateMetadataOnly {
					restoredFileCount++
				}
				return nil
			})
			return err
		},
	})
	if err != nil {
		return 0, err
	}
	if !res.opts.DryRun {
		// download and write the file content collected in the first pass
		err = filerestorer.restoreFiles(ctx)
		if err != nil {
			return 0, err
		}
	}
	debug.Log("second pass for %q", dst)
	// second tree pass: restore special files and filesystem metadata
	err = res.traverseTree(ctx, dst, *res.sn.Tree, treeVisitor{
		visitNode: func(node *data.Node, target, location string) error {
			debug.Log("second pass, visitNode: restore node %q", location)
			if node.Type != data.NodeTypeFile {
				_, err := res.withOverwriteCheck(ctx, node, target, location, false, nil, func(_ bool, _ *fileState) error {
					return res.restoreNodeTo(node, target, location)
				})
				return err
			}
			// create hardlinks now that their link target exists
			if idx.Has(node.Inode, node.DeviceID) && idx.Value(node.Inode, node.DeviceID) != location {
				_, err := res.withOverwriteCheck(ctx, node, target, location, true, nil, func(_ bool, _ *fileState) error {
					return res.restoreHardlinkAt(node, filerestorer.targetPath(idx.Value(node.Inode, node.DeviceID)), target, location)
				})
				return err
			}
			if _, ok := res.hasRestoredFile(location); ok {
				return res.restoreNodeMetadataTo(node, target, location)
			}
			// don't touch skipped files
			return nil
		},
		leaveDir: func(node *data.Node, target, location string, expectedFilenames []string) error {
			if res.opts.Delete {
				if err := res.removeUnexpectedFiles(ctx, target, location, expectedFilenames); err != nil {
					return err
				}
			}
			// node is nil for the snapshot root, which has no metadata to restore
			if node == nil {
				return nil
			}
			err := res.restoreNodeMetadataTo(node, target, location)
			if err == nil {
				res.opts.Progress.AddProgress(location, restoreui.ActionDirRestored, 0, 0)
			}
			return err
		},
	})
	return restoredFileCount, err
}
// removeUnexpectedFiles deletes entries of target that are neither part of the
// snapshot (expectedFilenames) nor excluded by the SelectFilter. It must only
// be called when the Delete option is enabled.
func (res *Restorer) removeUnexpectedFiles(ctx context.Context, target, location string, expectedFilenames []string) error {
	if !res.opts.Delete {
		panic("internal error")
	}
	entries, err := fs.Readdirnames(fs.Local{}, target, fs.O_NOFOLLOW)
	if errors.Is(err, os.ErrNotExist) {
		// a missing directory contains nothing to delete
		return nil
	} else if err != nil {
		return err
	}
	// set of expected names, normalized via toComparableFilename
	// (presumably to match case-/encoding-insensitive filesystems — confirm)
	keep := map[string]struct{}{}
	for _, name := range expectedFilenames {
		keep[toComparableFilename(name)] = struct{}{}
	}
	for _, entry := range entries {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		if _, ok := keep[toComparableFilename(entry)]; ok {
			continue
		}
		nodeTarget := filepath.Join(target, entry)
		nodeLocation := filepath.Join(location, entry)
		// refuse to delete anything that would escape the target directory
		if target == nodeTarget || !fs.HasPathPrefix(target, nodeTarget) {
			return fmt.Errorf("skipping deletion due to invalid filename: %v", entry)
		}
		// TODO pass a proper value to the isDir parameter once this becomes relevant for the filters
		selectedForRestore, _ := res.SelectFilter(nodeLocation, false)
		// only delete files that were selected for restore
		if selectedForRestore {
			// First collect all files that will be deleted
			var filesToDelete []string
			err := filepath.Walk(nodeTarget, func(path string, _ os.FileInfo, err error) error {
				if err != nil {
					return err
				}
				filesToDelete = append(filesToDelete, path)
				return nil
			})
			if err != nil {
				return err
			}
			if !res.opts.DryRun {
				// Perform the deletion
				if err := fs.RemoveAll(nodeTarget); err != nil {
					return err
				}
			}
			// Report paths as deleted only after successful removal
			// (in dry-run mode nothing is removed, but the paths are still reported)
			for i := len(filesToDelete) - 1; i >= 0; i-- {
				res.opts.Progress.ReportDeletion(filesToDelete[i])
			}
		}
	}
	return nil
}
// trackFile records that the file at location was restored; metadataOnly is
// true when only its metadata (not content) needed restoring.
func (res *Restorer) trackFile(location string, metadataOnly bool) {
	res.fileList[location] = metadataOnly
}
// hasRestoredFile reports whether the file at location was restored and,
// if so, whether only its metadata was updated.
func (res *Restorer) hasRestoredFile(location string) (metadataOnly bool, ok bool) {
	metadataOnly, ok = res.fileList[location]
	return metadataOnly, ok
}
// withOverwriteCheck only invokes cb if the item at target may be replaced
// according to the configured overwrite behavior. For regular files it also
// verifies the existing content to decide whether only metadata needs
// updating. buf and the first return value are scratch space for reuse.
func (res *Restorer) withOverwriteCheck(ctx context.Context, node *data.Node, target, location string, isHardlink bool, buf []byte, cb func(updateMetadataOnly bool, matches *fileState) error) ([]byte, error) {
	overwrite, err := shouldOverwrite(res.opts.Overwrite, node, target)
	if err != nil {
		return buf, err
	} else if !overwrite {
		size := node.Size
		// a skipped hardlink does not contribute to the restore size
		if isHardlink {
			size = 0
		}
		res.opts.Progress.AddSkippedFile(location, size)
		return buf, nil
	}
	var matches *fileState
	updateMetadataOnly := false
	if node.Type == data.NodeTypeFile && !isHardlink {
		// if a file fails to verify, then matches is nil which results in restoring from scratch
		matches, buf, _ = res.verifyFile(ctx, target, node, false, res.opts.Overwrite == OverwriteIfChanged, buf)
		// skip files that are already correct completely
		updateMetadataOnly = !matches.NeedsRestore()
	}
	return buf, cb(updateMetadataOnly, matches)
}
// shouldOverwrite reports whether an existing item at destination may be
// replaced with the snapshot node, given the configured overwrite behavior.
func shouldOverwrite(overwrite OverwriteBehavior, node *data.Node, destination string) (bool, error) {
	if overwrite == OverwriteAlways || overwrite == OverwriteIfChanged {
		return true, nil
	}
	fi, err := fs.Lstat(destination)
	if errors.Is(err, os.ErrNotExist) {
		// nothing exists at the destination, so there is nothing to protect
		return true, nil
	}
	if err != nil {
		return false, err
	}
	if overwrite == OverwriteIfNewer {
		// return if node is newer
		return node.ModTime.After(fi.ModTime()), nil
	}
	if overwrite == OverwriteNever {
		// file exists
		return false, nil
	}
	panic("unknown overwrite behavior")
}
// Snapshot returns the snapshot this restorer was created for.
func (res *Restorer) Snapshot() *data.Snapshot {
	return res.sn
}
// nVerifyWorkers is the number of concurrent verification workers spawned by VerifyFiles.
const nVerifyWorkers = 8
// VerifyFiles checks whether all regular files in the snapshot res.sn
// have been successfully written to dst. It stops when it encounters an
// error. It returns that error and the number of files it has successfully
// verified.
func (res *Restorer) VerifyFiles(ctx context.Context, dst string, countRestoredFiles uint64, p *progress.Counter) (int, error) {
	type mustCheck struct {
		node *data.Node
		path string
	}
	var (
		nchecked uint64
		// buffered channel feeding the verification workers
		work = make(chan mustCheck, 2*nVerifyWorkers)
	)
	if p != nil {
		p.SetMax(countRestoredFiles)
		defer p.Done()
	}
	g, ctx := errgroup.WithContext(ctx)
	// Traverse tree and send jobs to work.
	g.Go(func() error {
		defer close(work)
		err := res.traverseTree(ctx, dst, *res.sn.Tree, treeVisitor{
			visitNode: func(node *data.Node, target, location string) error {
				if node.Type != data.NodeTypeFile {
					return nil
				}
				// only verify files whose content was actually written
				if metadataOnly, ok := res.hasRestoredFile(location); !ok || metadataOnly {
					return nil
				}
				select {
				case <-ctx.Done():
					return ctx.Err()
				case work <- mustCheck{node, target}:
					return nil
				}
			},
		})
		return err
	})
	// worker pool verifying the queued files; each worker reuses one scratch buffer
	for i := 0; i < nVerifyWorkers; i++ {
		g.Go(func() (err error) {
			var buf []byte
			for job := range work {
				_, buf, err = res.verifyFile(ctx, job.path, job.node, true, false, buf)
				err = res.sanitizeError(job.path, err)
				if err != nil || ctx.Err() != nil {
					break
				}
				p.Add(1)
				atomic.AddUint64(&nchecked, 1)
			}
			return err
		})
	}
	return int(nchecked), g.Wait()
}
// fileState records which parts of an on-disk file already match the
// snapshot: one match flag per content blob plus whether the size matches.
type fileState struct {
	blobMatches []bool
	sizeMatches bool
}
// NeedsRestore reports whether any part of the file must be rewritten.
// A nil state means nothing is known about the file, so it has to be
// restored from scratch.
func (s *fileState) NeedsRestore() bool {
	if s == nil || !s.sizeMatches {
		return true
	}
	for _, blobOK := range s.blobMatches {
		if !blobOK {
			return true
		}
	}
	return false
}
// HasMatchingBlob reports whether blob i of the file is already correct on disk.
func (s *fileState) HasMatchingBlob(i int) bool {
	if s == nil || s.blobMatches == nil || i >= len(s.blobMatches) {
		return false
	}
	return s.blobMatches[i]
}
// Verify that the file target has the contents of node.
//
// buf and the first return value are scratch space, passed around for reuse.
// Reusing buffers prevents the verifier goroutines allocating all of RAM and
// flushing the filesystem cache (at least on Linux).
func (res *Restorer) verifyFile(ctx context.Context, target string, node *data.Node, failFast bool, trustMtime bool, buf []byte) (*fileState, []byte, error) {
	f, err := fs.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0)
	if err != nil {
		return nil, buf, err
	}
	defer func() {
		_ = f.Close()
	}()
	fi, err := f.Stat()
	sizeMatches := true
	switch {
	case err != nil:
		return nil, buf, err
	case !fi.Mode().IsRegular():
		return nil, buf, errors.Errorf("Expected %s to be a regular file", target)
	case int64(node.Size) != fi.Size():
		// a size mismatch is only a hard error in failFast mode
		if failFast {
			return nil, buf, errors.Errorf("Invalid file size for %s: expected %d, got %d",
				target, node.Size, fi.Size())
		}
		sizeMatches = false
	}
	// with matching mtime and size, assume the content is unchanged without reading it
	if trustMtime && fi.ModTime().Equal(node.ModTime) && sizeMatches {
		return &fileState{nil, sizeMatches}, buf, nil
	}
	// compare every content blob with the data on disk
	matches := make([]bool, len(node.Content))
	var offset int64
	for i, blobID := range node.Content {
		if ctx.Err() != nil {
			return nil, buf, ctx.Err()
		}
		length, found := res.repo.LookupBlobSize(restic.DataBlob, blobID)
		if !found {
			return nil, buf, errors.Errorf("Unable to fetch blob %s", blobID)
		}
		// grow the scratch buffer when the blob does not fit
		if length > uint(cap(buf)) {
			buf = make([]byte, 2*length)
		}
		buf = buf[:length]
		_, err = f.ReadAt(buf, offset)
		// a short file only means remaining blobs mismatch, unless failFast is set
		if err == io.EOF && !failFast {
			sizeMatches = false
			break
		}
		if err != nil {
			return nil, buf, err
		}
		matches[i] = blobID.Equal(restic.Hash(buf))
		if failFast && !matches[i] {
			return nil, buf, errors.Errorf(
				"Unexpected content in %s, starting at offset %d",
				target, offset)
		}
		offset += int64(length)
	}
	return &fileState{matches, sizeMatches}, buf, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restorer/restorer_windows_test.go | internal/restorer/restorer_windows_test.go | //go:build windows
package restorer
import (
"context"
"encoding/json"
"math"
"os"
"path"
"path/filepath"
"syscall"
"testing"
"time"
"unsafe"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/test"
rtest "github.com/restic/restic/internal/test"
"golang.org/x/sys/windows"
)
// getBlockCount returns the number of 512-byte blocks occupied by the file
// according to kernel32's GetCompressedFileSizeW, or -1 when the call
// reports INVALID_FILE_SIZE.
func getBlockCount(t *testing.T, filename string) int64 {
	libkernel32 := windows.NewLazySystemDLL("kernel32.dll")
	err := libkernel32.Load()
	rtest.OK(t, err)
	proc := libkernel32.NewProc("GetCompressedFileSizeW")
	err = proc.Find()
	rtest.OK(t, err)
	namePtr, err := syscall.UTF16PtrFromString(filename)
	rtest.OK(t, err)
	result, _, _ := proc.Call(uintptr(unsafe.Pointer(namePtr)), 0)
	// 4294967295 == 0xFFFFFFFF == INVALID_FILE_SIZE
	const invalidFileSize = uintptr(4294967295)
	if result == invalidFileSize {
		return -1
	}
	return int64(math.Ceil(float64(result) / 512))
}
// DataStreamInfo describes one named data stream and its content.
type DataStreamInfo struct {
	name string
	data string
}
// NodeInfo describes a file or directory that a restore test operates on.
type NodeInfo struct {
	DataStreamInfo
	parentDir   string         // directory (below the test dir) containing the node
	attributes  FileAttributes // Windows file attributes to apply
	Exists      bool           // whether the node already exists before the restore
	IsDirectory bool           // whether the node is a directory instead of a file
}
// TestFileAttributeCombination runs the attribute combination tests with non-empty files.
func TestFileAttributeCombination(t *testing.T) {
	testFileAttributeCombination(t, false)
}
// TestEmptyFileAttributeCombination runs the attribute combination tests with empty files.
func TestEmptyFileAttributeCombination(t *testing.T) {
	testFileAttributeCombination(t, true)
}
// testFileAttributeCombination restores a file (optionally empty) once for
// each combination of the five Windows file attributes and verifies the result.
func testFileAttributeCombination(t *testing.T, isEmpty bool) {
	t.Parallel()
	//Generate combination of 5 attributes.
	attributeCombinations := generateCombinations(5, []bool{})
	fileName := "TestFile.txt"
	// Iterate through each attribute combination
	for _, attr1 := range attributeCombinations {
		//Set up the required file information
		fileInfo := NodeInfo{
			DataStreamInfo: getDataStreamInfo(isEmpty, fileName),
			parentDir:      "dir",
			attributes:     getFileAttributes(attr1),
			Exists:         false,
		}
		//Get the current test name
		testName := getCombinationTestName(fileInfo, fileName, fileInfo.attributes)
		//Run test
		t.Run(testName, func(t *testing.T) {
			mainFilePath := runAttributeTests(t, fileInfo, fileInfo.attributes)
			verifyFileRestores(isEmpty, mainFilePath, t, fileInfo)
		})
	}
}
// generateCombinations returns every sequence of n booleans appended to the
// given prefix, i.e. the full 2^n truth table (true-first ordering).
func generateCombinations(n int, prefix []bool) [][]bool {
	if n == 0 {
		// Return a slice containing a copy of the finished permutation.
		return [][]bool{append([]bool{}, prefix...)}
	}
	var combos [][]bool
	for _, value := range []bool{true, false} {
		extended := append(append([]bool{}, prefix...), value)
		combos = append(combos, generateCombinations(n-1, extended)...)
	}
	return combos
}
// getDataStreamInfo builds the stream description for the main test file.
// The data stays empty when isEmpty is set.
func getDataStreamInfo(isEmpty bool, fileName string) DataStreamInfo {
	streamInfo := DataStreamInfo{name: fileName}
	if !isEmpty {
		streamInfo.data = "Main file data stream."
	}
	return streamInfo
}
// getFileAttributes maps a slice of 5 booleans onto FileAttributes in the
// order: ReadOnly, Hidden, System, Archive, Encrypted.
func getFileAttributes(values []bool) FileAttributes {
	return FileAttributes{
		ReadOnly:  values[0],
		Hidden:    values[1],
		System:    values[2],
		Archive:   values[3],
		Encrypted: values[4],
	}
}
// getCombinationTestName builds a descriptive subtest name from the node's
// attributes and, for nodes that already exist, the attributes of the item
// being overwritten.
func getCombinationTestName(fi NodeInfo, fileName string, overwriteAttr FileAttributes) string {
	type flagSuffix struct {
		enabled bool
		suffix  string
	}
	appendSuffixes := func(name string, flags []flagSuffix) string {
		for _, f := range flags {
			if f.enabled {
				name += f.suffix
			}
		}
		return name
	}
	fileName = appendSuffixes(fileName, []flagSuffix{
		{fi.attributes.ReadOnly, "-ReadOnly"},
		{fi.attributes.Hidden, "-Hidden"},
		{fi.attributes.System, "-System"},
		{fi.attributes.Archive, "-Archive"},
		{fi.attributes.Encrypted, "-Encrypted"},
	})
	if fi.Exists {
		// abbreviated suffixes describe the attributes of the overwritten item
		fileName = appendSuffixes(fileName+"-Overwrite", []flagSuffix{
			{overwriteAttr.ReadOnly, "-R"},
			{overwriteAttr.Hidden, "-H"},
			{overwriteAttr.System, "-S"},
			{overwriteAttr.Archive, "-A"},
			{overwriteAttr.Encrypted, "-E"},
		})
	}
	return fileName
}
// runAttributeTests restores the node described by fileInfo into a fresh temp
// directory, verifies the restored Windows attributes and returns the full
// path of the restored node.
func runAttributeTests(t *testing.T, fileInfo NodeInfo, existingFileAttr FileAttributes) string {
	testDir := t.TempDir()
	res, _ := setupWithFileAttributes(t, fileInfo, testDir, existingFileAttr)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	_, err := res.RestoreTo(ctx, testDir)
	rtest.OK(t, err)
	mainFilePath := path.Join(testDir, fileInfo.parentDir, fileInfo.name)
	//Verify restore
	verifyFileAttributes(t, mainFilePath, fileInfo.attributes)
	return mainFilePath
}
// setupWithFileAttributes creates any preexisting on-disk state described by
// nodeInfo (including its Windows attributes), then builds a snapshot and a
// Restorer for it. It returns the restorer and the stream order used.
func setupWithFileAttributes(t *testing.T, nodeInfo NodeInfo, testDir string, existingFileAttr FileAttributes) (*Restorer, []int) {
	t.Helper()
	if nodeInfo.Exists {
		if !nodeInfo.IsDirectory {
			err := os.MkdirAll(path.Join(testDir, nodeInfo.parentDir), os.ModeDir)
			rtest.OK(t, err)
			filepath := path.Join(testDir, nodeInfo.parentDir, nodeInfo.name)
			if existingFileAttr.Encrypted {
				// encrypted files must be created via the Windows API
				err := createEncryptedFileWriteData(filepath, nodeInfo)
				rtest.OK(t, err)
			} else {
				// Write the data to the file
				file, err := os.OpenFile(path.Clean(filepath), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
				rtest.OK(t, err)
				_, err = file.Write([]byte(nodeInfo.data))
				rtest.OK(t, err)
				err = file.Close()
				rtest.OK(t, err)
			}
		} else {
			err := os.MkdirAll(path.Join(testDir, nodeInfo.parentDir, nodeInfo.name), os.ModeDir)
			rtest.OK(t, err)
		}
		// apply the requested attributes to the preexisting node
		pathPointer, err := syscall.UTF16PtrFromString(path.Join(testDir, nodeInfo.parentDir, nodeInfo.name))
		rtest.OK(t, err)
		syscall.SetFileAttributes(pathPointer, getAttributeValue(&existingFileAttr))
	}
	index := 0
	order := []int{}
	streams := []DataStreamInfo{}
	if !nodeInfo.IsDirectory {
		order = append(order, index)
		index++
		streams = append(streams, nodeInfo.DataStreamInfo)
	}
	return setup(t, getNodes(nodeInfo.parentDir, nodeInfo.name, order, streams, nodeInfo.IsDirectory, &nodeInfo.attributes)), order
}
// createEncryptedFileWriteData creates a file with the Windows encrypted
// attribute set and writes the test data into it.
func createEncryptedFileWriteData(filepath string, fileInfo NodeInfo) (err error) {
	var ptr *uint16
	if ptr, err = windows.UTF16PtrFromString(filepath); err != nil {
		return err
	}
	var handle windows.Handle
	//Create the file with encrypted flag
	if handle, err = windows.CreateFile(ptr, uint32(windows.GENERIC_READ|windows.GENERIC_WRITE), uint32(windows.FILE_SHARE_READ), nil, uint32(windows.CREATE_ALWAYS), windows.FILE_ATTRIBUTE_ENCRYPTED, 0); err != nil {
		return err
	}
	//Write data to file
	if _, err = windows.Write(handle, []byte(fileInfo.data)); err != nil {
		return err
	}
	//Close handle
	return windows.CloseHandle(handle)
}
// setup stores the given nodes (with their Windows attributes converted to
// generic attributes) as a snapshot in a test repository and returns a
// restorer for that snapshot.
func setup(t *testing.T, nodesMap map[string]Node) *Restorer {
	repo := repository.TestRepository(t)
	getFileAttributes := func(attr *FileAttributes, isDir bool) (genericAttributes map[data.GenericAttributeType]json.RawMessage) {
		if attr == nil {
			return
		}
		fileattr := getAttributeValue(attr)
		if isDir {
			//If the node is a directory add FILE_ATTRIBUTE_DIRECTORY to attributes
			fileattr |= windows.FILE_ATTRIBUTE_DIRECTORY
		}
		attrs, err := data.WindowsAttrsToGenericAttributes(data.WindowsAttributes{FileAttributes: &fileattr})
		test.OK(t, err)
		return attrs
	}
	sn, _ := saveSnapshot(t, repo, Snapshot{
		Nodes: nodesMap,
	}, getFileAttributes)
	res := NewRestorer(repo, sn, Options{})
	return res
}
// getAttributeValue converts a FileAttributes struct into the corresponding
// Windows FILE_ATTRIBUTE_* bit mask.
func getAttributeValue(attr *FileAttributes) uint32 {
	var fileattr uint32
	flags := []struct {
		enabled bool
		value   uint32
	}{
		{attr.ReadOnly, windows.FILE_ATTRIBUTE_READONLY},
		{attr.Hidden, windows.FILE_ATTRIBUTE_HIDDEN},
		{attr.Encrypted, windows.FILE_ATTRIBUTE_ENCRYPTED},
		{attr.Archive, windows.FILE_ATTRIBUTE_ARCHIVE},
		{attr.System, windows.FILE_ATTRIBUTE_SYSTEM},
	}
	for _, f := range flags {
		if f.enabled {
			fileattr |= f.value
		}
	}
	return fileattr
}
// getNodes builds the snapshot node tree for one test: a parent directory
// named dir containing either a directory node or the file streams in the
// given order, with the requested attributes applied.
func getNodes(dir string, mainNodeName string, order []int, streams []DataStreamInfo, isDirectory bool, attributes *FileAttributes) map[string]Node {
	var mode os.FileMode
	if isDirectory {
		// 2147484159 == os.ModeDir | 0o777
		mode = os.FileMode(2147484159)
	} else {
		if attributes != nil && attributes.ReadOnly {
			mode = os.FileMode(0o444)
		} else {
			mode = os.FileMode(0o666)
		}
	}
	getFileNodes := func() map[string]Node {
		nodes := map[string]Node{}
		if isDirectory {
			//Add a directory node at the same level as the other streams
			nodes[mainNodeName] = Dir{
				ModTime:    time.Now(),
				attributes: attributes,
				Mode:       mode,
			}
		}
		if len(streams) > 0 {
			for _, index := range order {
				stream := streams[index]
				var attr *FileAttributes = nil
				if mainNodeName == stream.name {
					attr = attributes
				} else if attributes != nil && attributes.Encrypted {
					//Set encrypted attribute
					attr = &FileAttributes{Encrypted: true}
				}
				nodes[stream.name] = File{
					ModTime:    time.Now(),
					Data:       stream.data,
					Mode:       mode,
					attributes: attr,
				}
			}
		}
		return nodes
	}
	return map[string]Node{
		dir: Dir{
			Mode:    normalizeFileMode(0750 | mode),
			ModTime: time.Now(),
			Nodes:   getFileNodes(),
		},
	}
}
// verifyFileAttributes asserts that exactly the requested Windows attribute
// bits are present on the file at mainFilePath.
func verifyFileAttributes(t *testing.T, mainFilePath string, attr FileAttributes) {
	ptr, err := windows.UTF16PtrFromString(mainFilePath)
	rtest.OK(t, err)
	//Get file attributes using syscall
	fileAttributes, err := syscall.GetFileAttributes(ptr)
	rtest.OK(t, err)
	checks := []struct {
		expected bool
		mask     uint32
		label    string
	}{
		{attr.ReadOnly, windows.FILE_ATTRIBUTE_READONLY, "read only"},
		{attr.Hidden, windows.FILE_ATTRIBUTE_HIDDEN, "hidden"},
		{attr.System, windows.FILE_ATTRIBUTE_SYSTEM, "system"},
		{attr.Archive, windows.FILE_ATTRIBUTE_ARCHIVE, "archive"},
		{attr.Encrypted, windows.FILE_ATTRIBUTE_ENCRYPTED, "encrypted"},
	}
	//Test positive and negative scenarios
	for _, check := range checks {
		if check.expected {
			rtest.Assert(t, fileAttributes&check.mask != 0, "Expected "+check.label+" attribute.")
		} else {
			rtest.Assert(t, fileAttributes&check.mask == 0, "Unexpected "+check.label+" attribute.")
		}
	}
}
// verifyFileRestores checks the restored file at mainFilePath. For empty files
// only existence is checked; otherwise size and content are verified as well
// via verifyMainFileRestore.
func verifyFileRestores(isEmpty bool, mainFilePath string, t *testing.T, fileInfo NodeInfo) {
	if isEmpty {
		_, err1 := os.Stat(mainFilePath)
		rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The file "+fileInfo.name+" does not exist")
	} else {
		verifyMainFileRestore(t, mainFilePath, fileInfo)
	}
}
// verifyMainFileRestore asserts that the file at mainFilePath exists, is
// non-empty, and that its content equals the data recorded in fileInfo.
func verifyMainFileRestore(t *testing.T, mainFilePath string, fileInfo NodeInfo) {
	fi, err1 := os.Stat(mainFilePath)
	rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The file "+fileInfo.name+" does not exist")
	// NOTE(review): fi is only safe to dereference here if rtest.Assert aborts
	// the test on failure; a Stat error other than ErrNotExist would leave fi
	// nil — confirm rtest.Assert is fatal.
	size := fi.Size()
	rtest.Assert(t, size > 0, "The file "+fileInfo.name+" exists but is empty")
	content, err := os.ReadFile(mainFilePath)
	rtest.OK(t, err)
	rtest.Assert(t, string(content) == fileInfo.data, "The file "+fileInfo.name+" exists but the content is not overwritten")
}
// TestDirAttributeCombination restores a fresh directory for every combination
// of the four directory-valid Windows attributes (hidden, system, archive,
// encrypted) and verifies the directory exists afterwards.
func TestDirAttributeCombination(t *testing.T) {
	t.Parallel()
	// 4 boolean attributes -> 16 combinations
	attributeCombinations := generateCombinations(4, []bool{})
	dirName := "TestDir"
	// Iterate through each attribute combination
	for _, attr1 := range attributeCombinations {
		//Set up the required directory information
		dirInfo := NodeInfo{
			DataStreamInfo: DataStreamInfo{
				name: dirName,
			},
			parentDir: "dir",
			attributes: getDirFileAttributes(attr1),
			Exists: false,
			IsDirectory: true,
		}
		//Get the current test name
		testName := getCombinationTestName(dirInfo, dirName, dirInfo.attributes)
		//Run test
		t.Run(testName, func(t *testing.T) {
			mainDirPath := runAttributeTests(t, dirInfo, dirInfo.attributes)
			//Check directory exists
			_, err1 := os.Stat(mainDirPath)
			rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The directory "+dirInfo.name+" does not exist")
		})
	}
}
// getDirFileAttributes maps a slice of four booleans onto the attribute flags
// that are valid for directories; values must have at least four elements.
func getDirFileAttributes(values []bool) FileAttributes {
	return FileAttributes{
		// readonly not valid for directories
		Hidden: values[0],
		System: values[1],
		Archive: values[2],
		Encrypted: values[3],
	}
}
// TestFileAttributeCombinationsOverwrite runs the overwrite attribute matrix
// for files with non-empty content.
func TestFileAttributeCombinationsOverwrite(t *testing.T) {
	testFileAttributeCombinationsOverwrite(t, false)
}
// TestEmptyFileAttributeCombinationsOverwrite runs the overwrite attribute
// matrix for empty files.
func TestEmptyFileAttributeCombinationsOverwrite(t *testing.T) {
	testFileAttributeCombinationsOverwrite(t, true)
}
// testFileAttributeCombinationsOverwrite restores a file over an existing one
// (Exists: true) for every combination of five file attributes crossed with
// every overwrite attribute combination, then verifies the restored file.
// isEmpty selects whether the restored file carries content.
func testFileAttributeCombinationsOverwrite(t *testing.T, isEmpty bool) {
	t.Parallel()
	// capture debug log output to aid failure analysis
	debugEnabled := debug.TestLogToStderr(t)
	if debugEnabled {
		defer debug.TestDisableLog(t)
	}
	//Get attribute combinations
	attributeCombinations := generateCombinations(5, []bool{})
	//Get overwrite file attribute combinations
	overwriteCombinations := generateCombinations(5, []bool{})
	fileName := "TestOverwriteFile"
	//Iterate through each attribute combination
	for _, attr1 := range attributeCombinations {
		fileInfo := NodeInfo{
			DataStreamInfo: getDataStreamInfo(isEmpty, fileName),
			parentDir: "dir",
			attributes: getFileAttributes(attr1),
			Exists: true,
		}
		overwriteFileAttributes := []FileAttributes{}
		for _, overwrite := range overwriteCombinations {
			overwriteFileAttributes = append(overwriteFileAttributes, getFileAttributes(overwrite))
		}
		//Iterate through each overwrite attribute combination
		for _, overwriteFileAttr := range overwriteFileAttributes {
			//Get the test name
			testName := getCombinationTestName(fileInfo, fileName, overwriteFileAttr)
			//Run test
			t.Run(testName, func(t *testing.T) {
				mainFilePath := runAttributeTests(t, fileInfo, overwriteFileAttr)
				verifyFileRestores(isEmpty, mainFilePath, t, fileInfo)
			})
		}
	}
}
// TestDirAttributeCombinationsOverwrite restores a directory over an existing
// one (Exists: true) for every combination of initial directory attributes
// crossed with every combination of overwriting attributes, and verifies the
// directory exists after the restore.
func TestDirAttributeCombinationsOverwrite(t *testing.T) {
	t.Parallel()
	//Get attribute combinations
	attributeCombinations := generateCombinations(4, []bool{})
	//Get overwrite dir attribute combinations
	overwriteCombinations := generateCombinations(4, []bool{})
	dirName := "TestOverwriteDir"
	//Iterate through each attribute combination
	for _, attr1 := range attributeCombinations {
		dirInfo := NodeInfo{
			DataStreamInfo: DataStreamInfo{
				name: dirName,
			},
			parentDir:   "dir",
			attributes:  getDirFileAttributes(attr1),
			Exists:      true,
			IsDirectory: true,
		}
		overwriteDirFileAttributes := []FileAttributes{}
		for _, overwrite := range overwriteCombinations {
			overwriteDirFileAttributes = append(overwriteDirFileAttributes, getDirFileAttributes(overwrite))
		}
		//Iterate through each overwrite attribute combinations
		for _, overwriteDirAttr := range overwriteDirFileAttributes {
			//Get the test name
			testName := getCombinationTestName(dirInfo, dirName, overwriteDirAttr)
			//Run test
			t.Run(testName, func(t *testing.T) {
				// Pass the overwrite attributes here (not dirInfo.attributes);
				// otherwise every subtest of this inner loop would exercise
				// the same attribute set and the overwrite combinations would
				// never actually be tested. This mirrors the file-based
				// counterpart which passes overwriteFileAttr.
				mainDirPath := runAttributeTests(t, dirInfo, overwriteDirAttr)
				//Check directory exists
				_, err1 := os.Stat(mainDirPath)
				rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The directory "+dirInfo.name+" does not exist")
			})
		}
	}
}
// TestRestoreDeleteCaseInsensitive verifies that restore --delete does not
// remove an on-disk file whose name differs from a snapshot entry only by
// case ("anotherfile" vs "AnotherfilE"), which matters on case-insensitive
// filesystems such as NTFS.
func TestRestoreDeleteCaseInsensitive(t *testing.T) {
	repo := repository.TestRepository(t)
	tempdir := rtest.TempDir(t)
	sn, _ := saveSnapshot(t, repo, Snapshot{
		Nodes: map[string]Node{
			"anotherfile": File{Data: "content: file\n"},
		},
	}, noopGetGenericAttributes)
	// should delete files that no longer exist in the snapshot
	deleteSn, _ := saveSnapshot(t, repo, Snapshot{
		Nodes: map[string]Node{
			"AnotherfilE": File{Data: "content: file\n"},
		},
	}, noopGetGenericAttributes)
	// first restore the lowercase variant
	res := NewRestorer(repo, sn, Options{})
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	_, err := res.RestoreTo(ctx, tempdir)
	rtest.OK(t, err)
	// then restore the uppercase variant with deletion enabled
	res = NewRestorer(repo, deleteSn, Options{Delete: true})
	_, err = res.RestoreTo(ctx, tempdir)
	rtest.OK(t, err)
	// anotherfile must still exist
	_, err = os.Stat(filepath.Join(tempdir, "anotherfile"))
	rtest.OK(t, err)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restorer/filerestorer.go | internal/restorer/filerestorer.go | package restorer
import (
"context"
"fmt"
"path/filepath"
"sync"
"golang.org/x/sync/errgroup"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/feature"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/restore"
)
const (
	// largeFileBlobCount is the threshold above which a file's blob list is
	// converted into a per-pack map (see restoreFiles) to speed up lookups.
	largeFileBlobCount = 25
)
// information about regular file being restored
type fileInfo struct {
	lock sync.Mutex
	inProgress bool
	sparse bool
	size int64
	location string // file on local filesystem relative to restorer basedir
	blobs interface{} // blobs of the file; restic.IDs for small files, map[restic.ID][]fileBlobInfo for large ones
	state *fileState // nil means the file is restored from scratch
}
// fileBlobInfo locates one blob within a file being restored.
type fileBlobInfo struct {
	id restic.ID // the blob id
	offset int64 // blob offset in the file
}
// information about a data pack required to restore one or more files
type packInfo struct {
	id restic.ID // the pack id
	files map[*fileInfo]struct{} // set of files that use blobs from this pack
}
// blobsLoaderFn streams the given blobs of a pack and invokes handleBlobFn for each.
type blobsLoaderFn func(ctx context.Context, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error
// startWarmupFn kicks off a warmup of the given packs (e.g. cold storage retrieval).
type startWarmupFn func(context.Context, restic.IDSet) (restic.WarmupJob, error)
// fileRestorer restores set of files
type fileRestorer struct {
	idx func(restic.BlobType, restic.ID) []restic.PackedBlob
	blobsLoader blobsLoaderFn
	startWarmup startWarmupFn
	workerCount int
	filesWriter *filesWriter
	zeroChunk restic.ID // blob ID of the all-zero chunk, used for sparse detection
	sparse bool
	progress *restore.Progress
	allowRecursiveDelete bool
	dst string // restore destination directory
	files []*fileInfo // files queued via addFile; released after restoreFiles
	Error func(string, error) error
	Info func(string)
}
// newFileRestorer builds a fileRestorer writing to dst. connections bounds the
// number of concurrent pack downloads; sparse enables hole-punching for
// all-zero regions; allowRecursiveDelete is forwarded to file creation.
func newFileRestorer(dst string,
	blobsLoader blobsLoaderFn,
	idx func(restic.BlobType, restic.ID) []restic.PackedBlob,
	connections uint,
	sparse bool,
	allowRecursiveDelete bool,
	startWarmup startWarmupFn,
	progress *restore.Progress) *fileRestorer {
	// as packs are streamed the concurrency is limited by IO
	workerCount := int(connections)
	return &fileRestorer{
		idx: idx,
		blobsLoader: blobsLoader,
		startWarmup: startWarmup,
		filesWriter: newFilesWriter(workerCount, allowRecursiveDelete),
		zeroChunk: repository.ZeroChunk(),
		sparse: sparse,
		progress: progress,
		allowRecursiveDelete: allowRecursiveDelete,
		workerCount: workerCount,
		dst: dst,
		// default error handler aborts on the first error; callers may override
		Error: restorerAbortOnAllErrors,
		Info: func(_ string) {},
	}
}
// addFile queues a file for restore. content lists the file's data blobs in
// order, size is the expected final size, and state carries the (optional)
// comparison result against an existing file on disk.
func (r *fileRestorer) addFile(location string, content restic.IDs, size int64, state *fileState) {
	r.files = append(r.files, &fileInfo{location: location, blobs: content, size: size, state: state})
}
// targetPath maps a snapshot-relative location to an absolute path below the
// restore destination.
func (r *fileRestorer) targetPath(location string) string {
	return filepath.Join(r.dst, location)
}
// forEachBlob invokes fn for every blob ID in blobIDs, in order, passing the
// pack containing the blob, the blob itself, its index within blobIDs and the
// running byte offset of the blob inside the restored file. It returns an
// error if any blob is missing from the repository index.
func (r *fileRestorer) forEachBlob(blobIDs []restic.ID, fn func(packID restic.ID, packBlob restic.Blob, idx int, fileOffset int64)) error {
	if len(blobIDs) == 0 {
		return nil
	}
	var offset int64
	for idx, blobID := range blobIDs {
		located := r.idx(restic.DataBlob, blobID)
		if len(located) == 0 {
			return errors.Errorf("Unknown blob %s", blobID.String())
		}
		// a blob may be stored in several packs; any copy works, take the first
		first := located[0]
		fn(first.PackID, first.Blob, idx, offset)
		offset += int64(first.DataLength())
	}
	return nil
}
// restoreFiles groups the queued files' blobs by pack, optionally warms up
// cold-storage packs, then downloads the packs with workerCount goroutines and
// writes their blobs into the target files. It returns the first fatal error.
func (r *fileRestorer) restoreFiles(ctx context.Context) error {
	packs := make(map[restic.ID]*packInfo) // all packs
	// Process packs in order of first access. While this cannot guarantee
	// that file chunks are restored sequentially, it offers a good enough
	// approximation to shorten restore times by up to 19% in some test.
	var packOrder restic.IDs
	// create packInfo from fileInfo
	for _, file := range r.files {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		fileBlobs := file.blobs.(restic.IDs)
		largeFile := len(fileBlobs) > largeFileBlobCount
		var packsMap map[restic.ID][]fileBlobInfo
		if largeFile {
			// large files switch to a pack -> blobs map so downloadPack does
			// not need to rescan the full blob list for each pack
			packsMap = make(map[restic.ID][]fileBlobInfo)
			file.blobs = packsMap
		}
		restoredBlobs := false
		err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int, fileOffset int64) {
			if !file.state.HasMatchingBlob(idx) {
				if largeFile {
					packsMap[packID] = append(packsMap[packID], fileBlobInfo{id: blob.ID, offset: fileOffset})
				}
				restoredBlobs = true
			} else {
				// blob already matches on disk; only account for progress
				r.reportBlobProgress(file, uint64(blob.DataLength()))
				// completely ignore blob
				return
			}
			pack, ok := packs[packID]
			if !ok {
				pack = &packInfo{
					id: packID,
					files: make(map[*fileInfo]struct{}),
				}
				packs[packID] = pack
				packOrder = append(packOrder, packID)
			}
			pack.files[file] = struct{}{}
			if blob.ID.Equal(r.zeroChunk) {
				file.sparse = r.sparse
			}
		})
		if err != nil {
			// repository index is messed up, can't do anything
			return err
		}
		if len(fileBlobs) == 1 {
			// no need to preallocate files with a single block, thus we can always consider them to be sparse
			// in addition, a short chunk will never match r.zeroChunk which would prevent sparseness for short files
			file.sparse = r.sparse
		}
		if file.state != nil {
			// The restorer currently cannot punch new holes into an existing files.
			// Thus sections that contained data but should be sparse after restoring
			// the snapshot would still contain the old data resulting in a corrupt restore.
			file.sparse = false
		}
		// empty file or one with already uptodate content. Make sure that the file size is correct
		if !restoredBlobs {
			err := r.truncateFileToSize(file.location, file.size)
			if errFile := r.sanitizeError(file, err); errFile != nil {
				return errFile
			}
			// the progress events were already sent for non-zero size files
			if file.size == 0 {
				r.reportBlobProgress(file, 0)
			}
		}
	}
	// drop no longer necessary file list
	r.files = nil
	if feature.Flag.Enabled(feature.S3Restore) {
		// ask cold storage (e.g. S3 Glacier) to stage the packs before downloading
		warmupJob, err := r.startWarmup(ctx, restic.NewIDSet(packOrder...))
		if err != nil {
			return err
		}
		if warmupJob.HandleCount() != 0 {
			r.Info(fmt.Sprintf("warming up %d packs from cold storage, this may take a while...", warmupJob.HandleCount()))
			if err := warmupJob.Wait(ctx); err != nil {
				return err
			}
		}
	}
	wg, ctx := errgroup.WithContext(ctx)
	downloadCh := make(chan *packInfo)
	worker := func() error {
		for pack := range downloadCh {
			if err := r.downloadPack(ctx, pack); err != nil {
				return err
			}
		}
		return nil
	}
	for i := 0; i < r.workerCount; i++ {
		wg.Go(worker)
	}
	// the main restore loop
	wg.Go(func() error {
		defer close(downloadCh)
		for _, id := range packOrder {
			pack := packs[id]
			// allow garbage collection of packInfo
			delete(packs, id)
			select {
			case <-ctx.Done():
				return ctx.Err()
			case downloadCh <- pack:
				debug.Log("Scheduled download pack %s", pack.id.Str())
			}
		}
		return nil
	})
	return wg.Wait()
}
// truncateFileToSize (re)creates the file at location with the given size
// without writing any blob data; used when all content is already up to date.
func (r *fileRestorer) truncateFileToSize(location string, size int64) error {
	f, err := createFile(r.targetPath(location), size, false, r.allowRecursiveDelete)
	if err != nil {
		return err
	}
	return f.Close()
}
// blobToFileOffsetsMapping maps a blob ID to the blob itself and to every file
// (with every offset inside each file) where the blob must be written.
type blobToFileOffsetsMapping map[restic.ID]struct {
	files map[*fileInfo][]int64 // file -> offsets (plural!) of the blob in the file
	blob restic.Blob
}
// downloadPack collects all blob -> file/offset mappings for one pack, streams
// the pack's blobs once and writes each blob to all files that need it.
// Errors for already-processed blobs are not re-reported.
func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
	// calculate blob->[]files->[]offsets mappings
	blobs := make(blobToFileOffsetsMapping)
	for file := range pack.files {
		addBlob := func(blob restic.Blob, fileOffset int64) {
			blobInfo, ok := blobs[blob.ID]
			if !ok {
				blobInfo.files = make(map[*fileInfo][]int64)
				blobInfo.blob = blob
				blobs[blob.ID] = blobInfo
			}
			blobInfo.files[file] = append(blobInfo.files[file], fileOffset)
		}
		// small files still carry the plain blob ID list ...
		if fileBlobs, ok := file.blobs.(restic.IDs); ok {
			err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int, fileOffset int64) {
				if packID.Equal(pack.id) && !file.state.HasMatchingBlob(idx) {
					addBlob(blob, fileOffset)
				}
			})
			if err != nil {
				// restoreFiles should have caught this error before
				panic(err)
			}
		} else if packsMap, ok := file.blobs.(map[restic.ID][]fileBlobInfo); ok {
			// ... large files were converted to a pack -> blobs map in restoreFiles
			for _, blob := range packsMap[pack.id] {
				idxPacks := r.idx(restic.DataBlob, blob.id)
				for _, idxPack := range idxPacks {
					if idxPack.PackID.Equal(pack.id) {
						addBlob(idxPack.Blob, blob.offset)
						break
					}
				}
			}
		}
	}
	// track already processed blobs for precise error reporting
	processedBlobs := restic.NewBlobSet()
	err := r.downloadBlobs(ctx, pack.id, blobs, processedBlobs)
	return r.reportError(blobs, processedBlobs, err)
}
// sanitizeError passes nil and context cancellation errors through unchanged
// (those are permanent) and routes every other error through the configured
// per-file error callback, which may swallow or propagate it.
func (r *fileRestorer) sanitizeError(file *fileInfo, err error) error {
	if err == nil || err == context.Canceled || err == context.DeadlineExceeded {
		// Context errors are permanent.
		return err
	}
	return r.Error(file.location, err)
}
// reportError forwards err once per file that still had unprocessed blobs in
// the pack; blobs already handled by downloadBlobs were reported there.
// Returns the first error the per-file callback chose to propagate.
func (r *fileRestorer) reportError(blobs blobToFileOffsetsMapping, processedBlobs restic.BlobSet, err error) error {
	if err == nil {
		return nil
	}
	// only report error for not yet processed blobs
	affectedFiles := make(map[*fileInfo]struct{})
	for _, entry := range blobs {
		if processedBlobs.Has(entry.blob.BlobHandle) {
			continue
		}
		for file := range entry.files {
			affectedFiles[file] = struct{}{}
		}
	}
	for file := range affectedFiles {
		if errFile := r.sanitizeError(file, err); errFile != nil {
			return errFile
		}
	}
	return nil
}
// downloadBlobs streams the listed blobs of one pack and writes each blob to
// every (file, offset) recorded in blobs. Successfully handled blobs are added
// to processedBlobs so that reportError can skip them.
func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID,
	blobs blobToFileOffsetsMapping, processedBlobs restic.BlobSet) error {
	blobList := make([]restic.Blob, 0, len(blobs))
	for _, entry := range blobs {
		blobList = append(blobList, entry.blob)
	}
	return r.blobsLoader(ctx, packID, blobList,
		func(h restic.BlobHandle, blobData []byte, err error) error {
			processedBlobs.Insert(h)
			blob := blobs[h.ID]
			if err != nil {
				// per-blob load failure: report for every affected file,
				// continue with the remaining blobs unless the callback aborts
				for file := range blob.files {
					if errFile := r.sanitizeError(file, err); errFile != nil {
						return errFile
					}
				}
				return nil
			}
			for file, offsets := range blob.files {
				for _, offset := range offsets {
					// avoid long cancellation delays for frequently used blobs
					if ctx.Err() != nil {
						return ctx.Err()
					}
					writeToFile := func() error {
						// this looks overly complicated and needs explanation
						// two competing requirements:
						// - must create the file once and only once
						// - should allow concurrent writes to the file
						// so write the first blob while holding file lock
						// write other blobs after releasing the lock
						createSize := int64(-1)
						file.lock.Lock()
						if file.inProgress {
							file.lock.Unlock()
						} else {
							defer file.lock.Unlock()
							file.inProgress = true
							createSize = file.size
						}
						writeErr := r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file.sparse)
						r.reportBlobProgress(file, uint64(len(blobData)))
						return writeErr
					}
					err := r.sanitizeError(file, writeToFile())
					if err != nil {
						return err
					}
				}
			}
			return nil
		})
}
// reportBlobProgress records blobSize bytes of progress for file, tagging the
// event as a fresh restore when no prior on-disk state exists and as an update
// otherwise.
func (r *fileRestorer) reportBlobProgress(file *fileInfo, blobSize uint64) {
	action := restore.ActionFileRestored
	if file.state != nil {
		// a non-nil state means an existing file is being brought up to date
		action = restore.ActionFileUpdated
	}
	r.progress.AddProgress(file.location, action, blobSize, uint64(file.size))
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restorer/hardlinks_index_test.go | internal/restorer/hardlinks_index_test.go | package restorer_test
import (
"testing"
"github.com/restic/restic/internal/restorer"
rtest "github.com/restic/restic/internal/test"
)
// TestHardLinks contains various tests for HardlinkIndex: adding entries,
// looking up values, membership checks and removal.
func TestHardLinks(t *testing.T) {
	index := restorer.NewHardlinkIndex[string]()
	// register two (inode, device) pairs with their link names
	index.Add(1, 2, "inode1-file1-on-device2")
	index.Add(2, 3, "inode2-file2-on-device3")
	// values round-trip
	rtest.Equals(t, index.Value(1, 2), "inode1-file1-on-device2")
	rtest.Equals(t, index.Value(2, 3), "inode2-file2-on-device3")
	// membership: present pair is found, unknown device is not
	rtest.Equals(t, index.Has(1, 2), true)
	rtest.Equals(t, index.Has(1, 3), false)
	// removal makes the pair unknown again
	index.Remove(1, 2)
	rtest.Equals(t, index.Has(1, 2), false)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restorer/doc.go | internal/restorer/doc.go | // Package restorer contains code to restore data from a repository.
//
// The Restorer tries to keep the number of backend requests minimal. It does
// this by downloading all required blobs of a pack file with a single backend
// request and avoiding repeated downloads of the same pack. In addition,
// several pack files are fetched concurrently.
//
// Here is high-level pseudo-code of how the Restorer attempts to achieve
// these goals:
//
// while there are packs to process
// choose a pack to process [1]
// retrieve the pack from the backend [2]
// write pack blobs to the files that need them [3]
//
// Retrieval of repository packs (step [2]) and writing target files (step [3])
// are performed concurrently on multiple goroutines.
//
// Implementation does not guarantee order in which blobs are written to the
// target files and, for example, the last blob of a file can be written to the
// file before any of the preceding file blobs. It is therefore possible to
// have gaps in the data written to the target files if the restore fails or
// is interrupted by the user.
package restorer
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restorer/restorer_unix.go | internal/restorer/restorer_unix.go | //go:build !windows
package restorer
// toComparableFilename returns a filename suitable for equality checks. On Windows, it returns the
// uppercase version of the string. On all other systems, it returns the unmodified filename.
// This is the non-Windows variant: comparisons are case-sensitive, so the path
// is returned as-is.
func toComparableFilename(path string) string {
	return path
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restorer/sparsewrite.go | internal/restorer/sparsewrite.go | package restorer
import (
"github.com/restic/restic/internal/restic"
)
// WriteAt writes p to f.File at offset. For sparse files it skips writing the
// longest all-zero prefix of p, leaving a hole that a previous WriteAt or
// Truncate has already zero-filled. The reported n always covers all of p.
func (f *partialFile) WriteAt(p []byte, offset int64) (n int, err error) {
	if !f.sparse {
		// non-sparse files take the plain write path
		return f.File.WriteAt(p, offset)
	}
	n = len(p)
	// Skip the longest all-zero prefix of p.
	// If it's long enough, we can punch a hole in the file.
	skipped := restic.ZeroPrefixLen(p)
	p = p[skipped:]
	offset += int64(skipped)
	switch {
	case len(p) == 0:
		// All zeros, file already big enough. A previous WriteAt or
		// Truncate will have produced the zeros in f.File.
	default:
		// write the remaining non-zero tail; count the skipped prefix as written
		var n2 int
		n2, err = f.File.WriteAt(p, offset)
		n = skipped + n2
	}
	return n, err
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restorer/restorer_windows.go | internal/restorer/restorer_windows.go | //go:build windows
package restorer
import "strings"
// toComparableFilename returns a filename suitable for equality checks. On Windows, it returns the
// uppercase version of the string. On all other systems, it returns the unmodified filename.
func toComparableFilename(path string) string {
	// apparently NTFS internally uppercases filenames for comparison
	return strings.ToUpper(path)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/textfile/read_test.go | internal/textfile/read_test.go | package textfile
import (
"bytes"
"encoding/hex"
"os"
"testing"
)
// writeTempfile writes data to a new temporary file and returns its name
// and a callback that removes it. Any create/write/close error aborts the
// test via t.Fatal.
func writeTempfile(t testing.TB, data []byte) (string, func()) {
	t.Helper()
	f, err := os.CreateTemp("", "restic-test-textfile-read-")
	if err != nil {
		t.Fatal(err)
	}
	name := f.Name()
	// close the file on return; surface a close error unless an earlier
	// error already occurred, and clean up the temp file before failing
	defer func() {
		closeErr := f.Close()
		if err == nil && closeErr != nil {
			err = closeErr
		}
		if err != nil {
			// ignore subsequent errors
			_ = os.Remove(name)
			t.Fatal(err)
		}
	}()
	_, err = f.Write(data)
	if err != nil {
		t.Fatal(err)
	}
	return name, func() {
		err := os.Remove(name)
		if err != nil {
			t.Fatal(err)
		}
	}
}
// dec is a test helper that decodes a hex string, panicking on invalid input.
func dec(s string) []byte {
	decoded, err := hex.DecodeString(s)
	if err != nil {
		panic(err)
	}
	return decoded
}
// TestRead checks that Read returns file contents converted to UTF-8: plain
// bytes pass through, a UTF-8 BOM is stripped, and UTF-16 (both BE and FE/FF
// byte orders) is transcoded.
func TestRead(t *testing.T) {
	var tests = []struct {
		data []byte // raw file content
		want []byte // expected result; nil means "same as data"
	}{
		{data: []byte("foo bar baz")},
		{data: []byte("Ööbär")},
		{
			// UTF-8 BOM followed by UTF-8 text
			data: []byte("\xef\xbb\xbffööbär"),
			want: []byte("fööbär"),
		},
		{
			// UTF-16 big endian with BOM
			data: dec("feff006600f600f6006200e40072"),
			want: []byte("fööbär"),
		},
		{
			// UTF-16 little endian with BOM
			data: dec("fffe6600f600f6006200e4007200"),
			want: []byte("fööbär"),
		},
	}
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			want := test.want
			if want == nil {
				want = test.data
			}
			tempname, cleanup := writeTempfile(t, test.data)
			defer cleanup()
			data, err := Read(tempname)
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(want, data) {
				t.Errorf("invalid data returned, want:\n  %q\ngot:\n  %q", want, data)
			}
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/textfile/read.go | internal/textfile/read.go | // Package textfile allows reading files that contain text. It automatically
// detects and converts several encodings and removes Byte Order Marks (BOM).
package textfile
import (
"bytes"
"os"
"golang.org/x/text/encoding/unicode"
)
// All supported BOMs (Byte Order Marks)
var (
bomUTF8 = []byte{0xef, 0xbb, 0xbf}
bomUTF16BigEndian = []byte{0xfe, 0xff}
bomUTF16LittleEndian = []byte{0xff, 0xfe}
)
// Decode removes a byte order mark and converts the bytes to UTF-8.
func Decode(data []byte) ([]byte, error) {
	switch {
	case bytes.HasPrefix(data, bomUTF8):
		// already UTF-8, just drop the BOM
		return data[len(bomUTF8):], nil
	case bytes.HasPrefix(data, bomUTF16BigEndian), bytes.HasPrefix(data, bomUTF16LittleEndian):
		// UseBOM lets the decoder pick the endianness from the BOM itself
		decoder := unicode.UTF16(unicode.BigEndian, unicode.UseBOM).NewDecoder()
		return decoder.Bytes(data)
	default:
		// no encoding specified, let's assume UTF-8
		return data, nil
	}
}
// Read returns the contents of the file, converted to UTF-8, stripped of any BOM.
// It is a convenience wrapper around os.ReadFile followed by Decode.
func Read(filename string) ([]byte, error) {
	data, err := os.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	return Decode(data)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/filter/exclude_test.go | internal/filter/exclude_test.go | package filter
import (
"testing"
)
// TestRejectByPattern checks case-sensitive exclude matching for extension,
// exact-name and directory-wildcard patterns.
func TestRejectByPattern(t *testing.T) {
	var tests = []struct {
		filename string
		reject   bool // expected result of the reject function
	}{
		{filename: "/home/user/foo.go", reject: true},
		{filename: "/home/user/foo.c", reject: false},
		{filename: "/home/user/foobar", reject: false},
		{filename: "/home/user/foobar/x", reject: true},
		{filename: "/home/user/README", reject: false},
		{filename: "/home/user/README.md", reject: true},
	}
	patterns := []string{"*.go", "README.md", "/home/user/foobar/*"}
	for _, tc := range tests {
		t.Run("", func(t *testing.T) {
			// nil warnf: patterns above are valid, so no warning is expected
			reject := RejectByPattern(patterns, nil)
			res := reject(tc.filename)
			if res != tc.reject {
				t.Fatalf("wrong result for filename %v: want %v, got %v",
					tc.filename, tc.reject, res)
			}
		})
	}
}
// TestRejectByInsensitivePattern checks that exclude matching ignores case in
// both the patterns and the tested filenames.
func TestRejectByInsensitivePattern(t *testing.T) {
	var tests = []struct {
		filename string
		reject   bool // expected result of the reject function
	}{
		{filename: "/home/user/foo.GO", reject: true},
		{filename: "/home/user/foo.c", reject: false},
		{filename: "/home/user/foobar", reject: false},
		{filename: "/home/user/FOObar/x", reject: true},
		{filename: "/home/user/README", reject: false},
		{filename: "/home/user/readme.md", reject: true},
	}
	patterns := []string{"*.go", "README.md", "/home/user/foobar/*"}
	for _, tc := range tests {
		t.Run("", func(t *testing.T) {
			// nil warnf: patterns above are valid, so no warning is expected
			reject := RejectByInsensitivePattern(patterns, nil)
			res := reject(tc.filename)
			if res != tc.reject {
				t.Fatalf("wrong result for filename %v: want %v, got %v",
					tc.filename, tc.reject, res)
			}
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/filter/exclude.go | internal/filter/exclude.go | package filter
import (
"bufio"
"bytes"
"fmt"
"os"
"strings"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/textfile"
"github.com/spf13/pflag"
)
// RejectByNameFunc is a function that takes a filename of a
// file that would be included in the backup. The function returns true if it
// should be excluded (rejected) from the backup.
type RejectByNameFunc func(path string) bool
// RejectByPattern returns a RejectByNameFunc which rejects files that match
// one of the patterns.
// NOTE(review): warnf is only invoked when matching a pattern fails; callers
// passing nil (as the tests do) rely on the patterns being valid — confirm
// patterns are pre-validated before passing nil.
func RejectByPattern(patterns []string, warnf func(msg string, args ...interface{})) RejectByNameFunc {
	parsedPatterns := ParsePatterns(patterns)
	return func(item string) bool {
		matched, err := List(parsedPatterns, item)
		if err != nil {
			warnf("error for exclude pattern: %v", err)
		}
		if matched {
			debug.Log("path %q excluded by an exclude pattern", item)
			return true
		}
		return false
	}
}
// RejectByInsensitivePattern is like RejectByPattern but case insensitive:
// both the patterns and each tested path are lowercased before matching.
func RejectByInsensitivePattern(patterns []string, warnf func(msg string, args ...interface{})) RejectByNameFunc {
	// Lowercase the patterns into a copy instead of mutating the caller's
	// slice in place; the original code overwrote patterns[i], which is
	// visible to every other holder of the same backing array.
	lowered := make([]string, len(patterns))
	for index, path := range patterns {
		lowered[index] = strings.ToLower(path)
	}
	rejFunc := RejectByPattern(lowered, warnf)
	return func(item string) bool {
		return rejFunc(strings.ToLower(item))
	}
}
// readPatternsFromFiles reads all files and returns the list of
// patterns. For each line, leading and trailing white space is removed
// and comment lines are ignored. For each remaining pattern, environment
// variables are resolved. For adding a literal dollar sign ($), write $$ to
// the file.
func readPatternsFromFiles(files []string) ([]string, error) {
	// expansion callback for os.Expand: "$$" yields a literal "$",
	// anything else is looked up in the environment
	getenvOrDollar := func(s string) string {
		if s == "$" {
			return "$"
		}
		return os.Getenv(s)
	}
	var patterns []string
	for _, filename := range files {
		// wrap per-file processing in a closure so errors can be annotated
		// with the filename below
		err := func() (err error) {
			data, err := textfile.Read(filename)
			if err != nil {
				return err
			}
			scanner := bufio.NewScanner(bytes.NewReader(data))
			for scanner.Scan() {
				line := strings.TrimSpace(scanner.Text())
				// ignore empty lines
				if line == "" {
					continue
				}
				// strip comments
				if strings.HasPrefix(line, "#") {
					continue
				}
				line = os.Expand(line, getenvOrDollar)
				patterns = append(patterns, line)
			}
			return scanner.Err()
		}()
		if err != nil {
			return nil, fmt.Errorf("failed to read patterns from file %q: %w", filename, err)
		}
	}
	return patterns, nil
}
// ExcludePatternOptions collects the exclude-related command line options:
// inline patterns and pattern files, each in a case-sensitive and a
// case-insensitive variant.
type ExcludePatternOptions struct {
	Excludes []string // --exclude patterns
	InsensitiveExcludes []string // --iexclude patterns
	ExcludeFiles []string // --exclude-file paths
	InsensitiveExcludeFiles []string // --iexclude-file paths
}
// Add registers the exclude-related flags on the given flag set.
func (opts *ExcludePatternOptions) Add(f *pflag.FlagSet) {
	f.StringArrayVarP(&opts.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
	f.StringArrayVar(&opts.InsensitiveExcludes, "iexclude", nil, "same as --exclude `pattern` but ignores the casing of filenames")
	f.StringArrayVar(&opts.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)")
	f.StringArrayVar(&opts.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of `file`names in patterns")
}
// Empty reports whether no exclude option of any kind was provided.
func (opts *ExcludePatternOptions) Empty() bool {
	return len(opts.Excludes) == 0 && len(opts.InsensitiveExcludes) == 0 && len(opts.ExcludeFiles) == 0 && len(opts.InsensitiveExcludeFiles) == 0
}
// CollectPatterns loads patterns from the configured files, validates all
// patterns and returns the resulting reject functions (at most one
// case-insensitive and one case-sensitive). Validation failures are fatal.
// The value receiver means appends below modify only a local copy of opts.
func (opts ExcludePatternOptions) CollectPatterns(warnf func(msg string, args ...interface{})) ([]RejectByNameFunc, error) {
	var fs []RejectByNameFunc
	// add patterns from file
	if len(opts.ExcludeFiles) > 0 {
		excludePatterns, err := readPatternsFromFiles(opts.ExcludeFiles)
		if err != nil {
			return nil, err
		}
		if err := ValidatePatterns(excludePatterns); err != nil {
			return nil, errors.Fatalf("--exclude-file: %s", err)
		}
		opts.Excludes = append(opts.Excludes, excludePatterns...)
	}
	if len(opts.InsensitiveExcludeFiles) > 0 {
		excludes, err := readPatternsFromFiles(opts.InsensitiveExcludeFiles)
		if err != nil {
			return nil, err
		}
		if err := ValidatePatterns(excludes); err != nil {
			return nil, errors.Fatalf("--iexclude-file: %s", err)
		}
		opts.InsensitiveExcludes = append(opts.InsensitiveExcludes, excludes...)
	}
	if len(opts.InsensitiveExcludes) > 0 {
		if err := ValidatePatterns(opts.InsensitiveExcludes); err != nil {
			return nil, errors.Fatalf("--iexclude: %s", err)
		}
		fs = append(fs, RejectByInsensitivePattern(opts.InsensitiveExcludes, warnf))
	}
	if len(opts.Excludes) > 0 {
		if err := ValidatePatterns(opts.Excludes); err != nil {
			return nil, errors.Fatalf("--exclude: %s", err)
		}
		fs = append(fs, RejectByPattern(opts.Excludes, warnf))
	}
	return fs, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/filter/filter.go | internal/filter/filter.go | package filter
import (
"path/filepath"
"strings"
"github.com/restic/restic/internal/errors"
)
// ErrBadString is returned when Match is called with the empty string as the
// second argument.
var ErrBadString = errors.New("filter.Match: string is empty")
// patternPart is one path component of a parsed pattern.
type patternPart struct {
	pattern string // First is "/" for absolute pattern; "" for "**".
	isSimple bool // true if the component contains no glob metacharacters
}
// Pattern represents a preparsed filter pattern
type Pattern struct {
	original string // the pattern string as supplied by the user
	parts []patternPart // path components after cleaning and splitting
	isNegated bool // true for patterns starting with '!'
}
// prepareStr splits the tested path into components; the empty string is
// rejected with ErrBadString.
func prepareStr(str string) ([]string, error) {
	if str == "" {
		return nil, ErrBadString
	}
	return splitPath(str), nil
}
// preparePattern parses a pattern string into a Pattern: a leading '!' marks
// negation, the remainder is cleaned and split into per-component parts.
func preparePattern(patternStr string) Pattern {
	var negate bool
	originalPattern := patternStr
	if patternStr[0] == '!' {
		negate = true
		patternStr = patternStr[1:]
	}
	pathParts := splitPath(filepath.Clean(patternStr))
	parts := make([]patternPart, len(pathParts))
	for i, part := range pathParts {
		// a component without glob metacharacters can be compared with ==
		isSimple := !strings.ContainsAny(part, "\\[]*?")
		// Replace "**" with the empty string to get faster comparisons
		// (length-check only) in hasDoubleWildcard.
		if part == "**" {
			part = ""
		}
		parts[i] = patternPart{part, isSimple}
	}
	return Pattern{originalPattern, parts, negate}
}
// Split p into path components. Assuming p has been Cleaned, no component
// will be empty. For absolute paths, the first component is "/".
func splitPath(p string) []string {
	components := strings.Split(filepath.ToSlash(p), "/")
	if components[0] == "" {
		// absolute path: keep an explicit "/" marker as the first component
		components[0] = "/"
	}
	return components
}
// Match returns true if str matches the pattern. When the pattern is
// malformed, filepath.ErrBadPattern is returned. The empty pattern matches
// everything, when str is the empty string ErrBadString is returned.
//
// Pattern can be a combination of patterns suitable for filepath.Match, joined
// by filepath.Separator.
//
// In addition patterns suitable for filepath.Match, pattern accepts a
// recursive wildcard '**', which greedily matches an arbitrary number of
// intermediate directories.
func Match(patternStr, str string) (matched bool, err error) {
	// empty pattern matches everything
	if patternStr == "" {
		return true, nil
	}
	pattern := preparePattern(patternStr)
	strs, err := prepareStr(str)
	if err != nil {
		return false, err
	}
	return match(pattern, strs)
}
// ChildMatch returns true if children of str can match the pattern. When the pattern is
// malformed, filepath.ErrBadPattern is returned. The empty pattern matches
// everything, when str is the empty string ErrBadString is returned.
//
// Pattern can be a combination of patterns suitable for filepath.Match, joined
// by filepath.Separator.
//
// In addition patterns suitable for filepath.Match, pattern accepts a
// recursive wildcard '**', which greedily matches an arbitrary number of
// intermediate directories.
func ChildMatch(patternStr, str string) (matched bool, err error) {
	// empty pattern matches everything
	if patternStr == "" {
		return true, nil
	}
	pattern := preparePattern(patternStr)
	strs, err := prepareStr(str)
	if err != nil {
		return false, err
	}
	return childMatch(pattern, strs)
}
// childMatch reports whether a child path below strs could still match
// pattern. Relative patterns can match at any depth, so they always allow
// children; for absolute patterns, strs is matched against the pattern's
// prefix up to the first "**" (if any).
func childMatch(pattern Pattern, strs []string) (matched bool, err error) {
	if pattern.parts[0].pattern != "/" {
		// relative pattern can always be nested down
		return true, nil
	}
	ok, pos := hasDoubleWildcard(pattern)
	if ok && len(strs) >= pos {
		// cut off at the double wildcard; everything below it is reachable
		strs = strs[:pos]
	}
	// match path against absolute pattern prefix; truncate the pattern to the
	// path length so that a partial (ancestor) match counts as "child may match"
	l := 0
	if len(strs) > len(pattern.parts) {
		l = len(pattern.parts)
	} else {
		l = len(strs)
	}
	return match(Pattern{pattern.original, pattern.parts[0:l], pattern.isNegated}, strs)
}
// hasDoubleWildcard reports whether list contains a "**" component (stored as
// the empty string by preparePattern) and returns the index of the first one.
func hasDoubleWildcard(list Pattern) (bool, int) {
	for idx := range list.parts {
		if len(list.parts[idx].pattern) == 0 {
			return true, idx
		}
	}
	return false, 0
}
// match reports whether the path components strs match pattern. A "**" part
// (stored as "") is expanded step by step into runs of "*" wildcards, and
// each expansion is matched recursively. Wildcard-free patterns are matched
// by sliding the pattern over the path at every legal offset.
func match(pattern Pattern, strs []string) (matched bool, err error) {
	if ok, pos := hasDoubleWildcard(pattern); ok {
		// gradually expand '**' into separate wildcards
		newPat := make([]patternPart, len(strs))
		// copy static prefix once
		copy(newPat, pattern.parts[:pos])
		for i := 0; i <= len(strs)-len(pattern.parts)+1; i++ {
			// limit to static prefix and already appended '*'
			newPat := newPat[:pos+i]
			// in the first iteration the wildcard expands to nothing
			if i > 0 {
				newPat[pos+i-1] = patternPart{"*", false}
			}
			newPat = append(newPat, pattern.parts[pos+1:]...)
			matched, err := match(Pattern{pattern.original, newPat, pattern.isNegated}, strs)
			if err != nil {
				return false, err
			}
			if matched {
				return true, nil
			}
		}
		return false, nil
	}
	if len(pattern.parts) == 0 && len(strs) == 0 {
		return true, nil
	}
	// an empty pattern never matches a non-empty path
	if len(pattern.parts) == 0 {
		return false, nil
	}
	if len(pattern.parts) <= len(strs) {
		minOffset := 0
		maxOffset := len(strs) - len(pattern.parts)
		// special case absolute patterns
		if pattern.parts[0].pattern == "/" {
			maxOffset = 0
		} else if strs[0] == "/" {
			// skip absolute path marker if pattern is not rooted
			minOffset = 1
		}
	outer:
		// try every legal alignment of the pattern within the path, comparing
		// parts back to front (the trailing components differ most often)
		for offset := maxOffset; offset >= minOffset; offset-- {
			for i := len(pattern.parts) - 1; i >= 0; i-- {
				var ok bool
				if pattern.parts[i].isSimple {
					ok = pattern.parts[i].pattern == strs[offset+i]
				} else {
					ok, err = filepath.Match(pattern.parts[i].pattern, strs[offset+i])
					if err != nil {
						return false, errors.Wrap(err, "Match")
					}
				}
				if !ok {
					continue outer
				}
			}
			return true, nil
		}
	}
	return false, nil
}
// InvalidPatternError is returned when one or more filter patterns fail
// validation.
type InvalidPatternError struct {
	InvalidPatterns []string
}

// Error implements the error interface, listing each invalid pattern on its
// own line.
func (e *InvalidPatternError) Error() string {
	msg := "invalid pattern(s) provided:\n"
	return msg + strings.Join(e.InvalidPatterns, "\n")
}
// ValidatePatterns validates a slice of patterns.
// Returns nil if all patterns are valid, otherwise an *InvalidPatternError
// listing each offending pattern exactly once.
func ValidatePatterns(patterns []string) error {
	invalidPatterns := make([]string, 0)
	for _, pat := range ParsePatterns(patterns) {
		// Validate all pattern parts
		for _, part := range pat.parts {
			// Validate the pattern part by trying to match it against itself;
			// filepath.Match reports ErrBadPattern for malformed patterns.
			if _, validErr := filepath.Match(part.pattern, part.pattern); validErr != nil {
				invalidPatterns = append(invalidPatterns, pat.original)
				// Stop processing this pattern: the previous version used
				// `continue`, which kept scanning the remaining parts and
				// appended the same pattern once per invalid part.
				break
			}
		}
	}
	if len(invalidPatterns) > 0 {
		return &InvalidPatternError{InvalidPatterns: invalidPatterns}
	}
	return nil
}
// ParsePatterns prepares a list of patterns for use with List. Empty pattern
// strings are dropped.
func ParsePatterns(pattern []string) []Pattern {
	parsed := make([]Pattern, 0, len(pattern))
	for _, p := range pattern {
		if p == "" {
			continue
		}
		parsed = append(parsed, preparePattern(p))
	}
	return parsed
}
// List returns true if str matches one of the patterns. Empty patterns are ignored.
func List(patterns []Pattern, str string) (bool, error) {
	matched, _, err := list(patterns, false, str)
	if err != nil {
		return false, err
	}
	return matched, nil
}
// ListWithChild returns true if str matches one of the patterns. Empty patterns are ignored.
// The second result reports whether a child of str could still match.
func ListWithChild(patterns []Pattern, str string) (matched bool, childMayMatch bool, err error) {
	matched, childMayMatch, err = list(patterns, true, str)
	return matched, childMayMatch, err
}
// list returns true if str matches one of the patterns. Empty patterns are ignored.
// Patterns prefixed by "!" are negated: any matching file excluded by a previous pattern
// will become included again.
func list(patterns []Pattern, checkChildMatches bool, str string) (matched bool, childMayMatch bool, err error) {
	if len(patterns) == 0 {
		return false, false, nil
	}
	strs, err := prepareStr(str)
	if err != nil {
		return false, false, err
	}
	// With negated patterns present, a later pattern can retract an earlier
	// match, so the early-exit below is only valid when none are negated.
	hasNegatedPattern := false
	for _, pat := range patterns {
		hasNegatedPattern = hasNegatedPattern || pat.isNegated
	}
	for _, pat := range patterns {
		m, err := match(pat, strs)
		if err != nil {
			return false, false, err
		}
		var c bool
		if checkChildMatches {
			c, err = childMatch(pat, strs)
			if err != nil {
				return false, false, err
			}
		} else {
			// caller does not care about child matches; treat as always possible
			c = true
		}
		if pat.isNegated {
			// a matching negated pattern retracts any earlier match
			matched = matched && !m
			childMayMatch = childMayMatch && !m
		} else {
			matched = matched || m
			childMayMatch = childMayMatch || c
			if matched && childMayMatch && !hasNegatedPattern {
				// without negative patterns the result cannot change any more
				break
			}
		}
	}
	return matched, childMayMatch, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/filter/filter_test.go | internal/filter/filter_test.go | package filter_test
import (
"bufio"
"compress/bzip2"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"github.com/restic/restic/internal/filter"
)
var matchTests = []struct {
pattern string
path string
match bool
}{
{"", "", true},
{"", "foo", true},
{"", "/x/y/z/foo", true},
{"*.go", "/foo/bar/test.go", true},
{"*.c", "/foo/bar/test.go", false},
{"*", "/foo/bar/test.go", true},
{"**", "/foo/bar/test.go", true},
{"foo*", "/foo/bar/test.go", true},
{"bar*", "/foo/bar/test.go", true},
{"/bar*", "/foo/bar/test.go", false},
{"bar/*", "/foo/bar/test.go", true},
{"baz/*", "/foo/bar/test.go", false},
{"bar/test.go", "/foo/bar/test.go", true},
{"bar/*.go", "/foo/bar/test.go", true},
{"ba*/*.go", "/foo/bar/test.go", true},
{"bb*/*.go", "/foo/bar/test.go", false},
{"test.*", "/foo/bar/test.go", true},
{"tesT.*", "/foo/bar/test.go", false},
{"bar/*", "/foo/bar/baz", true},
{"bar", "/foo/bar", true},
{"/foo/bar", "/foo/bar", true},
{"/foo/bar/", "/foo/bar", true},
{"/foo/bar", "/foo/baz", false},
{"/foo/bar", "/foo/baz/", false},
{"/foo///bar", "/foo/bar", true},
{"/foo/../bar", "/foo/bar", false},
{"/foo/../bar", "/bar", true},
{"/foo", "/foo/baz", true},
{"/foo/", "/foo/baz", true},
{"/foo/*", "/foo", false},
{"/foo/*", "/foo/baz", true},
{"bar", "/foo/bar/baz", true},
{"bar", "/foo/bar/test.go", true},
{"/foo/*test.*", "/foo/bar/test.go", false},
{"/foo/*/test.*", "/foo/bar/test.go", true},
{"/foo/*/bar/test.*", "/foo/bar/test.go", false},
{"/*/*/bar/test.*", "/foo/bar/test.go", false},
{"/*/*/bar/test.*", "/foo/bar/baz/test.go", false},
{"/*/*/baz/test.*", "/foo/bar/baz/test.go", true},
{"/*/foo/bar/test.*", "/foo/bar/baz/test.go", false},
{"/*/foo/bar/test.*", "/foo/bar/baz/test.go", false},
{"/foo/bar/test.*", "bar/baz/test.go", false},
{"/x/y/bar/baz/test.*", "bar/baz/test.go", false},
{"/x/y/bar/baz/test.c", "bar/baz/test.go", false},
{"baz/test.*", "bar/baz/test.go", true},
{"baz/tesT.*", "bar/baz/test.go", false},
{"test.go", "bar/baz/test.go", true},
{"*.go", "bar/baz/test.go", true},
{"*.c", "bar/baz/test.go", false},
{"sdk", "/foo/bar/sdk", true},
{"sdk", "/foo/bar/sdk/test/sdk_foo.go", true},
{
"sdk/*/cpp/*/*vars*.html",
"/usr/share/doc/libreoffice/sdk/docs/cpp/ref/a00517.html",
false,
},
{"foo/**/bar/*.go", "/home/user/foo/work/special/project/bar/test.go", true},
{"foo/**/bar/*.go", "/home/user/foo/bar/test.go", true},
{"foo/**/bar/*.go", "x/foo/bar/test.go", true},
{"foo/**/bar/*.go", "foo/bar/test.go", true},
{"foo/**/bar/*.go", "foo/bar/baz/bar/test.go", true},
{"foo/**/bar/*.go", "/home/user/foo/test.c", false},
{"foo/**/bar/*.go", "bar/foo/main.go", false},
{"foo/**/bar/*.go", "/foo/bar/main.go", true},
{"foo/**/bar/*.go", "bar/main.go", false},
{"foo/**/bar", "/home/user/foo/x/y/bar", true},
{"foo/**/bar", "/home/user/foo/x/y/bar/main.go", true},
{"foo/**/bar/**/x", "/home/user/foo/bar/x", true},
{"foo/**/bar/**/x", "/home/user/foo/blaaa/blaz/bar/shared/work/x", true},
{"user/**/important*", "/home/user/work/x/y/hidden/x", false},
{"user/**/hidden*/**/c", "/home/user/work/x/y/hidden/z/a/b/c", true},
{"c:/foo/*test.*", "c:/foo/bar/test.go", false},
{"c:/foo", "c:/foo/bar", true},
{"c:/foo/", "c:/foo/bar", true},
{"c:/foo/*/test.*", "c:/foo/bar/test.go", true},
{"c:/foo/*/bar/test.*", "c:/foo/bar/test.go", false},
}
// testpattern asserts that filter.Match(pattern, path) neither returns an
// error nor deviates from the expected result.
func testpattern(t *testing.T, pattern, path string, shouldMatch bool) {
	got, err := filter.Match(pattern, path)
	if err != nil {
		t.Errorf("test pattern %q failed: expected no error for path %q, but error returned: %v",
			pattern, path, err)
	}
	if got != shouldMatch {
		t.Errorf("test: filter.Match(%q, %q): expected %v, got %v",
			pattern, path, shouldMatch, got)
	}
}
func TestMatch(t *testing.T) {
for _, test := range matchTests {
t.Run("", func(t *testing.T) {
testpattern(t, test.pattern, test.path, test.match)
})
// Test with native path separator
if filepath.Separator != '/' {
pattern := strings.ReplaceAll(test.pattern, "/", string(filepath.Separator))
// Test with pattern as native
t.Run("pattern-native", func(t *testing.T) {
testpattern(t, pattern, test.path, test.match)
})
path := strings.ReplaceAll(test.path, "/", string(filepath.Separator))
t.Run("path-native", func(t *testing.T) {
// Test with path as native
testpattern(t, test.pattern, path, test.match)
})
t.Run("both-native", func(t *testing.T) {
// Test with both pattern and path as native
testpattern(t, pattern, path, test.match)
})
}
}
}
var childMatchTests = []struct {
pattern string
path string
match bool
}{
{"", "", true},
{"", "/foo", true},
{"", "/x/y/z/foo", true},
{"foo/bar", "/foo", true},
{"baz/bar", "/foo", true},
{"foo", "/foo/bar", true},
{"bar", "/foo", true},
{"baz", "/foo/bar", true},
{"*", "/foo", true},
{"*", "/foo/bar", true},
{"/foo/bar", "/foo", true},
{"/foo/bar/baz", "/foo", true},
{"/foo/bar/baz", "/foo/bar", true},
{"/foo/bar/baz", "/foo/baz", false},
{"/foo/**/baz", "/foo/bar/baz", true},
{"/foo/**/baz", "/foo/bar/baz/blah", true},
{"/foo/**/qux", "/foo/bar/baz/qux", true},
{"/foo/**/qux", "/foo/bar/baz", true},
{"/foo/**/qux", "/foo/bar/baz/boo", true},
{"/foo/**", "/foo/bar/baz", true},
{"/foo/**", "/foo/bar", true},
{"foo/**/bar/**/x", "/home/user/foo", true},
{"foo/**/bar/**/x", "/home/user/foo/bar", true},
{"foo/**/bar/**/x", "/home/user/foo/blaaa/blaz/bar/shared/work/x", true},
{"/foo/*/qux", "/foo/bar", true},
{"/foo/*/qux", "/foo/bar/boo", false},
{"/foo/*/qux", "/foo/bar/boo/xx", false},
{"/baz/bar", "/foo", false},
{"/foo", "/foo/bar", true},
{"/*", "/foo", true},
{"/*", "/foo/bar", true},
{"/foo", "/foo/bar", true},
{"/**", "/foo", true},
{"/*/**", "/foo", true},
{"/*/**", "/foo/bar", true},
{"/*/bar", "/foo", true},
{"/bar/*", "/foo", false},
{"/foo/*/baz", "/foo/bar", true},
{"/foo/*/baz", "/foo/baz", true},
{"/foo/*/baz", "/bar/baz", false},
{"/**/*", "/foo", true},
{"/**/bar", "/foo/bar", true},
}
// testchildpattern asserts that filter.ChildMatch(pattern, path) neither
// returns an error nor deviates from the expected result.
func testchildpattern(t *testing.T, pattern, path string, shouldMatch bool) {
	got, err := filter.ChildMatch(pattern, path)
	if err != nil {
		t.Errorf("test child pattern %q failed: expected no error for path %q, but error returned: %v",
			pattern, path, err)
	}
	if got != shouldMatch {
		t.Errorf("test: filter.ChildMatch(%q, %q): expected %v, got %v",
			pattern, path, shouldMatch, got)
	}
}
func TestChildMatch(t *testing.T) {
for _, test := range childMatchTests {
t.Run("", func(t *testing.T) {
testchildpattern(t, test.pattern, test.path, test.match)
})
// Test with native path separator
if filepath.Separator != '/' {
pattern := strings.ReplaceAll(test.pattern, "/", string(filepath.Separator))
// Test with pattern as native
t.Run("pattern-native", func(t *testing.T) {
testchildpattern(t, pattern, test.path, test.match)
})
path := strings.ReplaceAll(test.path, "/", string(filepath.Separator))
t.Run("path-native", func(t *testing.T) {
// Test with path as native
testchildpattern(t, test.pattern, path, test.match)
})
t.Run("both-native", func(t *testing.T) {
// Test with both pattern and path as native
testchildpattern(t, pattern, path, test.match)
})
}
}
}
func ExampleMatch() {
match, _ := filter.Match("*.go", "/home/user/file.go")
fmt.Printf("match: %v\n", match)
// Output:
// match: true
}
func ExampleMatch_wildcards() {
match, _ := filter.Match("/home/[uU]ser/?.go", "/home/user/F.go")
fmt.Printf("match: %v\n", match)
// Output:
// match: true
}
var filterListTests = []struct {
patterns []string
path string
match bool
childMatch bool
}{
{[]string{}, "/foo/bar/test.go", false, false},
{[]string{"*.go"}, "/foo/bar/test.go", true, true},
{[]string{"*.go"}, "/foo/bar", false, true},
{[]string{"*.c"}, "/foo/bar/test.go", false, true},
{[]string{"*.go", "*.c"}, "/foo/bar/test.go", true, true},
{[]string{"*"}, "/foo/bar/test.go", true, true},
{[]string{"x"}, "/foo/bar/test.go", false, true},
{[]string{"?"}, "/foo/bar/test.go", false, true},
{[]string{"?", "x"}, "/foo/bar/x", true, true},
{[]string{"/*/*/bar/test.*"}, "/foo/bar/test.go", false, false},
{[]string{"/*/*/bar/test.*"}, "/foo/bar/bar", false, true},
{[]string{"/*/*/bar/test.*", "*.go"}, "/foo/bar/test.go", true, true},
{[]string{"", "*.c"}, "/foo/bar/test.go", false, true},
{[]string{"!**", "*.go"}, "/foo/bar/test.go", true, true},
{[]string{"!**", "*.c"}, "/foo/bar/test.go", false, true},
{[]string{"/foo/*/test.*", "!*.c"}, "/foo/bar/test.c", false, false},
{[]string{"/foo/*/test.*", "!*.c"}, "/foo/bar/test.go", true, true},
{[]string{"/foo/*/*", "!test.*", "*.c"}, "/foo/bar/test.go", false, true},
{[]string{"/foo/*/*", "!test.*", "*.c"}, "/foo/bar/test.c", true, true},
{[]string{"/foo/*/*", "!test.*", "*.c"}, "/foo/bar/file.go", true, true},
{[]string{"/**/*", "!/foo", "/foo/*", "!/foo/bar"}, "/foo/other/test.go", true, true},
{[]string{"/**/*", "!/foo", "/foo/*", "!/foo/bar"}, "/foo/bar", false, false},
{[]string{"/**/*", "!/foo", "/foo/*", "!/foo/bar"}, "/foo/bar/test.go", false, false},
{[]string{"/**/*", "!/foo", "/foo/*", "!/foo/bar"}, "/foo/bar/test.go/child", false, false},
{[]string{"/**/*", "!/foo", "/foo/*", "!/foo/bar", "/foo/bar/test*"}, "/foo/bar/test.go/child", true, true},
{[]string{"/foo/bar/*"}, "/foo", false, true},
{[]string{"/foo/bar/*", "!/foo/bar/[a-m]*"}, "/foo", false, true},
{[]string{"/foo/**/test.c"}, "/foo/bar/foo/bar/test.c", true, true},
{[]string{"/foo/*/test.c"}, "/foo/bar/foo/bar/test.c", false, false},
}
func TestList(t *testing.T) {
for i, test := range filterListTests {
patterns := filter.ParsePatterns(test.patterns)
match, err := filter.List(patterns, test.path)
if err != nil {
t.Errorf("test %d failed: expected no error for patterns %q, but error returned: %v",
i, test.patterns, err)
continue
}
if match != test.match {
t.Errorf("test %d: filter.List(%q, %q): expected %v, got %v",
i, test.patterns, test.path, test.match, match)
}
match, childMatch, err := filter.ListWithChild(patterns, test.path)
if err != nil {
t.Errorf("test %d failed: expected no error for patterns %q, but error returned: %v",
i, test.patterns, err)
continue
}
if match != test.match || childMatch != test.childMatch {
t.Errorf("test %d: filter.ListWithChild(%q, %q): expected %v, %v, got %v, %v",
i, test.patterns, test.path, test.match, test.childMatch, match, childMatch)
}
}
}
func ExampleList() {
patterns := filter.ParsePatterns([]string{"*.c", "*.go"})
match, _ := filter.List(patterns, "/home/user/file.go")
fmt.Printf("match: %v\n", match)
// Output:
// match: true
}
// TestInvalidStrs verifies that the empty string is rejected as a path by
// Match, ChildMatch and List.
func TestInvalidStrs(t *testing.T) {
	if _, err := filter.Match("test", ""); err == nil {
		t.Error("Match accepted invalid path")
	}
	if _, err := filter.ChildMatch("test", ""); err == nil {
		t.Error("ChildMatch accepted invalid path")
	}
	parsed := filter.ParsePatterns([]string{"test"})
	if _, err := filter.List(parsed, ""); err == nil {
		t.Error("List accepted invalid path")
	}
}
// TestInvalidPattern verifies that malformed glob patterns surface an error
// from List, both with and without a "**" component.
func TestInvalidPattern(t *testing.T) {
	for _, pattern := range []string{"test/[", "test/**/["} {
		parsed := filter.ParsePatterns([]string{pattern})
		if _, err := filter.List(parsed, "test/example"); err == nil {
			t.Error("List accepted invalid pattern")
		}
	}
}
func extractTestLines(t testing.TB) (lines []string) {
f, err := os.Open("testdata/libreoffice.txt.bz2")
if err != nil {
t.Fatal(err)
}
defer func() {
if err := f.Close(); err != nil {
t.Fatal(err)
}
}()
sc := bufio.NewScanner(bzip2.NewReader(f))
for sc.Scan() {
lines = append(lines, sc.Text())
}
return lines
}
func TestFilterPatternsFile(t *testing.T) {
lines := extractTestLines(t)
var testPatterns = []struct {
pattern string
hits uint
}{
{"*.html", 18249},
{"sdk", 22186},
{"sdk/*/cpp/*/*vars.html", 3},
}
for _, test := range testPatterns {
var c uint
for _, line := range lines {
match, err := filter.Match(test.pattern, line)
if err != nil {
t.Error(err)
continue
}
if match {
c++
// t.Logf("pattern %q, line %q\n", test.pattern, line)
}
}
if c != test.hits {
t.Errorf("wrong number of hits for pattern %q: want %d, got %d",
test.pattern, test.hits, c)
}
}
}
func BenchmarkFilterLines(b *testing.B) {
pattern := "sdk/*/cpp/*/*vars.html"
lines := extractTestLines(b)
var c uint
b.ResetTimer()
for i := 0; i < b.N; i++ {
c = 0
for _, line := range lines {
match, err := filter.Match(pattern, line)
if err != nil {
b.Fatal(err)
}
if match {
c++
}
}
if c != 3 {
b.Fatalf("wrong number of matches: expected 3, got %d", c)
}
}
}
func BenchmarkFilterPatterns(b *testing.B) {
lines := extractTestLines(b)
modlines := make([]string, 200)
for i, line := range lines {
if i >= len(modlines) {
break
}
modlines[i] = line + "-does-not-match"
}
tests := []struct {
name string
patterns []filter.Pattern
matches uint
}{
{"Relative", filter.ParsePatterns([]string{
"does-not-match",
"sdk/*",
"*.html",
}), 22185},
{"Absolute", filter.ParsePatterns([]string{
"/etc",
"/home/*/test",
"/usr/share/doc/libreoffice/sdk/docs/java",
}), 150},
{"Wildcard", filter.ParsePatterns([]string{
"/etc/**/example",
"/home/**/test",
"/usr/**/java",
}), 150},
{"ManyNoMatch", filter.ParsePatterns(modlines), 0},
}
for _, test := range tests {
b.Run(test.name, func(b *testing.B) {
var c uint
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
c = 0
for _, line := range lines {
match, err := filter.List(test.patterns, line)
if err != nil {
b.Fatal(err)
}
if match {
c++
}
}
if c != test.matches {
b.Fatalf("wrong number of matches: expected %d, got %d", test.matches, c)
}
}
})
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/filter/include.go | internal/filter/include.go | package filter
import (
"strings"
"github.com/restic/restic/internal/errors"
"github.com/spf13/pflag"
)
// IncludeByNameFunc decides whether the item with the given name is included
// in the restore process. childMayMatch reports whether a child of a
// non-matching item could still match.
type IncludeByNameFunc func(item string) (matched bool, childMayMatch bool)
// IncludePatternOptions collects the include-pattern related command line
// options (--include, --iinclude, --include-file, --iinclude-file).
type IncludePatternOptions struct {
Includes []string // case-sensitive patterns from --include
InsensitiveIncludes []string // case-insensitive patterns from --iinclude
IncludeFiles []string // files containing patterns, from --include-file
InsensitiveIncludeFiles []string // files containing patterns, from --iinclude-file
}
// Add registers the include-pattern command line flags on the given flag set.
func (opts *IncludePatternOptions) Add(f *pflag.FlagSet) {
f.StringArrayVarP(&opts.Includes, "include", "i", nil, "include a `pattern` (can be specified multiple times)")
f.StringArrayVar(&opts.InsensitiveIncludes, "iinclude", nil, "same as --include `pattern` but ignores the casing of filenames")
f.StringArrayVar(&opts.IncludeFiles, "include-file", nil, "read include patterns from a `file` (can be specified multiple times)")
f.StringArrayVar(&opts.InsensitiveIncludeFiles, "iinclude-file", nil, "same as --include-file but ignores casing of `file`names in patterns")
}
// CollectPatterns turns the configured options into a list of
// IncludeByNameFunc filters. Patterns read from files are validated and
// merged with the ones given directly on the command line; warnf is used by
// the returned filters to report match errors at filter time.
func (opts IncludePatternOptions) CollectPatterns(warnf func(msg string, args ...interface{})) ([]IncludeByNameFunc, error) {
	var fs []IncludeByNameFunc
	// Read case-sensitive patterns from files and merge them into Includes.
	if len(opts.IncludeFiles) > 0 {
		includePatterns, err := readPatternsFromFiles(opts.IncludeFiles)
		if err != nil {
			return nil, err
		}
		if err := ValidatePatterns(includePatterns); err != nil {
			return nil, errors.Fatalf("--include-file: %s", err)
		}
		opts.Includes = append(opts.Includes, includePatterns...)
	}
	// Same for the case-insensitive variants.
	if len(opts.InsensitiveIncludeFiles) > 0 {
		includePatterns, err := readPatternsFromFiles(opts.InsensitiveIncludeFiles)
		if err != nil {
			return nil, err
		}
		if err := ValidatePatterns(includePatterns); err != nil {
			return nil, errors.Fatalf("--iinclude-file: %s", err)
		}
		opts.InsensitiveIncludes = append(opts.InsensitiveIncludes, includePatterns...)
	}
	if len(opts.InsensitiveIncludes) > 0 {
		if err := ValidatePatterns(opts.InsensitiveIncludes); err != nil {
			return nil, errors.Fatalf("--iinclude: %s", err)
		}
		fs = append(fs, IncludeByInsensitivePattern(opts.InsensitiveIncludes, warnf))
	}
	if len(opts.Includes) > 0 {
		if err := ValidatePatterns(opts.Includes); err != nil {
			return nil, errors.Fatalf("--include: %s", err)
		}
		fs = append(fs, IncludeByPattern(opts.Includes, warnf))
	}
	return fs, nil
}
// IncludeByPattern returns an IncludeByNameFunc which includes files that
// match one of the patterns. Match errors are reported through warnf.
func IncludeByPattern(patterns []string, warnf func(msg string, args ...interface{})) IncludeByNameFunc {
	parsed := ParsePatterns(patterns)
	return func(item string) (bool, bool) {
		matched, childMayMatch, err := ListWithChild(parsed, item)
		if err != nil {
			warnf("error for include pattern: %v", err)
		}
		return matched, childMayMatch
	}
}
// IncludeByInsensitivePattern returns an IncludeByNameFunc which includes
// files that match one of the patterns, ignoring the casing of the filenames.
func IncludeByInsensitivePattern(patterns []string, warnf func(msg string, args ...interface{})) IncludeByNameFunc {
	// Lower-case the patterns into a fresh slice instead of mutating the one
	// the caller handed in; the previous version overwrote the caller's
	// backing array as a side effect.
	lowered := make([]string, len(patterns))
	for i, pattern := range patterns {
		lowered[i] = strings.ToLower(pattern)
	}
	includeFunc := IncludeByPattern(lowered, warnf)
	return func(item string) (matched bool, childMayMatch bool) {
		return includeFunc(strings.ToLower(item))
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/filter/include_test.go | internal/filter/include_test.go | package filter
import (
"testing"
)
func TestIncludeByPattern(t *testing.T) {
var tests = []struct {
filename string
include bool
}{
{filename: "/home/user/foo.go", include: true},
{filename: "/home/user/foo.c", include: false},
{filename: "/home/user/foobar", include: false},
{filename: "/home/user/foobar/x", include: false},
{filename: "/home/user/README", include: false},
{filename: "/home/user/README.md", include: true},
}
patterns := []string{"*.go", "README.md"}
for _, tc := range tests {
t.Run(tc.filename, func(t *testing.T) {
includeFunc := IncludeByPattern(patterns, nil)
matched, _ := includeFunc(tc.filename)
if matched != tc.include {
t.Fatalf("wrong result for filename %v: want %v, got %v",
tc.filename, tc.include, matched)
}
})
}
}
func TestIncludeByInsensitivePattern(t *testing.T) {
var tests = []struct {
filename string
include bool
}{
{filename: "/home/user/foo.GO", include: true},
{filename: "/home/user/foo.c", include: false},
{filename: "/home/user/foobar", include: false},
{filename: "/home/user/FOObar/x", include: false},
{filename: "/home/user/README", include: false},
{filename: "/home/user/readme.MD", include: true},
}
patterns := []string{"*.go", "README.md"}
for _, tc := range tests {
t.Run(tc.filename, func(t *testing.T) {
includeFunc := IncludeByInsensitivePattern(patterns, nil)
matched, _ := includeFunc(tc.filename)
if matched != tc.include {
t.Fatalf("wrong result for filename %v: want %v, got %v",
tc.filename, tc.include, matched)
}
})
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/filter/doc.go | internal/filter/doc.go | // Package filter implements filters for files similar to filepath.Glob, but
// in contrast to filepath.Glob a pattern may specify directories.
//
// For a list of valid patterns please see the documentation on filepath.Glob.
package filter
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/filter/filter_patterns_test.go | internal/filter/filter_patterns_test.go | package filter_test
import (
"testing"
"github.com/restic/restic/internal/filter"
rtest "github.com/restic/restic/internal/test"
)
func TestValidPatterns(t *testing.T) {
// Test invalid patterns are detected and returned
t.Run("detect-invalid-patterns", func(t *testing.T) {
err := filter.ValidatePatterns([]string{"*.foo", "*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"})
rtest.Assert(t, err != nil, "Expected invalid patterns to be detected")
if ip, ok := err.(*filter.InvalidPatternError); ok {
rtest.Equals(t, ip.InvalidPatterns, []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"})
} else {
t.Errorf("wrong error type %v", err)
}
})
// Test all patterns defined in matchTests are valid
patterns := make([]string, 0)
for _, data := range matchTests {
patterns = append(patterns, data.pattern)
}
t.Run("validate-patterns", func(t *testing.T) {
err := filter.ValidatePatterns(patterns)
if err != nil {
t.Error(err)
}
})
// Test all patterns defined in childMatchTests are valid
childPatterns := make([]string, 0)
for _, data := range childMatchTests {
childPatterns = append(childPatterns, data.pattern)
}
t.Run("validate-child-patterns", func(t *testing.T) {
err := filter.ValidatePatterns(childPatterns)
if err != nil {
t.Error(err)
}
})
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/global/global_debug.go | internal/global/global_debug.go | //go:build debug || profile
package global
import (
"fmt"
"io"
"net/http"
_ "net/http/pprof"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/pkg/profile"
)
// RegisterProfiling wires profiling around the command's execution: the
// profiler starts in the persistent pre-run hook and is stopped via
// cobra.OnFinalize. The profiling flags are added to the command's
// persistent flag set.
func RegisterProfiling(cmd *cobra.Command, stderr io.Writer) {
	var profiler Profiler
	// Chain any existing pre-run hook so prior setup still executes first.
	origPreRun := cmd.PersistentPreRunE
	cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
		if origPreRun != nil {
			if err := origPreRun(cmd, args); err != nil {
				return err
			}
		}
		return profiler.Start(profiler.opts, stderr)
	}
	// Once https://github.com/spf13/cobra/issues/1893 is fixed,
	// this could use PersistentPostRunE instead of OnFinalize,
	// reverting https://github.com/restic/restic/pull/5373.
	cobra.OnFinalize(func() {
		profiler.Stop()
	})
	profiler.opts.AddFlags(cmd.PersistentFlags())
}
type Profiler struct {
opts ProfileOptions
stop interface {
Stop()
}
}
type ProfileOptions struct {
listen string
memPath string
cpuPath string
tracePath string
blockPath string
insecure bool
}
func (opts *ProfileOptions) AddFlags(f *pflag.FlagSet) {
f.StringVar(&opts.listen, "listen-profile", "", "listen on this `address:port` for memory profiling")
f.StringVar(&opts.memPath, "mem-profile", "", "write memory profile to `dir`")
f.StringVar(&opts.cpuPath, "cpu-profile", "", "write cpu profile to `dir`")
f.StringVar(&opts.tracePath, "trace-profile", "", "write trace to `dir`")
f.StringVar(&opts.blockPath, "block-profile", "", "write block profile to `dir`")
f.BoolVar(&opts.insecure, "insecure-kdf", false, "use insecure KDF settings")
}
type fakeTestingTB struct {
stderr io.Writer
}
func (t fakeTestingTB) Logf(msg string, args ...interface{}) {
fmt.Fprintf(t.stderr, msg, args...)
}
func (p *Profiler) Start(profileOpts ProfileOptions, stderr io.Writer) error {
if profileOpts.listen != "" {
fmt.Fprintf(stderr, "running profile HTTP server on %v\n", profileOpts.listen)
go func() {
err := http.ListenAndServe(profileOpts.listen, nil)
if err != nil {
fmt.Fprintf(stderr, "profile HTTP server listen failed: %v\n", err)
}
}()
}
profilesEnabled := 0
if profileOpts.memPath != "" {
profilesEnabled++
}
if profileOpts.cpuPath != "" {
profilesEnabled++
}
if profileOpts.tracePath != "" {
profilesEnabled++
}
if profileOpts.blockPath != "" {
profilesEnabled++
}
if profilesEnabled > 1 {
return errors.Fatal("only one profile (memory, CPU, trace, or block) may be activated at the same time")
}
if profileOpts.memPath != "" {
p.stop = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(profileOpts.memPath))
} else if profileOpts.cpuPath != "" {
p.stop = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(profileOpts.cpuPath))
} else if profileOpts.tracePath != "" {
p.stop = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(profileOpts.tracePath))
} else if profileOpts.blockPath != "" {
p.stop = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(profileOpts.blockPath))
}
if profileOpts.insecure {
repository.TestUseLowSecurityKDFParameters(fakeTestingTB{stderr})
}
return nil
}
// Stop ends the active profile run, if any.
func (p *Profiler) Stop() {
	if p.stop == nil {
		return
	}
	p.stop.Stop()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/global/secondary_repo.go | internal/global/secondary_repo.go | package global
import (
"context"
"os"
"github.com/restic/restic/internal/errors"
"github.com/spf13/pflag"
)
type SecondaryRepoOptions struct {
Password string
// from-repo options
Repo string
RepositoryFile string
PasswordFile string
PasswordCommand string
KeyHint string
InsecureNoPassword bool
// repo2 options
LegacyRepo string
LegacyRepositoryFile string
LegacyPasswordFile string
LegacyPasswordCommand string
LegacyKeyHint string
}
// AddFlags registers both the legacy *2 flags and the current --from-* flags
// on f. repoPrefix and repoUsage are interpolated into the help texts.
// Defaults are taken from the matching RESTIC_* environment variables at
// registration time.
func (opts *SecondaryRepoOptions) AddFlags(f *pflag.FlagSet, repoPrefix string, repoUsage string) {
	f.StringVarP(&opts.LegacyRepo, "repo2", "", "", repoPrefix+" `repository` "+repoUsage+" (default: $RESTIC_REPOSITORY2)")
	f.StringVarP(&opts.LegacyRepositoryFile, "repository-file2", "", "", "`file` from which to read the "+repoPrefix+" repository location "+repoUsage+" (default: $RESTIC_REPOSITORY_FILE2)")
	f.StringVarP(&opts.LegacyPasswordFile, "password-file2", "", "", "`file` to read the "+repoPrefix+" repository password from (default: $RESTIC_PASSWORD_FILE2)")
	f.StringVarP(&opts.LegacyKeyHint, "key-hint2", "", "", "key ID of key to try decrypting the "+repoPrefix+" repository first (default: $RESTIC_KEY_HINT2)")
	f.StringVarP(&opts.LegacyPasswordCommand, "password-command2", "", "", "shell `command` to obtain the "+repoPrefix+" repository password from (default: $RESTIC_PASSWORD_COMMAND2)")

	// hide repo2 options: the repo/repository-file variants are marked
	// deprecated so cobra prints a migration hint, the rest are hidden
	_ = f.MarkDeprecated("repo2", "use --repo or --from-repo instead")
	_ = f.MarkDeprecated("repository-file2", "use --repository-file or --from-repository-file instead")
	_ = f.MarkHidden("password-file2")
	_ = f.MarkHidden("key-hint2")
	_ = f.MarkHidden("password-command2")

	// legacy environment defaults
	opts.LegacyRepo = os.Getenv("RESTIC_REPOSITORY2")
	opts.LegacyRepositoryFile = os.Getenv("RESTIC_REPOSITORY_FILE2")
	opts.LegacyPasswordFile = os.Getenv("RESTIC_PASSWORD_FILE2")
	opts.LegacyKeyHint = os.Getenv("RESTIC_KEY_HINT2")
	opts.LegacyPasswordCommand = os.Getenv("RESTIC_PASSWORD_COMMAND2")

	f.StringVarP(&opts.Repo, "from-repo", "", "", "source `repository` "+repoUsage+" (default: $RESTIC_FROM_REPOSITORY)")
	f.StringVarP(&opts.RepositoryFile, "from-repository-file", "", "", "`file` from which to read the source repository location "+repoUsage+" (default: $RESTIC_FROM_REPOSITORY_FILE)")
	f.StringVarP(&opts.PasswordFile, "from-password-file", "", "", "`file` to read the source repository password from (default: $RESTIC_FROM_PASSWORD_FILE)")
	f.StringVarP(&opts.KeyHint, "from-key-hint", "", "", "key ID of key to try decrypting the source repository first (default: $RESTIC_FROM_KEY_HINT)")
	f.StringVarP(&opts.PasswordCommand, "from-password-command", "", "", "shell `command` to obtain the source repository password from (default: $RESTIC_FROM_PASSWORD_COMMAND)")
	f.BoolVar(&opts.InsecureNoPassword, "from-insecure-no-password", false, "use an empty password for the source repository (insecure)")

	// current environment defaults
	opts.Repo = os.Getenv("RESTIC_FROM_REPOSITORY")
	opts.RepositoryFile = os.Getenv("RESTIC_FROM_REPOSITORY_FILE")
	opts.PasswordFile = os.Getenv("RESTIC_FROM_PASSWORD_FILE")
	opts.KeyHint = os.Getenv("RESTIC_FROM_KEY_HINT")
	opts.PasswordCommand = os.Getenv("RESTIC_FROM_PASSWORD_COMMAND")
}
// FillGlobalOpts returns a copy of gopts adjusted to address the secondary
// repository described by opts. The boolean result reports whether the
// current --from-* option group was used (true) rather than the legacy *2
// group. Mixing the two groups, or giving both a repo and a repository-file
// within one group, is an error. The password is taken from opts.Password,
// the group-specific environment variable, or an interactive prompt, in that
// order.
func (opts *SecondaryRepoOptions) FillGlobalOpts(ctx context.Context, gopts Options, repoPrefix string) (Options, bool, error) {
	if opts.Repo == "" && opts.RepositoryFile == "" && opts.LegacyRepo == "" && opts.LegacyRepositoryFile == "" {
		return Options{}, false, errors.Fatal("Please specify a source repository location (--from-repo or --from-repository-file)")
	}
	// detect which option group was used; any member flag counts
	hasFromRepo := opts.Repo != "" || opts.RepositoryFile != "" || opts.PasswordFile != "" ||
		opts.KeyHint != "" || opts.PasswordCommand != "" || opts.InsecureNoPassword
	hasRepo2 := opts.LegacyRepo != "" || opts.LegacyRepositoryFile != "" || opts.LegacyPasswordFile != "" ||
		opts.LegacyKeyHint != "" || opts.LegacyPasswordCommand != ""
	if hasFromRepo && hasRepo2 {
		return Options{}, false, errors.Fatal("Option groups repo2 and from-repo are mutually exclusive, please specify only one")
	}
	var err error
	dstGopts := gopts
	var pwdEnv string
	if hasFromRepo {
		if opts.Repo != "" && opts.RepositoryFile != "" {
			return Options{}, false, errors.Fatal("Options --from-repo and --from-repository-file are mutually exclusive, please specify only one")
		}
		dstGopts.Repo = opts.Repo
		dstGopts.RepositoryFile = opts.RepositoryFile
		dstGopts.PasswordFile = opts.PasswordFile
		dstGopts.PasswordCommand = opts.PasswordCommand
		dstGopts.KeyHint = opts.KeyHint
		dstGopts.InsecureNoPassword = opts.InsecureNoPassword
		pwdEnv = "RESTIC_FROM_PASSWORD"
		repoPrefix = "source"
	} else {
		if opts.LegacyRepo != "" && opts.LegacyRepositoryFile != "" {
			return Options{}, false, errors.Fatal("Options --repo2 and --repository-file2 are mutually exclusive, please specify only one")
		}
		dstGopts.Repo = opts.LegacyRepo
		dstGopts.RepositoryFile = opts.LegacyRepositoryFile
		dstGopts.PasswordFile = opts.LegacyPasswordFile
		dstGopts.PasswordCommand = opts.LegacyPasswordCommand
		dstGopts.KeyHint = opts.LegacyKeyHint
		// keep existing behavior for legacy options
		dstGopts.InsecureNoPassword = false
		pwdEnv = "RESTIC_PASSWORD2"
	}
	if opts.Password != "" {
		dstGopts.Password = opts.Password
	} else {
		// fall back to the group-specific environment variable / file / command
		dstGopts.Password, err = resolvePassword(&dstGopts, pwdEnv)
		if err != nil {
			return Options{}, false, err
		}
	}
	// prompt interactively if no other password source applied
	dstGopts.Password, err = readPassword(ctx, dstGopts, "enter password for "+repoPrefix+" repository: ")
	if err != nil {
		return Options{}, false, err
	}
	return dstGopts, hasFromRepo, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/global/global_release.go | internal/global/global_release.go | //go:build !debug && !profile
package global
import (
"io"
"github.com/spf13/cobra"
)
// RegisterProfiling is a no-op in release builds (built without the debug or
// profile build tags); the profiling flags are only wired up in those builds.
func RegisterProfiling(_ *cobra.Command, _ io.Writer) {
	// No profiling in release mode
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/global/secondary_repo_test.go | internal/global/secondary_repo_test.go | package global
import (
"context"
"os"
"path/filepath"
"testing"
rtest "github.com/restic/restic/internal/test"
)
// TestFillSecondaryGlobalOpts tests valid and invalid data on fillSecondaryGlobalOpts-function
// TestFillSecondaryGlobalOpts tests valid and invalid inputs of the
// FillGlobalOpts function.
func TestFillSecondaryGlobalOpts(t *testing.T) {
	// secondaryRepoTestCase defines a struct for test cases
	type secondaryRepoTestCase struct {
		Opts     SecondaryRepoOptions
		DstGOpts Options
		FromRepo bool
	}

	// validSecondaryRepoTestCases is a list with test cases that must pass
	var validSecondaryRepoTestCases = []secondaryRepoTestCase{
		{
			// Test if Repo and Password are parsed correctly.
			Opts: SecondaryRepoOptions{
				Repo:     "backupDst",
				Password: "secretDst",
			},
			DstGOpts: Options{
				Repo:     "backupDst",
				Password: "secretDst",
			},
			FromRepo: true,
		},
		{
			// Test if RepositoryFile and PasswordFile are parsed correctly.
			Opts: SecondaryRepoOptions{
				RepositoryFile: "backupDst",
				PasswordFile:   "passwordFileDst",
			},
			DstGOpts: Options{
				RepositoryFile: "backupDst",
				Password:       "secretDst",
				PasswordFile:   "passwordFileDst",
			},
			FromRepo: true,
		},
		{
			// Test if RepositoryFile and PasswordCommand are parsed correctly.
			Opts: SecondaryRepoOptions{
				RepositoryFile:  "backupDst",
				PasswordCommand: "echo secretDst",
			},
			DstGOpts: Options{
				RepositoryFile:  "backupDst",
				Password:        "secretDst",
				PasswordCommand: "echo secretDst",
			},
			FromRepo: true,
		},
		{
			// Test if LegacyRepo and Password are parsed correctly.
			Opts: SecondaryRepoOptions{
				LegacyRepo: "backupDst",
				Password:   "secretDst",
			},
			DstGOpts: Options{
				Repo:     "backupDst",
				Password: "secretDst",
			},
		},
		{
			// Test if LegacyRepositoryFile and LegacyPasswordFile are parsed correctly.
			Opts: SecondaryRepoOptions{
				LegacyRepositoryFile: "backupDst",
				LegacyPasswordFile:   "passwordFileDst",
			},
			DstGOpts: Options{
				RepositoryFile: "backupDst",
				Password:       "secretDst",
				PasswordFile:   "passwordFileDst",
			},
		},
		{
			// Test if LegacyRepositoryFile and LegacyPasswordCommand are parsed correctly.
			Opts: SecondaryRepoOptions{
				LegacyRepositoryFile:  "backupDst",
				LegacyPasswordCommand: "echo secretDst",
			},
			DstGOpts: Options{
				RepositoryFile:  "backupDst",
				Password:        "secretDst",
				PasswordCommand: "echo secretDst",
			},
		},
	}

	// invalidSecondaryRepoTestCases is a list with test cases that must fail
	var invalidSecondaryRepoTestCases = []secondaryRepoTestCase{
		{
			// Test must fail on no repo given.
			Opts: SecondaryRepoOptions{},
		},
		{
			// Test must fail as Repo and RepositoryFile are both given
			Opts: SecondaryRepoOptions{
				Repo:           "backupDst",
				RepositoryFile: "backupDst",
			},
		},
		{
			// Test must fail as PasswordFile and PasswordCommand are both given
			Opts: SecondaryRepoOptions{
				Repo:            "backupDst",
				PasswordFile:    "passwordFileDst",
				PasswordCommand: "notEmpty",
			},
		},
		{
			// Test must fail as PasswordFile does not exist
			Opts: SecondaryRepoOptions{
				Repo:         "backupDst",
				PasswordFile: "NonExistingFile",
			},
		},
		{
			// Test must fail as PasswordCommand does not exist
			Opts: SecondaryRepoOptions{
				Repo:            "backupDst",
				PasswordCommand: "notEmpty",
			},
		},
		{
			// Test must fail as current and legacy options are mixed
			Opts: SecondaryRepoOptions{
				Repo:       "backupDst",
				LegacyRepo: "backupDst",
			},
		},
		{
			// Test must fail as current and legacy options are mixed
			Opts: SecondaryRepoOptions{
				Repo:                  "backupDst",
				LegacyPasswordCommand: "notEmpty",
			},
		},
	}

	// gOpts defines the Global options used in the secondary repository tests
	var gOpts = Options{
		Repo:           "backupSrc",
		RepositoryFile: "backupSrc",
		Password:       "secretSrc",
		PasswordFile:   "passwordFileSrc",
	}

	// Create temp dir to create password file.
	dir := rtest.TempDir(t)
	cleanup := rtest.Chdir(t, dir)
	defer cleanup()

	// Create temporary password file
	err := os.WriteFile(filepath.Join(dir, "passwordFileDst"), []byte("secretDst"), 0666)
	rtest.OK(t, err)

	// Test all valid cases
	for _, testCase := range validSecondaryRepoTestCases {
		DstGOpts, isFromRepo, err := testCase.Opts.FillGlobalOpts(context.TODO(), gOpts, "destination")
		rtest.OK(t, err)
		// rtest.Equals expects the expected value first, the actual value
		// second (matching the other tests in this package); the previous
		// order inverted the want/got failure message.
		rtest.Equals(t, testCase.DstGOpts, DstGOpts)
		rtest.Equals(t, testCase.FromRepo, isFromRepo)
	}

	// Test all invalid cases
	for _, testCase := range invalidSecondaryRepoTestCases {
		_, _, err := testCase.Opts.FillGlobalOpts(context.TODO(), gOpts, "destination")
		rtest.Assert(t, err != nil, "Expected error, but function did not return an error")
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/global/global_test.go | internal/global/global_test.go | package global
import (
"context"
"os"
"path/filepath"
"strings"
"testing"
rtest "github.com/restic/restic/internal/test"
)
// TestReadRepo exercises readRepo for the -r flag, the --repository-file
// option, and a repository file that does not exist.
func TestReadRepo(t *testing.T) {
	tempDir := rtest.TempDir(t)

	// location given directly via --repo
	repoOpts := Options{Repo: tempDir}
	repoLoc, err := readRepo(repoOpts)
	rtest.OK(t, err)
	rtest.Equals(t, tempDir, repoLoc)

	// location read from a --repository-file; the trailing newline must be stripped
	repoFile := filepath.Join(tempDir, "foo")
	rtest.OK(t, os.WriteFile(repoFile, []byte(tempDir+"\n"), 0666))

	fileOpts := Options{RepositoryFile: repoFile}
	repoLoc, err = readRepo(fileOpts)
	rtest.OK(t, err)
	rtest.Equals(t, tempDir, repoLoc)

	// a repository file that does not exist must be rejected
	badOpts := Options{RepositoryFile: repoFile + "-invalid"}
	if _, err = readRepo(badOpts); err == nil {
		t.Fatal("must not read repository path from invalid file path")
	}
}
// TestReadEmptyPassword verifies that --insecure-no-password yields an empty
// password and that combining it with an explicit password is rejected.
func TestReadEmptyPassword(t *testing.T) {
	opts := Options{InsecureNoPassword: true}
	password, err := readPassword(context.TODO(), opts, "test")
	rtest.OK(t, err)
	rtest.Equals(t, "", password, "got unexpected password")

	// an explicit password contradicts --insecure-no-password
	opts.Password = "invalid"
	_, err = readPassword(context.TODO(), opts, "test")
	rtest.Assert(t, strings.Contains(err.Error(), "must not be specified together with providing a password via a cli option or environment variable"), "unexpected error message, got %v", err)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/global/global.go | internal/global/global.go | package global
import (
"context"
"fmt"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/restic/chunker"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/cache"
"github.com/restic/restic/internal/backend/limiter"
"github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/backend/logger"
"github.com/restic/restic/internal/backend/retry"
"github.com/restic/restic/internal/backend/sema"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/textfile"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/progress"
"github.com/spf13/pflag"
"github.com/restic/restic/internal/errors"
)
// ErrNoRepository is used to report if opening a repository failed due
// to a missing backend storage location or config file
var ErrNoRepository = errors.New("repository does not exist")
const Version = "0.18.1-dev (compiled manually)"
// TimeFormat is the format used for all timestamps printed by restic.
const TimeFormat = "2006-01-02 15:04:05"
type BackendWrapper func(r backend.Backend) (backend.Backend, error)
// Options holds all global options for restic.
type Options struct {
	// repository location and credential sources
	Repo            string
	RepositoryFile  string
	PasswordFile    string
	PasswordCommand string
	KeyHint         string
	// output and locking behavior
	Quiet     bool
	Verbose   int
	NoLock    bool
	RetryLock time.Duration
	JSON      bool
	// local cache configuration
	CacheDir     string
	NoCache      bool
	CleanupCache bool
	// repository tuning
	Compression        repository.CompressionMode
	PackSize           uint
	NoExtraVerify      bool
	InsecureNoPassword bool

	backend.TransportOptions
	limiter.Limits

	// Password is filled in by PreRun / the repository open helpers.
	Password string
	Term     ui.Terminal
	Backends *location.Registry
	// test hooks wrapping the backend (outer / inner), see wrapBackend
	BackendTestHook, BackendInnerTestHook BackendWrapper
	// Verbosity is set as follows:
	//  0 means: don't print any messages except errors, this is used when --quiet is specified
	//  1 is the default: print essential messages
	//  2 means: print more messages, report minor things, this is used when --verbose is specified
	//  3 means: print very detailed debug messages, this is used when --verbose=2 is specified
	Verbosity uint

	// raw -o key=value pairs and their parsed form
	Options  []string
	Extended options.Options
}
// AddFlags registers all global restic flags on f. Environment variables
// (RESTIC_REPOSITORY, RESTIC_PASSWORD_FILE, RESTIC_COMPRESSION, ...) are read
// immediately and used as the flag defaults.
func (opts *Options) AddFlags(f *pflag.FlagSet) {
	f.StringVarP(&opts.Repo, "repo", "r", "", "`repository` to backup to or restore from (default: $RESTIC_REPOSITORY)")
	f.StringVarP(&opts.RepositoryFile, "repository-file", "", "", "`file` to read the repository location from (default: $RESTIC_REPOSITORY_FILE)")
	f.StringVarP(&opts.PasswordFile, "password-file", "p", "", "`file` to read the repository password from (default: $RESTIC_PASSWORD_FILE)")
	f.StringVarP(&opts.KeyHint, "key-hint", "", "", "`key` ID of key to try decrypting first (default: $RESTIC_KEY_HINT)")
	f.StringVarP(&opts.PasswordCommand, "password-command", "", "", "shell `command` to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND)")
	f.BoolVarP(&opts.Quiet, "quiet", "q", false, "do not output comprehensive progress report")
	// use empty parameter name as `-v, --verbose n` instead of the correct `--verbose=n` is confusing
	f.CountVarP(&opts.Verbose, "verbose", "v", "be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2)")
	f.BoolVar(&opts.NoLock, "no-lock", false, "do not lock the repository, this allows some operations on read-only repositories")
	f.DurationVar(&opts.RetryLock, "retry-lock", 0, "retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)")
	f.BoolVarP(&opts.JSON, "json", "", false, "set output mode to JSON for commands that support it")
	f.StringVar(&opts.CacheDir, "cache-dir", "", "set the cache `directory`. (default: use system default cache directory)")
	f.BoolVar(&opts.NoCache, "no-cache", false, "do not use a local cache")
	f.StringSliceVar(&opts.RootCertFilenames, "cacert", nil, "`file` to load root certificates from (default: use system certificates or $RESTIC_CACERT)")
	f.StringVar(&opts.TLSClientCertKeyFilename, "tls-client-cert", "", "path to a `file` containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)")
	f.BoolVar(&opts.InsecureNoPassword, "insecure-no-password", false, "use an empty password for the repository, must be passed to every restic command (insecure)")
	f.BoolVar(&opts.InsecureTLS, "insecure-tls", false, "skip TLS certificate verification when connecting to the repository (insecure)")
	f.BoolVar(&opts.CleanupCache, "cleanup-cache", false, "auto remove old cache directories")
	f.Var(&opts.Compression, "compression", "compression mode (only available for repository format version 2), one of (auto|off|fastest|better|max) (default: $RESTIC_COMPRESSION)")
	f.BoolVar(&opts.NoExtraVerify, "no-extra-verify", false, "skip additional verification of data before upload (see documentation)")
	f.IntVar(&opts.Limits.UploadKb, "limit-upload", 0, "limits uploads to a maximum `rate` in KiB/s. (default: unlimited)")
	f.IntVar(&opts.Limits.DownloadKb, "limit-download", 0, "limits downloads to a maximum `rate` in KiB/s. (default: unlimited)")
	f.UintVar(&opts.PackSize, "pack-size", 0, "set target pack `size` in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE)")
	f.StringSliceVarP(&opts.Options, "option", "o", []string{}, "set extended option (`key=value`, can be specified multiple times)")
	f.StringVar(&opts.HTTPUserAgent, "http-user-agent", "", "set a http user agent for outgoing http requests")
	f.DurationVar(&opts.StuckRequestTimeout, "stuck-request-timeout", 5*time.Minute, "`duration` after which to retry stuck requests")

	// apply environment defaults after registering the flags
	opts.Repo = os.Getenv("RESTIC_REPOSITORY")
	opts.RepositoryFile = os.Getenv("RESTIC_REPOSITORY_FILE")
	opts.PasswordFile = os.Getenv("RESTIC_PASSWORD_FILE")
	opts.KeyHint = os.Getenv("RESTIC_KEY_HINT")
	opts.PasswordCommand = os.Getenv("RESTIC_PASSWORD_COMMAND")
	if os.Getenv("RESTIC_CACERT") != "" {
		opts.RootCertFilenames = strings.Split(os.Getenv("RESTIC_CACERT"), ",")
	}
	opts.TLSClientCertKeyFilename = os.Getenv("RESTIC_TLS_CLIENT_CERT")
	comp := os.Getenv("RESTIC_COMPRESSION")
	if comp != "" {
		// ignore error as there's no good way to handle it
		_ = opts.Compression.Set(comp)
	}
	// parse target pack size from env, on error the default value will be used
	targetPackSize, _ := strconv.ParseUint(os.Getenv("RESTIC_PACK_SIZE"), 10, 32)
	opts.PackSize = uint(targetPackSize)

	if os.Getenv("RESTIC_HTTP_USER_AGENT") != "" {
		opts.HTTPUserAgent = os.Getenv("RESTIC_HTTP_USER_AGENT")
	}
}
// PreRun validates the global flags, derives the internal verbosity level,
// parses the extended -o options and, when needsPassword is true, resolves
// the repository password from the configured sources.
func (opts *Options) PreRun(needsPassword bool) error {
	if opts.Quiet && opts.Verbose > 0 {
		return errors.Fatal("--quiet and --verbose cannot be specified at the same time")
	}

	// map the flag combination onto the internal verbosity scale
	// (0 = quiet, 1 = default, 2 = verbose, 3 = very verbose)
	opts.Verbosity = 1
	if opts.Verbose >= 2 {
		opts.Verbosity = 3
	} else if opts.Verbose > 0 {
		opts.Verbosity = 2
	} else if opts.Quiet {
		opts.Verbosity = 0
	}

	// parse extended options
	parsed, err := options.Parse(opts.Options)
	if err != nil {
		return err
	}
	opts.Extended = parsed

	if !needsPassword {
		return nil
	}
	pwd, err := resolvePassword(opts, "RESTIC_PASSWORD")
	if err != nil {
		return errors.Fatalf("Resolving password failed: %v", err)
	}
	opts.Password = pwd
	return nil
}
// resolvePassword determines the password to be used for opening the
// repository. Sources are consulted in order: --password-command,
// --password-file, then the environment variable named by envStr. An empty
// string (and nil error) is returned when no source provides a password; the
// caller may then prompt interactively.
func resolvePassword(opts *Options, envStr string) (string, error) {
	if opts.PasswordFile != "" && opts.PasswordCommand != "" {
		return "", errors.Fatalf("Password file and command are mutually exclusive options")
	}
	if opts.PasswordCommand != "" {
		// split the command shell-style and capture its stdout as the password
		args, err := backend.SplitShellStrings(opts.PasswordCommand)
		if err != nil {
			return "", err
		}
		cmd := exec.Command(args[0], args[1:]...)
		cmd.Stderr = os.Stderr // let the command report problems to the user
		output, err := cmd.Output()
		if err != nil {
			return "", err
		}
		return strings.TrimSpace(string(output)), nil
	}
	if opts.PasswordFile != "" {
		return LoadPasswordFromFile(opts.PasswordFile)
	}
	if pwd := os.Getenv(envStr); pwd != "" {
		return pwd, nil
	}
	return "", nil
}
// LoadPasswordFromFile loads a password from a file while stripping a BOM and
// converting the password to UTF-8. Surrounding whitespace, including the
// trailing newline, is removed. A missing file is reported as a fatal error.
func LoadPasswordFromFile(pwdFile string) (string, error) {
	s, err := textfile.Read(pwdFile)
	if errors.Is(err, os.ErrNotExist) {
		return "", errors.Fatalf("%s does not exist", pwdFile)
	}
	return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile")
}
// readPassword reads the password from a password file, the environment
// variable RESTIC_PASSWORD or prompts the user. If the context is canceled,
// the function leaks the password reading goroutine.
func readPassword(ctx context.Context, gopts Options, prompt string) (string, error) {
	if gopts.InsecureNoPassword {
		// an explicit password contradicts --insecure-no-password
		if gopts.Password != "" {
			return "", errors.Fatal("--insecure-no-password must not be specified together with providing a password via a cli option or environment variable")
		}
		return "", nil
	}
	if gopts.Password != "" {
		return gopts.Password, nil
	}

	// fall back to an interactive prompt on the terminal
	password, err := gopts.Term.ReadPassword(ctx, prompt)
	if err != nil {
		return "", fmt.Errorf("unable to read password: %w", err)
	}
	if len(password) == 0 {
		return "", errors.Fatal("an empty password is not allowed by default. Pass the flag `--insecure-no-password` to restic to disable this check")
	}
	return password, nil
}
// ReadPasswordTwice calls readPassword twice and returns an error when the
// two passwords do not match. The confirmation prompt is skipped for
// non-interactive input. If the context is canceled, the function leaks the
// password reading goroutine.
func ReadPasswordTwice(ctx context.Context, gopts Options, prompt1, prompt2 string) (string, error) {
	first, err := readPassword(ctx, gopts, prompt1)
	if err != nil {
		return "", err
	}

	// passwords from files, commands or the environment are taken as-is;
	// only interactive input is confirmed
	if !gopts.Term.InputIsTerminal() {
		return first, nil
	}

	second, err := readPassword(ctx, gopts, prompt2)
	if err != nil {
		return "", err
	}
	if first != second {
		return "", errors.Fatal("passwords do not match")
	}
	return first, nil
}
// readRepo returns the repository location, taken either from the -r flag or
// from the file given via --repository-file; exactly one of the two must be
// set. File contents are whitespace-trimmed.
func readRepo(gopts Options) (string, error) {
	switch {
	case gopts.Repo == "" && gopts.RepositoryFile == "":
		return "", errors.Fatal("Please specify repository location (-r or --repository-file)")

	case gopts.Repo != "" && gopts.RepositoryFile != "":
		return "", errors.Fatal("Options -r and --repository-file are mutually exclusive, please specify only one")

	case gopts.RepositoryFile != "":
		content, err := textfile.Read(gopts.RepositoryFile)
		if errors.Is(err, os.ErrNotExist) {
			return "", errors.Fatalf("%s does not exist", gopts.RepositoryFile)
		}
		if err != nil {
			return "", err
		}
		return strings.TrimSpace(string(content)), nil

	default:
		return gopts.Repo, nil
	}
}
// maxKeys is the maximum number of repository keys tried when searching for
// one matching the given password.
const maxKeys = 20

// OpenRepository reads the password and opens the repository.
func OpenRepository(ctx context.Context, gopts Options, printer progress.Printer) (*repository.Repository, error) {
	// determine the repository location (-r / --repository-file)
	repo, err := readRepo(gopts)
	if err != nil {
		return nil, err
	}
	// open the backend (no create)
	be, err := innerOpenBackend(ctx, repo, gopts, gopts.Extended, false, printer)
	if err != nil {
		return nil, err
	}
	// verify a plausible config file exists before trying any decryption
	err = hasRepositoryConfig(ctx, be, repo, gopts)
	if err != nil {
		return nil, err
	}
	s, err := createRepositoryInstance(be, gopts)
	if err != nil {
		return nil, err
	}
	// resolve the password and search for a matching key
	err = decryptRepository(ctx, s, &gopts, printer)
	if err != nil {
		return nil, err
	}
	printRepositoryInfo(s, gopts, printer)
	if gopts.NoCache {
		return s, nil
	}
	// attach the local cache unless disabled
	err = setupCache(s, gopts, printer)
	if err != nil {
		return nil, err
	}
	return s, nil
}
// hasRepositoryConfig checks if the repository config file exists and is not
// empty. A missing config is wrapped in ErrNoRepository so callers can
// distinguish "no repository here" from other failures.
func hasRepositoryConfig(ctx context.Context, be backend.Backend, repo string, gopts Options) error {
	fi, err := be.Stat(ctx, backend.Handle{Type: restic.ConfigFile})
	if be.IsNotExist(err) {
		//nolint:staticcheck // capitalized error string is intentional
		return fmt.Errorf("Fatal: %w: unable to open config file: %v\nIs there a repository at the following location?\n%v", ErrNoRepository, err, location.StripPassword(gopts.Backends, repo))
	}
	if err != nil {
		return errors.Fatalf("unable to open config file: %v\n%v", err, location.StripPassword(gopts.Backends, repo))
	}
	// a zero-size config cannot be a valid repository
	if fi.Size == 0 {
		return errors.New("config file has zero size, invalid repository?")
	}
	return nil
}
// createRepositoryInstance creates a new repository instance with the given options.
func createRepositoryInstance(be backend.Backend, gopts Options) (*repository.Repository, error) {
	repoOpts := repository.Options{
		Compression:   gopts.Compression,
		PackSize:      gopts.PackSize * 1024 * 1024, // flag value is given in MiB
		NoExtraVerify: gopts.NoExtraVerify,
	}
	repo, err := repository.New(be, repoOpts)
	if err != nil {
		return nil, errors.Fatalf("%s", err)
	}
	return repo, nil
}
// decryptRepository handles password reading and decrypts the repository.
// When running interactively without a preset password, the user gets up to
// three attempts; otherwise a single attempt is made. The password that
// finally worked is stored back into gopts.
func decryptRepository(ctx context.Context, s *repository.Repository, gopts *Options, printer progress.Printer) error {
	passwordTriesLeft := 1
	if gopts.Term.InputIsTerminal() && gopts.Password == "" && !gopts.InsecureNoPassword {
		passwordTriesLeft = 3
	}
	var err error
	for ; passwordTriesLeft > 0; passwordTriesLeft-- {
		gopts.Password, err = readPassword(ctx, *gopts, "enter password for repository: ")
		if ctx.Err() != nil {
			return ctx.Err()
		}
		if err != nil && passwordTriesLeft > 1 {
			// clear the failed password so the next iteration prompts again
			gopts.Password = ""
			printer.E("%s. Try again", err)
		}
		if err != nil {
			continue
		}
		err = s.SearchKey(ctx, gopts.Password, maxKeys, gopts.KeyHint)
		if err != nil && passwordTriesLeft > 1 {
			gopts.Password = ""
			printer.E("%s. Try again", err)
		}
	}
	if err != nil {
		// keep fatal and "no key found" errors unwrapped so callers can match them
		if errors.IsFatal(err) || errors.Is(err, repository.ErrNoKeyFound) {
			return err
		}
		return errors.Fatalf("%s", err)
	}
	return nil
}
// printRepositoryInfo displays the repository ID, version and compression level.
func printRepositoryInfo(s *repository.Repository, gopts Options, printer progress.Printer) {
	cfg := s.Config()

	// show only a short ID prefix, like other restic output
	shortID := cfg.ID
	if len(shortID) > 8 {
		shortID = shortID[:8]
	}

	suffix := ""
	if cfg.Version >= 2 {
		// compression is only available for repository format version 2+
		suffix = ", compression level " + gopts.Compression.String()
	}
	printer.PT("repository %v opened (version %v%s)", shortID, cfg.Version, suffix)
}
// setupCache creates a new cache for the repository and removes old cache
// directories if --cleanup-cache was given; otherwise it only reports them.
// Failures to enumerate or remove old caches are reported but not fatal.
func setupCache(s *repository.Repository, gopts Options, printer progress.Printer) error {
	c, err := cache.New(s.Config().ID, gopts.CacheDir)
	if err != nil {
		printer.E("unable to open cache: %v", err)
		return err
	}

	if c.Created {
		printer.PT("created new cache in %v", c.Base)
	}

	// start using the cache
	s.UseCache(c, printer.E)

	oldCacheDirs, err := cache.Old(c.Base)
	if err != nil {
		printer.E("unable to find old cache directories: %v", err)
	}

	// nothing more to do if no old cache dirs could be found
	if len(oldCacheDirs) == 0 {
		return nil
	}

	// cleanup old cache dirs if instructed to do so
	if gopts.CleanupCache {
		printer.PT("removing %d old cache dirs from %v", len(oldCacheDirs), c.Base)
		for _, item := range oldCacheDirs {
			dir := filepath.Join(c.Base, item.Name())
			err = os.RemoveAll(dir)
			if err != nil {
				printer.E("unable to remove %v: %v", dir, err)
			}
		}
	} else {
		printer.PT("found %d old cache directories in %v, run `restic cache --cleanup` to remove them",
			len(oldCacheDirs), c.Base)
	}
	return nil
}
// CreateRepository creates a repository with the given version and chunker
// polynomial. The password is read twice (with confirmation on interactive
// input) before the backend is created and the repository initialized.
func CreateRepository(ctx context.Context, gopts Options, version uint, chunkerPolynomial *chunker.Pol, printer progress.Printer) (*repository.Repository, error) {
	if version < restic.MinRepoVersion || version > restic.MaxRepoVersion {
		return nil, errors.Fatalf("only repository versions between %v and %v are allowed", restic.MinRepoVersion, restic.MaxRepoVersion)
	}

	repo, err := readRepo(gopts)
	if err != nil {
		return nil, err
	}

	gopts.Password, err = ReadPasswordTwice(ctx, gopts,
		"enter password for new repository: ",
		"enter password again: ")
	if err != nil {
		return nil, err
	}

	// open the backend in create mode
	be, err := innerOpenBackend(ctx, repo, gopts, gopts.Extended, true, printer)
	if err != nil {
		return nil, errors.Fatalf("create repository at %s failed: %v", location.StripPassword(gopts.Backends, repo), err)
	}

	s, err := createRepositoryInstance(be, gopts)
	if err != nil {
		return nil, err
	}

	err = s.Init(ctx, version, gopts.Password, chunkerPolynomial)
	if err != nil {
		return nil, errors.Fatalf("create key in repository at %s failed: %v", location.StripPassword(gopts.Backends, repo), err)
	}
	return s, nil
}
// innerOpenBackend parses the repository location s, sets up the (rate
// limited) transport, opens or creates the backend depending on create, and
// applies the standard wrappers (logging, connection limiting, test hooks,
// retries).
func innerOpenBackend(ctx context.Context, s string, gopts Options, opts options.Options, create bool, printer progress.Printer) (backend.Backend, error) {
	debug.Log("parsing location %v", location.StripPassword(gopts.Backends, s))
	scheme, cfg, err := parseConfig(gopts.Backends, s, opts)
	if err != nil {
		return nil, err
	}
	rt, lim, err := setupTransport(gopts)
	if err != nil {
		return nil, err
	}
	be, err := createOrOpenBackend(ctx, scheme, cfg, rt, lim, gopts, s, create, printer)
	if err != nil {
		return nil, err
	}
	be, err = wrapBackend(be, gopts, printer)
	if err != nil {
		return nil, err
	}
	return be, nil
}
// parseConfig parses the repository location and extended options and returns
// the scheme and its backend-specific configuration. Environment variables
// are applied first, then the -o options for this backend scheme.
func parseConfig(backends *location.Registry, s string, opts options.Options) (string, interface{}, error) {
	loc, err := location.Parse(backends, s)
	if err != nil {
		return "", nil, errors.Fatalf("parsing repository location failed: %v", err)
	}

	cfg := loc.Config
	// let the backend pick up its own environment variables, if supported
	if cfg, ok := cfg.(backend.ApplyEnvironmenter); ok {
		cfg.ApplyEnvironment("")
	}

	// only apply options for a particular backend here
	opts = opts.Extract(loc.Scheme)
	if err := opts.Apply(loc.Scheme, cfg); err != nil {
		return "", nil, err
	}

	debug.Log("opening %v repository at %#v", loc.Scheme, cfg)
	return loc.Scheme, cfg, nil
}
// setupTransport creates and configures the transport with rate limiting.
func setupTransport(gopts Options) (http.RoundTripper, limiter.Limiter, error) {
	baseRT, err := backend.Transport(gopts.TransportOptions)
	if err != nil {
		return nil, nil, errors.Fatalf("%s", err)
	}

	// throttle all HTTP traffic according to the configured limits
	lim := limiter.NewStaticLimiter(gopts.Limits)
	return lim.Transport(baseRT), lim, nil
}
// createOrOpenBackend creates or opens a backend using the factory registered
// for scheme. A backend.ErrNoRepository from the factory is wrapped in
// ErrNoRepository; other open failures are fatal.
func createOrOpenBackend(ctx context.Context, scheme string, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter, gopts Options, s string, create bool, printer progress.Printer) (backend.Backend, error) {
	factory := gopts.Backends.Lookup(scheme)
	if factory == nil {
		return nil, errors.Fatalf("invalid backend: %q", scheme)
	}

	var be backend.Backend
	var err error
	if create {
		be, err = factory.Create(ctx, cfg, rt, lim, printer.E)
	} else {
		be, err = factory.Open(ctx, cfg, rt, lim, printer.E)
	}
	if errors.Is(err, backend.ErrNoRepository) {
		//nolint:staticcheck // capitalized error string is intentional
		return nil, fmt.Errorf("Fatal: %w at %v: %v", ErrNoRepository, location.StripPassword(gopts.Backends, s), err)
	}
	if err != nil {
		if create {
			// init already wraps the error message
			return nil, err
		}
		return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(gopts.Backends, s), err)
	}
	return be, nil
}
// wrapBackend applies debug logging, test hooks, and the retry wrapper to the
// backend. The inner test hook is applied before retrying, the outer one
// after, so tests can inject failures on either side of the retry logic.
func wrapBackend(be backend.Backend, gopts Options, printer progress.Printer) (backend.Backend, error) {
	// wrap with debug logging and connection limiting
	be = logger.New(sema.NewBackend(be))

	// wrap backend if a test specified an inner hook
	if gopts.BackendInnerTestHook != nil {
		var err error
		be, err = gopts.BackendInnerTestHook(be)
		if err != nil {
			return nil, err
		}
	}

	// report retries and eventual success to the user
	report := func(msg string, err error, d time.Duration) {
		if d >= 0 {
			printer.E("%v returned error, retrying after %v: %v", msg, d, err)
		} else {
			printer.E("%v failed: %v", msg, err)
		}
	}
	success := func(msg string, retries int) {
		printer.E("%v operation successful after %d retries", msg, retries)
	}
	// retry failing operations for up to 15 minutes
	be = retry.New(be, 15*time.Minute, report, success)

	// wrap backend if a test specified a hook
	if gopts.BackendTestHook != nil {
		var err error
		be, err = gopts.BackendTestHook(be)
		if err != nil {
			return nil, err
		}
	}
	return be, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/options/secret_string.go | internal/options/secret_string.go | package options
// SecretString wraps a secret so that formatting it with fmt (%v, %s, %#v)
// prints a redaction marker instead of the value. The raw value is only
// available through Unwrap.
type SecretString struct {
	s *string
}

// NewSecretString wraps the given secret value.
func NewSecretString(s string) SecretString {
	return SecretString{s: &s}
}

// GoString implements fmt.GoStringer so %#v also redacts the secret.
func (s SecretString) GoString() string {
	return "\"" + s.String() + "\""
}

// String implements fmt.Stringer; non-empty secrets render as a marker,
// empty or unset secrets as the empty string.
func (s SecretString) String() string {
	if s.s != nil && *s.s != "" {
		return "**redacted**"
	}
	return ""
}

// Unwrap returns the wrapped secret value; the zero value yields "".
func (s *SecretString) Unwrap() string {
	if s.s != nil {
		return *s.s
	}
	return ""
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/options/options_test.go | internal/options/options_test.go | package options
import (
"fmt"
"reflect"
"regexp"
"testing"
"time"
)
// optsTests lists valid inputs for TestParseOptions together with the Options
// map they must parse to (keys lowercased, values trimmed, duplicates with
// identical values collapsed).
var optsTests = []struct {
	input  []string
	output Options
}{
	{
		[]string{"foo=bar", "bar=baz ", "k="},
		Options{
			"foo": "bar",
			"bar": "baz",
			"k":   "",
		},
	},
	{
		[]string{"Foo=23", "baR", "k=thing with spaces"},
		Options{
			"foo": "23",
			"bar": "",
			"k":   "thing with spaces",
		},
	},
	{
		[]string{"k=thing with spaces", "k2=more spaces = not evil"},
		Options{
			"k":  "thing with spaces",
			"k2": "more spaces = not evil",
		},
	},
	{
		[]string{"x=1", "foo=bar", "y=2", "foo=bar"},
		Options{
			"x":   "1",
			"y":   "2",
			"foo": "bar",
		},
	},
}
func TestParseOptions(t *testing.T) {
for i, test := range optsTests {
t.Run(fmt.Sprintf("test-%v", i), func(t *testing.T) {
opts, err := Parse(test.input)
if err != nil {
t.Fatalf("unable to parse options: %v", err)
}
if !reflect.DeepEqual(opts, test.output) {
t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, opts)
}
})
}
}
// invalidOptsTests lists inputs Parse must reject, with the exact error
// message expected: empty keys and duplicate keys with differing values.
var invalidOptsTests = []struct {
	input []string
	err string
}{
	{
		[]string{"=bar", "bar=baz", "k="},
		"Fatal: empty key is not a valid option",
	},
	{
		[]string{"x=1", "foo=bar", "y=2", "foo=baz"},
		`Fatal: key "foo" present more than once`,
	},
}
// TestParseInvalidOptions checks that Parse fails with the expected
// error message for each invalid input.
func TestParseInvalidOptions(t *testing.T) {
	for _, test := range invalidOptsTests {
		t.Run(test.err, func(t *testing.T) {
			_, err := Parse(test.input)
			if err == nil {
				t.Fatalf("expected error (%v) not found, err is nil", test.err)
			}
			if err.Error() != test.err {
				t.Fatalf("expected error %q, got %q", test.err, err.Error())
			}
		})
	}
}
// extractTests checks that Extract keeps only keys under the given
// namespace and strips the namespace prefix from them.
var extractTests = []struct {
	input Options
	ns string
	output Options
}{
	{
		input: Options{
			"foo.bar:": "baz",
			"s3.timeout": "10s",
			"sftp.timeout": "5s",
			"global": "foobar",
		},
		ns: "s3",
		output: Options{
			"timeout": "10s",
		},
	},
}
// TestOptionsExtract runs Extract for each test case and compares the
// filtered map against the expected output.
func TestOptionsExtract(t *testing.T) {
	for _, test := range extractTests {
		t.Run(test.ns, func(t *testing.T) {
			opts := test.input.Extract(test.ns)
			if !reflect.DeepEqual(opts, test.output) {
				t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, opts)
			}
		})
	}
}
// Target is used for Apply() tests. Each tagged field exercises one of
// the type branches Apply supports (string, int, time.Duration, bool);
// Other has no tag and must never be touched.
type Target struct {
	Name string `option:"name"`
	ID int `option:"id"`
	Timeout time.Duration `option:"timeout"`
	Switch bool `option:"switch"`
	Other string
}
// setTests pairs option maps with the Target value Apply is expected to
// produce from the zero value.
var setTests = []struct {
	input Options
	output Target
}{
	{
		Options{
			"name": "foobar",
		},
		Target{
			Name: "foobar",
		},
	},
	{
		Options{
			"name": "foobar",
			"id": "1234",
		},
		Target{
			Name: "foobar",
			ID: 1234,
		},
	},
	{
		Options{
			"timeout": "10m3s",
		},
		Target{
			Timeout: 10*time.Minute + 3*time.Second,
		},
	},
	{
		Options{
			"switch": "true",
		},
		Target{
			Switch: true,
		},
	},
}
// TestOptionsApply checks that Apply fills the tagged struct fields with
// the parsed option values.
func TestOptionsApply(t *testing.T) {
	for i, test := range setTests {
		t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
			var dst Target
			err := test.input.Apply("", &dst)
			if err != nil {
				t.Fatal(err)
			}
			if dst != test.output {
				t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, dst)
			}
		})
	}
}
// invalidSetTests lists option maps Apply must reject, together with a
// regexp the returned error message has to match (unknown key plus one
// parse failure per supported field type).
var invalidSetTests = []struct {
	input Options
	namespace string
	err string
}{
	{
		Options{
			"first_name": "foobar",
		},
		"ns",
		"Fatal: option ns.first_name is not known",
	},
	{
		Options{
			"id": "foobar",
		},
		"ns",
		`strconv.ParseInt: parsing "foobar": invalid syntax`,
	},
	{
		Options{
			"timeout": "2134",
		},
		"ns",
		`time: missing unit in duration "?2134"?`,
	},
	{
		Options{
			"switch": "yes",
		},
		"ns",
		`strconv.ParseBool: parsing "yes": invalid syntax`,
	},
}
// TestOptionsApplyInvalid checks that Apply returns an error matching
// the expected pattern for each invalid option map.
func TestOptionsApplyInvalid(t *testing.T) {
	for i, test := range invalidSetTests {
		t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
			var dst Target
			err := test.input.Apply(test.namespace, &dst)
			if err == nil {
				t.Fatalf("expected error %v not found", test.err)
			}
			matched, e := regexp.MatchString(test.err, err.Error())
			if e != nil {
				t.Fatal(e)
			}
			if !matched {
				t.Fatalf("expected error to match %q, got %q", test.err, err.Error())
			}
		})
	}
}
// TestListOptions checks that listOptions extracts the `option`/`help`
// struct tags in field order, and that a pointer to a struct is
// dereferenced the same way as a struct value.
func TestListOptions(t *testing.T) {
	teststruct := struct {
		Foo string `option:"foo" help:"bar text help"`
	}{}
	tests := []struct {
		cfg interface{}
		opts []Help
	}{
		{
			struct {
				Foo string `option:"foo" help:"bar text help"`
			}{},
			[]Help{
				{Name: "foo", Text: "bar text help"},
			},
		},
		{
			struct {
				Foo string `option:"foo" help:"bar text help"`
				Bar string `option:"bar" help:"bar text help"`
			}{},
			[]Help{
				{Name: "foo", Text: "bar text help"},
				{Name: "bar", Text: "bar text help"},
			},
		},
		{
			struct {
				Bar string `option:"bar" help:"bar text help"`
				Foo string `option:"foo" help:"bar text help"`
			}{},
			[]Help{
				{Name: "bar", Text: "bar text help"},
				{Name: "foo", Text: "bar text help"},
			},
		},
		{
			// pointer to struct must work as well as a value
			&teststruct,
			[]Help{
				{Name: "foo", Text: "bar text help"},
			},
		},
	}
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			opts := listOptions(test.cfg)
			if !reflect.DeepEqual(opts, test.opts) {
				t.Fatalf("wrong opts, want:\n %v\ngot:\n %v", test.opts, opts)
			}
		})
	}
}
// TestAppendAllOptions checks that appendAllOptions tags each Help entry
// with its namespace and keeps the combined list sorted by namespace,
// then name.
func TestAppendAllOptions(t *testing.T) {
	tests := []struct {
		cfgs map[string]interface{}
		opts []Help
	}{
		{
			map[string]interface{}{
				"local": struct {
					Foo string `option:"foo" help:"bar text help"`
				}{},
				"sftp": struct {
					Foo string `option:"foo" help:"bar text help2"`
					Bar string `option:"bar" help:"bar text help"`
				}{},
			},
			[]Help{
				{Namespace: "local", Name: "foo", Text: "bar text help"},
				{Namespace: "sftp", Name: "bar", Text: "bar text help"},
				{Namespace: "sftp", Name: "foo", Text: "bar text help2"},
			},
		},
	}
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			var opts []Help
			for ns, cfg := range test.cfgs {
				opts = appendAllOptions(opts, ns, cfg)
			}
			if !reflect.DeepEqual(opts, test.opts) {
				t.Fatalf("wrong list, want:\n %v\ngot:\n %v", test.opts, opts)
			}
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/options/options.go | internal/options/options.go | package options
import (
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/restic/restic/internal/errors"
)
// Options holds options in the form key=value.
type Options map[string]string
// opts is the package-level registry filled by Register and read by
// List. Access is not synchronized; Register is meant for init-time use.
var opts []Help
// Register allows registering options so that they can be listed with List.
func Register(ns string, cfg interface{}) {
	opts = appendAllOptions(opts, ns, cfg)
}
// List returns a list of all registered options (using Register()).
// A copy is returned so callers cannot mutate the registry.
func List() (list []Help) {
	list = make([]Help, len(opts))
	copy(list, opts)
	return list
}
// appendAllOptions appends all options in cfg to opts, sorted by namespace.
func appendAllOptions(opts []Help, ns string, cfg interface{}) []Help {
	items := listOptions(cfg)
	for i := range items {
		items[i].Namespace = ns
	}
	opts = append(opts, items...)
	sort.Sort(helpList(opts))
	return opts
}
// listOptions returns a list of options of cfg, one Help entry per
// struct field that carries a non-empty `option` tag.
func listOptions(cfg interface{}) (opts []Help) {
	// resolve indirection if cfg is a pointer
	v := reflect.Indirect(reflect.ValueOf(cfg))
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		name := field.Tag.Get("option")
		if name == "" {
			// fields without an option tag are not exposed
			continue
		}
		opts = append(opts, Help{
			Name: name,
			Text: field.Tag.Get("help"),
		})
	}
	return opts
}
// Help contains information about an option.
type Help struct {
	Namespace string
	Name string
	Text string
}
// helpList implements sort.Interface for []Help, ordering entries by
// namespace first and name second.
type helpList []Help
// Len is the number of elements in the collection.
func (h helpList) Len() int {
	return len(h)
}
// Less reports whether the element with
// index i should sort before the element with index j.
func (h helpList) Less(i, j int) bool {
	if h[i].Namespace == h[j].Namespace {
		return h[i].Name < h[j].Name
	}
	return h[i].Namespace < h[j].Namespace
}
// Swap swaps the elements with indexes i and j.
func (h helpList) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}
// splitKeyValue splits s at the first equals (=) sign. The key is
// lower-cased and trimmed; the value is trimmed but otherwise untouched.
// If there is no equals sign, the whole string becomes the key and the
// value is empty.
func splitKeyValue(s string) (key string, value string) {
	before, after, _ := strings.Cut(s, "=")
	return strings.ToLower(strings.TrimSpace(before)), strings.TrimSpace(after)
}
// Parse takes a slice of key=value pairs and returns an Options type.
// The key may include namespaces, separated by dots. Example: "foo.bar=value".
// Keys are converted to lower-case. An empty key is rejected, as is a
// key that occurs more than once with different values.
func Parse(in []string) (Options, error) {
	result := make(Options, len(in))
	for _, pair := range in {
		key, value := splitKeyValue(pair)
		if key == "" {
			return Options{}, errors.Fatalf("empty key is not a valid option")
		}
		// a repeated key is only an error if it maps to a different value
		if existing, seen := result[key]; seen && existing != value {
			return Options{}, errors.Fatalf("key %q present more than once", key)
		}
		result[key] = value
	}
	return result, nil
}
// Extract returns an Options type with all keys in namespace ns, which is
// also stripped from the keys. ns may be given with or without the
// trailing dot; one is appended if missing.
func (o Options) Extract(ns string) Options {
	// Normalize the namespace so it ends with exactly one separator dot.
	// Using HasSuffix instead of indexing ns[len(ns)-1] avoids a panic
	// when ns is the empty string.
	if !strings.HasSuffix(ns, ".") {
		ns += "."
	}
	opts := make(Options)
	for k, v := range o {
		if !strings.HasPrefix(k, ns) {
			continue
		}
		// keep the key with the namespace prefix removed
		opts[strings.TrimPrefix(k, ns)] = v
	}
	return opts
}
// Apply sets the options on dst via reflection, using the struct tag `option`.
// The namespace argument (ns) is only used for error messages.
// dst must be a pointer to a struct; supported field types are string,
// int, uint, bool and (time.)Duration. An option key without a matching
// tagged field is a fatal error; an unsupported field type panics, as
// does a duplicated option tag within the struct.
func (o Options) Apply(ns string, dst interface{}) error {
	v := reflect.ValueOf(dst).Elem()
	// build a lookup table from option tag to struct field
	fields := make(map[string]reflect.StructField)
	for i := 0; i < v.NumField(); i++ {
		f := v.Type().Field(i)
		tag := f.Tag.Get("option")
		if tag == "" {
			continue
		}
		// duplicate tags are a programming error, not user input
		if _, ok := fields[tag]; ok {
			panic("option tag " + tag + " is not unique in " + v.Type().Name())
		}
		fields[tag] = f
	}
	for key, value := range o {
		field, ok := fields[key]
		if !ok {
			// prefix the key with the namespace for the error message only
			if ns != "" {
				key = ns + "." + key
			}
			return errors.Fatalf("option %v is not known", key)
		}
		i := field.Index[0]
		// dispatch on the field's type name; note "Duration" matches by
		// name only, which in practice means time.Duration
		switch v.Type().Field(i).Type.Name() {
		case "string":
			v.Field(i).SetString(value)
		case "int":
			vi, err := strconv.ParseInt(value, 0, 32)
			if err != nil {
				return err
			}
			v.Field(i).SetInt(vi)
		case "uint":
			vi, err := strconv.ParseUint(value, 0, 32)
			if err != nil {
				return err
			}
			v.Field(i).SetUint(vi)
		case "bool":
			vi, err := strconv.ParseBool(value)
			if err != nil {
				return err
			}
			v.Field(i).SetBool(vi)
		case "Duration":
			d, err := time.ParseDuration(value)
			if err != nil {
				return err
			}
			v.Field(i).SetInt(int64(d))
		default:
			panic("type " + v.Type().Field(i).Type.Name() + " not handled")
		}
	}
	return nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/options/secret_string_test.go | internal/options/secret_string_test.go | package options_test
import (
"fmt"
"strings"
"testing"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/test"
)
// secretTest embeds a SecretString so the struct-formatting tests can
// verify that the secret stays hidden even via %#v on a container.
type secretTest struct {
	str options.SecretString
}
// assertNotIn fails the test if substr occurs anywhere in str.
func assertNotIn(t *testing.T, str string, substr string) {
	if strings.Contains(str, substr) {
		t.Fatalf("'%s' should not contain '%s'", str, substr)
	}
}
// TestSecretString checks that every fmt formatting path redacts a
// non-empty secret while Unwrap still returns the original value.
func TestSecretString(t *testing.T) {
	keyStr := "secret-key"
	secret := options.NewSecretString(keyStr)
	test.Equals(t, "**redacted**", secret.String())
	test.Equals(t, `"**redacted**"`, secret.GoString())
	test.Equals(t, "**redacted**", fmt.Sprint(secret))
	test.Equals(t, "**redacted**", fmt.Sprintf("%v", secret))
	test.Equals(t, `"**redacted**"`, fmt.Sprintf("%#v", secret))
	test.Equals(t, keyStr, secret.Unwrap())
}
// TestSecretStringStruct checks that formatting a struct containing a
// SecretString never exposes the wrapped value.
func TestSecretStringStruct(t *testing.T) {
	keyStr := "secret-key"
	secretStruct := &secretTest{
		str: options.NewSecretString(keyStr),
	}
	assertNotIn(t, fmt.Sprint(secretStruct), keyStr)
	assertNotIn(t, fmt.Sprintf("%v", secretStruct), keyStr)
	assertNotIn(t, fmt.Sprintf("%#v", secretStruct), keyStr)
}
// TestSecretStringEmpty checks that an empty secret renders as the empty
// string instead of the redaction marker.
func TestSecretStringEmpty(t *testing.T) {
	keyStr := ""
	secret := options.NewSecretString(keyStr)
	test.Equals(t, "", secret.String())
	test.Equals(t, `""`, secret.GoString())
	test.Equals(t, "", fmt.Sprint(secret))
	test.Equals(t, "", fmt.Sprintf("%v", secret))
	test.Equals(t, `""`, fmt.Sprintf("%#v", secret))
	test.Equals(t, keyStr, secret.Unwrap())
}
// TestSecretStringDefault checks that the zero value of SecretString is
// usable and behaves like an empty secret.
func TestSecretStringDefault(t *testing.T) {
	secretStruct := &secretTest{}
	test.Equals(t, "", secretStruct.str.String())
	test.Equals(t, `""`, secretStruct.str.GoString())
	test.Equals(t, "", secretStruct.str.Unwrap())
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/checker/checker.go | internal/checker/checker.go | package checker
import (
"context"
"fmt"
"runtime"
"sync"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
"golang.org/x/sync/errgroup"
)
// Checker runs various checks on a repository. It is advisable to create an
// exclusive Lock in the repository before running any checks.
//
// A Checker only tests for internal errors within the data structures of the
// repository (e.g. missing blobs), and needs a valid Repository to work on.
type Checker struct {
	*repository.Checker
	// blobRefs records which blobs have been referenced while traversing
	// snapshot trees; the embedded mutex guards M because tree-check
	// workers run concurrently (see Structure / checkTree).
	blobRefs struct {
		sync.Mutex
		M restic.AssociatedBlobSet
	}
	// trackUnused enables recording of data blob references; required by
	// UnusedBlobs and enabled automatically when snapshot filtering is used.
	trackUnused bool
	// snapshots is the memorized snapshot list set by LoadSnapshots.
	snapshots restic.Lister
	repo restic.Repository
	// when snapshot filtering is being used
	snapshotFilter *data.SnapshotFilter
	args []string
}
// checkerRepository is the repository flavor New requires: a full
// restic.Repository that also exposes the low-level repository checker.
type checkerRepository interface {
	restic.Repository
	Checker() *repository.Checker
}
// New returns a new checker which runs on repo. trackUnused controls
// whether data blob references are recorded during Structure (needed for
// UnusedBlobs).
func New(repo checkerRepository, trackUnused bool) *Checker {
	c := &Checker{
		Checker: repo.Checker(),
		repo: repo,
		trackUnused: trackUnused,
	}
	// allocate the blob set up front; Structure and checkTree insert into it
	c.blobRefs.M = c.repo.NewAssociatedBlobSet()
	return c
}
// LoadSnapshots memorizes the repository's snapshot list and stores the
// snapshot filter and ID arguments for later use by Structure and
// ReadPacks. It must be called before Structure.
func (c *Checker) LoadSnapshots(ctx context.Context, snapshotFilter *data.SnapshotFilter, args []string) error {
	var err error
	c.snapshots, err = restic.MemorizeList(ctx, c.repo, restic.SnapshotFile)
	c.args = args
	c.snapshotFilter = snapshotFilter
	return err
}
// IsFiltered returns true if snapshot filtering is active
func (c *Checker) IsFiltered() bool {
	return len(c.args) != 0 || !c.snapshotFilter.Empty()
}
// Error is an error that occurred while checking a repository. TreeID is
// the null ID when the error is not tied to a specific tree.
type Error struct {
	TreeID restic.ID
	Err error
}
// Error prefixes the wrapped error with the tree ID, if one is set.
func (e *Error) Error() string {
	if !e.TreeID.IsNull() {
		return "tree " + e.TreeID.String() + ": " + e.Err.Error()
	}
	return e.Err.Error()
}
// TreeError collects several errors that occurred while processing a tree.
type TreeError struct {
	ID restic.ID
	Errors []error
}
// Error formats the tree ID together with all collected errors.
func (e *TreeError) Error() string {
	return fmt.Sprintf("tree %v: %v", e.ID, e.Errors)
}
// checkTreeWorker checks the trees received and sends out errors to errChan.
// It terminates when the trees channel is closed or the context is
// canceled while sending an error. Several workers run concurrently;
// checkTree itself handles the required locking.
func (c *Checker) checkTreeWorker(ctx context.Context, trees <-chan data.TreeItem, out chan<- error) {
	for job := range trees {
		debug.Log("check tree %v (tree %v, err %v)", job.ID, job.Tree, job.Error)
		var errs []error
		if job.Error != nil {
			// the tree could not be loaded/decoded; forward that error
			errs = append(errs, job.Error)
		} else {
			errs = c.checkTree(job.ID, job.Tree)
		}
		if len(errs) == 0 {
			continue
		}
		treeError := &TreeError{ID: job.ID, Errors: errs}
		select {
		case <-ctx.Done():
			return
		case out <- treeError:
			debug.Log("tree %v: sent %d errors", treeError.ID, len(treeError.Errors))
		}
	}
}
// loadSnapshotTreeIDs returns the root tree IDs of all snapshots in the
// repository. Per-snapshot load errors are collected into errs rather
// than aborting the iteration, so ids may be partial on error.
func loadSnapshotTreeIDs(ctx context.Context, lister restic.Lister, repo restic.LoaderUnpacked) (ids restic.IDs, errs []error) {
	err := data.ForAllSnapshots(ctx, lister, repo, nil, func(id restic.ID, sn *data.Snapshot, err error) error {
		if err != nil {
			// remember the error, but continue with the other snapshots
			errs = append(errs, err)
			return nil
		}
		treeID := *sn.Tree
		debug.Log("snapshot %v has tree %v", id, treeID)
		ids = append(ids, treeID)
		return nil
	})
	if err != nil {
		errs = append(errs, err)
	}
	return ids, errs
}
// loadActiveTrees returns the root trees of the snapshots selected for
// checking. Without filtering it loads all snapshot trees; with
// filtering it applies snapshotFilter/args and additionally enables blob
// reference tracking so ReadPacks can restrict itself to the used packs.
func (c *Checker) loadActiveTrees(ctx context.Context, snapshotFilter *data.SnapshotFilter, args []string) (trees restic.IDs, errs []error) {
	trees = []restic.ID{}
	errs = []error{}
	if !c.IsFiltered() {
		return loadSnapshotTreeIDs(ctx, c.snapshots, c.repo)
	}
	err := snapshotFilter.FindAll(ctx, c.snapshots, c.repo, args, func(_ string, sn *data.Snapshot, err error) error {
		if err != nil {
			// returning the error aborts FindAll
			errs = append(errs, err)
			return err
		} else if sn != nil {
			trees = append(trees, *sn.Tree)
		}
		return nil
	})
	if err != nil {
		errs = append(errs, err)
		return nil, errs
	}
	// track blobs to learn which packs need to be checked
	c.trackUnused = true
	return trees, errs
}
// Structure checks that for all snapshots all referenced data blobs and
// subtrees are available in the index. errChan is closed after all trees have
// been traversed. Errors are reported through errChan; p (may be nil)
// tracks progress over the snapshot root trees.
func (c *Checker) Structure(ctx context.Context, p *progress.Counter, errChan chan<- error) {
	trees, errs := c.loadActiveTrees(ctx, c.snapshotFilter, c.args)
	p.SetMax(uint64(len(trees)))
	debug.Log("need to check %d trees from snapshots, %d errs returned", len(trees), len(errs))
	// forward the errors collected while loading the snapshot trees
	for _, err := range errs {
		select {
		case <-ctx.Done():
			return
		case errChan <- err:
		}
	}
	wg, ctx := errgroup.WithContext(ctx)
	// the callback decides whether a tree was already visited; visited
	// trees are skipped by StreamTrees
	treeStream := data.StreamTrees(ctx, wg, c.repo, trees, func(treeID restic.ID) bool {
		// blobRefs may be accessed in parallel by checkTree
		c.blobRefs.Lock()
		h := restic.BlobHandle{ID: treeID, Type: restic.TreeBlob}
		blobReferenced := c.blobRefs.M.Has(h)
		// noop if already referenced
		c.blobRefs.M.Insert(h)
		c.blobRefs.Unlock()
		return blobReferenced
	}, p)
	defer close(errChan)
	// The checkTree worker only processes already decoded trees and is thus CPU-bound
	workerCount := runtime.GOMAXPROCS(0)
	for i := 0; i < workerCount; i++ {
		wg.Go(func() error {
			c.checkTreeWorker(ctx, treeStream, errChan)
			return nil
		})
	}
	// the wait group should not return an error because no worker returns an
	// error, so panic if that has changed somehow.
	err := wg.Wait()
	if err != nil {
		panic(err)
	}
}
// checkTree validates a single decoded tree: file nodes must reference
// non-null data blobs that exist in the index, dir nodes must have a
// non-null subtree ID, and every node needs a valid type and a name.
// All problems found are returned; the function never aborts early.
// It is called concurrently from several checkTreeWorker goroutines.
func (c *Checker) checkTree(id restic.ID, tree *data.Tree) (errs []error) {
	debug.Log("checking tree %v", id)
	for _, node := range tree.Nodes {
		switch node.Type {
		case data.NodeTypeFile:
			if node.Content == nil {
				errs = append(errs, &Error{TreeID: id, Err: errors.Errorf("file %q has nil blob list", node.Name)})
			}
			for b, blobID := range node.Content {
				if blobID.IsNull() {
					errs = append(errs, &Error{TreeID: id, Err: errors.Errorf("file %q blob %d has null ID", node.Name, b)})
					continue
				}
				// Note that we do not use the blob size. The "obvious" check
				// whether the sum of the blob sizes matches the file size
				// unfortunately fails in some cases that are not resolvable
				// by users, so we omit this check, see #1887
				_, found := c.repo.LookupBlobSize(restic.DataBlob, blobID)
				if !found {
					debug.Log("tree %v references blob %v which isn't contained in index", id, blobID)
					errs = append(errs, &Error{TreeID: id, Err: errors.Errorf("file %q blob %v not found in index", node.Name, blobID)})
				}
			}
			if c.trackUnused {
				// loop a second time to keep the locked section as short as possible
				c.blobRefs.Lock()
				for _, blobID := range node.Content {
					if blobID.IsNull() {
						continue
					}
					h := restic.BlobHandle{ID: blobID, Type: restic.DataBlob}
					c.blobRefs.M.Insert(h)
					debug.Log("blob %v is referenced", blobID)
				}
				c.blobRefs.Unlock()
			}
		case data.NodeTypeDir:
			if node.Subtree == nil {
				errs = append(errs, &Error{TreeID: id, Err: errors.Errorf("dir node %q has no subtree", node.Name)})
				continue
			}
			if node.Subtree.IsNull() {
				errs = append(errs, &Error{TreeID: id, Err: errors.Errorf("dir node %q subtree id is null", node.Name)})
				continue
			}
		case data.NodeTypeSymlink, data.NodeTypeSocket, data.NodeTypeCharDev, data.NodeTypeDev, data.NodeTypeFifo:
			// nothing to check
		default:
			errs = append(errs, &Error{TreeID: id, Err: errors.Errorf("node %q with invalid type %q", node.Name, node.Type)})
		}
		if node.Name == "" {
			errs = append(errs, &Error{TreeID: id, Err: errors.New("node with empty name")})
		}
	}
	return errs
}
// UnusedBlobs returns all blobs that have never been referenced.
// It panics unless the Checker was created with trackUnused enabled
// (or filtering switched it on), and must be called after Structure.
// The blobRefs lock is held for the whole listing.
func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles, err error) {
	if !c.trackUnused {
		panic("only works when tracking blob references")
	}
	c.blobRefs.Lock()
	defer c.blobRefs.Unlock()
	debug.Log("checking %d blobs", c.blobRefs.M.Len())
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	err = c.repo.ListBlobs(ctx, func(blob restic.PackedBlob) {
		h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
		if !c.blobRefs.M.Has(h) {
			debug.Log("blob %v not referenced", h)
			blobs = append(blobs, h)
		}
	})
	return blobs, err
}
// ReadPacks wraps repository.ReadPacks:
// in case snapshot filtering is not active it calls repository.ReadPacks()
// with an unmodified parameter list
// Otherwise it calculates the packfiles needed, gets their sizes from the full
// packfile set and submits them to repository.ReadPacks()
func (c *Checker) ReadPacks(ctx context.Context, filter func(packs map[restic.ID]int64) map[restic.ID]int64, p *progress.Counter, errChan chan<- error) {
	// no snapshot filtering, pass through
	if !c.IsFiltered() {
		c.Checker.ReadPacks(ctx, filter, p, errChan)
		return
	}
	// restrict the caller-supplied filter to packs containing blobs that
	// were referenced during Structure (blobRefs was filled because
	// filtering forces trackUnused on)
	packfileFilter := func(allPacks map[restic.ID]int64) map[restic.ID]int64 {
		filteredPacks := make(map[restic.ID]int64)
		// convert used blobs into their encompassing packfiles
		for bh := range c.blobRefs.M.Keys() {
			for _, pb := range c.repo.LookupBlob(bh.Type, bh.ID) {
				filteredPacks[pb.PackID] = allPacks[pb.PackID]
			}
		}
		return filter(filteredPacks)
	}
	c.Checker.ReadPacks(ctx, packfileFilter, p, errChan)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/checker/checker_test.go | internal/checker/checker_test.go | package checker_test
import (
"context"
"io"
"math/rand"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/restic/restic/internal/archiver"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/checker"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/repository/hashing"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
)
// checkerTestData is the tarball fixture repository used by most checker tests.
var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz")
func collectErrors(ctx context.Context, f func(context.Context, chan<- error)) (errs []error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
errChan := make(chan error)
go f(ctx, errChan)
for err := range errChan {
errs = append(errs, err)
}
return errs
}
// checkPacks runs the pack check and returns all reported errors.
func checkPacks(chkr *checker.Checker) []error {
	return collectErrors(context.TODO(), chkr.Packs)
}
// checkStruct loads all snapshots (no filtering) and runs the structure
// check, returning all reported errors.
func checkStruct(chkr *checker.Checker) []error {
	err := chkr.LoadSnapshots(context.TODO(), &data.SnapshotFilter{}, nil)
	if err != nil {
		return []error{err}
	}
	return collectErrors(context.TODO(), func(ctx context.Context, errChan chan<- error) {
		chkr.Structure(ctx, nil, errChan)
	})
}
// checkData reads back all pack files (identity pack filter) and returns
// all reported errors.
func checkData(chkr *checker.Checker) []error {
	return collectErrors(
		context.TODO(),
		func(ctx context.Context, errCh chan<- error) {
			chkr.ReadPacks(ctx, func(packs map[restic.ID]int64) map[restic.ID]int64 {
				return packs
			}, nil, errCh)
		},
	)
}
// assertOnlyMixedPackHints fails the test if hints contains anything
// other than ErrMixedPack warnings (expected for the legacy fixture).
func assertOnlyMixedPackHints(t *testing.T, hints []error) {
	for _, err := range hints {
		if _, ok := err.(*repository.ErrMixedPack); !ok {
			t.Fatalf("expected mixed pack hint, got %v", err)
		}
	}
}
// TestCheckRepo runs the full check on an intact fixture repository and
// expects only mixed-pack hints, no errors.
func TestCheckRepo(t *testing.T) {
	repo, _, cleanup := repository.TestFromFixture(t, checkerTestData)
	defer cleanup()
	chkr := checker.New(repo, false)
	hints, errs := chkr.LoadIndex(context.TODO(), nil)
	if len(errs) > 0 {
		t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
	}
	assertOnlyMixedPackHints(t, hints)
	if len(hints) == 0 {
		t.Fatal("expected mixed pack warnings, got none")
	}
	test.OKs(t, checkPacks(chkr))
	test.OKs(t, checkStruct(chkr))
}
// TestMissingPack deletes one pack file and expects the pack check to
// report exactly that pack as a PackError.
func TestMissingPack(t *testing.T) {
	repo, be, cleanup := repository.TestFromFixture(t, checkerTestData)
	defer cleanup()
	packID := restic.TestParseID("657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6")
	test.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: packID.String()}))
	chkr := checker.New(repo, false)
	hints, errs := chkr.LoadIndex(context.TODO(), nil)
	if len(errs) > 0 {
		t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
	}
	assertOnlyMixedPackHints(t, hints)
	errs = checkPacks(chkr)
	test.Assert(t, len(errs) == 1,
		"expected exactly one error, got %v", len(errs))
	if err, ok := errs[0].(*repository.PackError); ok {
		test.Equals(t, packID, err.ID)
	} else {
		t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err)
	}
}
// TestUnreferencedPack deletes the index that references a pack and
// expects the pack check to flag that pack as unreferenced.
func TestUnreferencedPack(t *testing.T) {
	repo, be, cleanup := repository.TestFromFixture(t, checkerTestData)
	defer cleanup()
	// index 3f1a only references pack 60e0
	packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"
	indexID := restic.TestParseID("3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44")
	test.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: indexID.String()}))
	chkr := checker.New(repo, false)
	hints, errs := chkr.LoadIndex(context.TODO(), nil)
	if len(errs) > 0 {
		t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
	}
	assertOnlyMixedPackHints(t, hints)
	errs = checkPacks(chkr)
	test.Assert(t, len(errs) == 1,
		"expected exactly one error, got %v", len(errs))
	if err, ok := errs[0].(*repository.PackError); ok {
		test.Equals(t, packID, err.ID.String())
	} else {
		t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err)
	}
}
// TestUnreferencedBlobs deletes a snapshot and expects UnusedBlobs to
// report exactly the blobs only that snapshot referenced.
func TestUnreferencedBlobs(t *testing.T) {
	repo, be, cleanup := repository.TestFromFixture(t, checkerTestData)
	defer cleanup()
	snapshotID := restic.TestParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02")
	test.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.SnapshotFile, Name: snapshotID.String()}))
	unusedBlobsBySnapshot := restic.BlobHandles{
		restic.TestParseHandle("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849", restic.DataBlob),
		restic.TestParseHandle("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235", restic.DataBlob),
		restic.TestParseHandle("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8", restic.TreeBlob),
		restic.TestParseHandle("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7", restic.TreeBlob),
		restic.TestParseHandle("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d", restic.TreeBlob),
		restic.TestParseHandle("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10", restic.DataBlob),
	}
	sort.Sort(unusedBlobsBySnapshot)
	chkr := checker.New(repo, true)
	hints, errs := chkr.LoadIndex(context.TODO(), nil)
	if len(errs) > 0 {
		t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
	}
	assertOnlyMixedPackHints(t, hints)
	test.OKs(t, checkPacks(chkr))
	test.OKs(t, checkStruct(chkr))
	blobs, err := chkr.UnusedBlobs(context.TODO())
	test.OK(t, err)
	sort.Sort(blobs)
	test.Equals(t, unusedBlobsBySnapshot, blobs)
}
// TestModifiedIndex copies an index file to a name that no longer
// matches its content hash and expects index loading to report errors.
func TestModifiedIndex(t *testing.T) {
	repo, be, cleanup := repository.TestFromFixture(t, checkerTestData)
	defer cleanup()
	done := make(chan struct{})
	defer close(done)
	h := backend.Handle{
		Type: restic.IndexFile,
		Name: "90f838b4ac28735fda8644fe6a08dbc742e57aaf81b30977b4fefa357010eafd",
	}
	tmpfile, err := os.CreateTemp("", "restic-test-mod-index-")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err := tmpfile.Close()
		if err != nil {
			t.Fatal(err)
		}
		err = os.Remove(tmpfile.Name())
		if err != nil {
			t.Fatal(err)
		}
	}()
	// hash the copied data on the fly if the backend requires a hash
	wr := io.Writer(tmpfile)
	var hw *hashing.Writer
	if be.Hasher() != nil {
		hw = hashing.NewWriter(wr, be.Hasher())
		wr = hw
	}
	// read the file from the backend
	err = be.Load(context.TODO(), h, 0, 0, func(rd io.Reader) error {
		_, err := io.Copy(wr, rd)
		return err
	})
	test.OK(t, err)
	// save the index again with a modified name so that the hash doesn't match
	// the content any more
	h2 := backend.Handle{
		Type: restic.IndexFile,
		Name: "80f838b4ac28735fda8644fe6a08dbc742e57aaf81b30977b4fefa357010eafd",
	}
	var hash []byte
	if hw != nil {
		hash = hw.Sum(nil)
	}
	rd, err := backend.NewFileReader(tmpfile, hash)
	if err != nil {
		t.Fatal(err)
	}
	err = be.Save(context.TODO(), h2, rd)
	if err != nil {
		t.Fatal(err)
	}
	chkr := checker.New(repo, false)
	hints, errs := chkr.LoadIndex(context.TODO(), nil)
	if len(errs) == 0 {
		t.Fatalf("expected errors not found")
	}
	for _, err := range errs {
		t.Logf("found expected error %v", err)
	}
	assertOnlyMixedPackHints(t, hints)
}
// checkerDuplicateIndexTestData is a fixture whose indexes list the same
// pack more than once.
var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz")
// TestDuplicatePacksInIndex expects LoadIndex to emit ErrDuplicatePacks
// hints (and no errors) for the duplicate-pack fixture.
func TestDuplicatePacksInIndex(t *testing.T) {
	repo, _, cleanup := repository.TestFromFixture(t, checkerDuplicateIndexTestData)
	defer cleanup()
	chkr := checker.New(repo, false)
	hints, errs := chkr.LoadIndex(context.TODO(), nil)
	if len(hints) == 0 {
		t.Fatalf("did not get expected checker hints for duplicate packs in indexes")
	}
	found := false
	for _, hint := range hints {
		if _, ok := hint.(*repository.ErrDuplicatePacks); ok {
			found = true
		} else {
			t.Errorf("got unexpected hint: %v", hint)
		}
	}
	if !found {
		t.Fatalf("did not find hint ErrDuplicatePacks")
	}
	if len(errs) > 0 {
		t.Errorf("expected no errors, got %v: %v", len(errs), errs)
	}
}
// errorBackend randomly modifies data after reading.
type errorBackend struct {
	backend.Backend
	// ProduceErrors toggles the corruption on and off at runtime
	ProduceErrors bool
}
// Load wraps the underlying backend's Load and, when ProduceErrors is
// set, hands the consumer a reader that corrupts the data.
func (b errorBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) error {
	return b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error {
		if b.ProduceErrors {
			return consumer(errorReadCloser{Reader: rd})
		}
		return consumer(rd)
	})
}
// errorReadCloser flips a bit in every read chunk and can optionally
// truncate reads to simulate short/garbled transfers.
type errorReadCloser struct {
	io.Reader
	// shortenBy is subtracted from each read's byte count
	shortenBy int
	maxErrorOffset int // if 0, the error can be injected at any offset
}
// Read reads from the wrapped reader, corrupts a byte within the allowed
// offset window, and optionally shortens the reported read length.
func (erd errorReadCloser) Read(p []byte) (int, error) {
	n, err := erd.Reader.Read(p)
	if n > 0 {
		maxOffset := n
		if erd.maxErrorOffset > 0 {
			maxOffset = min(erd.maxErrorOffset, maxOffset)
		}
		induceError(p[:maxOffset])
	}
	if n > erd.shortenBy {
		n -= erd.shortenBy
	}
	return n, err
}
// induceError corrupts data in place by XOR-flipping the lowest bit of a
// randomly chosen byte.
func induceError(data []byte) {
	idx := rand.Intn(len(data))
	data[idx] = data[idx] ^ 1
}
// errorOnceBackend randomly modifies data when reading a file for the first time.
// Subsequent loads of the same handle are passed through unmodified, and
// a failed first load is retried once without corruption.
type errorOnceBackend struct {
	backend.Backend
	// m remembers which handles were already loaded once
	m sync.Map
	shortenBy int
	maxErrorOffset int
}
// Load corrupts the first read of each non-config handle via
// errorReadCloser; if the consumer fails, the load is retried cleanly.
func (b *errorOnceBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) error {
	_, isRetry := b.m.LoadOrStore(h, struct{}{})
	err := b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error {
		if !isRetry && h.Type != restic.ConfigFile {
			return consumer(errorReadCloser{Reader: rd, shortenBy: b.shortenBy, maxErrorOffset: b.maxErrorOffset})
		}
		return consumer(rd)
	})
	if err == nil {
		return nil
	}
	// retry if the consumer returned an error
	return b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error {
		return consumer(rd)
	})
}
// TestCheckerModifiedData verifies how the data check reacts to
// backend-level corruption: permanent corruption must produce an error,
// corruption that disappears on retry must be reported as a
// second-attempt success, and garbled partial first reads are tolerated.
func TestCheckerModifiedData(t *testing.T) {
	repo, _, be := repository.TestRepositoryWithVersion(t, 0)
	sn := archiver.TestSnapshot(t, repo, ".", nil)
	t.Logf("archived as %v", sn.ID().Str())
	errBe := &errorBackend{Backend: be}
	for _, test := range []struct {
		name string
		be backend.Backend
		damage func()
		check func(t *testing.T, err error)
	}{
		{
			"errorBackend",
			errBe,
			func() {
				errBe.ProduceErrors = true
			},
			func(t *testing.T, err error) {
				if err == nil {
					t.Fatal("no error found, checker is broken")
				}
			},
		},
		{
			"errorOnceBackend",
			&errorOnceBackend{Backend: be},
			func() {},
			func(t *testing.T, err error) {
				if !strings.Contains(err.Error(), "check successful on second attempt, original error pack") {
					t.Fatalf("wrong error found, got %v", err)
				}
			},
		},
		{
			// ignore if a backend returns incomplete garbled data on the first try
			"corruptPartialOnceBackend",
			&errorOnceBackend{Backend: be, shortenBy: 10, maxErrorOffset: 100},
			func() {},
			func(t *testing.T, err error) {
				test.Assert(t, err == nil, "unexpected error found, got %v", err)
			},
		},
	} {
		t.Run(test.name, func(t *testing.T) {
			checkRepo := repository.TestOpenBackend(t, test.be)
			chkr := checker.New(checkRepo, false)
			hints, errs := chkr.LoadIndex(context.TODO(), nil)
			if len(errs) > 0 {
				t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
			}
			if len(hints) > 0 {
				t.Errorf("expected no hints, got %v: %v", len(hints), hints)
			}
			test.damage()
			// pack/struct errors are only logged; the first data error is checked
			var err error
			for _, err := range checkPacks(chkr) {
				t.Logf("pack error: %v", err)
			}
			for _, err := range checkStruct(chkr) {
				t.Logf("struct error: %v", err)
			}
			for _, cerr := range checkData(chkr) {
				t.Logf("data error: %v", cerr)
				if err == nil {
					err = cerr
				}
			}
			test.check(t, err)
		})
	}
}
// loadTreesOnceRepository wraps a repository and fails any attempt to load
// the same tree blob twice. It is used to verify that the checker decodes
// each tree exactly once.
type loadTreesOnceRepository struct {
	*repository.Repository
	loadedTrees restic.IDSet // IDs of tree blobs that were already loaded
	mutex       sync.Mutex   // guards loadedTrees
	// DuplicateTree is set when a duplicate load was attempted, so the
	// condition is detectable even if the returned error is swallowed.
	DuplicateTree bool
}

// LoadBlob intercepts tree loads and returns an error when a tree blob is
// requested a second time; all other blob types pass through unchanged.
func (r *loadTreesOnceRepository) LoadBlob(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) ([]byte, error) {
	if t != restic.TreeBlob {
		// only tree loads are tracked
		return r.Repository.LoadBlob(ctx, t, id, buf)
	}
	r.mutex.Lock()
	defer r.mutex.Unlock()
	if r.loadedTrees.Has(id) {
		// additionally store error to ensure that it cannot be swallowed
		r.DuplicateTree = true
		return nil, errors.Errorf("trying to load tree with id %v twice", id)
	}
	r.loadedTrees.Insert(id)
	return r.Repository.LoadBlob(ctx, t, id, buf)
}
// TestCheckerNoDuplicateTreeDecodes verifies that a full structure check
// loads and decodes every tree blob at most once.
func TestCheckerNoDuplicateTreeDecodes(t *testing.T) {
	repo, _, cleanup := repository.TestFromFixture(t, checkerTestData)
	defer cleanup()
	// wrap the repository so that a second load of the same tree fails
	checkRepo := &loadTreesOnceRepository{
		Repository:  repo,
		loadedTrees: restic.NewIDSet(),
	}
	chkr := checker.New(checkRepo, false)
	hints, errs := chkr.LoadIndex(context.TODO(), nil)
	if len(errs) > 0 {
		t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
	}
	assertOnlyMixedPackHints(t, hints)
	test.OKs(t, checkPacks(chkr))
	test.OKs(t, checkStruct(chkr))
	// sanity check: the wrapper must actually have seen tree loads
	test.Assert(t, len(checkRepo.loadedTrees) > 0, "intercepting tree loading did not work")
	test.Assert(t, !checkRepo.DuplicateTree, "detected duplicate tree loading")
}
// delayRepository delays read of a specific tree blob until Unblock is
// called, allowing tests to force a particular scheduling order.
type delayRepository struct {
	*repository.Repository
	DelayTree      restic.ID     // ID of the tree blob whose load is delayed
	UnblockChannel chan struct{} // closed by Unblock to release the delayed load
	Unblocker      sync.Once     // ensures the channel is closed only once
	Triggered      bool          // set when Unblock actually ran
}

// LoadBlob blocks until Unblock is called when the delayed tree is requested;
// all other loads pass through immediately.
func (r *delayRepository) LoadBlob(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) ([]byte, error) {
	if t == restic.TreeBlob && id == r.DelayTree {
		<-r.UnblockChannel
	}
	return r.Repository.LoadBlob(ctx, t, id, buf)
}

// LookupBlobSize unblocks the delayed load once the delayed ID is looked up
// as a data blob, then delegates to the wrapped repository.
func (r *delayRepository) LookupBlobSize(t restic.BlobType, id restic.ID) (uint, bool) {
	if id == r.DelayTree && t == restic.DataBlob {
		r.Unblock()
	}
	return r.Repository.LookupBlobSize(t, id)
}

// Unblock releases the delayed load exactly once and records that it ran.
func (r *delayRepository) Unblock() {
	r.Unblocker.Do(func() {
		close(r.UnblockChannel)
		r.Triggered = true
	})
}
// TestCheckerBlobTypeConfusion builds a repository in which the same blob ID
// exists both as a tree and as a data blob, and verifies that the structure
// check reports an error instead of confusing the two blob types.
func TestCheckerBlobTypeConfusion(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	repo := repository.TestRepository(t)
	// a file node referencing a content blob that does not exist
	damagedNode := &data.Node{
		Name:    "damaged",
		Type:    data.NodeTypeFile,
		Mode:    0644,
		Size:    42,
		Content: restic.IDs{restic.TestParseID("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")},
	}
	damagedTree := &data.Tree{
		Nodes: []*data.Node{damagedNode},
	}
	var id restic.ID
	test.OK(t, repo.WithBlobUploader(ctx, func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
		var err error
		id, err = data.SaveTree(ctx, uploader, damagedTree)
		return err
	}))
	buf, err := repo.LoadBlob(ctx, restic.TreeBlob, id, nil)
	test.OK(t, err)
	// store the exact same bytes again, but as a *data* blob with the same ID
	test.OK(t, repo.WithBlobUploader(ctx, func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
		var err error
		_, _, _, err = uploader.SaveBlob(ctx, restic.DataBlob, buf, id, false)
		return err
	}))
	// file node whose content references the ambiguous ID as data
	malNode := &data.Node{
		Name:    "aaaaa",
		Type:    data.NodeTypeFile,
		Mode:    0644,
		Size:    uint64(len(buf)),
		Content: restic.IDs{id},
	}
	// dir node whose subtree references the ambiguous ID as a tree
	dirNode := &data.Node{
		Name:    "bbbbb",
		Type:    data.NodeTypeDir,
		Mode:    0755,
		Subtree: &id,
	}
	rootTree := &data.Tree{
		Nodes: []*data.Node{malNode, dirNode},
	}
	var rootID restic.ID
	test.OK(t, repo.WithBlobUploader(ctx, func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
		var err error
		rootID, err = data.SaveTree(ctx, uploader, rootTree)
		return err
	}))
	snapshot, err := data.NewSnapshot([]string{"/damaged"}, []string{"test"}, "foo", time.Now())
	test.OK(t, err)
	snapshot.Tree = &rootID
	snapID, err := data.SaveSnapshot(ctx, repo, snapshot)
	test.OK(t, err)
	t.Logf("saved snapshot %v", snapID.Str())
	// delay loading of the ambiguous tree to force a specific schedule
	delayRepo := &delayRepository{
		Repository:     repo,
		DelayTree:      id,
		UnblockChannel: make(chan struct{}),
	}
	chkr := checker.New(delayRepo, false)
	// safety net: release the delayed load when the test deadline hits
	go func() {
		<-ctx.Done()
		delayRepo.Unblock()
	}()
	hints, errs := chkr.LoadIndex(ctx, nil)
	if len(errs) > 0 {
		t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
	}
	if len(hints) > 0 {
		t.Errorf("expected no hints, got %v: %v", len(hints), hints)
	}
	errFound := false
	for _, err := range checkStruct(chkr) {
		t.Logf("struct error: %v", err)
		errFound = true
	}
	// the check must have finished before the timeout expired
	test.OK(t, ctx.Err())
	if !errFound {
		t.Fatal("no error found, checker is broken")
	}
	test.Assert(t, delayRepo.Triggered, "delay repository did not trigger")
}
// loadBenchRepository opens the checker test fixture for benchmarks, loads
// the index, and returns the checker, the repository, and a cleanup func the
// caller must defer.
func loadBenchRepository(t *testing.B) (*checker.Checker, restic.Repository, func()) {
	repo, _, cleanup := repository.TestFromFixture(t, checkerTestData)
	chkr := checker.New(repo, false)
	hints, errs := chkr.LoadIndex(context.TODO(), nil)
	if len(errs) > 0 {
		// clean up the fixture before failing; Fatalf does not return
		defer cleanup()
		t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
	}
	// only mixed-pack hints are acceptable for this fixture
	for _, err := range hints {
		if _, ok := err.(*repository.ErrMixedPack); !ok {
			t.Fatalf("expected mixed pack hint, got %v", err)
		}
	}
	return chkr, repo, cleanup
}
// BenchmarkChecker measures a full check run (packs, structure, data) on the
// fixture repository.
func BenchmarkChecker(t *testing.B) {
	chkr, _, cleanup := loadBenchRepository(t)
	defer cleanup()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		test.OKs(t, checkPacks(chkr))
		test.OKs(t, checkStruct(chkr))
		test.OKs(t, checkData(chkr))
	}
}
// benchmarkSnapshotScaling measures how check time scales with the number of
// snapshots by cloning an existing snapshot's tree into newSnapshots
// additional snapshots before running the full check.
func benchmarkSnapshotScaling(t *testing.B, newSnapshots int) {
	chkr, repo, cleanup := loadBenchRepository(t)
	defer cleanup()
	// reuse the tree of a known fixture snapshot for all new snapshots
	snID := restic.TestParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02")
	sn2, err := data.LoadSnapshot(context.TODO(), repo, snID)
	if err != nil {
		t.Fatal(err)
	}
	treeID := sn2.Tree
	for i := 0; i < newSnapshots; i++ {
		sn, err := data.NewSnapshot([]string{"test" + strconv.Itoa(i)}, nil, "", time.Now())
		if err != nil {
			t.Fatal(err)
		}
		sn.Tree = treeID
		_, err = data.SaveSnapshot(context.TODO(), repo, sn)
		if err != nil {
			t.Fatal(err)
		}
	}
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		test.OKs(t, checkPacks(chkr))
		test.OKs(t, checkStruct(chkr))
		test.OKs(t, checkData(chkr))
	}
}
// BenchmarkCheckerSnapshotScaling runs the snapshot-scaling benchmark for a
// range of snapshot counts, one sub-benchmark per count.
func BenchmarkCheckerSnapshotScaling(b *testing.B) {
	for _, n := range []int{50, 100, 200} {
		b.Run(strconv.Itoa(n), func(b *testing.B) {
			benchmarkSnapshotScaling(b, n)
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/checker/testing.go | internal/checker/testing.go | package checker
import (
"context"
"testing"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/restic"
)
// TestCheckRepo runs the checker on repo. It loads the index and snapshots,
// then runs all check phases (packs, structure, unused blobs, read data) and
// reports every error it finds via t.
func TestCheckRepo(t testing.TB, repo checkerRepository) {
	chkr := New(repo, true)
	hints, errs := chkr.LoadIndex(context.TODO(), nil)
	if len(errs) != 0 {
		t.Fatalf("errors loading index: %v", errs)
	}
	if len(hints) != 0 {
		t.Fatalf("errors loading index: %v", hints)
	}
	err := chkr.LoadSnapshots(context.TODO(), &data.SnapshotFilter{}, nil)
	if err != nil {
		t.Error(err)
	}
	// packs
	errChan := make(chan error)
	go chkr.Packs(context.TODO(), errChan)
	for err := range errChan {
		t.Error(err)
	}
	// structure
	errChan = make(chan error)
	go chkr.Structure(context.TODO(), nil, errChan)
	for err := range errChan {
		t.Error(err)
	}
	// unused blobs
	blobs, err := chkr.UnusedBlobs(context.TODO())
	if err != nil {
		t.Error(err)
	}
	if len(blobs) > 0 {
		t.Errorf("unused blobs found: %v", blobs)
	}
	// read data; the identity function reads every pack completely
	errChan = make(chan error)
	go chkr.ReadPacks(context.TODO(), func(packs map[restic.ID]int64) map[restic.ID]int64 {
		return packs
	}, nil, errChan)
	for err := range errChan {
		t.Error(err)
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/errors/errors.go | internal/errors/errors.go | package errors
import (
stderrors "errors"
"github.com/pkg/errors"
)
// New creates a new error based on message. Wrapped so that this package does
// not appear in the stack trace.
var New = errors.New

// Errorf creates an error based on a format string and values. Wrapped so that
// this package does not appear in the stack trace.
var Errorf = errors.Errorf

// Wrap wraps an error retrieved from outside of restic. Wrapped so that this
// package does not appear in the stack trace.
var Wrap = errors.Wrap

// Wrapf returns an error annotating err with the format specifier. If err is
// nil, Wrapf returns nil.
var Wrapf = errors.Wrapf

// WithStack annotates err with a stack trace at the point WithStack was called.
// If err is nil, WithStack returns nil.
var WithStack = errors.WithStack
// Go 1.13-style error handling.
// As finds the first error in err's tree that matches target, and if one is found,
// sets target to that error value and returns true. Otherwise, it returns false.
func As(err error, tgt interface{}) bool { return stderrors.As(err, tgt) }
// Is reports whether any error in err's tree matches target.
func Is(x, y error) bool { return stderrors.Is(x, y) }
func Join(errs ...error) error { return stderrors.Join(errs...) }
// Unwrap returns the result of calling the Unwrap method on err, if err's type contains
// an Unwrap method returning error. Otherwise, Unwrap returns nil.
//
// Unwrap only calls a method of the form "Unwrap() error". In particular Unwrap does not
// unwrap errors returned by [Join].
func Unwrap(err error) error { return stderrors.Unwrap(err) }
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/errors/fatal.go | internal/errors/fatal.go | package errors
import (
"errors"
"fmt"
)
// fatalError is an error that should be printed to the user, then the program
// should exit with an error code.
type fatalError struct {
	msg string // message shown to the user
	err error  // Underlying error
}

// Error returns the user-facing message.
func (e *fatalError) Error() string {
	return e.msg
}

// Unwrap exposes the underlying error (may be nil) for errors.Is/As.
func (e *fatalError) Unwrap() error {
	return e.err
}

// IsFatal returns true if err is a fatal message that should be printed to the
// user. Then, the program should exit.
func IsFatal(err error) bool {
	var fatal *fatalError
	return errors.As(err, &fatal)
}
// Fatal returns an error that is marked fatal.
func Fatal(s string) error {
	return Wrap(&fatalError{msg: s}, "Fatal")
}

// Fatalf returns an error that is marked fatal, preserving an underlying error if passed.
func Fatalf(s string, data ...interface{}) error {
	// The last error value among the format arguments becomes the wrapped cause.
	var cause error
	for i := len(data) - 1; i >= 0; i-- {
		if e, ok := data[i].(error); ok {
			cause = e
			break
		}
	}
	return Wrap(&fatalError{
		msg: fmt.Sprintf(s, data...),
		err: cause,
	}, "Fatal")
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/errors/fatal_test.go | internal/errors/fatal_test.go | package errors_test
import (
"testing"
"github.com/restic/restic/internal/errors"
)
// TestFatal verifies that IsFatal recognizes errors created by Fatal and
// Fatalf and rejects ordinary errors.
func TestFatal(t *testing.T) {
	for _, v := range []struct {
		err      error
		expected bool
	}{
		{errors.Fatal("broken"), true},
		{errors.Fatalf("broken %d", 42), true},
		{errors.New("error"), false},
	} {
		if errors.IsFatal(v.err) != v.expected {
			t.Fatalf("IsFatal for %q, expected: %v, got: %v", v.err, v.expected, errors.IsFatal(v.err))
		}
	}
}
// TestFatalErrorWrapping verifies that Fatalf preserves the underlying error
// (reachable via errors.Is), keeps the formatted message, and stays fatal.
func TestFatalErrorWrapping(t *testing.T) {
	underlying := errors.New("underlying error")
	fatal := errors.Fatalf("fatal error: %v", underlying)
	// Test that the fatal error message is preserved
	if fatal.Error() != "Fatal: fatal error: underlying error" {
		t.Errorf("unexpected error message: %v", fatal.Error())
	}
	// Test that we can unwrap to get the underlying error
	if !errors.Is(fatal, underlying) {
		t.Error("fatal error should wrap the underlying error")
	}
	// Test that the error is marked as fatal
	if !errors.IsFatal(fatal) {
		t.Error("error should be marked as fatal")
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/errors/doc.go | internal/errors/doc.go | // Package errors provides custom error types used within restic.
package errors
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/json.go | internal/restic/json.go | package restic
import (
"context"
"encoding/json"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
)
// LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
// the item.
func LoadJSONUnpacked(ctx context.Context, repo LoaderUnpacked, t FileType, id ID, item interface{}) (err error) {
	plaintext, err := repo.LoadUnpacked(ctx, t, id)
	if err != nil {
		return err
	}
	return json.Unmarshal(plaintext, item)
}
// SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
// backend as type t, without a pack. It returns the storage hash.
func SaveJSONUnpacked[FT FileTypes](ctx context.Context, repo SaverUnpacked[FT], t FT, item interface{}) (ID, error) {
	debug.Log("save new blob %v", t)
	encoded, marshalErr := json.Marshal(item)
	if marshalErr != nil {
		return ID{}, errors.Wrap(marshalErr, "json.Marshal")
	}
	return repo.SaveUnpacked(ctx, t, encoded)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/lock_windows.go | internal/restic/lock_windows.go | package restic
import (
"os"
"os/user"
"github.com/restic/restic/internal/debug"
)
// UidGidInt always returns 0 on Windows, since uid isn't numbers
func UidGidInt(_ *user.User) (uid, gid uint32, err error) {
	// Windows has no numeric uid/gid; report the zero values.
	return
}
// processExists will check if the process retaining the lock exists.
// Returns true if the process exists.
//
// NOTE(review): on Windows os.FindProcess succeeds for any PID that can be
// opened, so this is a weaker liveness signal than the Unix SIGHUP probe.
func (l *Lock) processExists() bool {
	proc, err := os.FindProcess(l.PID)
	if err != nil {
		debug.Log("error searching for process %d: %v\n", l.PID, err)
		return false
	}
	// release the process handle; the error is only logged because the
	// result of the liveness check does not depend on it
	err = proc.Release()
	if err != nil {
		debug.Log("error releasing process %d: %v\n", l.PID, err)
	}
	return true
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/backend_find.go | internal/restic/backend_find.go | package restic
import (
"context"
"fmt"
)
// A MultipleIDMatchesError is returned by Find() when multiple IDs with a
// given prefix are found.
type MultipleIDMatchesError struct {
	prefix string
}

// Error describes the ambiguous prefix.
func (e *MultipleIDMatchesError) Error() string {
	return fmt.Sprintf("multiple IDs with prefix %q found", e.prefix)
}

// A NoIDByPrefixError is returned by Find() when no ID for a given prefix
// could be found.
type NoIDByPrefixError struct {
	prefix string
}

// Error describes the prefix that matched nothing.
func (e *NoIDByPrefixError) Error() string {
	return fmt.Sprintf("no matching ID found for prefix %q", e.prefix)
}
// Find loads the list of all files of type t and searches for names which
// start with prefix. If none is found, nil and ErrNoIDPrefixFound is returned.
// If more than one is found, nil and ErrMultipleIDMatches is returned.
func Find(ctx context.Context, be Lister, t FileType, prefix string) (ID, error) {
	var found ID
	// cancel the listing as soon as an ambiguity is detected
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	err := be.List(ctx, t, func(id ID, _ int64) error {
		name := id.String()
		if len(name) < len(prefix) || name[:len(prefix)] != prefix {
			return nil
		}
		if !found.IsNull() {
			// a second match makes the prefix ambiguous
			return &MultipleIDMatchesError{prefix}
		}
		found = id
		return nil
	})
	if err != nil {
		return ID{}, err
	}
	if found.IsNull() {
		return ID{}, &NoIDByPrefixError{prefix}
	}
	return found, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/id_int_test.go | internal/restic/id_int_test.go | package restic
import "testing"
// TestIDMethods checks the Str() representations of a zero-valued ID and of
// a nil *ID pointer.
func TestIDMethods(t *testing.T) {
	var id ID
	if id.Str() != "[null]" {
		t.Errorf("ID.Str() returned wrong value, want %v, got %v", "[null]", id.Str())
	}
	var pid *ID
	if pid.Str() != "[nil]" {
		t.Errorf("ID.Str() returned wrong value, want %v, got %v", "[nil]", pid.Str())
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/idset.go | internal/restic/idset.go | package restic
import (
"maps"
"sort"
)
// IDSet is a set of IDs. The zero value (a nil map) supports reads but not
// Insert; use NewIDSet to create a usable set.
type IDSet map[ID]struct{}

// NewIDSet returns a new IDSet, populated with ids.
func NewIDSet(ids ...ID) IDSet {
	m := make(IDSet)
	for _, id := range ids {
		m[id] = struct{}{}
	}
	return m
}

// Has returns true iff id is contained in the set.
func (s IDSet) Has(id ID) bool {
	_, ok := s[id]
	return ok
}

// Insert adds id to the set.
func (s IDSet) Insert(id ID) {
	s[id] = struct{}{}
}

// Delete removes id from the set.
func (s IDSet) Delete(id ID) {
	delete(s, id)
}

// List returns a sorted slice of all IDs in the set.
func (s IDSet) List() IDs {
	list := make(IDs, 0, len(s))
	for id := range s {
		list = append(list, id)
	}
	sort.Sort(list)
	return list
}

// Equals returns true iff s equals other.
func (s IDSet) Equals(other IDSet) bool { return maps.Equal(s, other) }

// Merge adds the blobs in other to the current set.
func (s IDSet) Merge(other IDSet) { maps.Copy(s, other) }
// Intersect returns a new set containing the IDs that are present in both sets.
func (s IDSet) Intersect(other IDSet) (result IDSet) {
	result = NewIDSet()
	// iterate over the smaller set to minimize lookups
	small, large := s, other
	if len(large) < len(small) {
		small, large = large, small
	}
	for id := range small {
		if large.Has(id) {
			result.Insert(id)
		}
	}
	return result
}

// Sub returns a new set containing all IDs that are present in s but not in
// other.
func (s IDSet) Sub(other IDSet) (result IDSet) {
	result = NewIDSet()
	for id := range s {
		if other.Has(id) {
			continue
		}
		result.Insert(id)
	}
	return result
}

// String renders the set as "{id1 id2 ...}" using the sorted list form.
func (s IDSet) String() string {
	inner := s.List().String()
	// swap the list's surrounding brackets for braces
	return "{" + inner[1:len(inner)-1] + "}"
}

// Clone returns a shallow copy of the set.
func (s IDSet) Clone() IDSet { return maps.Clone(s) }
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/zeroprefix.go | internal/restic/zeroprefix.go | package restic
import "bytes"
// ZeroPrefixLen returns the length of the longest all-zero prefix of p.
func ZeroPrefixLen(p []byte) (n int) {
	// Fast path: consume 1 KiB chunks while they compare equal to a zero block.
	var zeroBlock [1024]byte
	for len(p) >= len(zeroBlock) && bytes.Equal(p[:len(zeroBlock)], zeroBlock[:]) {
		n += len(zeroBlock)
		p = p[len(zeroBlock):]
	}
	// Slow path: count remaining leading zero bytes one at a time.
	for _, b := range p {
		if b != 0 {
			break
		}
		n++
	}
	return n
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/lock_unix.go | internal/restic/lock_unix.go | //go:build !windows
package restic
import (
"os"
"os/user"
"strconv"
"syscall"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
)
// UidGidInt returns uid, gid of the user as a number.
//
//nolint:revive // captialization is correct as is
func UidGidInt(u *user.User) (uid, gid uint32, err error) {
	uid64, err := strconv.ParseUint(u.Uid, 10, 32)
	if err != nil {
		return 0, 0, errors.Errorf("invalid UID %q", u.Uid)
	}
	gid64, err := strconv.ParseUint(u.Gid, 10, 32)
	if err != nil {
		return 0, 0, errors.Errorf("invalid GID %q", u.Gid)
	}
	return uint32(uid64), uint32(gid64), nil
}
// processExists will check if the process retaining the lock
// exists and responds to SIGHUP signal.
// Returns true if the process exists and responds.
func (l *Lock) processExists() bool {
	proc, err := os.FindProcess(l.PID)
	if err != nil {
		debug.Log("error searching for process %d: %v\n", l.PID, err)
		return false
	}
	// always release the process handle, regardless of the signal result
	defer func() {
		_ = proc.Release()
	}()
	debug.Log("sending SIGHUP to process %d\n", l.PID)
	// a signal error means the process is gone (or not ours), i.e. stale lock
	err = proc.Signal(syscall.SIGHUP)
	if err != nil {
		debug.Log("signal error: %v, lock is probably stale\n", err)
		return false
	}
	return true
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/id_test.go | internal/restic/id_test.go | package restic
import (
"reflect"
"testing"
)
// TestStrings maps hex ID strings to the data they were derived from; only
// the id column is used by TestID.
var TestStrings = []struct {
	id   string
	data string
}{
	{"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", "foobar"},
	{"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
	{"cc5d46bdb4991c6eae3eb739c9c8a7a46fe9654fab79c47b4fe48383b5b25e1c", "foo/bar"},
	{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
}

// TestID checks ParseID round-trips, ID equality, and JSON (un)marshalling.
func TestID(t *testing.T) {
	for _, test := range TestStrings {
		id, err := ParseID(test.id)
		if err != nil {
			t.Error(err)
		}
		id2, err := ParseID(test.id)
		if err != nil {
			t.Error(err)
		}
		if !id.Equal(id2) {
			t.Errorf("ID.Equal() does not work as expected")
		}
		// test json marshalling
		buf, err := id.MarshalJSON()
		if err != nil {
			t.Error(err)
		}
		want := `"` + test.id + `"`
		if string(buf) != want {
			t.Errorf("string comparison failed, wanted %q, got %q", want, string(buf))
		}
		var id3 ID
		err = id3.UnmarshalJSON(buf)
		if err != nil {
			t.Fatalf("error for %q: %v", buf, err)
		}
		if !reflect.DeepEqual(id, id3) {
			t.Error("ids are not equal")
		}
	}
}
// TestIDUnmarshal checks ID.UnmarshalJSON against valid and invalid JSON
// inputs: truncated quotes, wrong quoting, and hex strings of wrong length.
func TestIDUnmarshal(t *testing.T) {
	var tests = []struct {
		s     string
		valid bool
	}{
		{`"`, false},  // unterminated quote
		{`""`, false}, // empty string, wrong length
		{`'`, false},  // wrong quote character
		{`"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4"`, false},  // 62 hex chars, too short
		{`"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f"`, false}, // 63 hex chars, odd length
		{`"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2"`, true}, // exactly 64 hex chars
	}
	wantID, err := ParseID("c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			id := &ID{}
			err := id.UnmarshalJSON([]byte(test.s))
			if test.valid && err != nil {
				t.Fatal(err)
			}
			if !test.valid && err == nil {
				t.Fatalf("want error for invalid value, got nil")
			}
			if test.valid && !id.Equal(wantID) {
				t.Fatalf("wrong ID returned, want %s, got %s", wantID, id)
			}
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/idset_test.go | internal/restic/idset_test.go | package restic
import (
"testing"
rtest "github.com/restic/restic/internal/test"
)
// idsetTests is a sequence of Insert operations; `seen` is whether the ID is
// expected to already be in the set when the step runs.
var idsetTests = []struct {
	id   ID
	seen bool
}{
	{TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), false},
	{TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), false},
	{TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
	{TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
	{TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true},
	{TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), false},
	{TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
	{TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true},
	{TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), true},
	{TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
}

// TestIDSet replays idsetTests against Has/Insert and checks the sorted
// String rendering and Clone.
func TestIDSet(t *testing.T) {
	set := NewIDSet()
	rtest.Equals(t, "{}", set.String())
	for i, test := range idsetTests {
		seen := set.Has(test.id)
		if seen != test.seen {
			t.Errorf("IDSet test %v failed: wanted %v, got %v", i, test.seen, seen)
		}
		set.Insert(test.id)
	}
	rtest.Equals(t, "{1285b303 7bb086db f658198b}", set.String())
	copied := set.Clone()
	rtest.Equals(t, "{1285b303 7bb086db f658198b}", copied.String())
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/config.go | internal/restic/config.go | package restic
import (
"context"
"sync"
"testing"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/debug"
"github.com/restic/chunker"
)
// Config contains the configuration for a repository.
type Config struct {
	Version           uint        `json:"version"`
	ID                string      `json:"id"`
	ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"`
}

// MinRepoVersion is the oldest repository format version this code accepts.
const MinRepoVersion = 1

// MaxRepoVersion is the newest repository format version this code accepts.
const MaxRepoVersion = 2

// StableRepoVersion is the version that is written to the config when a repository
// is newly created with Init().
const StableRepoVersion = 2

// JSONUnpackedLoader loads unpacked JSON.
type JSONUnpackedLoader interface {
	LoadJSONUnpacked(context.Context, FileType, ID, interface{}) error
}
// CreateConfig creates a config file with a randomly selected polynomial and
// ID, using the given repository format version.
func CreateConfig(version uint) (Config, error) {
	var (
		err error
		cfg Config
	)
	// every repository gets its own random chunker polynomial
	cfg.ChunkerPolynomial, err = chunker.RandomPolynomial()
	if err != nil {
		return Config{}, errors.Wrap(err, "chunker.RandomPolynomial")
	}
	cfg.ID = NewRandomID().String()
	cfg.Version = version
	debug.Log("New config: %#v", cfg)
	return cfg, nil
}
// checkPolynomial controls whether LoadConfig verifies that the chunker
// polynomial is irreducible; disabling it speeds up tests.
var checkPolynomial = true
var checkPolynomialOnce sync.Once

// TestDisableCheckPolynomial disables the irreducibility check of the
// chunker polynomial performed by LoadConfig. For use in tests only.
func TestDisableCheckPolynomial(t testing.TB) {
	t.Logf("disabling check of the chunker polynomial")
	// sync.Once avoids a data race when multiple tests call this in parallel
	checkPolynomialOnce.Do(func() {
		checkPolynomial = false
	})
}
// LoadConfig loads, checks and returns the config for a repository. It
// rejects unsupported repository versions and (unless disabled for tests)
// invalid chunker polynomials.
func LoadConfig(ctx context.Context, r LoaderUnpacked) (Config, error) {
	var (
		cfg Config
	)
	// the config file always has the zero ID
	err := LoadJSONUnpacked(ctx, r, ConfigFile, ID{}, &cfg)
	if err != nil {
		return Config{}, err
	}
	if cfg.Version < MinRepoVersion || cfg.Version > MaxRepoVersion {
		return Config{}, errors.Errorf("unsupported repository version %v", cfg.Version)
	}
	if checkPolynomial {
		if !cfg.ChunkerPolynomial.Irreducible() {
			return Config{}, errors.New("invalid chunker polynomial")
		}
	}
	return cfg, nil
}

// SaveConfig stores cfg in the repository as the config file.
func SaveConfig(ctx context.Context, r SaverUnpacked[FileType], cfg Config) error {
	_, err := SaveJSONUnpacked(ctx, r, ConfigFile, cfg)
	return err
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/lock.go | internal/restic/lock.go | package restic
import (
"context"
"fmt"
"os"
"os/signal"
"os/user"
"sync"
"syscall"
"testing"
"time"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/debug"
)
// UnlockCancelDelay bounds the duration how long lock cleanup operations will wait
// if the passed in context was canceled.
const UnlockCancelDelay = 1 * time.Minute

// Lock represents a process locking the repository for an operation.
//
// There are two types of locks: exclusive and non-exclusive. There may be many
// different non-exclusive locks, but at most one exclusive lock, which can
// only be acquired while no non-exclusive lock is held.
//
// A lock must be refreshed regularly to not be considered stale, this must be
// triggered by regularly calling Refresh.
type Lock struct {
	lock      sync.Mutex // guards mutable fields (e.g. Time during Refresh)
	Time      time.Time  `json:"time"`
	Exclusive bool       `json:"exclusive"`
	Hostname  string     `json:"hostname"`
	Username  string     `json:"username"`
	PID       int        `json:"pid"`
	UID       uint32     `json:"uid,omitempty"`
	GID       uint32     `json:"gid,omitempty"`

	repo   Unpacked[FileType] // repository the lock file is stored in
	lockID *ID                // ID of the lock file; nil before createLock
}
// alreadyLockedError is returned when NewLock or NewExclusiveLock are unable to
// acquire the desired lock.
type alreadyLockedError struct {
	otherLock *Lock // the conflicting lock found in the repository
}

func (e *alreadyLockedError) Error() string {
	s := ""
	if e.otherLock.Exclusive {
		s = "exclusively "
	}
	return fmt.Sprintf("repository is already locked %sby %v", s, e.otherLock)
}

// IsAlreadyLocked returns true iff err indicates that a repository is
// already locked.
func IsAlreadyLocked(err error) bool {
	var e *alreadyLockedError
	return errors.As(err, &e)
}

// invalidLockError is returned when NewLock or NewExclusiveLock fail due
// to an invalid lock.
type invalidLockError struct {
	err error // the underlying validation error
}

func (e *invalidLockError) Error() string {
	return fmt.Sprintf("invalid lock file: %v", e.err)
}

func (e *invalidLockError) Unwrap() error {
	return e.err
}

// IsInvalidLock returns true iff err indicates that locking failed due to
// an invalid lock.
func IsInvalidLock(err error) bool {
	var e *invalidLockError
	return errors.As(err, &e)
}
// ErrRemovedLock is returned when a lock file vanished between being listed
// and being read.
var ErrRemovedLock = errors.New("lock file was removed in the meantime")

// waitBeforeLockCheck is how long NewLock waits after creating its lock file
// before re-checking for concurrently created locks.
var waitBeforeLockCheck = 200 * time.Millisecond

// delay increases by factor 2 on each retry
var initialWaitBetweenLockRetries = 5 * time.Second

// TestSetLockTimeout can be used to reduce the lock wait timeout for tests.
func TestSetLockTimeout(t testing.TB, d time.Duration) {
	t.Logf("setting lock timeout to %v", d)
	waitBeforeLockCheck = d
	initialWaitBetweenLockRetries = d
}
// NewLock returns a new lock for the repository. If an
// exclusive lock is already held by another process, it returns an error
// that satisfies IsAlreadyLocked. If the new lock is exclusive, then other
// non-exclusive locks also result in an IsAlreadyLocked error.
//
// The protocol is: check for conflicting locks, create our own lock file,
// wait briefly, then check again. The second check catches locks created
// concurrently between the first check and our lock file becoming visible.
func NewLock(ctx context.Context, repo Unpacked[FileType], exclusive bool) (*Lock, error) {
	lock := &Lock{
		Time:      time.Now(),
		PID:       os.Getpid(),
		Exclusive: exclusive,
		repo:      repo,
	}
	hn, err := os.Hostname()
	if err == nil {
		// hostname is best-effort; a missing name only weakens staleness detection
		lock.Hostname = hn
	}
	if err = lock.fillUserInfo(); err != nil {
		return nil, err
	}
	if err = lock.checkForOtherLocks(ctx); err != nil {
		return nil, err
	}
	lockID, err := lock.createLock(ctx)
	if err != nil {
		return nil, err
	}
	lock.lockID = &lockID
	// give concurrently created lock files time to become visible
	time.Sleep(waitBeforeLockCheck)
	if err = lock.checkForOtherLocks(ctx); err != nil {
		// conflict detected after the fact: remove our own lock again
		_ = lock.Unlock(ctx)
		return nil, err
	}
	return lock, nil
}
// fillUserInfo stores the current user's name, uid and gid in the lock.
// If the current user cannot be determined, the lock is left without user
// info and no error is reported (deliberate best-effort behavior).
func (l *Lock) fillUserInfo() error {
	usr, err := user.Current()
	if err != nil {
		return nil
	}
	l.Username = usr.Username
	l.UID, l.GID, err = UidGidInt(usr)
	return err
}
// checkForOtherLocks looks for other locks that currently exist in the repository.
//
// If an exclusive lock is to be created, checkForOtherLocks returns an error
// if there are any other locks, regardless if exclusive or not. If a
// non-exclusive lock is to be created, an error is only returned when an
// exclusive lock is found.
//
// Unreadable lock files are retried a few times with exponential backoff, as
// they may be transient (network/permission problems) rather than invalid.
func (l *Lock) checkForOtherLocks(ctx context.Context) error {
	var err error
	// locks already verified in an earlier pass are skipped on retries
	checkedIDs := NewIDSet()
	if l.lockID != nil {
		checkedIDs.Insert(*l.lockID)
	}
	delay := initialWaitBetweenLockRetries
	// retry locking a few times
	for i := 0; i < 4; i++ {
		if i != 0 {
			// sleep between retries to give backend some time to settle
			if err := cancelableDelay(ctx, delay); err != nil {
				return err
			}
			delay *= 2
		}
		// Store updates in new IDSet to prevent data races
		var m sync.Mutex
		newCheckedIDs := checkedIDs.Clone()
		err = ForAllLocks(ctx, l.repo, checkedIDs, func(id ID, lock *Lock, err error) error {
			if err != nil {
				// if we cannot load a lock then it is unclear whether it can be ignored
				// it could either be invalid or just unreadable due to network/permission problems
				debug.Log("ignore lock %v: %v", id, err)
				return err
			}
			if l.Exclusive {
				return &alreadyLockedError{otherLock: lock}
			}
			if !l.Exclusive && lock.Exclusive {
				return &alreadyLockedError{otherLock: lock}
			}
			// valid locks will remain valid
			m.Lock()
			newCheckedIDs.Insert(id)
			m.Unlock()
			return nil
		})
		checkedIDs = newCheckedIDs
		// no lock detected
		if err == nil {
			return nil
		}
		// lock conflicts are permanent
		if _, ok := err.(*alreadyLockedError); ok {
			return err
		}
	}
	// after the retries, a decryption/parsing failure counts as invalid
	if errors.Is(err, ErrInvalidData) {
		return &invalidLockError{err}
	}
	return err
}
func cancelableDelay(ctx context.Context, delay time.Duration) error {
// delay next try a bit
timer := time.NewTimer(delay)
select {
case <-ctx.Done():
timer.Stop()
return ctx.Err()
case <-timer.C:
}
return nil
}
// createLock acquires the lock by creating a file in the repository.
// It returns the ID of the newly written lock file.
func (l *Lock) createLock(ctx context.Context) (ID, error) {
	id, err := SaveJSONUnpacked(ctx, l.repo, LockFile, l)
	if err != nil {
		// return the zero ID so callers never see a partial result
		return ID{}, err
	}

	return id, nil
}
// Unlock removes the lock from the repository. Calling Unlock on a nil lock
// or on a lock that was never created is a no-op.
func (l *Lock) Unlock(ctx context.Context) error {
	if l == nil {
		return nil
	}
	if l.lockID == nil {
		return nil
	}

	// allow the removal to proceed for a grace period even if ctx is canceled
	removeCtx, cancel := delayedCancelContext(ctx, UnlockCancelDelay)
	defer cancel()
	return l.repo.RemoveUnpacked(removeCtx, LockFile, *l.lockID)
}
var StaleLockTimeout = 30 * time.Minute
// Stale returns true if the lock is stale. A lock is stale if the timestamp is
// older than 30 minutes or if it was created on the current machine and the
// process isn't alive any more.
func (l *Lock) Stale() bool {
	l.lock.Lock()
	defer l.lock.Unlock()
	debug.Log("testing if lock %v for process %d is stale", l.lockID, l.PID)

	// a lock past the timeout is stale no matter where it was created
	if time.Since(l.Time) > StaleLockTimeout {
		debug.Log("lock is stale, timestamp is too old: %v\n", l.Time)
		return true
	}

	hn, err := os.Hostname()
	switch {
	case err != nil:
		debug.Log("unable to find current hostname: %v", err)
		// since we cannot find the current hostname, assume that the lock is
		// not stale.
		return false
	case hn != l.Hostname:
		// lock was created on a different host, assume the lock is not stale.
		return false
	}

	// same host: check if we can reach the process retaining the lock
	if l.processExists() {
		debug.Log("lock not stale\n")
		return false
	}
	debug.Log("could not reach process, %d, lock is probably stale\n", l.PID)
	return true
}
func delayedCancelContext(parentCtx context.Context, delay time.Duration) (context.Context, context.CancelFunc) {
ctx, cancel := context.WithCancel(context.Background())
go func() {
select {
case <-parentCtx.Done():
case <-ctx.Done():
return
}
time.Sleep(delay)
cancel()
}()
return ctx, cancel
}
// Refresh refreshes the lock by creating a new file in the backend with a new
// timestamp. Afterwards the old lock is removed.
func (l *Lock) Refresh(ctx context.Context) error {
	debug.Log("refreshing lock %v", l.lockID)
	l.lock.Lock()
	l.Time = time.Now()
	l.lock.Unlock()
	// write the replacement lock first so the repository stays locked at all times
	id, err := l.createLock(ctx)
	if err != nil {
		return err
	}

	l.lock.Lock()
	defer l.lock.Unlock()

	debug.Log("new lock ID %v", id)
	oldLockID := l.lockID
	// only switch over to the new lock ID after it was written successfully
	l.lockID = &id

	// keep removing the old lock file for a grace period even if ctx is canceled
	ctx, cancel := delayedCancelContext(ctx, UnlockCancelDelay)
	defer cancel()
	return l.repo.RemoveUnpacked(ctx, LockFile, *oldLockID)
}
// RefreshStaleLock is an extended variant of Refresh that can also refresh stale lock files.
// It returns ErrRemovedLock if the lock file no longer exists.
func (l *Lock) RefreshStaleLock(ctx context.Context) error {
	debug.Log("refreshing stale lock %v", l.lockID)
	// refreshing a stale lock is possible if it still exists and continues to do
	// so until after creating a new lock. The initial check avoids creating a new
	// lock file if this lock was already removed in the meantime.
	exists, err := l.checkExistence(ctx)
	if err != nil {
		return err
	} else if !exists {
		return ErrRemovedLock
	}

	l.lock.Lock()
	l.Time = time.Now()
	l.lock.Unlock()
	id, err := l.createLock(ctx)
	if err != nil {
		return err
	}

	// give the backend time to propagate the new lock before re-checking
	time.Sleep(waitBeforeLockCheck)

	// re-check that the old lock survived until after the new one was written;
	// otherwise another process may have removed our stale lock in between
	exists, err = l.checkExistence(ctx)

	// allow cleanup to proceed for a grace period even if ctx is canceled
	ctx, cancel := delayedCancelContext(ctx, UnlockCancelDelay)
	defer cancel()

	if err != nil {
		// cleanup replacement lock
		_ = l.repo.RemoveUnpacked(ctx, LockFile, id)
		return err
	}

	if !exists {
		// cleanup replacement lock
		_ = l.repo.RemoveUnpacked(ctx, LockFile, id)
		return ErrRemovedLock
	}

	l.lock.Lock()
	defer l.lock.Unlock()

	debug.Log("new lock ID %v", id)
	oldLockID := l.lockID
	// only switch over to the new lock ID after all checks passed
	l.lockID = &id

	return l.repo.RemoveUnpacked(ctx, LockFile, *oldLockID)
}
// checkExistence reports whether this lock's file is still present in the
// repository.
func (l *Lock) checkExistence(ctx context.Context) (bool, error) {
	l.lock.Lock()
	defer l.lock.Unlock()

	found := false
	err := l.repo.List(ctx, LockFile, func(id ID, _ int64) error {
		if id.Equal(*l.lockID) {
			found = true
		}
		return nil
	})
	return found, err
}
// String returns a human-readable description of the lock: owning process,
// host, user, creation time, and storage ID.
func (l *Lock) String() string {
	l.lock.Lock()
	defer l.lock.Unlock()

	return fmt.Sprintf("PID %d on %s by %s (UID %d, GID %d)\nlock was created at %s (%s ago)\nstorage ID %v",
		l.PID, l.Hostname, l.Username, l.UID, l.GID,
		l.Time.Format("2006-01-02 15:04:05"), time.Since(l.Time),
		l.lockID.Str())
}
// listen for incoming SIGHUP and ignore
var ignoreSIGHUP sync.Once

// init installs a handler that swallows SIGHUP so that losing the controlling
// terminal does not abort a long-running operation holding a lock.
func init() {
	ignoreSIGHUP.Do(func() {
		go func() {
			c := make(chan os.Signal, 1)
			signal.Notify(c, syscall.SIGHUP)
			// drain the channel forever; the signal is deliberately ignored
			for s := range c {
				debug.Log("Signal received: %v\n", s)
			}
		}()
	})
}
// LoadLock loads and unserializes a lock from a repository.
func LoadLock(ctx context.Context, repo LoaderUnpacked, id ID) (*Lock, error) {
	var lock Lock
	if err := LoadJSONUnpacked(ctx, repo, LockFile, id, &lock); err != nil {
		return nil, err
	}
	// remember which file this lock was loaded from
	lock.lockID = &id
	return &lock, nil
}
// ForAllLocks reads all locks in parallel and calls the given callback.
// It is guaranteed that the function is not run concurrently. If the
// callback returns an error, this function is cancelled and also returns that error.
// Lock IDs contained in excludeIDs are skipped.
func ForAllLocks(ctx context.Context, repo ListerLoaderUnpacked, excludeIDs IDSet, fn func(ID, *Lock, error) error) error {
	var m sync.Mutex

	// For locks decoding is nearly for free, thus just assume were only limited by IO
	return ParallelList(ctx, repo, LockFile, repo.Connections(), func(ctx context.Context, id ID, size int64) error {
		if excludeIDs.Has(id) {
			return nil
		}
		if size == 0 {
			// Ignore empty lock files as some backends do not guarantee atomic uploads.
			// These may leave empty files behind if an upload was interrupted between
			// creating the file and writing its data.
			return nil
		}
		lock, err := LoadLock(ctx, repo, id)

		// m serializes the callback invocations as promised above
		m.Lock()
		defer m.Unlock()
		return fn(id, lock, err)
	})
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/id.go | internal/restic/id.go | package restic
import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
)
// Hash returns the ID for data.
func Hash(data []byte) ID {
return sha256.Sum256(data)
}
// idSize contains the size of an ID, in bytes.
const idSize = sha256.Size
// ID references content within a repository.
type ID [idSize]byte
// ParseID converts the given string to an ID.
func ParseID(s string) (ID, error) {
if len(s) != hex.EncodedLen(idSize) {
return ID{}, fmt.Errorf("invalid length for ID: %q", s)
}
b, err := hex.DecodeString(s)
if err != nil {
return ID{}, fmt.Errorf("invalid ID: %s", err)
}
id := ID{}
copy(id[:], b)
return id, nil
}
func (id ID) String() string {
return hex.EncodeToString(id[:])
}
// NewRandomID returns a randomly generated ID. When reading from rand fails,
// the function panics.
func NewRandomID() ID {
id := ID{}
_, err := io.ReadFull(rand.Reader, id[:])
if err != nil {
panic(err)
}
return id
}
const shortStr = 4
// Str returns the shortened string version of id.
func (id *ID) Str() string {
if id == nil {
return "[nil]"
}
if id.IsNull() {
return "[null]"
}
return hex.EncodeToString(id[:shortStr])
}
// IsNull returns true iff id only consists of null bytes.
func (id ID) IsNull() bool {
var nullID ID
return id == nullID
}
// Equal compares an ID to another other.
func (id ID) Equal(other ID) bool {
return id == other
}
// MarshalJSON returns the JSON encoding of id.
func (id ID) MarshalJSON() ([]byte, error) {
buf := make([]byte, 2+hex.EncodedLen(len(id)))
buf[0] = '"'
hex.Encode(buf[1:], id[:])
buf[len(buf)-1] = '"'
return buf, nil
}
// UnmarshalJSON parses the JSON-encoded data and stores the result in id.
func (id *ID) UnmarshalJSON(b []byte) error {
// check string length
if len(b) != len(`""`)+hex.EncodedLen(idSize) {
return fmt.Errorf("invalid length for ID: %q", b)
}
if b[0] != '"' {
return fmt.Errorf("invalid start of string: %q", b[0])
}
// Strip JSON string delimiters. The json.Unmarshaler contract says we get
// a valid JSON value, so we don't need to check that b[len(b)-1] == '"'.
b = b[1 : len(b)-1]
_, err := hex.Decode(id[:], b)
if err != nil {
return fmt.Errorf("invalid ID: %s", err)
}
return nil
}
// IDFromHash returns the ID for the hash.
func IDFromHash(hash []byte) (id ID) {
if len(hash) != idSize {
panic("invalid hash type, not enough/too many bytes")
}
copy(id[:], hash)
return id
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/parallel_test.go | internal/restic/parallel_test.go | package restic
import (
"context"
"sync"
"testing"
"time"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/ui/progress"
)
// mockRemoverUnpacked is a test double for RemoverUnpacked that delegates
// removal to a configurable function.
type mockRemoverUnpacked struct {
	removeUnpacked func(ctx context.Context, t FileType, id ID) error
}

// Connections returns a fixed connection count for the mock.
func (m *mockRemoverUnpacked) Connections() uint {
	return 2
}

// RemoveUnpacked delegates to the configured removeUnpacked function.
func (m *mockRemoverUnpacked) RemoveUnpacked(ctx context.Context, t FileType, id ID) error {
	return m.removeUnpacked(ctx, t, id)
}
// NewTestID returns a deterministic ID derived from the single byte i.
func NewTestID(i byte) ID {
	return Hash([]byte{i})
}
// TestParallelRemove checks that ParallelRemove only counts and reports
// successfully removed files, across all-success, all-fail, and mixed cases.
func TestParallelRemove(t *testing.T) {
	ctx := context.Background()

	fileType := SnapshotFile // this can be any FileType

	tests := []struct {
		name            string
		removeUnpacked  func(ctx context.Context, t FileType, id ID) error
		fileList        IDSet
		wantRemoved     IDSet // files expected to be removed successfully
		wantReportIDSet IDSet // files expected to be passed to report without error
		wantBarCount    int   // expected progress counter value
	}{
		{
			name: "remove files",
			removeUnpacked: func(ctx context.Context, t FileType, id ID) error {
				return nil
			},
			fileList:        NewIDSet(NewTestID(1), NewTestID(2), NewTestID(3)),
			wantRemoved:     NewIDSet(NewTestID(1), NewTestID(2), NewTestID(3)),
			wantReportIDSet: NewIDSet(NewTestID(1), NewTestID(2), NewTestID(3)),
			wantBarCount:    3,
		},
		{
			name: "remove files with error",
			removeUnpacked: func(ctx context.Context, t FileType, id ID) error {
				return errors.New("error")
			},
			fileList:        NewIDSet(NewTestID(1), NewTestID(2), NewTestID(3)),
			wantRemoved:     NewIDSet(),
			wantReportIDSet: NewIDSet(),
			wantBarCount:    0,
		},
		{
			name: "fail 2 files",
			removeUnpacked: func(ctx context.Context, t FileType, id ID) error {
				if id == NewTestID(2) {
					return errors.New("error")
				}
				if id == NewTestID(3) {
					return errors.New("error")
				}
				return nil
			},
			fileList:        NewIDSet(NewTestID(1), NewTestID(2), NewTestID(3), NewTestID(4)),
			wantRemoved:     NewIDSet(NewTestID(1), NewTestID(4)),
			wantReportIDSet: NewIDSet(NewTestID(1), NewTestID(4)),
			wantBarCount:    2,
		},
	}

	// guards reportIDSet; report is called from multiple workers
	mu := sync.Mutex{}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			repo := &mockRemoverUnpacked{removeUnpacked: test.removeUnpacked}
			reportIDSet := NewIDSet()
			bar := progress.NewCounter(time.Millisecond, 0, func(value uint64, total uint64, runtime time.Duration, final bool) {})
			report := func(id ID, err error) error {
				if err == nil {
					mu.Lock()
					reportIDSet.Insert(id)
					mu.Unlock()
					return nil
				}
				return nil
			}
			_ = ParallelRemove(ctx, repo, test.fileList, fileType, report, bar)
			barCount, _ := bar.Get()
			if barCount != uint64(test.wantBarCount) {
				t.Errorf("ParallelRemove() barCount = %d, want %d", barCount, test.wantBarCount)
			}
			if !reportIDSet.Equals(test.wantReportIDSet) {
				t.Errorf("ParallelRemove() reportIDSet = %v, want %v", reportIDSet, test.wantReportIDSet)
			}
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/parallel.go | internal/restic/parallel.go | package restic
import (
"context"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/ui/progress"
"golang.org/x/sync/errgroup"
)
// ParallelList calls fn once for each file of type t in r, running up to
// parallelism invocations concurrently. The first error returned by fn or by
// the listing cancels the remaining work and is returned.
func ParallelList(ctx context.Context, r Lister, t FileType, parallelism uint, fn func(context.Context, ID, int64) error) error {

	type FileIDInfo struct {
		ID
		Size int64
	}

	// track spawned goroutines using wg, create a new context which is
	// cancelled as soon as an error occurs.
	wg, ctx := errgroup.WithContext(ctx)

	ch := make(chan FileIDInfo)
	// send the list of files through ch, which is closed afterwards
	wg.Go(func() error {
		defer close(ch)
		return r.List(ctx, t, func(id ID, size int64) error {
			select {
			case <-ctx.Done():
				// drop remaining entries; a worker error surfaces via the errgroup
				return nil
			case ch <- FileIDInfo{id, size}:
			}
			return nil
		})
	})

	// a worker receives a file ID from ch and runs fn on it
	worker := func() error {
		for fi := range ch {
			debug.Log("worker got file %v/%v", t, fi.ID.Str())
			err := fn(ctx, fi.ID, fi.Size)
			if err != nil {
				return err
			}
		}
		return nil
	}

	// run workers on ch
	for i := uint(0); i < parallelism; i++ {
		wg.Go(worker)
	}

	return wg.Wait()
}
// ParallelRemove deletes the given fileList of fileType in parallel.
// If report returns an error, it aborts. The progress bar counts only
// successful removals.
func ParallelRemove[FT FileTypes](ctx context.Context, repo RemoverUnpacked[FT], fileList IDSet, fileType FT, report func(id ID, err error) error, bar *progress.Counter) error {
	wg, ctx := errgroup.WithContext(ctx)
	wg.SetLimit(int(repo.Connections())) // deleting files is IO-bound

	bar.SetMax(uint64(len(fileList)))

loop:
	for id := range fileList {
		// stop spawning further deletions once the context got canceled
		select {
		case <-ctx.Done():
			break loop
		default:
		}
		wg.Go(func() error {
			err := repo.RemoveUnpacked(ctx, fileType, id)
			if err == nil {
				// only count successful file deletions
				bar.Add(1)
			}
			if report != nil {
				// report may suppress or replace the error
				err = report(id, err)
			}
			return err
		})
	}
	return wg.Wait()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/ids_test.go | internal/restic/ids_test.go | package restic
import (
"testing"
rtest "github.com/restic/restic/internal/test"
)
// TestIDsString checks that an IDs slice formats as a bracketed list of
// shortened (8 hex digit) IDs, preserving order and duplicates.
func TestIDsString(t *testing.T) {
	ids := IDs{
		TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
		TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
		TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
	}

	rtest.Equals(t, "[7bb086db 1285b303 7bb086db]", ids.String())
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/lock_test.go | internal/restic/lock_test.go | package restic_test
import (
"context"
"fmt"
"io"
"os"
"testing"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/mem"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
// TestLock checks that a non-exclusive lock can be created and released.
func TestLock(t *testing.T) {
	repo := repository.TestRepository(t)
	restic.TestSetLockTimeout(t, 5*time.Millisecond)

	lock, err := repository.TestNewLock(t, repo, false)
	rtest.OK(t, err)

	rtest.OK(t, lock.Unlock(context.TODO()))
}
// TestDoubleUnlock checks that releasing an already-released lock fails.
func TestDoubleUnlock(t *testing.T) {
	repo := repository.TestRepository(t)
	restic.TestSetLockTimeout(t, 5*time.Millisecond)

	lock, err := repository.TestNewLock(t, repo, false)
	rtest.OK(t, err)

	rtest.OK(t, lock.Unlock(context.TODO()))

	// the second Unlock must fail since the lock file is already gone
	err = lock.Unlock(context.TODO())
	rtest.Assert(t, err != nil,
		"double unlock didn't return an error, got %v", err)
}
// TestMultipleLock checks that two non-exclusive locks can coexist.
func TestMultipleLock(t *testing.T) {
	repo := repository.TestRepository(t)
	restic.TestSetLockTimeout(t, 5*time.Millisecond)

	lock1, err := repository.TestNewLock(t, repo, false)
	rtest.OK(t, err)

	lock2, err := repository.TestNewLock(t, repo, false)
	rtest.OK(t, err)

	rtest.OK(t, lock1.Unlock(context.TODO()))
	rtest.OK(t, lock2.Unlock(context.TODO()))
}
// failLockLoadingBackend wraps a backend and fails every load of a lock file
// while passing all other loads through.
type failLockLoadingBackend struct {
	backend.Backend
}

// Load returns an error for lock files and delegates all other loads.
func (be *failLockLoadingBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	if h.Type == restic.LockFile {
		return fmt.Errorf("error loading lock")
	}
	return be.Backend.Load(ctx, h, length, offset, fn)
}
// TestMultipleLockFailure checks that locking fails when an existing lock
// file cannot be read (its validity cannot be determined).
func TestMultipleLockFailure(t *testing.T) {
	be := &failLockLoadingBackend{Backend: mem.New()}
	repo, _ := repository.TestRepositoryWithBackend(t, be, 0, repository.Options{})
	restic.TestSetLockTimeout(t, 5*time.Millisecond)

	lock1, err := repository.TestNewLock(t, repo, false)
	rtest.OK(t, err)

	_, err = repository.TestNewLock(t, repo, false)
	rtest.Assert(t, err != nil, "unreadable lock file did not result in an error")

	rtest.OK(t, lock1.Unlock(context.TODO()))
}
// TestLockExclusive checks that an exclusive lock can be created and released.
func TestLockExclusive(t *testing.T) {
	repo := repository.TestRepository(t)

	elock, err := repository.TestNewLock(t, repo, true)
	rtest.OK(t, err)
	rtest.OK(t, elock.Unlock(context.TODO()))
}
// TestLockOnExclusiveLockedRepo checks that creating a non-exclusive lock
// fails with IsAlreadyLocked while an exclusive lock is held.
func TestLockOnExclusiveLockedRepo(t *testing.T) {
	repo := repository.TestRepository(t)
	restic.TestSetLockTimeout(t, 5*time.Millisecond)

	elock, err := repository.TestNewLock(t, repo, true)
	rtest.OK(t, err)

	lock, err := repository.TestNewLock(t, repo, false)
	rtest.Assert(t, err != nil,
		"create normal lock with exclusively locked repo didn't return an error")
	rtest.Assert(t, restic.IsAlreadyLocked(err),
		"create normal lock with exclusively locked repo didn't return the correct error")

	// Unlock tolerates a lock that was never created
	rtest.OK(t, lock.Unlock(context.TODO()))
	rtest.OK(t, elock.Unlock(context.TODO()))
}
// TestExclusiveLockOnLockedRepo checks that creating an exclusive lock fails
// with IsAlreadyLocked while a non-exclusive lock is already held.
func TestExclusiveLockOnLockedRepo(t *testing.T) {
	repo := repository.TestRepository(t)
	restic.TestSetLockTimeout(t, 5*time.Millisecond)

	// hold a regular (non-exclusive) lock first
	lock, err := repository.TestNewLock(t, repo, false)
	rtest.OK(t, err)

	// the exclusive lock must now be refused
	elock, err := repository.TestNewLock(t, repo, true)
	rtest.Assert(t, err != nil,
		"create exclusive lock with normally locked repo didn't return an error")
	rtest.Assert(t, restic.IsAlreadyLocked(err),
		"create exclusive lock with normally locked repo didn't return the correct error")

	// Unlock tolerates a lock that was never created
	rtest.OK(t, elock.Unlock(context.TODO()))
	rtest.OK(t, lock.Unlock(context.TODO()))
}
// staleLockTests lists lock fixtures together with the expected staleness
// when evaluated on this host (stale) and on a different host (staleOnOtherHost).
var staleLockTests = []struct {
	timestamp        time.Time
	stale            bool
	staleOnOtherHost bool
	pid              int
}{
	{
		// fresh lock owned by the running process -> never stale
		timestamp:        time.Now(),
		stale:            false,
		staleOnOtherHost: false,
		pid:              os.Getpid(),
	},
	{
		// timestamp older than StaleLockTimeout -> stale everywhere
		timestamp:        time.Now().Add(-time.Hour),
		stale:            true,
		staleOnOtherHost: true,
		pid:              os.Getpid(),
	},
	{
		// timestamp in the future -> not stale
		timestamp:        time.Now().Add(3 * time.Minute),
		stale:            false,
		staleOnOtherHost: false,
		pid:              os.Getpid(),
	},
	{
		// dead process on this host -> stale locally, but the process check
		// cannot run for a lock from another host
		timestamp:        time.Now(),
		stale:            true,
		staleOnOtherHost: false,
		pid:              os.Getpid() + 500000,
	},
}
// TestLockStale runs the staleLockTests fixtures against Lock.Stale, once
// with the current hostname and once with a foreign hostname.
func TestLockStale(t *testing.T) {
	hostname, err := os.Hostname()
	rtest.OK(t, err)

	otherHostname := "other-" + hostname

	for i, test := range staleLockTests {
		lock := restic.Lock{
			Time:     test.timestamp,
			PID:      test.pid,
			Hostname: hostname,
		}

		rtest.Assert(t, lock.Stale() == test.stale,
			"TestStaleLock: test %d failed: expected stale: %v, got %v",
			i, test.stale, !test.stale)

		// the same lock evaluated as if created on another host
		lock.Hostname = otherHostname
		rtest.Assert(t, lock.Stale() == test.staleOnOtherHost,
			"TestStaleLock: test %d failed: expected staleOnOtherHost: %v, got %v",
			i, test.staleOnOtherHost, !test.staleOnOtherHost)
	}
}
// checkSingleLock asserts that exactly one lock file exists in repo and
// returns its ID.
func checkSingleLock(t *testing.T, repo restic.Lister) restic.ID {
	t.Helper()
	var lockID *restic.ID
	err := repo.List(context.TODO(), restic.LockFile, func(id restic.ID, size int64) error {
		if lockID != nil {
			t.Error("more than one lock found")
		}
		lockID = &id
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if lockID == nil {
		t.Fatal("no lock found")
	}
	return *lockID
}
// testLockRefresh checks that the given refresh function replaces the lock
// file with a new ID and a newer timestamp.
func testLockRefresh(t *testing.T, refresh func(lock *restic.Lock) error) {
	repo := repository.TestRepository(t)
	restic.TestSetLockTimeout(t, 5*time.Millisecond)

	lock, err := repository.TestNewLock(t, repo, false)
	rtest.OK(t, err)
	time0 := lock.Time

	lockID := checkSingleLock(t, repo)

	// ensure the refreshed timestamp is strictly later
	time.Sleep(time.Millisecond)
	rtest.OK(t, refresh(lock))

	lockID2 := checkSingleLock(t, repo)

	rtest.Assert(t, !lockID.Equal(lockID2),
		"expected a new ID after lock refresh, got the same")
	lock2, err := restic.LoadLock(context.TODO(), repo, lockID2)
	rtest.OK(t, err)
	rtest.Assert(t, lock2.Time.After(time0),
		"expected a later timestamp after lock refresh")
	rtest.OK(t, lock.Unlock(context.TODO()))
}
// TestLockRefresh checks Lock.Refresh via the shared testLockRefresh helper.
func TestLockRefresh(t *testing.T) {
	testLockRefresh(t, func(lock *restic.Lock) error {
		return lock.Refresh(context.TODO())
	})
}
// TestLockRefreshStale checks Lock.RefreshStaleLock via the shared
// testLockRefresh helper.
func TestLockRefreshStale(t *testing.T) {
	testLockRefresh(t, func(lock *restic.Lock) error {
		return lock.RefreshStaleLock(context.TODO())
	})
}
// TestLockRefreshStaleMissing checks that RefreshStaleLock returns
// ErrRemovedLock when the lock file was deleted out from under the lock.
func TestLockRefreshStaleMissing(t *testing.T) {
	repo, _, be := repository.TestRepositoryWithVersion(t, 0)
	restic.TestSetLockTimeout(t, 5*time.Millisecond)

	lock, err := repository.TestNewLock(t, repo, false)
	rtest.OK(t, err)
	lockID := checkSingleLock(t, repo)

	// refresh must fail if lock was removed
	rtest.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.LockFile, Name: lockID.String()}))
	time.Sleep(time.Millisecond)
	err = lock.RefreshStaleLock(context.TODO())
	rtest.Assert(t, err == restic.ErrRemovedLock, "unexpected error, expected %v, got %v", restic.ErrRemovedLock, err)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/backend_find_test.go | internal/restic/backend_find_test.go | package restic_test
import (
"context"
"strings"
"testing"
"github.com/restic/restic/internal/restic"
)
// samples is a sorted list of IDs used as the listing fixture for TestFind;
// the first three share the prefix "20bdc1402a6fc9b633".
var samples = restic.IDs{
	restic.TestParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"),
	restic.TestParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"),
	restic.TestParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"),
	restic.TestParseID("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"),
	restic.TestParseID("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"),
	restic.TestParseID("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"),
	restic.TestParseID("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"),
	restic.TestParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"),
}
// TestFind checks prefix matching in restic.Find: a unique prefix resolves to
// one ID, while unknown, over-long, and ambiguous prefixes return the
// corresponding error types and a null ID.
func TestFind(t *testing.T) {
	list := samples

	m := &ListHelper{}
	m.ListFn = func(ctx context.Context, t restic.FileType, fn func(id restic.ID, size int64) error) error {
		for _, id := range list {
			err := fn(id, 0)
			if err != nil {
				return err
			}
		}
		return nil
	}

	// unique prefix -> exactly one match
	f, err := restic.Find(context.TODO(), m, restic.SnapshotFile, "20bdc1402a6fc9b633aa")
	if err != nil {
		t.Error(err)
	}
	expectedMatch := restic.TestParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff")
	if f != expectedMatch {
		t.Errorf("Wrong match returned want %s, got %s", expectedMatch, f)
	}

	// unknown prefix -> NoIDByPrefixError mentioning the prefix
	f, err = restic.Find(context.TODO(), m, restic.SnapshotFile, "NotAPrefix")
	if _, ok := err.(*restic.NoIDByPrefixError); !ok || !strings.Contains(err.Error(), "NotAPrefix") {
		t.Error("Expected no snapshots to be found.")
	}
	if !f.IsNull() {
		t.Errorf("Find should not return a match on error.")
	}

	// Try to match with a prefix longer than any ID.
	extraLengthID := samples[0].String() + "f"
	f, err = restic.Find(context.TODO(), m, restic.SnapshotFile, extraLengthID)
	if _, ok := err.(*restic.NoIDByPrefixError); !ok || !strings.Contains(err.Error(), extraLengthID) {
		t.Errorf("Wrong error %v for no snapshots matched", err)
	}
	if !f.IsNull() {
		t.Errorf("Find should not return a match on error.")
	}

	// Use a prefix that will match the prefix of multiple Ids in `samples`.
	f, err = restic.Find(context.TODO(), m, restic.SnapshotFile, "20bdc140")
	if _, ok := err.(*restic.MultipleIDMatchesError); !ok {
		t.Errorf("Wrong error %v for multiple snapshots", err)
	}
	if !f.IsNull() {
		t.Errorf("Find should not return a match on error.")
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/config_test.go | internal/restic/config_test.go | package restic_test
import (
"context"
"testing"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
// saver is a test double for the unpacked-save interface, delegating to fn.
type saver struct {
	fn func(restic.FileType, []byte) (restic.ID, error)
}

// SaveUnpacked delegates to the configured function.
func (s saver) SaveUnpacked(_ context.Context, t restic.FileType, buf []byte) (restic.ID, error) {
	return s.fn(t, buf)
}

// Connections returns a fixed connection count for the test double.
func (s saver) Connections() uint {
	return 2
}
// loader is a test double for the unpacked-load interface, delegating to fn.
type loader struct {
	fn func(restic.FileType, restic.ID) ([]byte, error)
}

// LoadUnpacked delegates to the configured function.
func (l loader) LoadUnpacked(_ context.Context, t restic.FileType, id restic.ID) (data []byte, err error) {
	return l.fn(t, id)
}

// Connections returns a fixed connection count for the test double.
func (l loader) Connections() uint {
	return 2
}
// TestConfig checks that a repository config survives a save/load round trip
// through the ConfigFile channel unchanged.
func TestConfig(t *testing.T) {
	// captures the bytes written by SaveConfig so load can replay them
	var resultBuf []byte
	save := func(tpe restic.FileType, buf []byte) (restic.ID, error) {
		rtest.Assert(t, tpe == restic.ConfigFile,
			"wrong backend type: got %v, wanted %v",
			tpe, restic.ConfigFile)

		resultBuf = buf
		return restic.ID{}, nil
	}

	cfg1, err := restic.CreateConfig(restic.MaxRepoVersion)
	rtest.OK(t, err)

	err = restic.SaveConfig(context.TODO(), saver{save}, cfg1)
	rtest.OK(t, err)

	load := func(tpe restic.FileType, id restic.ID) ([]byte, error) {
		rtest.Assert(t, tpe == restic.ConfigFile,
			"wrong backend type: got %v, wanted %v",
			tpe, restic.ConfigFile)

		return resultBuf, nil
	}

	cfg2, err := restic.LoadConfig(context.TODO(), loader{load})
	rtest.OK(t, err)

	rtest.Assert(t, cfg1 == cfg2,
		"configs aren't equal: %v != %v", cfg1, cfg2)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/zeroprefix_test.go | internal/restic/zeroprefix_test.go | package restic_test
import (
"math/rand"
"testing"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
)
// TestZeroPrefixLen checks ZeroPrefixLen against buffers with a zero prefix
// of every possible length, and against buffers with no zero prefix at all.
func TestZeroPrefixLen(t *testing.T) {
	var buf [2048]byte

	// test zero prefixes of various lengths
	for i := len(buf) - 1; i >= 0; i-- {
		buf[i] = 42
		skipped := restic.ZeroPrefixLen(buf[:])
		test.Equals(t, i, skipped)
	}

	// test buffers of various sizes
	for i := 0; i < len(buf); i++ {
		skipped := restic.ZeroPrefixLen(buf[i:])
		test.Equals(t, 0, skipped)
	}
}
// BenchmarkZeroPrefixLen measures ZeroPrefixLen on a ~4 MiB buffer with a
// randomly placed non-zero byte, logging the average skipped fraction.
func BenchmarkZeroPrefixLen(b *testing.B) {
	var (
		buf        [4<<20 + 37]byte
		r          = rand.New(rand.NewSource(0x618732))
		sumSkipped int64
	)

	b.ReportAllocs()
	b.SetBytes(int64(len(buf)))
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		// place a single non-zero byte, measure, then restore the buffer
		j := r.Intn(len(buf))
		buf[j] = 0xff

		skipped := restic.ZeroPrefixLen(buf[:])
		sumSkipped += int64(skipped)

		buf[j] = 0
	}

	// The closer this is to .5, the better. If it's far off, give the
	// benchmark more time to run with -benchtime.
	b.Logf("average number of zeros skipped: %.3f",
		float64(sumSkipped)/(float64(b.N*len(buf))))
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/blob_set_test.go | internal/restic/blob_set_test.go | package restic
import (
"math/rand"
"regexp"
"testing"
rtest "github.com/restic/restic/internal/test"
)
// TestBlobSetString checks BlobSet's String output for the empty set, a
// single-element set, and the truncated "(N more)" form for large sets.
func TestBlobSetString(t *testing.T) {
	random := rand.New(rand.NewSource(42))

	s := NewBlobSet()
	rtest.Equals(t, "{}", s.String())

	id, _ := ParseID(
		"1111111111111111111111111111111111111111111111111111111111111111")
	s.Insert(BlobHandle{ID: id, Type: TreeBlob})
	rtest.Equals(t, "{<tree/11111111>}", s.String())

	var h BlobHandle
	for i := 0; i < 100; i++ {
		h.Type = DataBlob
		_, _ = random.Read(h.ID[:])
		s.Insert(h)
	}

	// 101 entries: ten are printed, the rest summarized as "(90 more)"
	r := regexp.MustCompile(
		`^{(?:<(?:data|tree)/[0-9a-f]{8}> ){10}\(90 more\)}$`)
	str := s.String()
	rtest.Assert(t, r.MatchString(str), "%q doesn't match pattern", str)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/blob_test.go | internal/restic/blob_test.go | package restic
import (
"encoding/json"
"testing"
)
// blobTypeJSON pairs each BlobType with its expected JSON encoding.
var blobTypeJSON = []struct {
	t   BlobType
	res string
}{
	{DataBlob, `"data"`},
	{TreeBlob, `"tree"`},
}
// TestBlobTypeJSON checks that BlobType marshals to and unmarshals from the
// expected JSON strings.
func TestBlobTypeJSON(t *testing.T) {
	for _, test := range blobTypeJSON {
		// test serialize
		buf, err := json.Marshal(test.t)
		if err != nil {
			t.Error(err)
			continue
		}
		if test.res != string(buf) {
			t.Errorf("want %q, got %q", test.res, string(buf))
			continue
		}

		// test unserialize
		var v BlobType
		err = json.Unmarshal([]byte(test.res), &v)
		if err != nil {
			t.Error(err)
			continue
		}
		if test.t != v {
			t.Errorf("want %v, got %v", test.t, v)
			continue
		}
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/lister_test.go | internal/restic/lister_test.go | package restic_test
import (
"context"
"fmt"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
// ListHelper is a test double for restic.Lister delegating to ListFn.
type ListHelper struct {
	ListFn func(ctx context.Context, t restic.FileType, fn func(restic.ID, int64) error) error
}

// List delegates to the configured ListFn.
func (l *ListHelper) List(ctx context.Context, t restic.FileType, fn func(restic.ID, int64) error) error {
	return l.ListFn(ctx, t, fn)
}
// TestMemoizeList checks that MemorizeList replays the recorded file listing
// for the memorized type and rejects listing requests for other types.
func TestMemoizeList(t *testing.T) {
	// setup backend to serve as data source for memoized list
	be := &ListHelper{}

	type FileInfo struct {
		ID   restic.ID
		Size int64
	}
	files := []FileInfo{
		{ID: restic.NewRandomID(), Size: 42},
		{ID: restic.NewRandomID(), Size: 45},
	}

	be.ListFn = func(ctx context.Context, t restic.FileType, fn func(restic.ID, int64) error) error {
		for _, fi := range files {
			if err := fn(fi.ID, fi.Size); err != nil {
				return err
			}
		}
		return nil
	}

	mem, err := restic.MemorizeList(context.TODO(), be, backend.SnapshotFile)
	rtest.OK(t, err)

	// listing a different type must fail without invoking the callback
	err = mem.List(context.TODO(), backend.IndexFile, func(id restic.ID, size int64) error {
		t.Fatal("file type mismatch")
		return nil // the memoized lister must return an error by itself
	})
	rtest.Assert(t, err != nil, "missing error on file type mismatch")

	// the memorized listing must replay all entries in order
	var memFiles []FileInfo
	err = mem.List(context.TODO(), backend.SnapshotFile, func(id restic.ID, size int64) error {
		memFiles = append(memFiles, FileInfo{ID: id, Size: size})
		return nil
	})
	rtest.OK(t, err)
	rtest.Equals(t, files, memFiles)
}
// TestMemoizeListError checks that MemorizeList propagates a listing error.
func TestMemoizeListError(t *testing.T) {
	// setup backend to serve as data source for memoized list
	be := &ListHelper{}
	be.ListFn = func(ctx context.Context, t backend.FileType, fn func(restic.ID, int64) error) error {
		return fmt.Errorf("list error")
	}
	_, err := restic.MemorizeList(context.TODO(), be, backend.SnapshotFile)
	rtest.Assert(t, err != nil, "missing error on list error")
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/lister.go | internal/restic/lister.go | package restic
import (
"context"
"fmt"
)
// fileInfo couples a file ID with its size as reported by List.
type fileInfo struct {
	id   ID
	size int64
}

// memorizedLister replays a previously recorded file listing of a single
// file type from memory.
type memorizedLister struct {
	fileInfos []fileInfo
	tpe       FileType
}
// List replays the memorized file listing of type t, calling fn for every
// recorded entry. A request for any other file type is an error. Replay stops
// early when fn fails or ctx is canceled.
func (m *memorizedLister) List(ctx context.Context, t FileType, fn func(ID, int64) error) error {
	if t != m.tpe {
		return fmt.Errorf("filetype mismatch, expected %s got %s", m.tpe, t)
	}
	for _, fi := range m.fileInfos {
		if err := ctx.Err(); err != nil {
			return err
		}
		if err := fn(fi.id, fi.size); err != nil {
			return err
		}
	}
	return ctx.Err()
}
// MemorizeList wraps the given Lister so that the listing for file type t is
// fetched once up front and replayed from memory afterwards. Passing an
// already-memorized lister returns it unchanged.
func MemorizeList(ctx context.Context, be Lister, t FileType) (Lister, error) {
	if _, ok := be.(*memorizedLister); ok {
		// nothing to do, the listing is already cached
		return be, nil
	}

	m := &memorizedLister{tpe: t}
	err := be.List(ctx, t, func(id ID, size int64) error {
		m.fileInfos = append(m.fileInfos, fileInfo{id, size})
		return nil
	})
	if err != nil {
		return nil, err
	}
	return m, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/testing.go | internal/restic/testing.go | package restic
import (
"fmt"
)
// TestParseID parses s as a ID and panics if that fails.
func TestParseID(s string) ID {
	id, err := ParseID(s)
	if err == nil {
		return id
	}
	panic(fmt.Sprintf("unable to parse string %q as ID: %v", s, err))
}
// TestParseHandle parses s as a ID, panics if that fails and creates a BlobHandle with t.
func TestParseHandle(s string, t BlobType) BlobHandle {
	return BlobHandle{Type: t, ID: TestParseID(s)}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/repository.go | internal/restic/repository.go | package restic
import (
"context"
"iter"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/ui/progress"
)
// ErrInvalidData is used to report that a file is corrupted
var ErrInvalidData = errors.New("invalid data returned")
// Repository stores data in a backend. It provides high-level functions and
// transparently encrypts/decrypts data.
type Repository interface {
// Connections returns the maximum number of concurrent backend operations
Connections() uint
Config() Config
PackSize() uint
Key() *crypto.Key
LoadIndex(ctx context.Context, p TerminalCounterFactory) error
LookupBlob(t BlobType, id ID) []PackedBlob
LookupBlobSize(t BlobType, id ID) (size uint, exists bool)
NewAssociatedBlobSet() AssociatedBlobSet
// ListBlobs runs fn on all blobs known to the index. When the context is cancelled,
// the index iteration returns immediately with ctx.Err(). This blocks any modification of the index.
ListBlobs(ctx context.Context, fn func(PackedBlob)) error
ListPacksFromIndex(ctx context.Context, packs IDSet) <-chan PackBlobs
// ListPack returns the list of blobs saved in the pack id and the length of
// the pack header.
ListPack(ctx context.Context, id ID, packSize int64) (entries []Blob, hdrSize uint32, err error)
LoadBlob(ctx context.Context, t BlobType, id ID, buf []byte) ([]byte, error)
LoadBlobsFromPack(ctx context.Context, packID ID, blobs []Blob, handleBlobFn func(blob BlobHandle, buf []byte, err error) error) error
// WithUploader starts the necessary workers to upload new blobs. Once the callback returns,
// the workers are stopped and the index is written to the repository. The callback must use
// the passed context and must not keep references to any of its parameters after returning.
WithBlobUploader(ctx context.Context, fn func(ctx context.Context, uploader BlobSaverWithAsync) error) error
// List calls the function fn for each file of type t in the repository.
// When an error is returned by fn, processing stops and List() returns the
// error.
//
// The function fn is called in the same Goroutine List() was called from.
List(ctx context.Context, t FileType, fn func(ID, int64) error) error
// LoadRaw reads all data stored in the backend for the file with id and filetype t.
// If the backend returns data that does not match the id, then the buffer is returned
// along with an error that is a restic.ErrInvalidData error.
LoadRaw(ctx context.Context, t FileType, id ID) (data []byte, err error)
// LoadUnpacked loads and decrypts the file with the given type and ID.
LoadUnpacked(ctx context.Context, t FileType, id ID) (data []byte, err error)
// SaveUnpacked stores a file in the repository. This is restricted to snapshots.
SaveUnpacked(ctx context.Context, t WriteableFileType, buf []byte) (ID, error)
// RemoveUnpacked removes a file from the repository. This is restricted to snapshots.
RemoveUnpacked(ctx context.Context, t WriteableFileType, id ID) error
// StartWarmup creates a new warmup job, requesting the backend to warmup the specified packs.
StartWarmup(ctx context.Context, packs IDSet) (WarmupJob, error)
}
type FileType = backend.FileType
// These are the different data types a backend can store. Only filetypes contained
// in the `WriteableFileType` subset can be modified via the Repository interface.
// All other filetypes are considered internal datastructures of the Repository.
const (
PackFile = backend.PackFile
KeyFile = backend.KeyFile
LockFile = backend.LockFile
SnapshotFile = backend.SnapshotFile
IndexFile = backend.IndexFile
ConfigFile = backend.ConfigFile
)
// WriteableFileType defines the different data types that can be modified via SaveUnpacked or RemoveUnpacked.
type WriteableFileType backend.FileType

const (
	// WriteableSnapshotFile is the WriteableFileType for snapshots.
	WriteableSnapshotFile = WriteableFileType(SnapshotFile)
)

// ToFileType converts w to the corresponding FileType. It panics for values
// outside the WriteableFileType enumeration.
func (w *WriteableFileType) ToFileType() FileType {
	if *w == WriteableSnapshotFile {
		return SnapshotFile
	}
	panic("invalid WriteableFileType")
}
type FileTypes interface {
FileType | WriteableFileType
}
// LoaderUnpacked allows loading a blob not stored in a pack file
type LoaderUnpacked interface {
// Connections returns the maximum number of concurrent backend operations
Connections() uint
LoadUnpacked(ctx context.Context, t FileType, id ID) (data []byte, err error)
}
// SaverUnpacked allows saving a blob not stored in a pack file
type SaverUnpacked[FT FileTypes] interface {
// Connections returns the maximum number of concurrent backend operations
Connections() uint
SaveUnpacked(ctx context.Context, t FT, buf []byte) (ID, error)
}
// RemoverUnpacked allows removing an unpacked blob
type RemoverUnpacked[FT FileTypes] interface {
// Connections returns the maximum number of concurrent backend operations
Connections() uint
RemoveUnpacked(ctx context.Context, t FT, id ID) error
}
type SaverRemoverUnpacked[FT FileTypes] interface {
SaverUnpacked[FT]
RemoverUnpacked[FT]
}
type PackBlobs struct {
PackID ID
Blobs []Blob
}
type TerminalCounterFactory interface {
// NewCounterTerminalOnly returns a new progress counter that is only shown if stdout points to a
// terminal. It is not shown if --quiet or --json is specified.
NewCounterTerminalOnly(description string) *progress.Counter
}
// Lister allows listing files in a backend.
type Lister interface {
List(ctx context.Context, t FileType, fn func(ID, int64) error) error
}
type ListerLoaderUnpacked interface {
Lister
LoaderUnpacked
}
type Unpacked[FT FileTypes] interface {
ListerLoaderUnpacked
SaverUnpacked[FT]
RemoverUnpacked[FT]
}
type ListBlobser interface {
ListBlobs(ctx context.Context, fn func(PackedBlob)) error
}
type BlobLoader interface {
LoadBlob(context.Context, BlobType, ID, []byte) ([]byte, error)
}
type WithBlobUploader interface {
WithBlobUploader(ctx context.Context, fn func(ctx context.Context, uploader BlobSaverWithAsync) error) error
}
type BlobSaverWithAsync interface {
BlobSaver
BlobSaverAsync
}
type BlobSaver interface {
// SaveBlob saves a blob to the repository. ctx must be derived from the context created by WithBlobUploader.
SaveBlob(ctx context.Context, tpe BlobType, buf []byte, id ID, storeDuplicate bool) (newID ID, known bool, sizeInRepo int, err error)
}
type BlobSaverAsync interface {
// SaveBlobAsync saves a blob to the repository. ctx must be derived from the context created by WithBlobUploader.
// The callback is called asynchronously from a different goroutine.
SaveBlobAsync(ctx context.Context, tpe BlobType, buf []byte, id ID, storeDuplicate bool, cb func(newID ID, known bool, sizeInRepo int, err error))
}
// Loader loads a blob from a repository.
type Loader interface {
LoadBlob(context.Context, BlobType, ID, []byte) ([]byte, error)
LookupBlobSize(tpe BlobType, id ID) (uint, bool)
Connections() uint
}
type WarmupJob interface {
// HandleCount returns the number of handles that are currently warming up.
HandleCount() int
// Wait waits for all handles to be warm.
Wait(ctx context.Context) error
}
// FindBlobSet is a set of blob handles used by prune.
type FindBlobSet interface {
Has(bh BlobHandle) bool
Insert(bh BlobHandle)
}
type AssociatedBlobSet interface {
Has(bh BlobHandle) bool
Insert(bh BlobHandle)
Delete(bh BlobHandle)
Len() int
Keys() iter.Seq[BlobHandle]
Intersect(other AssociatedBlobSet) AssociatedBlobSet
Sub(other AssociatedBlobSet) AssociatedBlobSet
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/doc.go | internal/restic/doc.go | // Package restic is the top level package for the restic backup program,
// please see https://github.com/restic/restic for more information.
//
// This package exposes the main objects that are handled in restic.
package restic
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/blob_set.go | internal/restic/blob_set.go | package restic
import (
"fmt"
"sort"
"strings"
)
// BlobSet is a set of blobs.
type BlobSet map[BlobHandle]struct{}
// NewBlobSet returns a new BlobSet, populated with ids.
func NewBlobSet(handles ...BlobHandle) BlobSet {
	s := make(BlobSet, len(handles))
	for _, handle := range handles {
		s[handle] = struct{}{}
	}
	return s
}
// Has returns true iff id is contained in the set.
func (s BlobSet) Has(h BlobHandle) bool {
	_, ok := s[h]
	return ok
}

// Insert adds id to the set.
func (s BlobSet) Insert(h BlobHandle) {
	s[h] = struct{}{}
}

// Delete removes id from the set.
func (s BlobSet) Delete(h BlobHandle) {
	delete(s, h)
}

// Len returns the number of handles in the set.
func (s BlobSet) Len() int {
	return len(s)
}
// Equals reports whether s and other contain exactly the same handles.
func (s BlobSet) Equals(other BlobSet) bool {
	if len(s) != len(other) {
		return false
	}

	// Equal lengths plus subset implies equality.
	for handle := range s {
		if !other.Has(handle) {
			return false
		}
	}
	return true
}
// Merge adds the blobs in other to the current set.
func (s BlobSet) Merge(other BlobSet) {
	for handle := range other {
		s[handle] = struct{}{}
	}
}
// Intersect returns a new set containing the handles that are present in both sets.
func (s BlobSet) Intersect(other BlobSet) BlobSet {
	small, large := s, other
	// Iterating over the smaller set minimizes the number of lookups.
	if len(large) < len(small) {
		small, large = large, small
	}

	result := NewBlobSet()
	for handle := range small {
		if large.Has(handle) {
			result.Insert(handle)
		}
	}
	return result
}
// Sub returns a new set containing all handles that are present in s but not in
// other.
func (s BlobSet) Sub(other BlobSet) BlobSet {
	result := NewBlobSet()
	for handle := range s {
		if _, found := other[handle]; !found {
			result[handle] = struct{}{}
		}
	}
	return result
}
// List returns a sorted slice of all BlobHandle in the set.
func (s BlobSet) List() BlobHandles {
	handles := make(BlobHandles, 0, len(s))
	for handle := range s {
		handles = append(handles, handle)
	}
	sort.Sort(handles)
	return handles
}
// String produces a human-readable representation of ids.
// It is meant for producing error messages,
// so it only returns a summary if ids is long.
func (s BlobSet) String() string {
	const maxelems = 10

	sb := new(strings.Builder)
	sb.WriteByte('{')

	n := 0
	for k := range s {
		if n != 0 {
			sb.WriteByte(' ')
		}
		sb.WriteString(k.String())

		if n++; n == maxelems {
			// n handles have been printed; report how many were omitted.
			// (The previous len(s)-n-1 undercounted by one and printed
			// "(-1 more)" for a set of exactly maxelems elements.)
			if rest := len(s) - n; rest > 0 {
				fmt.Fprintf(sb, " (%d more)", rest)
			}
			break
		}
	}
	sb.WriteByte('}')
	return sb.String()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/ids.go | internal/restic/ids.go | package restic
import (
"encoding/hex"
"strings"
)
// IDs is a slice of IDs that implements sort.Interface and fmt.Stringer.
type IDs []ID

// Len returns the number of IDs (sort.Interface).
func (ids IDs) Len() int {
	return len(ids)
}

// Less orders IDs lexicographically by their raw bytes (sort.Interface).
func (ids IDs) Less(i, j int) bool {
	return string(ids[i][:]) < string(ids[j][:])
}

// Swap exchanges the IDs at positions i and j (sort.Interface).
func (ids IDs) Swap(i, j int) {
	ids[i], ids[j] = ids[j], ids[i]
}
// String renders the IDs as a bracketed, space-separated list of shortened
// hex prefixes, e.g. "[deadbeef cafebabe]".
func (ids IDs) String() string {
	var sb strings.Builder
	// One byte for '[' plus, per ID, a separator and 2*shortStr hex digits.
	sb.Grow(1 + (1+2*shortStr)*len(ids))

	hexbuf := make([]byte, 2*shortStr)
	sb.WriteByte('[')
	for i, id := range ids {
		if i > 0 {
			sb.WriteByte(' ')
		}
		hex.Encode(hexbuf, id[:shortStr])
		sb.Write(hexbuf)
	}
	sb.WriteByte(']')
	return sb.String()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/restic/blob.go | internal/restic/blob.go | package restic
import (
"fmt"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/errors"
)
// Blob is one part of a file or a tree.
type Blob struct {
BlobHandle
Length uint
Offset uint
UncompressedLength uint
}
// String renders the blob's type, shortened ID, offset and lengths for
// debugging output.
func (b Blob) String() string {
	return fmt.Sprintf("<Blob (%v) %v, offset %v, length %v, uncompressed length %v>",
		b.Type, b.ID.Str(), b.Offset, b.Length, b.UncompressedLength)
}

// DataLength returns the plaintext length of the blob's contents.
func (b Blob) DataLength() uint {
	if b.IsCompressed() {
		return b.UncompressedLength
	}
	// Uncompressed blobs only carry crypto overhead on top of the plaintext.
	return uint(crypto.PlaintextLength(int(b.Length)))
}

// IsCompressed reports whether the blob is stored in compressed form.
func (b Blob) IsCompressed() bool {
	return b.UncompressedLength != 0
}
// PackedBlob is a blob stored within a file.
type PackedBlob struct {
Blob
PackID ID
}
// BlobHandle identifies a blob of a given type.
type BlobHandle struct {
ID ID
Type BlobType
}
func (h BlobHandle) String() string {
return fmt.Sprintf("<%s/%s>", h.Type, h.ID.Str())
}
func NewRandomBlobHandle() BlobHandle {
return BlobHandle{ID: NewRandomID(), Type: DataBlob}
}
// BlobType specifies what a blob stored in a pack is.
type BlobType uint8
// These are the blob types that can be stored in a pack.
const (
InvalidBlob BlobType = iota
DataBlob
TreeBlob
NumBlobTypes // Number of types. Must be last in this enumeration.
)
// String returns a human-readable name for the blob type; unknown values are
// rendered with their numeric code.
func (t BlobType) String() string {
	switch t {
	case InvalidBlob:
		return "invalid"
	case DataBlob:
		return "data"
	case TreeBlob:
		return "tree"
	default:
		return fmt.Sprintf("<BlobType %d>", t)
	}
}
// IsMetadata reports whether blobs of this type carry repository metadata
// (currently only tree blobs) rather than file contents.
func (t BlobType) IsMetadata() bool {
	return t == TreeBlob
}
// MarshalJSON encodes the BlobType into JSON ("data" or "tree"); any other
// value is rejected with an error.
func (t BlobType) MarshalJSON() ([]byte, error) {
	switch t {
	case DataBlob:
		return []byte(`"data"`), nil
	case TreeBlob:
		return []byte(`"tree"`), nil
	default:
		return nil, errors.New("unknown blob type")
	}
}
// UnmarshalJSON decodes the BlobType from JSON; only "data" and "tree" are
// accepted.
func (t *BlobType) UnmarshalJSON(buf []byte) error {
	switch string(buf) {
	case `"data"`:
		*t = DataBlob
		return nil
	case `"tree"`:
		*t = TreeBlob
		return nil
	default:
		return errors.New("unknown blob type")
	}
}
// BlobHandles is an ordered list of BlobHandles that implements sort.Interface.
type BlobHandles []BlobHandle
func (h BlobHandles) Len() int {
return len(h)
}
// Less orders handles by comparing their ID bytes lexicographically; the blob
// type is only consulted as a tie-breaker when both IDs are identical
// (sort.Interface).
func (h BlobHandles) Less(i, j int) bool {
	for k, b := range h[i].ID {
		if b == h[j].ID[k] {
			continue
		}
		// First differing byte decides the order.
		return b < h[j].ID[k]
	}
	// IDs are equal; fall back to the type.
	return h[i].Type < h[j].Type
}
func (h BlobHandles) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
}
// String renders the handles as a bracketed, space-separated list.
func (h BlobHandles) String() string {
	elements := make([]string, len(h))
	for i, handle := range h {
		elements[i] = handle.String()
	}
	return fmt.Sprint(elements)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/test/vars.go | internal/test/vars.go | package test
import (
"fmt"
"os"
"strings"
"testing"
)
var (
TestPassword = getStringVar("RESTIC_TEST_PASSWORD", "geheim")
TestCleanupTempDirs = getBoolVar("RESTIC_TEST_CLEANUP", true)
TestTempDir = getStringVar("RESTIC_TEST_TMPDIR", "")
RunIntegrationTest = getBoolVar("RESTIC_TEST_INTEGRATION", true)
RunFuseTest = getBoolVar("RESTIC_TEST_FUSE", true)
TestSFTPPath = getStringVar("RESTIC_TEST_SFTPPATH", "/usr/lib/ssh:/usr/lib/openssh:/usr/libexec")
TestWalkerPath = getStringVar("RESTIC_TEST_PATH", ".")
BenchArchiveDirectory = getStringVar("RESTIC_BENCH_DIR", ".")
TestS3Server = getStringVar("RESTIC_TEST_S3_SERVER", "")
TestRESTServer = getStringVar("RESTIC_TEST_REST_SERVER", "")
TestIntegrationDisallowSkip = getStringVar("RESTIC_TEST_DISALLOW_SKIP", "")
)
func getStringVar(name, defaultValue string) string {
if e := os.Getenv(name); e != "" {
return e
}
return defaultValue
}
func getBoolVar(name string, defaultValue bool) bool {
if e := os.Getenv(name); e != "" {
switch e {
case "1", "true":
return true
case "0", "false":
return false
default:
fmt.Fprintf(os.Stderr, "invalid value for variable %q, using default\n", name)
}
}
return defaultValue
}
// SkipDisallowed fails the test if it needs to run. The environment
// variable RESTIC_TEST_DISALLOW_SKIP contains a comma-separated list of test
// names that must be run. If name is in this list, the test is marked as
// failed.
func SkipDisallowed(t testing.TB, name string) {
	for _, entry := range strings.Split(TestIntegrationDisallowSkip, ",") {
		if entry != name {
			continue
		}
		t.Fatalf("test %v is in list of tests that need to run ($RESTIC_TEST_DISALLOW_SKIP)", name)
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/test/doc.go | internal/test/doc.go | // Package test provides helper functions for writing tests for restic.
package test
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/test/helpers.go | internal/test/helpers.go | package test
import (
"compress/bzip2"
"compress/gzip"
"fmt"
"io"
"math/rand"
"os"
"os/exec"
"path/filepath"
"reflect"
"testing"
"github.com/restic/restic/internal/errors"
)
// Assert fails the test if the condition is false. msg and v are passed to
// Fatalf, so msg may contain format verbs.
func Assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
	tb.Helper()
	if condition {
		return
	}
	tb.Fatalf("\033[31m"+msg+"\033[39m\n\n", v...)
}
// OK fails the test if an err is not nil.
func OK(tb testing.TB, err error) {
	tb.Helper()
	if err == nil {
		return
	}
	tb.Fatalf("\033[31munexpected error: %+v\033[39m\n\n", err)
}
// OKs fails the test if any error from errs is not nil. Every non-nil error
// is logged before the test is aborted, so all failures are visible at once.
func OKs(tb testing.TB, errs []error) {
	tb.Helper()
	errFound := false
	for _, err := range errs {
		if err != nil {
			errFound = true
			// Log the error value itself rather than err.Error(): passing a
			// plain string to %+v discards the extended formatting (e.g.
			// stack traces) that wrapped errors provide, and is inconsistent
			// with OK above.
			tb.Logf("\033[31munexpected error: %+v\033[39m\n\n", err)
		}
	}
	if errFound {
		tb.FailNow()
	}
}
// Equals fails the test if exp is not equal to act.
// msg is optional message to be printed, first param being format string and rest being arguments.
func Equals(tb testing.TB, exp, act interface{}, msgs ...string) {
	tb.Helper()
	if reflect.DeepEqual(exp, act) {
		return
	}

	var msgString string
	switch {
	case len(msgs) == 1:
		msgString = msgs[0]
	case len(msgs) > 1:
		// The first entry is the format string, the rest are its arguments.
		args := make([]interface{}, 0, len(msgs)-1)
		for _, m := range msgs[1:] {
			args = append(args, m)
		}
		msgString = fmt.Sprintf(msgs[0], args...)
	}
	tb.Fatalf("\033[31m\n\n\t"+msgString+"\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", exp, act)
}
// Random returns count bytes of deterministic pseudo-random data derived from
// seed.
func Random(seed, count int) []byte {
	out := make([]byte, count)
	rnd := rand.New(rand.NewSource(int64(seed)))

	// Fill eight bytes at a time from one Int63 value, little-endian,
	// truncating the final chunk at the end of the buffer.
	for i := 0; i < len(out); i += 8 {
		val := rnd.Int63()
		for j := 0; j < 8 && i+j < len(out); j++ {
			out[i+j] = byte(val >> (8 * uint(j)))
		}
	}

	return out
}
// SetupTarTestFixture extracts the tarFile to outputDir. Decompression is
// chosen by file extension (".gz" or ".bzip2"); any other extension is
// treated as a plain tar stream. Extraction shells out to the system "tar"
// binary, so the test fails if tar is unavailable.
func SetupTarTestFixture(t testing.TB, outputDir, tarFile string) {
	t.Helper()
	input, err := os.Open(tarFile)
	OK(t, err)
	defer func() {
		OK(t, input.Close())
	}()

	var rd io.Reader
	switch filepath.Ext(tarFile) {
	case ".gz":
		r, err := gzip.NewReader(input)
		OK(t, err)
		defer func() {
			OK(t, r.Close())
		}()
		rd = r
	case ".bzip2":
		rd = bzip2.NewReader(input)
	default:
		rd = input
	}

	// Feed the (possibly decompressed) stream to an external tar via stdin.
	cmd := exec.Command("tar", "xf", "-")
	cmd.Dir = outputDir
	cmd.Stdin = rd
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	err = cmd.Run()
	if err != nil {
		t.Fatalf("running command %v %v failed: %v", cmd.Path, cmd.Args, err)
	}
}
// Env creates a test environment and extracts the repository fixture.
// Returned is the repo path and a cleanup function.
func Env(t testing.TB, repoFixture string) (repodir string, cleanup func()) {
	t.Helper()
	tempdir, err := os.MkdirTemp(TestTempDir, "restic-test-env-")
	OK(t, err)

	// Open and close the fixture first to fail early with a clear error when
	// it is missing, before shelling out to tar.
	fd, err := os.Open(repoFixture)
	if err != nil {
		t.Fatal(err)
	}
	OK(t, fd.Close())

	SetupTarTestFixture(t, tempdir, repoFixture)

	return filepath.Join(tempdir, "repo"), func() {
		if !TestCleanupTempDirs {
			t.Logf("leaving temporary directory %v used for test", tempdir)
			return
		}
		RemoveAll(t, tempdir)
	}
}
func isFile(fi os.FileInfo) bool {
return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
}
// ResetReadOnly recursively resets the read-only flag recursively for dir.
// This is mainly used for tests on Windows, which is unable to delete a file
// set read-only.
func ResetReadOnly(t testing.TB, dir string) {
	t.Helper()
	err := filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
		if fi == nil {
			// Walk could not stat the entry; propagate its error (may be nil).
			return err
		}

		if fi.IsDir() {
			return os.Chmod(path, 0777)
		}

		if isFile(fi) {
			return os.Chmod(path, 0666)
		}

		// Symlinks, devices etc. are left untouched.
		return nil
	})
	// A directory that has already vanished counts as success.
	if errors.Is(err, os.ErrNotExist) {
		err = nil
	}
	OK(t, err)
}
// RemoveAll recursively resets the read-only flag of all files and dirs and
// afterwards uses os.RemoveAll() to remove the path.
func RemoveAll(t testing.TB, path string) {
	t.Helper()
	ResetReadOnly(t, path)

	err := os.RemoveAll(path)
	if errors.Is(err, os.ErrNotExist) {
		// An already-missing path counts as successfully removed.
		return
	}
	OK(t, err)
}
// TempDir returns a temporary directory that is removed by t.Cleanup,
// except if TestCleanupTempDirs is set to false.
func TempDir(t testing.TB) string {
	t.Helper()
	// Create under TestTempDir (which may be "", meaning the OS default).
	tempdir, err := os.MkdirTemp(TestTempDir, "restic-test-")
	if err != nil {
		t.Fatal(err)
	}

	t.Cleanup(func() {
		if !TestCleanupTempDirs {
			t.Logf("leaving temporary directory %v used for test", tempdir)
			return
		}
		RemoveAll(t, tempdir)
	})

	return tempdir
}
// Chdir changes the current directory to dest.
// The function back returns to the previous directory.
func Chdir(t testing.TB, dest string) (back func()) {
t.Helper()
prev, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
t.Logf("chdir to %v", dest)
err = os.Chdir(dest)
if err != nil {
t.Fatal(err)
}
return func() {
t.Helper()
t.Logf("chdir back to %v", prev)
err = os.Chdir(prev)
if err != nil {
t.Fatal(err)
}
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/bloblru/cache.go | internal/bloblru/cache.go | package bloblru
import (
"sync"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
// Crude estimate of the overhead per blob: a SHA-256, a linked list node
// and some pointers. See comment in Cache.add.
const overhead = len(restic.ID{}) + 64

// A Cache is a fixed-size LRU cache of blob contents.
// It is safe for concurrent access.
type Cache struct {
	mu sync.Mutex
	c  *simplelru.LRU[restic.ID, []byte]

	free, size int // Current and max capacity, in bytes.

	// inProgress tracks blob IDs whose content is currently being computed by
	// GetOrCompute; other callers wait on the channel until it is closed.
	inProgress map[restic.ID]chan struct{}
}
// New constructs a blob cache that stores at most size bytes worth of blobs.
func New(size int) *Cache {
	c := &Cache{
		free:       size,
		size:       size,
		inProgress: make(map[restic.ID]chan struct{}),
	}

	// NewLRU wants us to specify some max. number of entries, else it errors.
	// The actual maximum will be smaller than size/overhead, because we
	// evict entries (RemoveOldest in add) to maintain our size bound.
	maxEntries := size / overhead
	lru, err := simplelru.NewLRU[restic.ID, []byte](maxEntries, c.evict)
	if err != nil {
		panic(err) // Can only be maxEntries <= 0.
	}
	c.c = lru

	return c
}
// Add adds key id with value blob to c.
// It may return an evicted buffer for reuse.
func (c *Cache) Add(id restic.ID, blob []byte) (old []byte) {
	debug.Log("bloblru.Cache: add %v", id)

	// Account by capacity, not length: cap reflects the memory actually held.
	size := cap(blob) + overhead
	if size > c.size {
		// A blob larger than the whole cache is never stored.
		return
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	if c.c.Contains(id) { // Doesn't update the recency list.
		return
	}

	// Evict from the LRU end until the new blob fits; the evict callback
	// credits c.free for each removed entry.
	// This loop takes at most min(maxEntries, maxchunksize/overhead)
	// iterations.
	for size > c.free {
		_, b, _ := c.c.RemoveOldest()
		if cap(b) > cap(old) {
			// We can only return one buffer, so pick the largest.
			old = b
		}
	}

	c.c.Add(id, blob)
	c.free -= size

	return old
}
// Get returns the cached blob for id and reports whether it was present,
// marking it as most recently used.
func (c *Cache) Get(id restic.ID) ([]byte, bool) {
	c.mu.Lock()
	data, hit := c.c.Get(id)
	c.mu.Unlock()

	debug.Log("bloblru.Cache: get %v, hit %v", id, hit)
	return data, hit
}
// GetOrCompute returns the blob for id from the cache, invoking compute and
// caching the result on a miss. Concurrent calls for the same id run compute
// only once; the other callers block until the result is available. An error
// from compute is returned as-is and nothing is cached.
func (c *Cache) GetOrCompute(id restic.ID, compute func() ([]byte, error)) ([]byte, error) {
	// check if already cached
	blob, ok := c.Get(id)
	if ok {
		return blob, nil
	}

	// check for parallel download or start our own
	finish := make(chan struct{})
	c.mu.Lock()
	waitForResult, isComputing := c.inProgress[id]
	if !isComputing {
		c.inProgress[id] = finish
	}
	c.mu.Unlock()

	if isComputing {
		// wait for result of parallel download
		<-waitForResult
	} else {
		// remove progress channel once finished here
		defer func() {
			c.mu.Lock()
			delete(c.inProgress, id)
			c.mu.Unlock()
			close(finish)
		}()
	}

	// try again. This is necessary independent of whether isComputing is true or not.
	// The calls to `c.Get()` and checking/adding the entry in `c.inProgress` are not atomic,
	// thus the item might have been computed in the meantime.
	// The following scenario would compute() the value multiple times otherwise:
	// Goroutine A does not find a value in the initial call to `c.Get`, then goroutine B
	// takes over, caches the computed value and cleans up its channel in c.inProgress.
	// Then goroutine A continues, does not detect a parallel computation and would try
	// to call compute() again.
	blob, ok = c.Get(id)
	if ok {
		return blob, nil
	}

	// download it
	blob, err := compute()
	if err == nil {
		c.Add(id, blob)
	}

	return blob, err
}
// evict is the simplelru eviction callback: it returns the evicted blob's
// bytes (capacity plus bookkeeping overhead) to the cache's free budget.
// NOTE(review): it mutates c.free without taking c.mu itself; it appears to
// rely on callers (e.g. Add) holding the lock — confirm before reuse.
func (c *Cache) evict(key restic.ID, blob []byte) {
	debug.Log("bloblru.Cache: evict %v, %d bytes", key, cap(blob))
	c.free += cap(blob) + overhead
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/bloblru/cache_test.go | internal/bloblru/cache_test.go | package bloblru
import (
"context"
"fmt"
"math/rand"
"testing"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
"golang.org/x/sync/errgroup"
)
// TestCache checks capacity-based accounting, LRU eviction, rejection of
// oversized blobs, and that the byte budget is fully restored after removal.
func TestCache(t *testing.T) {
	var id1, id2, id3 restic.ID
	id1[0] = 1
	id2[0] = 2
	id3[0] = 3

	const (
		kiB       = 1 << 10
		cacheSize = 64*kiB + 3*overhead
	)

	c := New(cacheSize)

	addAndCheck := func(id restic.ID, exp []byte) {
		c.Add(id, exp)
		blob, ok := c.Get(id)
		rtest.Assert(t, ok, "blob %v added but not found in cache", id)
		rtest.Equals(t, &exp[0], &blob[0])
		rtest.Equals(t, exp, blob)
	}

	// Our blobs have len 1 but larger cap. The cache should check the cap,
	// since it more reliably indicates the amount of memory kept alive.
	addAndCheck(id1, make([]byte, 1, 32*kiB))
	addAndCheck(id2, make([]byte, 1, 30*kiB))
	addAndCheck(id3, make([]byte, 1, 10*kiB))

	_, ok := c.Get(id2)
	rtest.Assert(t, ok, "blob %v not present", id2)
	_, ok = c.Get(id1)
	rtest.Assert(t, !ok, "blob %v present, but should have been evicted", id1)

	c.Add(id1, make([]byte, 1+c.size))
	_, ok = c.Get(id1)
	// Fix: the message contains a %v verb but no argument was supplied,
	// which would render "%!v(MISSING)"; pass id1 explicitly.
	rtest.Assert(t, !ok, "blob %v too large but still added to cache", id1)

	c.c.Remove(id1)
	c.c.Remove(id3)
	c.c.Remove(id2)

	rtest.Equals(t, cacheSize, c.size)
	rtest.Equals(t, cacheSize, c.free)
}
// TestCacheGetOrCompute verifies error propagation, caching of computed
// values, and that concurrent requests for one ID invoke the compute
// function exactly once.
func TestCacheGetOrCompute(t *testing.T) {
	var id1, id2 restic.ID
	id1[0] = 1
	id2[0] = 2

	const (
		kiB       = 1 << 10
		cacheSize = 64*kiB + 3*overhead
	)

	c := New(cacheSize)

	// An error from compute must be passed through and must not be cached.
	e := fmt.Errorf("broken")
	_, err := c.GetOrCompute(id1, func() ([]byte, error) {
		return nil, e
	})
	rtest.Equals(t, e, err, "expected error was not returned")

	// fill buffer
	data1 := make([]byte, 10*kiB)
	blob, err := c.GetOrCompute(id1, func() ([]byte, error) {
		return data1, nil
	})
	rtest.OK(t, err)
	rtest.Equals(t, &data1[0], &blob[0], "wrong buffer returned")

	// now the buffer should be returned without calling the compute function
	blob, err = c.GetOrCompute(id1, func() ([]byte, error) {
		return nil, e
	})
	rtest.OK(t, err)
	rtest.Equals(t, &data1[0], &blob[0], "wrong buffer returned")

	// check concurrency
	wg, _ := errgroup.WithContext(context.TODO())
	wait := make(chan struct{})
	calls := make(chan struct{}, 10)

	// start a bunch of blocking goroutines
	for i := 0; i < 10; i++ {
		wg.Go(func() error {
			buf, err := c.GetOrCompute(id2, func() ([]byte, error) {
				// block to ensure that multiple requests are waiting in parallel
				<-wait
				calls <- struct{}{}
				return make([]byte, 42), nil
			})
			if len(buf) != 42 {
				return fmt.Errorf("wrong buffer")
			}
			return err
		})
	}

	close(wait)
	rtest.OK(t, wg.Wait())
	close(calls)
	// Count how often the compute callback actually ran.
	count := 0
	for range calls {
		count++
	}
	rtest.Equals(t, 1, count, "expected exactly one call of the compute function")
}
// BenchmarkAdd measures Add with a rotating set of random IDs and sizes,
// exercising both insertion and eviction under the byte budget.
func BenchmarkAdd(b *testing.B) {
	const (
		MiB    = 1 << 20
		nblobs = 64
	)

	c := New(64 * MiB)

	buf := make([]byte, 8*MiB)
	ids := make([]restic.ID, nblobs)
	sizes := make([]int, nblobs)

	// Fixed seed keeps the workload identical across runs.
	r := rand.New(rand.NewSource(100))
	for i := range ids {
		r.Read(ids[i][:])
		sizes[i] = r.Intn(8 * MiB)
	}

	b.ResetTimer()
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		c.Add(ids[i%nblobs], buf[:sizes[i%nblobs]])
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/archiver.go | internal/archiver/archiver.go | package archiver
import (
"context"
"fmt"
"os"
"path"
"runtime"
"sort"
"strings"
"sync"
"time"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/feature"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
"golang.org/x/sync/errgroup"
)
// SelectByNameFunc returns true for all items that should be included (files and
// dirs). If false is returned, files are ignored and dirs are not even walked.
type SelectByNameFunc func(item string) bool
// SelectFunc returns true for all items that should be included (files and
// dirs). If false is returned, files are ignored and dirs are not even walked.
type SelectFunc func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool
// ErrorFunc is called when an error during archiving occurs. When nil is
// returned, the archiver continues, otherwise it aborts and passes the error
// up the call stack.
type ErrorFunc func(file string, err error) error
// ItemStats collects some statistics about a particular file or directory.
type ItemStats struct {
DataBlobs int // number of new data blobs added for this item
DataSize uint64 // sum of the sizes of all new data blobs
DataSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead)
TreeBlobs int // number of new tree blobs added for this item
TreeSize uint64 // sum of the sizes of all new tree blobs
TreeSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead)
}
type ChangeStats struct {
New uint
Changed uint
Unchanged uint
}
type Summary struct {
BackupStart time.Time
BackupEnd time.Time
Files, Dirs ChangeStats
ProcessedBytes uint64
ItemStats
}
// Add adds other to the current ItemStats, accumulating all data- and
// tree-blob counters and byte totals field by field.
func (s *ItemStats) Add(other ItemStats) {
	s.DataBlobs += other.DataBlobs
	s.DataSize += other.DataSize
	s.DataSizeInRepo += other.DataSizeInRepo
	s.TreeBlobs += other.TreeBlobs
	s.TreeSize += other.TreeSize
	s.TreeSizeInRepo += other.TreeSizeInRepo
}
// ToNoder returns a data.Node for a File.
type ToNoder interface {
ToNode(ignoreXattrListError bool, warnf func(format string, args ...any)) (*data.Node, error)
}
type archiverRepo interface {
restic.Loader
restic.WithBlobUploader
restic.SaverUnpacked[restic.WriteableFileType]
Config() restic.Config
}
// Archiver saves a directory structure to the repo.
//
// An Archiver has a number of worker goroutines handling saving the different
// data structures to the repository, the details are implemented by the
// fileSaver, blobSaver, and treeSaver types.
//
// The main goroutine (the one calling Snapshot()) traverses the directory tree
// and delegates all work to these worker pools. They return a futureNode which
// can be resolved later, by calling Wait() on it.
type Archiver struct {
	Repo         archiverRepo
	SelectByName SelectByNameFunc // path-only filter, applied before any lstat call
	Select       SelectFunc       // filter that may inspect the file's metadata
	FS           fs.FS
	Options      Options

	// worker pools; only valid between runWorkers and stopWorkers
	fileSaver *fileSaver
	treeSaver *treeSaver

	mu      sync.Mutex // guards summary
	summary *Summary   // statistics of the currently running Snapshot() call

	// Error is called for all errors that occur during backup.
	Error ErrorFunc

	// CompleteItem is called for all files and dirs once they have been
	// processed successfully. The parameter item contains the path as it will
	// be in the snapshot after saving. s contains some statistics about this
	// particular file/dir.
	//
	// Once reading a file has completed successfully (but not saving it yet),
	// CompleteItem will be called with current == nil.
	//
	// CompleteItem may be called asynchronously from several different
	// goroutines!
	CompleteItem func(item string, previous, current *data.Node, s ItemStats, d time.Duration)

	// StartFile is called when a file is being processed by a worker.
	StartFile func(filename string)

	// CompleteBlob is called for all saved blobs for files.
	CompleteBlob func(bytes uint64)

	// WithAtime configures if the access time for files and directories should
	// be saved. Enabling it may result in much metadata, so it's off by
	// default.
	WithAtime bool

	// Flags controlling change detection. See doc/040_backup.rst for details.
	ChangeIgnoreFlags uint
}

// Flags for the ChangeIgnoreFlags bitfield.
const (
	ChangeIgnoreCtime = 1 << iota // skip the ctime comparison in fileChanged
	ChangeIgnoreInode             // skip the inode comparison in fileChanged
)
// Options is used to configure the archiver.
type Options struct {
	// ReadConcurrency sets how many files are read in concurrently. If
	// it's set to zero, at most two files are read in concurrently (which
	// turned out to be a good default for most situations).
	ReadConcurrency uint

	// SaveTreeConcurrency sets how many trees are marshalled and saved to the
	// repo concurrently.
	SaveTreeConcurrency uint
}

// ApplyDefaults returns a copy of o with the default options set for all unset
// fields.
func (o Options) ApplyDefaults() Options {
	if o.ReadConcurrency == 0 {
		// two is a sweet spot for almost all situations. We've done some
		// experiments documented here:
		// https://github.com/borgbackup/borg/issues/3500
		const defaultReadConcurrency = 2
		o.ReadConcurrency = defaultReadConcurrency
	}

	if o.SaveTreeConcurrency == 0 {
		// can either wait for a file, wait for a tree, serialize a tree or wait for saveblob
		// the last two are cpu-bound and thus mutually exclusive.
		// Also allow waiting for FileReadConcurrency files, this is the maximum of files
		// which currently can be in progress. The main backup loop blocks when trying to queue
		// more files to read.
		o.SaveTreeConcurrency = o.ReadConcurrency + uint(runtime.GOMAXPROCS(0))
	}

	return o
}
// New initializes a new archiver. All callbacks are pre-populated with no-op
// functions so callers only need to set the ones they care about; filters
// default to selecting everything.
func New(repo archiverRepo, filesystem fs.FS, opts Options) *Archiver {
	return &Archiver{
		Repo:         repo,
		SelectByName: func(_ string) bool { return true },
		Select:       func(_ string, _ *fs.ExtendedFileInfo, _ fs.FS) bool { return true },
		FS:           filesystem,
		Options:      opts.ApplyDefaults(),

		CompleteItem: func(string, *data.Node, *data.Node, ItemStats, time.Duration) {},
		StartFile:    func(string) {},
		CompleteBlob: func(uint64) {},
	}
}
// error calls arch.Error if it is set and the error is different from context.Canceled.
// The item path is prepended to the error message if it is not already part of
// it. The (possibly filtered) error is returned.
func (arch *Archiver) error(item string, err error) error {
	switch {
	case err == nil, arch.Error == nil:
		return err
	case err == context.Canceled:
		// cancellation must never be swallowed by the handler
		return err
	}

	// not all errors include the filepath, thus add it if it is missing
	if !strings.Contains(err.Error(), item) {
		err = fmt.Errorf("%v: %w", item, err)
	}

	filtered := arch.Error(item, err)
	if filtered != err {
		debug.Log("item %v: error was filtered by handler, before: %q, after: %v", item, err, filtered)
	}
	return filtered
}
// trackItem forwards the completion event to CompleteItem and folds the item's
// statistics into the run summary. Safe for concurrent use (summary access is
// guarded by arch.mu).
func (arch *Archiver) trackItem(item string, previous, current *data.Node, s ItemStats, d time.Duration) {
	arch.CompleteItem(item, previous, current, s, d)

	arch.mu.Lock()
	defer arch.mu.Unlock()

	arch.summary.ItemStats.Add(s)

	if current == nil {
		// last item or an error occurred
		return
	}
	arch.summary.ProcessedBytes += current.Size

	// pick the counter bucket matching the node type
	var counters *ChangeStats
	switch current.Type {
	case data.NodeTypeDir:
		counters = &arch.summary.Dirs
	case data.NodeTypeFile:
		counters = &arch.summary.Files
	default:
		return
	}

	switch {
	case previous == nil:
		counters.New++
	case previous.Equals(*current):
		counters.Unchanged++
	default:
		counters.Changed++
	}
}
// nodeFromFileInfo returns the restic node from an os.FileInfo.
// The node's name is overwritten with the basename of snPath so it matches the
// name used within the snapshot. If metadata was collected only partially, the
// incomplete node is returned together with a (possibly filtered) error.
func (arch *Archiver) nodeFromFileInfo(snPath, filename string, meta ToNoder, ignoreXattrListError bool) (*data.Node, error) {
	node, err := meta.ToNode(ignoreXattrListError, func(format string, args ...any) {
		// forward warnings from metadata collection to the error handler
		_ = arch.error(filename, fmt.Errorf(format, args...))
	})
	// node does not exist. This prevents all further processing for this file.
	// If an error and a node are returned, then preserve as much data as possible (see below).
	if err != nil && node == nil {
		return nil, err
	}
	if !arch.WithAtime {
		// access times are not recorded by default; reuse mtime instead
		node.AccessTime = node.ModTime
	}
	if feature.Flag.Enabled(feature.DeviceIDForHardlinks) {
		if node.Links == 1 || node.Type == data.NodeTypeDir {
			// the DeviceID is only necessary for hardlinked files
			// when using subvolumes or snapshots their deviceIDs tend to change which causes
			// restic to upload new tree blobs
			node.DeviceID = 0
		}
	}
	// overwrite name to match that within the snapshot
	node.Name = path.Base(snPath)

	// do not filter error for nodes of irregular or invalid type
	if node.Type != data.NodeTypeIrregular && node.Type != data.NodeTypeInvalid && err != nil {
		err = fmt.Errorf("incomplete metadata for %v: %w", filename, err)
		return node, arch.error(filename, err)
	}
	return node, err
}
// loadSubtree tries to load the subtree referenced by node. In case of an error, nil is returned.
// If there is no node to load, then nil is returned without an error.
func (arch *Archiver) loadSubtree(ctx context.Context, node *data.Node) (*data.Tree, error) {
	// nothing to load for non-directories or directories without a subtree id
	if node == nil || node.Type != data.NodeTypeDir || node.Subtree == nil {
		return nil, nil
	}

	subtree, err := data.LoadTree(ctx, arch.Repo, *node.Subtree)
	if err == nil {
		return subtree, nil
	}

	debug.Log("unable to load tree %v: %v", node.Subtree.Str(), err)
	// a tree in the repository is not readable -> warn the user
	return nil, arch.wrapLoadTreeError(*node.Subtree, err)
}
// wrapLoadTreeError augments a tree load failure with a hint whether the tree
// is at least known to the index (damaged pack) or missing entirely (damaged
// index).
func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error {
	if _, known := arch.Repo.LookupBlobSize(restic.TreeBlob, id); !known {
		return errors.Errorf("tree %v is not known; the repository could be damaged, run `repair index` to try to repair it", id)
	}
	return errors.Errorf("tree %v could not be loaded; the repository could be damaged: %v", id, err)
}
// saveDir stores a directory in the repo and returns the node. snPath is the
// path within the current snapshot. Entries that the error handler chooses to
// ignore, or that are excluded by the filters, are silently skipped; the
// remaining entries are queued on the treeSaver worker pool.
func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, meta fs.File, previous *data.Tree, complete fileCompleteFunc) (d futureNode, err error) {
	debug.Log("%v %v", snPath, dir)

	treeNode, names, err := arch.dirToNodeAndEntries(snPath, dir, meta)
	if err != nil {
		return futureNode{}, err
	}

	nodes := make([]futureNode, 0, len(names))

	for _, name := range names {
		// test if context has been cancelled
		if ctx.Err() != nil {
			debug.Log("context has been cancelled, aborting")
			return futureNode{}, ctx.Err()
		}

		pathname := arch.FS.Join(dir, name)
		// look up the entry in the parent snapshot's tree for change detection
		oldNode := previous.Find(name)
		snItem := join(snPath, name)
		fn, excluded, err := arch.save(ctx, snItem, pathname, oldNode)

		// return error early if possible
		if err != nil {
			// the handler may decide to ignore the failure for this entry
			err = arch.error(pathname, err)
			if err == nil {
				// ignore error
				continue
			}

			return futureNode{}, err
		}

		if excluded {
			continue
		}

		nodes = append(nodes, fn)
	}

	// hand the assembled directory over to the tree saver worker pool
	fn := arch.treeSaver.Save(ctx, snPath, dir, treeNode, nodes, complete)

	return fn, nil
}
// dirToNodeAndEntries builds the node for a directory and returns its entry
// names in sorted (deterministic) order.
func (arch *Archiver) dirToNodeAndEntries(snPath, dir string, meta fs.File) (node *data.Node, names []string, err error) {
	if err = meta.MakeReadable(); err != nil {
		return nil, nil, fmt.Errorf("openfile for readdirnames failed: %w", err)
	}

	if node, err = arch.nodeFromFileInfo(snPath, dir, meta, false); err != nil {
		return nil, nil, err
	}
	if node.Type != data.NodeTypeDir {
		return nil, nil, fmt.Errorf("directory %q changed type, refusing to archive", snPath)
	}

	if names, err = meta.Readdirnames(-1); err != nil {
		return nil, nil, fmt.Errorf("readdirnames %v failed: %w", dir, err)
	}
	sort.Strings(names)

	return node, names, nil
}
// futureNode holds a reference to a channel that returns a FutureNodeResult
// or a reference to an already existing result. If the result is available
// immediately, then storing a reference directly requires less memory than
// using the indirection via a channel.
type futureNode struct {
	ch  <-chan futureNodeResult // pending result, nil once consumed
	res *futureNodeResult       // immediate result, nil once consumed
}

// futureNodeResult is the eventual outcome of saving a single item.
type futureNodeResult struct {
	snPath, target string // path within the snapshot and on the filesystem

	node  *data.Node
	stats ItemStats
	err   error
}
// newFutureNode creates a futureNode backed by a buffered channel; the
// returned send-only channel receives the eventual result.
func newFutureNode() (futureNode, chan<- futureNodeResult) {
	results := make(chan futureNodeResult, 1)
	return futureNode{ch: results}, results
}

// newFutureNodeWithResult wraps an already-known result in a futureNode.
func newFutureNodeWithResult(res futureNodeResult) futureNode {
	return futureNode{res: &res}
}
// take returns the result, blocking until it is available or ctx is cancelled.
// The stored result/channel reference is released after the first call.
func (fn *futureNode) take(ctx context.Context) futureNodeResult {
	if r := fn.res; r != nil {
		// free result
		fn.res = nil
		return *r
	}

	select {
	case res, ok := <-fn.ch:
		if ok {
			// free channel
			fn.ch = nil
			return res
		}
	case <-ctx.Done():
		return futureNodeResult{err: ctx.Err()}
	}
	return futureNodeResult{err: errors.Errorf("no result")}
}
// allBlobsPresent checks if all blobs (contents) of the given node are
// present in the index.
func (arch *Archiver) allBlobsPresent(previous *data.Node) bool {
	// check if all blobs are contained in index
	for _, id := range previous.Content {
		_, found := arch.Repo.LookupBlobSize(restic.DataBlob, id)
		if !found {
			return false
		}
	}
	return true
}
// save saves a target (file or directory) to the repo. If the item is
// excluded, this function returns a nil node and error, with excluded set to
// true.
//
// Errors and completion needs to be handled by the caller.
//
// snPath is the path within the current snapshot.
func (arch *Archiver) save(ctx context.Context, snPath, target string, previous *data.Node) (fn futureNode, excluded bool, err error) {
	start := time.Now()

	debug.Log("%v target %q, previous %v", snPath, target, previous)
	abstarget, err := arch.FS.Abs(target)
	if err != nil {
		return futureNode{}, false, err
	}

	// filterError runs err through the error handler; if the handler swallows
	// it, the item is reported as excluded instead of failing the backup
	filterError := func(err error) (futureNode, bool, error) {
		err = arch.error(abstarget, err)
		if err != nil {
			return futureNode{}, false, errors.WithStack(err)
		}
		return futureNode{}, true, nil
	}
	// filterNotExist drops "does not exist" errors
	filterNotExist := func(err error) error {
		if errors.Is(err, os.ErrNotExist) {
			return nil
		}

		return err
	}

	// exclude files by path before running Lstat to reduce number of lstat calls
	if !arch.SelectByName(abstarget) {
		debug.Log("%v is excluded by path", target)
		return futureNode{}, true, nil
	}

	meta, err := arch.FS.OpenFile(target, fs.O_NOFOLLOW, true)
	if err != nil {
		debug.Log("open metadata for %v returned error: %v", target, err)
		// ignore if file disappeared since it was returned by readdir
		return filterError(filterNotExist(err))
	}
	// ownership of meta passes to fileSaver.Save for regular files (see
	// closeFile = false below); otherwise it is closed here on return
	closeFile := true
	defer func() {
		if closeFile {
			cerr := meta.Close()
			if err == nil {
				err = cerr
			}
		}
	}()

	// get file info and run remaining select functions that require file information
	fi, err := meta.Stat()
	if err != nil {
		debug.Log("lstat() for %v returned error: %v", target, err)
		// ignore if file disappeared since it was returned by readdir
		return filterError(filterNotExist(err))
	}
	if !arch.Select(abstarget, fi, arch.FS) {
		debug.Log("%v is excluded", target)
		return futureNode{}, true, nil
	}

	switch {
	case fi.Mode.IsRegular():
		debug.Log(" %v regular file", target)

		// check if the file has not changed before performing a fopen operation (more expensive, specially
		// in network filesystems)
		if previous != nil && !fileChanged(fi, previous, arch.ChangeIgnoreFlags) {
			if arch.allBlobsPresent(previous) {
				debug.Log("%v hasn't changed, using old list of blobs", target)
				arch.trackItem(snPath, previous, previous, ItemStats{}, time.Since(start))
				arch.CompleteBlob(previous.Size)
				node, err := arch.nodeFromFileInfo(snPath, target, meta, false)
				if err != nil {
					return futureNode{}, false, err
				}

				// copy list of blobs
				node.Content = previous.Content

				fn = newFutureNodeWithResult(futureNodeResult{
					snPath: snPath,
					target: target,
					node:   node,
				})
				return fn, false, nil
			}

			debug.Log("%v hasn't changed, but contents are missing!", target)
			// There are contents missing - inform user!
			err := errors.Errorf("parts of %v not found in the repository index; storing the file again", target)
			err = arch.error(abstarget, err)
			if err != nil {
				return futureNode{}, false, err
			}
		}

		// reopen file and do an fstat() on the open file to check it is still
		// a file (and has not been exchanged for e.g. a symlink)
		err := meta.MakeReadable()
		if err != nil {
			debug.Log("MakeReadable() for %v returned error: %v", target, err)
			return filterError(err)
		}

		fi, err := meta.Stat()
		if err != nil {
			debug.Log("stat() on opened file %v returned error: %v", target, err)
			return filterError(err)
		}

		// make sure it's still a file
		if !fi.Mode.IsRegular() {
			err = errors.Errorf("file %q changed type, refusing to archive", target)
			return filterError(err)
		}

		closeFile = false

		// Save will close the file, we don't need to do that
		fn = arch.fileSaver.Save(ctx, snPath, target, meta, func() {
			arch.StartFile(snPath)
		}, func() {
			// reading finished, but the node has not been saved yet
			arch.trackItem(snPath, nil, nil, ItemStats{}, 0)
		}, func(node *data.Node, stats ItemStats) {
			arch.trackItem(snPath, previous, node, stats, time.Since(start))
		})

	case fi.Mode.IsDir():
		debug.Log(" %v dir", target)

		snItem := snPath + "/"
		oldSubtree, err := arch.loadSubtree(ctx, previous)
		if err != nil {
			err = arch.error(abstarget, err)
		}
		if err != nil {
			return futureNode{}, false, err
		}

		fn, err = arch.saveDir(ctx, snPath, target, meta, oldSubtree,
			func(node *data.Node, stats ItemStats) {
				arch.trackItem(snItem, previous, node, stats, time.Since(start))
			})
		if err != nil {
			debug.Log("SaveDir for %v returned error: %v", snPath, err)
			return futureNode{}, false, err
		}

	case fi.Mode&os.ModeSocket > 0:
		debug.Log(" %v is a socket, ignoring", target)
		return futureNode{}, true, nil

	default:
		// symlinks, devices, fifos etc. are stored as metadata-only nodes
		debug.Log(" %v other", target)

		node, err := arch.nodeFromFileInfo(snPath, target, meta, false)
		if err != nil {
			return futureNode{}, false, err
		}
		fn = newFutureNodeWithResult(futureNodeResult{
			snPath: snPath,
			target: target,
			node:   node,
		})
	}

	debug.Log("return after %.3f", time.Since(start).Seconds())

	return fn, false, nil
}
// fileChanged tries to detect whether a file's content has changed compared
// to the contents of node, which describes the same path in the parent backup.
// It should only be run for regular files.
func fileChanged(fi *fs.ExtendedFileInfo, node *data.Node, ignoreFlags uint) bool {
	// no parent node, or the parent was not a regular file: treat as changed
	if node == nil || node.Type != data.NodeTypeFile {
		return true
	}

	// size or modification time differ
	if uint64(fi.Size) != node.Size || !fi.ModTime.Equal(node.ModTime) {
		return true
	}

	// ctime comparison, unless disabled via ChangeIgnoreCtime
	if ignoreFlags&ChangeIgnoreCtime == 0 && !fi.ChangeTime.Equal(node.ChangeTime) {
		return true
	}
	// inode comparison, unless disabled via ChangeIgnoreInode
	if ignoreFlags&ChangeIgnoreInode == 0 && fi.Inode != node.Inode {
		return true
	}

	return false
}
// join returns all elements separated with a forward slash, independent of
// the local OS path separator (snapshot paths always use "/").
func join(elem ...string) string {
	joined := path.Join(elem...)
	return joined
}
// saveTree stores a Tree in the repo, returned is the tree. snPath is the path
// within the current snapshot. The second return value is the number of child
// nodes queued for this tree; a zero count at the root means the snapshot
// would be empty.
func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *tree, previous *data.Tree, complete fileCompleteFunc) (futureNode, int, error) {
	var node *data.Node
	if snPath != "/" {
		if atree.FileInfoPath == "" {
			return futureNode{}, 0, errors.Errorf("FileInfoPath for %v is empty", snPath)
		}

		var err error
		// intermediate directories are not backup targets themselves; their
		// metadata is read from FileInfoPath
		node, err = arch.dirPathToNode(snPath, atree.FileInfoPath)
		if err != nil {
			return futureNode{}, 0, err
		}
	} else {
		// fake root node
		node = &data.Node{}
	}

	debug.Log("%v (%v nodes), parent %v", snPath, len(atree.Nodes), previous)
	nodeNames := atree.NodeNames()
	nodes := make([]futureNode, 0, len(nodeNames))

	// iterate over the nodes of atree in lexicographic (=deterministic) order
	for _, name := range nodeNames {
		subatree := atree.Nodes[name]

		// test if context has been cancelled
		if ctx.Err() != nil {
			return futureNode{}, 0, ctx.Err()
		}

		// this is a leaf node
		if subatree.Leaf() {
			fn, excluded, err := arch.save(ctx, join(snPath, name), subatree.Path, previous.Find(name))

			if err != nil {
				// the handler may decide to ignore the failure for this entry
				err = arch.error(subatree.Path, err)
				if err == nil {
					// ignore error
					continue
				}
				return futureNode{}, 0, err
			}

			if !excluded {
				nodes = append(nodes, fn)
			}
			continue
		}

		snItem := join(snPath, name) + "/"
		start := time.Now()

		// load parent tree entry for change detection
		oldNode := previous.Find(name)
		oldSubtree, err := arch.loadSubtree(ctx, oldNode)
		if err != nil {
			err = arch.error(join(snPath, name), err)
		}
		if err != nil {
			return futureNode{}, 0, err
		}

		// not a leaf node, archive subtree
		fn, _, err := arch.saveTree(ctx, join(snPath, name), &subatree, oldSubtree, func(n *data.Node, is ItemStats) {
			arch.trackItem(snItem, oldNode, n, is, time.Since(start))
		})
		if err != nil {
			err = arch.error(join(snPath, name), err)
			if err == nil {
				// ignore error
				continue
			}
			return futureNode{}, 0, err
		}
		nodes = append(nodes, fn)
	}

	// queue the assembled tree on the tree saver worker pool
	fn := arch.treeSaver.Save(ctx, snPath, atree.FileInfoPath, node, nodes, complete)
	return fn, len(nodes), nil
}
// dirPathToNode opens target and builds the directory node for snPath from its
// metadata. xattr listing errors are ignored, since target may lie above the
// actual backup source.
func (arch *Archiver) dirPathToNode(snPath, target string) (node *data.Node, err error) {
	meta, err := arch.FS.OpenFile(target, 0, true)
	if err != nil {
		return nil, err
	}
	defer func() {
		// preserve the close error unless an earlier error takes precedence
		if cerr := meta.Close(); err == nil {
			err = cerr
		}
	}()

	debug.Log("%v, reading dir node data from %v", snPath, target)
	// in some cases reading xattrs for directories above the backup source is not allowed
	// thus ignore errors for such folders.
	if node, err = arch.nodeFromFileInfo(snPath, target, meta, true); err != nil {
		return nil, err
	}
	if node.Type != data.NodeTypeDir {
		return nil, errors.Errorf("path is not a directory: %v", target)
	}
	return node, err
}
// resolveRelativeTargets replaces targets that only contain relative
// directories ("." or "../../") with the contents of the directory. Each
// element of target is processed with fs.Clean().
func resolveRelativeTargets(filesys fs.FS, targets []string) ([]string, error) {
	debug.Log("targets before resolving: %v", targets)

	resolved := make([]string, 0, len(targets))
	for _, target := range targets {
		if target != "" && filesys.VolumeName(target) == target {
			// special case to allow users to also specify a volume name "C:" instead of a path "C:\"
			target += filesys.Separator()
		} else {
			target = filesys.Clean(target)
		}

		if pc, _ := pathComponents(filesys, target, false); len(pc) > 0 {
			// target names a real path component, keep it as-is
			resolved = append(resolved, target)
			continue
		}

		// target is a purely relative directory; replace it with its entries
		debug.Log("replacing %q with readdir(%q)", target, target)
		entries, err := fs.Readdirnames(filesys, target, fs.O_NOFOLLOW)
		if err != nil {
			return nil, err
		}
		sort.Strings(entries)

		for _, name := range entries {
			resolved = append(resolved, filesys.Join(target, name))
		}
	}

	debug.Log("targets after resolving: %v", resolved)
	return resolved, nil
}
// SnapshotOptions collect attributes for a new snapshot.
type SnapshotOptions struct {
	Tags           data.TagList
	Hostname       string
	Excludes       []string       // exclude patterns, recorded in the snapshot
	BackupStart    time.Time      // start of the backup run, copied into the summary
	Time           time.Time      // timestamp stored in the snapshot itself
	ParentSnapshot *data.Snapshot // parent used for change detection, may be nil
	ProgramVersion string

	// SkipIfUnchanged omits the snapshot creation if it is identical to the parent snapshot.
	SkipIfUnchanged bool
}
// loadParentTree loads the tree referenced by sn. If sn is nil or has no tree
// id, nil is returned. Load failures are reported via arch.error and also
// result in a nil tree, so the backup proceeds without a parent.
func (arch *Archiver) loadParentTree(ctx context.Context, sn *data.Snapshot) *data.Tree {
	if sn == nil {
		return nil
	}

	if sn.Tree == nil {
		// fix: the format string previously contained a second %v without a
		// matching argument, logging "%!v(MISSING)" (go vet printf error)
		debug.Log("snapshot %v has empty tree", *sn.ID())
		return nil
	}

	debug.Log("load parent tree %v", *sn.Tree)
	tree, err := data.LoadTree(ctx, arch.Repo, *sn.Tree)
	if err != nil {
		debug.Log("unable to load tree %v: %v", *sn.Tree, err)
		_ = arch.error("/", arch.wrapLoadTreeError(*sn.Tree, err))
		return nil
	}
	return tree
}
// runWorkers starts the worker pools, which are stopped when the context is cancelled.
func (arch *Archiver) runWorkers(ctx context.Context, wg *errgroup.Group, uploader restic.BlobSaverAsync) {
	arch.fileSaver = newFileSaver(ctx, wg,
		uploader,
		arch.Repo.Config().ChunkerPolynomial,
		arch.Options.ReadConcurrency)
	arch.fileSaver.CompleteBlob = arch.CompleteBlob
	arch.fileSaver.NodeFromFileInfo = arch.nodeFromFileInfo

	arch.treeSaver = newTreeSaver(ctx, wg, arch.Options.SaveTreeConcurrency, uploader, arch.Error)
}

// stopWorkers shuts down the pools started by runWorkers and clears the
// references so a stale pool cannot be used accidentally.
func (arch *Archiver) stopWorkers() {
	arch.fileSaver.TriggerShutdown()
	arch.treeSaver.TriggerShutdown()
	arch.fileSaver = nil
	arch.treeSaver = nil
}
// Snapshot saves several targets and returns a snapshot.
// If opts.SkipIfUnchanged is set and the resulting root tree matches the
// parent snapshot's tree, no snapshot is written and (nil, zero id, summary,
// nil) is returned.
func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*data.Snapshot, restic.ID, *Summary, error) {
	arch.summary = &Summary{
		BackupStart: opts.BackupStart,
	}

	// expand "." / "../.." style targets into their directory entries
	cleanTargets, err := resolveRelativeTargets(arch.FS, targets)
	if err != nil {
		return nil, restic.ID{}, nil, err
	}

	atree, err := newTree(arch.FS, cleanTargets)
	if err != nil {
		return nil, restic.ID{}, nil, err
	}

	var rootTreeID restic.ID

	err = arch.Repo.WithBlobUploader(ctx, func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
		wg, wgCtx := errgroup.WithContext(ctx)
		start := time.Now()

		wg.Go(func() error {
			// worker pools run on the same errgroup and stop on wgCtx cancel
			arch.runWorkers(wgCtx, wg, uploader)

			debug.Log("starting snapshot")
			fn, nodeCount, err := arch.saveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(_ *data.Node, is ItemStats) {
				arch.trackItem("/", nil, nil, is, time.Since(start))
			})
			if err != nil {
				return err
			}

			// block until the root tree has been saved (or wgCtx is cancelled)
			fnr := fn.take(wgCtx)
			if fnr.err != nil {
				return fnr.err
			}

			if wgCtx.Err() != nil {
				return wgCtx.Err()
			}

			if nodeCount == 0 {
				return errors.New("snapshot is empty")
			}

			rootTreeID = *fnr.node.Subtree
			arch.stopWorkers()
			return nil
		})

		err = wg.Wait()
		debug.Log("err is %v", err)

		if err != nil {
			debug.Log("error while saving tree: %v", err)
			return err
		}

		return nil
	})
	if err != nil {
		return nil, restic.ID{}, nil, err
	}

	if opts.ParentSnapshot != nil && opts.SkipIfUnchanged {
		ps := opts.ParentSnapshot
		if ps.Tree != nil && rootTreeID.Equal(*ps.Tree) {
			// identical to the parent snapshot: report stats, but save nothing
			arch.summary.BackupEnd = time.Now()
			return nil, restic.ID{}, arch.summary, nil
		}
	}

	// note: the snapshot records the original targets, not the resolved ones
	sn, err := data.NewSnapshot(targets, opts.Tags, opts.Hostname, opts.Time)
	if err != nil {
		return nil, restic.ID{}, nil, err
	}

	sn.ProgramVersion = opts.ProgramVersion
	sn.Excludes = opts.Excludes
	if opts.ParentSnapshot != nil {
		sn.Parent = opts.ParentSnapshot.ID()
	}
	sn.Tree = &rootTreeID

	arch.summary.BackupEnd = time.Now()
	sn.Summary = &data.SnapshotSummary{
		BackupStart: arch.summary.BackupStart,
		BackupEnd:   arch.summary.BackupEnd,

		FilesNew:            arch.summary.Files.New,
		FilesChanged:        arch.summary.Files.Changed,
		FilesUnmodified:     arch.summary.Files.Unchanged,
		DirsNew:             arch.summary.Dirs.New,
		DirsChanged:         arch.summary.Dirs.Changed,
		DirsUnmodified:      arch.summary.Dirs.Unchanged,
		DataBlobs:           arch.summary.ItemStats.DataBlobs,
		TreeBlobs:           arch.summary.ItemStats.TreeBlobs,
		DataAdded:           arch.summary.ItemStats.DataSize + arch.summary.ItemStats.TreeSize,
		DataAddedPacked:     arch.summary.ItemStats.DataSizeInRepo + arch.summary.ItemStats.TreeSizeInRepo,
		TotalFilesProcessed: arch.summary.Files.New + arch.summary.Files.Changed + arch.summary.Files.Unchanged,
		TotalBytesProcessed: arch.summary.ProcessedBytes,
	}

	id, err := data.SaveSnapshot(ctx, arch.Repo, sn)
	if err != nil {
		return nil, restic.ID{}, nil, err
	}

	return sn, id, arch.summary, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/exclude_test.go | internal/archiver/exclude_test.go | package archiver
import (
"os"
"path/filepath"
"testing"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/test"
)
// TestIsExcludedByFile checks the CACHEDIR.TAG detection: a directory is only
// excluded when the tag file has the expected name and starts with the
// well-known signature header.
func TestIsExcludedByFile(t *testing.T) {
	const (
		tagFilename = "CACHEDIR.TAG"
		header      = "Signature: 8a477f597d28d172789f06886806bc55"
	)
	tests := []struct {
		name    string
		tagFile string // name of the tag file to create, "" for none
		content string // content written into the tag file
		want    bool   // expected exclusion result
	}{
		{"NoTagfile", "", "", false},
		{"EmptyTagfile", tagFilename, "", true},
		{"UnnamedTagFile", "", header, false},
		{"WrongTagFile", "notatagfile", header, false},
		{"IncorrectSig", tagFilename, header[1:], false},
		{"ValidSig", tagFilename, header, true},
		{"ValidPlusStuff", tagFilename, header + "foo", true},
		{"ValidPlusNewlineAndStuff", tagFilename, header + "\nbar", true},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			tempDir := test.TempDir(t)

			foo := filepath.Join(tempDir, "foo")
			err := os.WriteFile(foo, []byte("foo"), 0666)
			if err != nil {
				t.Fatalf("could not write file: %v", err)
			}
			if tc.tagFile != "" {
				tagFile := filepath.Join(tempDir, tc.tagFile)
				err = os.WriteFile(tagFile, []byte(tc.content), 0666)
				if err != nil {
					t.Fatalf("could not write tagfile: %v", err)
				}
			}
			// expect no header match when the tag file is empty
			h := header
			if tc.content == "" {
				h = ""
			}
			if got := isExcludedByFile(foo, tagFilename, h, newRejectionCache(), &fs.Local{}, func(msg string, args ...interface{}) { t.Logf(msg, args...) }); tc.want != got {
				t.Fatalf("expected %v, got %v", tc.want, got)
			}
		})
	}
}
// TestMultipleIsExcludedByFile is for testing that multiple instances of
// the --exclude-if-present parameter (or the shortcut --exclude-caches do not
// cancel each other out. It was initially written to demonstrate a bug in
// rejectIfPresent.
func TestMultipleIsExcludedByFile(t *testing.T) {
	tempDir := test.TempDir(t)

	// Create some files in a temporary directory.
	// Files in UPPERCASE will be used as exclusion triggers later on.
	// We will test the inclusion later, so we add the expected value as
	// a bool.
	files := []struct {
		path string
		incl bool
	}{
		{"42", true},

		// everything in foodir except the NOFOO tagfile
		// should not be included.
		{"foodir/NOFOO", true},
		{"foodir/foo", false},
		{"foodir/foosub/underfoo", false},

		// everything in bardir except the NOBAR tagfile
		// should not be included.
		{"bardir/NOBAR", true},
		{"bardir/bar", false},
		{"bardir/barsub/underbar", false},

		// everything in bazdir should be included.
		{"bazdir/baz", true},
		{"bazdir/bazsub/underbaz", true},
	}
	var errs []error
	for _, f := range files {
		// create directories first, then the file
		p := filepath.Join(tempDir, filepath.FromSlash(f.path))
		errs = append(errs, os.MkdirAll(filepath.Dir(p), 0700))
		errs = append(errs, os.WriteFile(p, []byte(f.path), 0600))
	}
	test.OKs(t, errs) // see if anything went wrong during the creation

	// create two rejection functions, one that tests for the NOFOO file
	// and one for the NOBAR file
	fooExclude, _ := RejectIfPresent("NOFOO", nil)
	barExclude, _ := RejectIfPresent("NOBAR", nil)

	// To mock the archiver scanning walk, we create filepath.WalkFn
	// that tests against the two rejection functions and stores
	// the result in a map against we can test later.
	m := make(map[string]bool)
	walk := func(p string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		excludedByFoo := fooExclude(p, nil, &fs.Local{})
		excludedByBar := barExclude(p, nil, &fs.Local{})
		excluded := excludedByFoo || excludedByBar
		// the log message helps debugging in case the test fails
		t.Logf("%q: %v || %v = %v", p, excludedByFoo, excludedByBar, excluded)
		m[p] = !excluded
		if excluded {
			// don't descend into excluded directories
			return filepath.SkipDir
		}
		return nil
	}
	// walk through the temporary file and check the error
	test.OK(t, filepath.Walk(tempDir, walk))

	// compare whether the walk gave the expected values for the test cases
	for _, f := range files {
		p := filepath.Join(tempDir, filepath.FromSlash(f.path))
		if m[p] != f.incl {
			t.Errorf("inclusion status of %s is wrong: want %v, got %v", f.path, f.incl, m[p])
		}
	}
}
// TestIsExcludedByFileSize is for testing the instance of
// --exclude-larger-than parameters
func TestIsExcludedByFileSize(t *testing.T) {
	tempDir := test.TempDir(t)

	// Create some files in a temporary directory.
	// Files in UPPERCASE will be used as exclusion triggers later on.
	// We will test the inclusion later, so we add the expected value as
	// a bool.
	files := []struct {
		path string
		size int64
		incl bool
	}{
		{"42", 100, true},

		// everything in foodir except the FOOLARGE tagfile
		// should not be included.
		{"foodir/FOOLARGE", 2048, false},
		{"foodir/foo", 1002, true},
		{"foodir/foosub/underfoo", 100, true},

		// everything in bardir except the BARLARGE tagfile
		// should not be included.
		{"bardir/BARLARGE", 1030, false},
		{"bardir/bar", 1000, true},
		{"bardir/barsub/underbar", 500, true},

		// everything in bazdir should be included.
		{"bazdir/baz", 100, true},
		{"bazdir/bazsub/underbaz", 200, true},
	}
	var errs []error
	for _, f := range files {
		// create directories first, then the file
		p := filepath.Join(tempDir, filepath.FromSlash(f.path))
		errs = append(errs, os.MkdirAll(filepath.Dir(p), 0700))

		file, err := os.OpenFile(p, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
		errs = append(errs, err)
		if err == nil {
			// create a file with given size
			errs = append(errs, file.Truncate(f.size))
			// fix: only close files that were actually opened. Previously
			// Close was called unconditionally, so a failed OpenFile added a
			// spurious os.ErrInvalid (Close on a nil *os.File) to errs that
			// obscured the real failure.
			errs = append(errs, file.Close())
		}
	}
	test.OKs(t, errs) // see if anything went wrong during the creation

	// create rejection function
	sizeExclude, _ := RejectBySize(1024)

	// To mock the archiver scanning walk, we create filepath.WalkFn
	// that tests against the two rejection functions and stores
	// the result in a map against we can test later.
	m := make(map[string]bool)
	walk := func(p string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		excluded := sizeExclude(p, fs.ExtendedStat(fi), nil)
		// the log message helps debugging in case the test fails
		t.Logf("%q: dir:%t; size:%d; excluded:%v", p, fi.IsDir(), fi.Size(), excluded)
		m[p] = !excluded
		return nil
	}
	// walk through the temporary file and check the error
	test.OK(t, filepath.Walk(tempDir, walk))

	// compare whether the walk gave the expected values for the test cases
	for _, f := range files {
		p := filepath.Join(tempDir, filepath.FromSlash(f.path))
		if m[p] != f.incl {
			t.Errorf("inclusion status of %s is wrong: want %v, got %v", f.path, f.incl, m[p])
		}
	}
}
// TestDeviceMap checks the one-file-system device map: paths are allowed only
// when their device id matches the mapping for the closest configured mount
// point above them.
func TestDeviceMap(t *testing.T) {
	deviceMap := deviceMap{
		filepath.FromSlash("/"):          1,
		filepath.FromSlash("/usr/local"): 5,
	}

	var tests = []struct {
		item     string
		deviceID uint64
		allowed  bool
	}{
		{"/root", 1, true},
		{"/usr", 1, true},

		{"/proc", 2, false},
		{"/proc/1234", 2, false},

		{"/usr", 3, false},
		{"/usr/share", 3, false},

		{"/usr/local", 5, true},
		{"/usr/local/foobar", 5, true},

		{"/usr/local/foobar/submount", 23, false},
		{"/usr/local/foobar/submount/file", 23, false},

		// device 1 belongs to "/", not to this subtree, so it is rejected here
		{"/usr/local/foobar/outhersubmount", 1, false},
		{"/usr/local/foobar/outhersubmount/otherfile", 1, false},
	}

	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			res, err := deviceMap.IsAllowed(filepath.FromSlash(test.item), test.deviceID, &fs.Local{})
			if err != nil {
				t.Fatal(err)
			}

			if res != test.allowed {
				t.Fatalf("wrong result returned by IsAllowed(%v): want %v, got %v", test.item, test.allowed, res)
			}
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/tree.go | internal/archiver/tree.go | package archiver
import (
"fmt"
"sort"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
)
// tree recursively defines how a snapshot should look like when
// archived.
//
// When `Path` is set, this is a leaf node and the contents of `Path` should be
// inserted at this point in the tree.
//
// The attribute `Root` is used to distinguish between files/dirs which have
// the same name, but live in a separate directory on the local file system.
//
// `FileInfoPath` is used to extract metadata for intermediate (=non-leaf)
// trees.
type tree struct {
	Nodes        map[string]tree // child entries, keyed by name within the snapshot
	Path         string          // where the files/dirs to be saved are found
	FileInfoPath string          // where the dir can be found that is not included itself, but its subdirs
	Root         string          // parent directory of the tree
}
// pathComponents returns all path components of p. If a virtual directory
// (volume name on Windows) is added, virtualPrefix is set to true. See the
// tests for examples.
func pathComponents(fs fs.FS, p string, includeRelative bool) (components []string, virtualPrefix bool) {
	volume := fs.VolumeName(p)

	if !fs.IsAbs(p) && !includeRelative {
		// anchor relative paths at the filesystem root
		p = fs.Join(fs.Separator(), p)
	}

	p = fs.Clean(p)

	// strip off base names until the root is reached; this collects the
	// components in reverse order
	for {
		dir, file := fs.Dir(p), fs.Base(p)

		if p == dir {
			break
		}

		components = append(components, file)
		p = dir
	}

	// reverse the collected components into path order
	for i, j := 0, len(components)-1; i < j; i, j = i+1, j-1 {
		components[i], components[j] = components[j], components[i]
	}

	if volume != "" {
		// strip colon
		if len(volume) == 2 && volume[1] == ':' {
			volume = volume[:1]
		}

		components = append([]string{volume}, components...)
		virtualPrefix = true
	}

	return components, virtualPrefix
}
// rootDirectory returns the directory which contains the first element of target.
func rootDirectory(fs fs.FS, target string) string {
if target == "" {
return ""
}
if fs.IsAbs(target) {
return fs.Join(fs.VolumeName(target), fs.Separator())
}
target = fs.Clean(target)
pc, _ := pathComponents(fs, target, true)
rel := "."
for _, c := range pc {
if c == ".." {
rel = fs.Join(rel, c)
}
}
return rel
}
// Add adds a new file or directory to the tree.
func (t *tree) Add(fs fs.FS, path string) error {
if path == "" {
panic("invalid path (empty string)")
}
if t.Nodes == nil {
t.Nodes = make(map[string]tree)
}
pc, virtualPrefix := pathComponents(fs, path, false)
if len(pc) == 0 {
return errors.New("invalid path (no path components)")
}
name := pc[0]
root := rootDirectory(fs, path)
tree := tree{Root: root}
origName := name
i := 0
for {
other, ok := t.Nodes[name]
if !ok {
break
}
i++
if other.Root == root {
tree = other
break
}
// resolve conflict and try again
name = fmt.Sprintf("%s-%d", origName, i)
continue
}
if len(pc) > 1 {
subroot := fs.Join(root, origName)
if virtualPrefix {
// use the original root dir if this is a virtual directory (volume name on Windows)
subroot = root
}
err := tree.add(fs, path, subroot, pc[1:])
if err != nil {
return err
}
tree.FileInfoPath = subroot
} else {
tree.Path = path
}
t.Nodes[name] = tree
return nil
}
// add adds a new target path into the tree.
func (t *tree) add(fs fs.FS, target, root string, pc []string) error {
if len(pc) == 0 {
return errors.Errorf("invalid path %q", target)
}
if t.Nodes == nil {
t.Nodes = make(map[string]tree)
}
name := pc[0]
if len(pc) == 1 {
node, ok := t.Nodes[name]
if !ok {
t.Nodes[name] = tree{Path: target}
return nil
}
if node.Path != "" {
return errors.Errorf("path is already set for target %v", target)
}
node.Path = target
t.Nodes[name] = node
return nil
}
node := tree{}
if other, ok := t.Nodes[name]; ok {
node = other
}
subroot := fs.Join(root, name)
node.FileInfoPath = subroot
err := node.add(fs, target, subroot, pc[1:])
if err != nil {
return err
}
t.Nodes[name] = node
return nil
}
func (t tree) String() string {
return formatTree(t, "")
}
// Leaf returns true if this is a leaf node, which means Path is set to a
// non-empty string and the contents of Path should be inserted at this point
// in the tree.
func (t tree) Leaf() bool {
return t.Path != ""
}
// NodeNames returns the sorted list of subtree names.
func (t tree) NodeNames() []string {
// iterate over the nodes of atree in lexicographic (=deterministic) order
names := make([]string, 0, len(t.Nodes))
for name := range t.Nodes {
names = append(names, name)
}
sort.Strings(names)
return names
}
// formatTree returns a text representation of the tree t.
func formatTree(t tree, indent string) (s string) {
for name, node := range t.Nodes {
s += fmt.Sprintf("%v/%v, root %q, path %q, meta %q\n", indent, name, node.Root, node.Path, node.FileInfoPath)
s += formatTree(node, indent+" ")
}
return s
}
// unrollTree unrolls the tree so that only leaf nodes have Path set.
func unrollTree(f fs.FS, t *tree) error {
// if the current tree is a leaf node (Path is set) and has additional
// nodes, add the contents of Path to the nodes.
if t.Path != "" && len(t.Nodes) > 0 {
debug.Log("resolve path %v", t.Path)
entries, err := fs.Readdirnames(f, t.Path, 0)
if err != nil {
return err
}
for _, entry := range entries {
if node, ok := t.Nodes[entry]; ok {
if node.Path == "" {
node.Path = f.Join(t.Path, entry)
t.Nodes[entry] = node
continue
}
if node.Path == f.Join(t.Path, entry) {
continue
}
return errors.Errorf("tree unrollTree: collision on path, node %#v, path %q", node, f.Join(t.Path, entry))
}
t.Nodes[entry] = tree{Path: f.Join(t.Path, entry)}
}
t.Path = ""
}
for i, subtree := range t.Nodes {
err := unrollTree(f, &subtree)
if err != nil {
return err
}
t.Nodes[i] = subtree
}
return nil
}
// newTree creates a Tree from the target files/directories.
func newTree(fs fs.FS, targets []string) (*tree, error) {
debug.Log("targets: %v", targets)
tree := &tree{}
seen := make(map[string]struct{})
for _, target := range targets {
target = fs.Clean(target)
// skip duplicate targets
if _, ok := seen[target]; ok {
continue
}
seen[target] = struct{}{}
err := tree.Add(fs, target)
if err != nil {
return nil, err
}
}
debug.Log("before unroll:\n%v", tree)
err := unrollTree(fs, tree)
if err != nil {
return nil, err
}
debug.Log("result:\n%v", tree)
return tree, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/exclude.go | internal/archiver/exclude.go | package archiver
import (
"bytes"
"fmt"
"io"
"os"
"runtime"
"strings"
"sync"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
)
// RejectByNameFunc is a function that takes a filename of a
// file that would be included in the backup. The function returns true if it
// should be excluded (rejected) from the backup.
type RejectByNameFunc func(path string) bool
// RejectFunc is a function that takes a filename and os.FileInfo of a
// file that would be included in the backup. The function returns true if it
// should be excluded (rejected) from the backup.
type RejectFunc func(path string, fi *fs.ExtendedFileInfo, fs fs.FS) bool
func CombineRejectByNames(funcs []RejectByNameFunc) SelectByNameFunc {
return func(item string) bool {
for _, reject := range funcs {
if reject(item) {
return false
}
}
return true
}
}
func CombineRejects(funcs []RejectFunc) SelectFunc {
return func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool {
for _, reject := range funcs {
if reject(item, fi, fs) {
return false
}
}
return true
}
}
type rejectionCache struct {
m map[string]bool
mtx sync.Mutex
}
func newRejectionCache() *rejectionCache {
return &rejectionCache{m: make(map[string]bool)}
}
// Lock locks the mutex in rc.
func (rc *rejectionCache) Lock() {
rc.mtx.Lock()
}
// Unlock unlocks the mutex in rc.
func (rc *rejectionCache) Unlock() {
rc.mtx.Unlock()
}
// Get returns the last stored value for dir and a second boolean that
// indicates whether that value was actually written to the cache. It is the
// callers responsibility to call rc.Lock and rc.Unlock before using this
// method, otherwise data races may occur.
func (rc *rejectionCache) Get(dir string) (bool, bool) {
v, ok := rc.m[dir]
return v, ok
}
// Store stores a new value for dir. It is the callers responsibility to call
// rc.Lock and rc.Unlock before using this method, otherwise data races may
// occur.
func (rc *rejectionCache) Store(dir string, rejected bool) {
rc.m[dir] = rejected
}
// RejectIfPresent returns a RejectByNameFunc which itself returns whether a path
// should be excluded. The RejectByNameFunc considers a file to be excluded when
// it resides in a directory with an exclusion file, that is specified by
// excludeFileSpec in the form "filename[:content]". The returned error is
// non-nil if the filename component of excludeFileSpec is empty. If rc is
// non-nil, it is going to be used in the RejectByNameFunc to expedite the evaluation
// of a directory based on previous visits.
func RejectIfPresent(excludeFileSpec string, warnf func(msg string, args ...interface{})) (RejectFunc, error) {
if excludeFileSpec == "" {
return nil, errors.New("name for exclusion tagfile is empty")
}
colon := strings.Index(excludeFileSpec, ":")
if colon == 0 {
return nil, fmt.Errorf("no name for exclusion tagfile provided")
}
tf, tc := "", ""
if colon > 0 {
tf = excludeFileSpec[:colon]
tc = excludeFileSpec[colon+1:]
} else {
tf = excludeFileSpec
}
debug.Log("using %q as exclusion tagfile", tf)
rc := newRejectionCache()
return func(filename string, _ *fs.ExtendedFileInfo, fs fs.FS) bool {
return isExcludedByFile(filename, tf, tc, rc, fs, warnf)
}, nil
}
// isExcludedByFile interprets filename as a path and returns true if that file
// is in an excluded directory. A directory is identified as excluded if it contains a
// tagfile which bears the name specified in tagFilename and starts with
// header. If rc is non-nil, it is used to expedite the evaluation of a
// directory based on previous visits.
func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache, fs fs.FS, warnf func(msg string, args ...interface{})) bool {
if tagFilename == "" {
return false
}
if fs.Base(filename) == tagFilename {
return false // do not exclude the tagfile itself
}
rc.Lock()
defer rc.Unlock()
dir := fs.Dir(filename)
rejected, visited := rc.Get(dir)
if visited {
return rejected
}
rejected = isDirExcludedByFile(dir, tagFilename, header, fs, warnf)
rc.Store(dir, rejected)
return rejected
}
func isDirExcludedByFile(dir, tagFilename, header string, fsInst fs.FS, warnf func(msg string, args ...interface{})) bool {
tf := fsInst.Join(dir, tagFilename)
_, err := fsInst.Lstat(tf)
if errors.Is(err, os.ErrNotExist) {
return false
}
if err != nil {
warnf("could not access exclusion tagfile: %v", err)
return false
}
// when no signature is given, the mere presence of tf is enough reason
// to exclude filename
if len(header) == 0 {
return true
}
// From this stage, errors mean tagFilename exists but it is malformed.
// Warnings will be generated so that the user is informed that the
// indented ignore-action is not performed.
f, err := fsInst.OpenFile(tf, fs.O_RDONLY, false)
if err != nil {
warnf("could not open exclusion tagfile: %v", err)
return false
}
defer func() {
_ = f.Close()
}()
buf := make([]byte, len(header))
_, err = io.ReadFull(f, buf)
// EOF is handled with a dedicated message, otherwise the warning were too cryptic
if err == io.EOF {
warnf("invalid (too short) signature in exclusion tagfile %q\n", tf)
return false
}
if err != nil {
warnf("could not read signature from exclusion tagfile %q: %v\n", tf, err)
return false
}
if !bytes.Equal(buf, []byte(header)) {
warnf("invalid signature in exclusion tagfile %q\n", tf)
return false
}
return true
}
// deviceMap is used to track allowed source devices for backup. This is used to
// check for crossing mount points during backup (for --one-file-system). It
// maps the name of a source path to its device ID.
type deviceMap map[string]uint64
// newDeviceMap creates a new device map from the list of source paths.
func newDeviceMap(allowedSourcePaths []string, fs fs.FS) (deviceMap, error) {
if runtime.GOOS == "windows" {
return nil, errors.New("Device IDs are not supported on Windows")
}
deviceMap := make(map[string]uint64)
for _, item := range allowedSourcePaths {
item, err := fs.Abs(fs.Clean(item))
if err != nil {
return nil, err
}
fi, err := fs.Lstat(item)
if err != nil {
return nil, err
}
deviceMap[item] = fi.DeviceID
}
if len(deviceMap) == 0 {
return nil, errors.New("zero allowed devices")
}
return deviceMap, nil
}
// IsAllowed returns true if the path is located on an allowed device.
func (m deviceMap) IsAllowed(item string, deviceID uint64, fs fs.FS) (bool, error) {
for dir := item; ; dir = fs.Dir(dir) {
debug.Log("item %v, test dir %v", item, dir)
// find a parent directory that is on an allowed device (otherwise
// we would not traverse the directory at all)
allowedID, ok := m[dir]
if !ok {
if dir == fs.Dir(dir) {
// arrived at root, no allowed device found. this should not happen.
break
}
continue
}
// if the item has a different device ID than the parent directory,
// we crossed a file system boundary
if allowedID != deviceID {
debug.Log("item %v (dir %v) on disallowed device %d", item, dir, deviceID)
return false, nil
}
// item is on allowed device, accept it
debug.Log("item %v allowed", item)
return true, nil
}
return false, fmt.Errorf("item %v (device ID %v) not found, deviceMap: %v", item, deviceID, m)
}
// RejectByDevice returns a RejectFunc that rejects files which are on a
// different file systems than the files/dirs in samples.
func RejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) {
deviceMap, err := newDeviceMap(samples, filesystem)
if err != nil {
return nil, err
}
debug.Log("allowed devices: %v\n", deviceMap)
return func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool {
allowed, err := deviceMap.IsAllowed(fs.Clean(item), fi.DeviceID, fs)
if err != nil {
// this should not happen
panic(fmt.Sprintf("error checking device ID of %v: %v", item, err))
}
if allowed {
// accept item
return false
}
// reject everything except directories
if !fi.Mode.IsDir() {
return true
}
// special case: make sure we keep mountpoints (directories which
// contain a mounted file system). Test this by checking if the parent
// directory would be included.
parentDir := fs.Dir(fs.Clean(item))
parentFI, err := fs.Lstat(parentDir)
if err != nil {
debug.Log("item %v: error running lstat() on parent directory: %v", item, err)
// if in doubt, reject
return true
}
parentAllowed, err := deviceMap.IsAllowed(parentDir, parentFI.DeviceID, fs)
if err != nil {
debug.Log("item %v: error checking parent directory: %v", item, err)
// if in doubt, reject
return true
}
if parentAllowed {
// we found a mount point, so accept the directory
return false
}
// reject everything else
return true
}, nil
}
func RejectBySize(maxSize int64) (RejectFunc, error) {
return func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool {
// directory will be ignored
if fi.Mode.IsDir() {
return false
}
filesize := fi.Size
if filesize > maxSize {
debug.Log("file %s is oversize: %d", item, filesize)
return true
}
return false
}, nil
}
// RejectCloudFiles returns a func which rejects files which are online-only cloud files
func RejectCloudFiles(warnf func(msg string, args ...interface{})) (RejectFunc, error) {
return func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool {
recall, err := fi.RecallOnDataAccess()
if err != nil {
warnf("item %v: error checking online-only status: %v", item, err)
return false
}
if recall {
debug.Log("rejecting online-only cloud file %s", item)
return true
}
return false
}, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/file_saver_test.go | internal/archiver/file_saver_test.go | package archiver
import (
"context"
"fmt"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/restic/chunker"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/test"
"golang.org/x/sync/errgroup"
)
func createTestFiles(t testing.TB, num int) (files []string) {
tempdir := test.TempDir(t)
for i := 0; i < num; i++ {
filename := fmt.Sprintf("testfile-%d", i)
err := os.WriteFile(filepath.Join(tempdir, filename), []byte(filename), 0600)
if err != nil {
t.Fatal(err)
}
files = append(files, filepath.Join(tempdir, filename))
}
return files
}
func startFileSaver(ctx context.Context, t testing.TB, _ fs.FS) (*fileSaver, *mockSaver, context.Context, *errgroup.Group) {
wg, ctx := errgroup.WithContext(ctx)
workers := uint(runtime.NumCPU())
pol, err := chunker.RandomPolynomial()
if err != nil {
t.Fatal(err)
}
saver := &mockSaver{saved: make(map[string]int)}
s := newFileSaver(ctx, wg, saver, pol, workers)
s.NodeFromFileInfo = func(snPath, filename string, meta ToNoder, ignoreXattrListError bool) (*data.Node, error) {
return meta.ToNode(ignoreXattrListError, t.Logf)
}
return s, saver, ctx, wg
}
func TestFileSaver(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
startFn := func() {}
completeReadingFn := func() {}
completeFn := func(*data.Node, ItemStats) {}
files := createTestFiles(t, 15)
testFs := fs.Local{}
s, saver, ctx, wg := startFileSaver(ctx, t, testFs)
var results []futureNode
for _, filename := range files {
f, err := testFs.OpenFile(filename, os.O_RDONLY, false)
if err != nil {
t.Fatal(err)
}
ff := s.Save(ctx, filename, filename, f, startFn, completeReadingFn, completeFn)
results = append(results, ff)
}
for _, file := range results {
fnr := file.take(ctx)
if fnr.err != nil {
t.Errorf("unable to save file: %v", fnr.err)
}
}
test.Assert(t, len(saver.saved) == len(files), "expected %d saved files, got %d", len(files), len(saver.saved))
s.TriggerShutdown()
err := wg.Wait()
if err != nil {
t.Fatal(err)
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/archiver_unix_test.go | internal/archiver/archiver_unix_test.go | //go:build !windows
package archiver
import (
"testing"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/feature"
"github.com/restic/restic/internal/fs"
rtest "github.com/restic/restic/internal/test"
)
func statAndSnapshot(t *testing.T, repo archiverRepo, name string) (*data.Node, *data.Node) {
want := nodeFromFile(t, &fs.Local{}, name)
_, node := snapshot(t, repo, &fs.Local{}, nil, name)
return want, node
}
func TestHardlinkMetadata(t *testing.T) {
defer feature.TestSetFlag(t, feature.Flag, feature.DeviceIDForHardlinks, true)()
files := TestDir{
"testfile": TestFile{
Content: "foo bar test file",
},
"linktarget": TestFile{
Content: "test file",
},
"testlink": TestHardlink{
Target: "./linktarget",
},
"testdir": TestDir{},
}
tempdir, repo := prepareTempdirRepoSrc(t, files)
back := rtest.Chdir(t, tempdir)
defer back()
want, node := statAndSnapshot(t, repo, "testlink")
rtest.Assert(t, node.DeviceID == want.DeviceID, "device id mismatch expected %v got %v", want.DeviceID, node.DeviceID)
rtest.Assert(t, node.Links == want.Links, "link count mismatch expected %v got %v", want.Links, node.Links)
rtest.Assert(t, node.Inode == want.Inode, "inode mismatch expected %v got %v", want.Inode, node.Inode)
_, node = statAndSnapshot(t, repo, "testfile")
rtest.Assert(t, node.DeviceID == 0, "device id mismatch for testfile expected %v got %v", 0, node.DeviceID)
_, node = statAndSnapshot(t, repo, "testdir")
rtest.Assert(t, node.DeviceID == 0, "device id mismatch for testdir expected %v got %v", 0, node.DeviceID)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/scanner_test.go | internal/archiver/scanner_test.go | package archiver
import (
"context"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/restic/restic/internal/fs"
rtest "github.com/restic/restic/internal/test"
)
func TestScanner(t *testing.T) {
var tests = []struct {
name string
src TestDir
want map[string]ScanStats
selFn SelectFunc
}{
{
name: "include-all",
src: TestDir{
"other": TestFile{Content: "another file"},
"work": TestDir{
"foo": TestFile{Content: "foo"},
"foo.txt": TestFile{Content: "foo text file"},
"subdir": TestDir{
"other": TestFile{Content: "other in subdir"},
"bar.txt": TestFile{Content: "bar.txt in subdir"},
},
},
},
want: map[string]ScanStats{
filepath.FromSlash("other"): {Files: 1, Bytes: 12},
filepath.FromSlash("work/foo"): {Files: 2, Bytes: 15},
filepath.FromSlash("work/foo.txt"): {Files: 3, Bytes: 28},
filepath.FromSlash("work/subdir/bar.txt"): {Files: 4, Bytes: 45},
filepath.FromSlash("work/subdir/other"): {Files: 5, Bytes: 60},
filepath.FromSlash("work/subdir"): {Files: 5, Dirs: 1, Bytes: 60},
filepath.FromSlash("work"): {Files: 5, Dirs: 2, Bytes: 60},
filepath.FromSlash(""): {Files: 5, Dirs: 2, Bytes: 60},
},
},
{
name: "select-txt",
src: TestDir{
"other": TestFile{Content: "another file"},
"work": TestDir{
"foo": TestFile{Content: "foo"},
"foo.txt": TestFile{Content: "foo text file"},
"subdir": TestDir{
"other": TestFile{Content: "other in subdir"},
"bar.txt": TestFile{Content: "bar.txt in subdir"},
},
},
},
selFn: func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool {
if fi.Mode.IsDir() {
return true
}
if filepath.Ext(item) == ".txt" {
return true
}
return false
},
want: map[string]ScanStats{
filepath.FromSlash("work/foo.txt"): {Files: 1, Bytes: 13},
filepath.FromSlash("work/subdir/bar.txt"): {Files: 2, Bytes: 30},
filepath.FromSlash("work/subdir"): {Files: 2, Dirs: 1, Bytes: 30},
filepath.FromSlash("work"): {Files: 2, Dirs: 2, Bytes: 30},
filepath.FromSlash(""): {Files: 2, Dirs: 2, Bytes: 30},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempdir := rtest.TempDir(t)
TestCreateFiles(t, tempdir, test.src)
back := rtest.Chdir(t, tempdir)
defer back()
cur, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
sc := NewScanner(fs.Track{FS: fs.Local{}})
if test.selFn != nil {
sc.Select = test.selFn
}
results := make(map[string]ScanStats)
sc.Result = func(item string, s ScanStats) {
var p string
var err error
if item != "" {
p, err = filepath.Rel(cur, item)
if err != nil {
panic(err)
}
}
results[p] = s
}
err = sc.Scan(ctx, []string{"."})
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(test.want, results) {
t.Error(cmp.Diff(test.want, results))
}
})
}
}
func TestScannerError(t *testing.T) {
var tests = []struct {
name string
unix bool
src TestDir
result ScanStats
selFn SelectFunc
errFn func(t testing.TB, item string, err error) error
resFn func(t testing.TB, item string, s ScanStats)
prepare func(t testing.TB)
}{
{
name: "no-error",
src: TestDir{
"other": TestFile{Content: "another file"},
"work": TestDir{
"foo": TestFile{Content: "foo"},
"foo.txt": TestFile{Content: "foo text file"},
"subdir": TestDir{
"other": TestFile{Content: "other in subdir"},
"bar.txt": TestFile{Content: "bar.txt in subdir"},
},
},
},
result: ScanStats{Files: 5, Dirs: 2, Bytes: 60},
},
{
name: "unreadable-dir",
unix: true,
src: TestDir{
"other": TestFile{Content: "another file"},
"work": TestDir{
"foo": TestFile{Content: "foo"},
"foo.txt": TestFile{Content: "foo text file"},
"subdir": TestDir{
"other": TestFile{Content: "other in subdir"},
"bar.txt": TestFile{Content: "bar.txt in subdir"},
},
},
},
result: ScanStats{Files: 3, Dirs: 1, Bytes: 28},
prepare: func(t testing.TB) {
err := os.Chmod(filepath.Join("work", "subdir"), 0000)
if err != nil {
t.Fatal(err)
}
},
errFn: func(t testing.TB, item string, err error) error {
if item == filepath.FromSlash("work/subdir") {
return nil
}
return err
},
},
{
name: "removed-item",
src: TestDir{
"bar": TestFile{Content: "bar"},
"baz": TestFile{Content: "baz"},
"foo": TestFile{Content: "foo"},
"other": TestFile{Content: "other"},
},
result: ScanStats{Files: 3, Dirs: 0, Bytes: 11},
resFn: func(t testing.TB, item string, s ScanStats) {
if item == "bar" {
err := os.Remove("foo")
if err != nil {
t.Fatal(err)
}
}
},
errFn: func(t testing.TB, item string, err error) error {
if item == "foo" {
t.Logf("ignoring error for %v: %v", item, err)
return nil
}
return err
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if test.unix && runtime.GOOS == "windows" {
t.Skipf("skip on windows")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempdir := rtest.TempDir(t)
TestCreateFiles(t, tempdir, test.src)
back := rtest.Chdir(t, tempdir)
defer back()
cur, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
if test.prepare != nil {
test.prepare(t)
}
sc := NewScanner(fs.Track{FS: fs.Local{}})
if test.selFn != nil {
sc.Select = test.selFn
}
var stats ScanStats
sc.Result = func(item string, s ScanStats) {
if item == "" {
stats = s
return
}
if test.resFn != nil {
p, relErr := filepath.Rel(cur, item)
if relErr != nil {
panic(relErr)
}
test.resFn(t, p, s)
}
}
if test.errFn != nil {
sc.Error = func(item string, err error) error {
p, relErr := filepath.Rel(cur, item)
if relErr != nil {
panic(relErr)
}
return test.errFn(t, p, err)
}
}
err = sc.Scan(ctx, []string{"."})
if err != nil {
t.Fatal(err)
}
if stats != test.result {
t.Errorf("wrong final result, want\n %#v\ngot:\n %#v", test.result, stats)
}
})
}
}
func TestScannerCancel(t *testing.T) {
src := TestDir{
"bar": TestFile{Content: "bar"},
"baz": TestFile{Content: "baz"},
"foo": TestFile{Content: "foo"},
"other": TestFile{Content: "other"},
}
result := ScanStats{Files: 2, Dirs: 0, Bytes: 6}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempdir := rtest.TempDir(t)
TestCreateFiles(t, tempdir, src)
back := rtest.Chdir(t, tempdir)
defer back()
cur, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
sc := NewScanner(fs.Track{FS: fs.Local{}})
var lastStats ScanStats
sc.Result = func(item string, s ScanStats) {
lastStats = s
if item == filepath.Join(cur, "baz") {
t.Logf("found baz")
cancel()
}
}
err = sc.Scan(ctx, []string{"."})
if err != nil {
t.Errorf("unexpected error %v found", err)
}
if lastStats != result {
t.Errorf("wrong final result, want\n %#v\ngot:\n %#v", result, lastStats)
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/file_saver.go | internal/archiver/file_saver.go | package archiver
import (
"context"
"fmt"
"io"
"sync"
"github.com/restic/chunker"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
"golang.org/x/sync/errgroup"
)
// fileSaver concurrently saves incoming files to the repo.
type fileSaver struct {
saveFilePool *bufferPool
uploader restic.BlobSaverAsync
pol chunker.Pol
ch chan<- saveFileJob
CompleteBlob func(bytes uint64)
NodeFromFileInfo func(snPath, filename string, meta ToNoder, ignoreXattrListError bool) (*data.Node, error)
}
// newFileSaver returns a new file saver. A worker pool with fileWorkers is
// started, it is stopped when ctx is cancelled.
func newFileSaver(ctx context.Context, wg *errgroup.Group, uploader restic.BlobSaverAsync, pol chunker.Pol, fileWorkers uint) *fileSaver {
ch := make(chan saveFileJob)
debug.Log("new file saver with %v file workers", fileWorkers)
s := &fileSaver{
uploader: uploader,
saveFilePool: newBufferPool(chunker.MaxSize),
pol: pol,
ch: ch,
CompleteBlob: func(uint64) {},
}
for i := uint(0); i < fileWorkers; i++ {
wg.Go(func() error {
s.worker(ctx, ch)
return nil
})
}
return s
}
func (s *fileSaver) TriggerShutdown() {
close(s.ch)
}
// fileCompleteFunc is called when the file has been saved.
type fileCompleteFunc func(*data.Node, ItemStats)
// Save stores the file f and returns the data once it has been completed. The
// file is closed by Save. completeReading is only called if the file was read
// successfully. complete is always called. If completeReading is called, then
// this will always happen before calling complete.
func (s *fileSaver) Save(ctx context.Context, snPath string, target string, file fs.File, start func(), completeReading func(), complete fileCompleteFunc) futureNode {
fn, ch := newFutureNode()
job := saveFileJob{
snPath: snPath,
target: target,
file: file,
ch: ch,
start: start,
completeReading: completeReading,
complete: complete,
}
select {
case s.ch <- job:
case <-ctx.Done():
debug.Log("not sending job, context is cancelled: %v", ctx.Err())
_ = file.Close()
close(ch)
}
return fn
}
type saveFileJob struct {
snPath string
target string
file fs.File
ch chan<- futureNodeResult
start func()
completeReading func()
complete fileCompleteFunc
}
// saveFile stores the file f in the repo, then closes it.
func (s *fileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, target string, f fs.File, start func(), finishReading func(), finish func(res futureNodeResult)) {
start()
fnr := futureNodeResult{
snPath: snPath,
target: target,
}
var lock sync.Mutex
remaining := 0
isCompleted := false
completeBlob := func() {
lock.Lock()
defer lock.Unlock()
remaining--
if remaining == 0 && fnr.err == nil {
if isCompleted {
panic("completed twice")
}
for _, id := range fnr.node.Content {
if id.IsNull() {
panic("completed file with null ID")
}
}
isCompleted = true
finish(fnr)
}
}
completeError := func(err error) {
lock.Lock()
defer lock.Unlock()
if fnr.err == nil {
if isCompleted {
panic("completed twice")
}
isCompleted = true
fnr.err = fmt.Errorf("failed to save %v: %w", target, err)
fnr.node = nil
fnr.stats = ItemStats{}
finish(fnr)
}
}
debug.Log("%v", snPath)
node, err := s.NodeFromFileInfo(snPath, target, f, false)
if err != nil {
_ = f.Close()
completeError(err)
return
}
if node.Type != data.NodeTypeFile {
_ = f.Close()
completeError(errors.Errorf("node type %q is wrong", node.Type))
return
}
// reuse the chunker
chnker.Reset(f, s.pol)
node.Content = []restic.ID{}
node.Size = 0
var idx int
for {
buf := s.saveFilePool.Get()
chunk, err := chnker.Next(buf.Data)
if err == io.EOF {
buf.Release()
break
}
buf.Data = chunk.Data
node.Size += uint64(chunk.Length)
if err != nil {
_ = f.Close()
completeError(err)
return
}
// test if the context has been cancelled, return the error
if ctx.Err() != nil {
_ = f.Close()
completeError(ctx.Err())
return
}
// add a place to store the saveBlob result
pos := idx
lock.Lock()
node.Content = append(node.Content, restic.ID{})
lock.Unlock()
s.uploader.SaveBlobAsync(ctx, restic.DataBlob, buf.Data, restic.ID{}, false, func(newID restic.ID, known bool, sizeInRepo int, err error) {
defer buf.Release()
if err != nil {
completeError(err)
return
}
lock.Lock()
if !known {
fnr.stats.DataBlobs++
fnr.stats.DataSize += uint64(len(buf.Data))
fnr.stats.DataSizeInRepo += uint64(sizeInRepo)
}
node.Content[pos] = newID
lock.Unlock()
completeBlob()
})
idx++
// test if the context has been cancelled, return the error
if ctx.Err() != nil {
_ = f.Close()
completeError(ctx.Err())
return
}
s.CompleteBlob(uint64(len(chunk.Data)))
}
err = f.Close()
if err != nil {
completeError(err)
return
}
fnr.node = node
lock.Lock()
// require one additional completeFuture() call to ensure that the future only completes
// after reaching the end of this method
remaining += idx + 1
lock.Unlock()
finishReading()
completeBlob()
}
func (s *fileSaver) worker(ctx context.Context, jobs <-chan saveFileJob) {
// a worker has one chunker which is reused for each file (because it contains a rather large buffer)
chnker := chunker.New(nil, s.pol)
for {
var job saveFileJob
var ok bool
select {
case <-ctx.Done():
return
case job, ok = <-jobs:
if !ok {
return
}
}
s.saveFile(ctx, chnker, job.snPath, job.target, job.file, job.start, func() {
if job.completeReading != nil {
job.completeReading()
}
}, func(res futureNodeResult) {
if job.complete != nil {
job.complete(res.node, res.stats)
}
job.ch <- res
close(job.ch)
})
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/archiver_test.go | internal/archiver/archiver_test.go | package archiver
import (
"bytes"
"context"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"sync/atomic"
"syscall"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/mem"
"github.com/restic/restic/internal/checker"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/feature"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
"golang.org/x/sync/errgroup"
)
// prepareTempdirRepoSrc creates a temporary directory populated with the
// files described by src and returns it together with a fresh test
// repository.
func prepareTempdirRepoSrc(t testing.TB, src TestDir) (string, *repository.Repository) {
	dir := rtest.TempDir(t)
	repo := repository.TestRepository(t)
	TestCreateFiles(t, dir, src)
	return dir, repo
}
// saveFile archives a single file with a throw-away archiver and returns the
// resulting node and item stats. Besides archiving, it verifies that the
// start, completeReading and complete callbacks each fire exactly once and
// that completeReading fires before complete.
func saveFile(t testing.TB, repo archiverRepo, filename string, filesystem fs.FS) (*data.Node, ItemStats) {
	var (
		completeReadingCallback bool
		completeCallbackNode    *data.Node
		completeCallbackStats   ItemStats
		completeCallback        bool
		startCallback           bool
		fnr                     futureNodeResult
	)
	arch := New(repo, filesystem, Options{})
	arch.Error = func(item string, err error) error {
		t.Errorf("archiver error for %v: %v", item, err)
		return err
	}
	err := repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
		wg, ctx := errgroup.WithContext(ctx)
		arch.runWorkers(ctx, wg, uploader)
		// completeReading must be invoked before complete; flag a wrong order.
		completeReading := func() {
			completeReadingCallback = true
			if completeCallback {
				t.Error("callbacks called in wrong order")
			}
		}
		// complete records its arguments so they can be compared with the
		// future's result below.
		complete := func(node *data.Node, stats ItemStats) {
			completeCallback = true
			completeCallbackNode = node
			completeCallbackStats = stats
		}
		start := func() {
			startCallback = true
		}
		file, err := arch.FS.OpenFile(filename, fs.O_NOFOLLOW, false)
		if err != nil {
			t.Fatal(err)
		}
		res := arch.fileSaver.Save(ctx, "/", filename, file, start, completeReading, complete)
		fnr = res.take(ctx)
		if fnr.err != nil {
			t.Fatal(fnr.err)
		}
		arch.stopWorkers()
		return wg.Wait()
	})
	if err != nil {
		t.Fatal(err)
	}
	// Verify that each callback fired and that the values handed to the
	// complete callback match what the future reported.
	if !startCallback {
		t.Errorf("start callback did not happen")
	}
	if !completeReadingCallback {
		t.Errorf("completeReading callback did not happen")
	}
	if !completeCallback {
		t.Errorf("complete callback did not happen")
	}
	if completeCallbackNode == nil {
		t.Errorf("no node returned for complete callback")
	}
	if completeCallbackNode != nil && !fnr.node.Equals(*completeCallbackNode) {
		t.Errorf("different node returned for complete callback")
	}
	if completeCallbackStats != fnr.stats {
		t.Errorf("different stats return for complete callback, want:\n %v\ngot:\n %v", fnr.stats, completeCallbackStats)
	}
	return fnr.node, fnr.stats
}
// TestArchiverSaveFile stores single files of various sizes and checks the
// restored content as well as the returned statistics.
func TestArchiverSaveFile(t *testing.T) {
	var tests = []TestFile{
		{Content: ""},
		{Content: "foo"},
		{Content: string(rtest.Random(23, 12*1024*1024+1287898))},
	}

	for _, testfile := range tests {
		t.Run("", func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			tempdir, repo := prepareTempdirRepoSrc(t, TestDir{"file": testfile})
			node, stats := saveFile(t, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}})

			TestEnsureFileContent(ctx, t, repo, "file", node, testfile)
			if stats.DataSize != uint64(len(testfile.Content)) {
				t.Errorf("wrong stats returned in DataSize, want %d, got %d", len(testfile.Content), stats.DataSize)
			}
			if stats.DataBlobs <= 0 && len(testfile.Content) > 0 {
				t.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs)
			}
			if stats.TreeSize != 0 {
				t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize)
			}
			// Fixed copy-paste error: this check is about TreeBlobs, but the
			// message previously reported DataBlobs.
			if stats.TreeBlobs != 0 {
				t.Errorf("wrong stats returned in TreeBlobs, want 0, got %d", stats.TreeBlobs)
			}
		})
	}
}
// TestArchiverSaveFileReaderFS stores data provided via a reader-backed
// filesystem and checks content and statistics.
func TestArchiverSaveFileReaderFS(t *testing.T) {
	var tests = []struct {
		Data string
	}{
		{Data: "foo"},
		{Data: string(rtest.Random(23, 12*1024*1024+1287898))},
	}

	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			repo := repository.TestRepository(t)

			ts := time.Now()
			filename := "xx"
			readerFs, err := fs.NewReader(filename, io.NopCloser(strings.NewReader(test.Data)), fs.ReaderOptions{
				ModTime: ts,
				Mode:    0123,
			})
			rtest.OK(t, err)

			node, stats := saveFile(t, repo, filename, readerFs)

			TestEnsureFileContent(ctx, t, repo, "file", node, TestFile{Content: test.Data})
			if stats.DataSize != uint64(len(test.Data)) {
				t.Errorf("wrong stats returned in DataSize, want %d, got %d", len(test.Data), stats.DataSize)
			}
			if stats.DataBlobs <= 0 && len(test.Data) > 0 {
				t.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs)
			}
			if stats.TreeSize != 0 {
				t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize)
			}
			// Fixed copy-paste error: check and report TreeBlobs here, not
			// DataBlobs.
			if stats.TreeBlobs != 0 {
				t.Errorf("wrong stats returned in TreeBlobs, want 0, got %d", stats.TreeBlobs)
			}
		})
	}
}
// TestArchiverSave archives single files via Archiver.save and verifies the
// stored content and the reported statistics.
func TestArchiverSave(t *testing.T) {
	var tests = []TestFile{
		{Content: ""},
		{Content: "foo"},
		{Content: string(rtest.Random(23, 12*1024*1024+1287898))},
	}

	for _, testfile := range tests {
		t.Run("", func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			tempdir, repo := prepareTempdirRepoSrc(t, TestDir{"file": testfile})

			arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
			arch.Error = func(item string, err error) error {
				t.Errorf("archiver error for %v: %v", item, err)
				return err
			}
			arch.summary = &Summary{}

			var fnr futureNodeResult
			err := repo.WithBlobUploader(ctx, func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
				wg, ctx := errgroup.WithContext(ctx)
				arch.runWorkers(ctx, wg, uploader)

				node, excluded, err := arch.save(ctx, "/", filepath.Join(tempdir, "file"), nil)
				if err != nil {
					t.Fatal(err)
				}

				if excluded {
					t.Errorf("Save() excluded the node, that's unexpected")
				}

				fnr = node.take(ctx)
				if fnr.err != nil {
					t.Fatal(fnr.err)
				}

				if fnr.node == nil {
					t.Fatalf("returned node is nil")
				}
				arch.stopWorkers()
				return wg.Wait()
			})
			if err != nil {
				t.Fatal(err)
			}

			TestEnsureFileContent(ctx, t, repo, "file", fnr.node, testfile)

			stats := fnr.stats
			if stats.DataSize != uint64(len(testfile.Content)) {
				t.Errorf("wrong stats returned in DataSize, want %d, got %d", len(testfile.Content), stats.DataSize)
			}
			if stats.DataBlobs <= 0 && len(testfile.Content) > 0 {
				t.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs)
			}
			if stats.TreeSize != 0 {
				t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize)
			}
			// Fixed copy-paste error: check and report TreeBlobs here, not
			// DataBlobs.
			if stats.TreeBlobs != 0 {
				t.Errorf("wrong stats returned in TreeBlobs, want 0, got %d", stats.TreeBlobs)
			}
		})
	}
}
// TestArchiverSaveReaderFS archives a file from a reader-backed filesystem
// via Archiver.save and verifies content and statistics.
func TestArchiverSaveReaderFS(t *testing.T) {
	var tests = []struct {
		Data string
	}{
		{Data: "foo"},
		{Data: string(rtest.Random(23, 12*1024*1024+1287898))},
	}

	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			repo := repository.TestRepository(t)
			ts := time.Now()
			filename := "xx"
			readerFs, err := fs.NewReader(filename, io.NopCloser(strings.NewReader(test.Data)), fs.ReaderOptions{
				ModTime: ts,
				Mode:    0123,
			})
			rtest.OK(t, err)

			arch := New(repo, readerFs, Options{})
			arch.Error = func(item string, err error) error {
				t.Errorf("archiver error for %v: %v", item, err)
				return err
			}
			arch.summary = &Summary{}

			var fnr futureNodeResult
			err = repo.WithBlobUploader(ctx, func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
				wg, ctx := errgroup.WithContext(ctx)
				arch.runWorkers(ctx, wg, uploader)

				node, excluded, err := arch.save(ctx, "/", filename, nil)
				t.Logf("Save returned %v %v", node, err)
				if err != nil {
					t.Fatal(err)
				}

				if excluded {
					t.Errorf("Save() excluded the node, that's unexpected")
				}

				fnr = node.take(ctx)
				if fnr.err != nil {
					t.Fatal(fnr.err)
				}

				if fnr.node == nil {
					t.Fatalf("returned node is nil")
				}
				arch.stopWorkers()
				return wg.Wait()
			})
			if err != nil {
				t.Fatal(err)
			}

			TestEnsureFileContent(ctx, t, repo, "file", fnr.node, TestFile{Content: test.Data})

			stats := fnr.stats
			if stats.DataSize != uint64(len(test.Data)) {
				t.Errorf("wrong stats returned in DataSize, want %d, got %d", len(test.Data), stats.DataSize)
			}
			if stats.DataBlobs <= 0 && len(test.Data) > 0 {
				t.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs)
			}
			if stats.TreeSize != 0 {
				t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize)
			}
			// Fixed copy-paste error: check and report TreeBlobs here, not
			// DataBlobs.
			if stats.TreeBlobs != 0 {
				t.Errorf("wrong stats returned in TreeBlobs, want 0, got %d", stats.TreeBlobs)
			}
		})
	}
}
// BenchmarkArchiverSaveFileSmall benchmarks archiving a 4 KiB file.
func BenchmarkArchiverSaveFileSmall(b *testing.B) {
	const fileSize = 4 * 1024
	d := TestDir{"file": TestFile{
		Content: string(rtest.Random(23, fileSize)),
	}}

	b.SetBytes(fileSize)

	for i := 0; i < b.N; i++ {
		b.StopTimer()
		tempdir, repo := prepareTempdirRepoSrc(b, d)
		b.StartTimer()

		_, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}})

		b.StopTimer()
		if stats.DataSize != fileSize {
			b.Errorf("wrong stats returned in DataSize, want %d, got %d", fileSize, stats.DataSize)
		}
		if stats.DataBlobs <= 0 {
			b.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs)
		}
		if stats.TreeSize != 0 {
			b.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize)
		}
		// Fixed copy-paste error: check and report TreeBlobs here, not
		// DataBlobs.
		if stats.TreeBlobs != 0 {
			b.Errorf("wrong stats returned in TreeBlobs, want 0, got %d", stats.TreeBlobs)
		}
		b.StartTimer()
	}
}
// BenchmarkArchiverSaveFileLarge benchmarks archiving a ~40 MiB file.
func BenchmarkArchiverSaveFileLarge(b *testing.B) {
	const fileSize = 40*1024*1024 + 1287898
	d := TestDir{"file": TestFile{
		Content: string(rtest.Random(23, fileSize)),
	}}

	b.SetBytes(fileSize)

	for i := 0; i < b.N; i++ {
		b.StopTimer()
		tempdir, repo := prepareTempdirRepoSrc(b, d)
		b.StartTimer()

		_, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}})

		b.StopTimer()
		if stats.DataSize != fileSize {
			b.Errorf("wrong stats returned in DataSize, want %d, got %d", fileSize, stats.DataSize)
		}
		if stats.DataBlobs <= 0 {
			b.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs)
		}
		if stats.TreeSize != 0 {
			b.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize)
		}
		// Fixed copy-paste error: check and report TreeBlobs here, not
		// DataBlobs.
		if stats.TreeBlobs != 0 {
			b.Errorf("wrong stats returned in TreeBlobs, want 0, got %d", stats.TreeBlobs)
		}
		b.StartTimer()
	}
}
// blobCountingRepo wraps an archiverRepo and counts how often each newly
// stored blob is saved, so tests can detect duplicate uploads.
type blobCountingRepo struct {
	archiverRepo

	m     sync.Mutex                   // guards saved
	saved map[restic.BlobHandle]uint   // number of times each new blob was stored
}
// WithBlobUploader runs fn with an uploader that counts every newly stored
// blob before delegating to the wrapped repository's uploader.
func (repo *blobCountingRepo) WithBlobUploader(ctx context.Context, fn func(ctx context.Context, uploader restic.BlobSaverWithAsync) error) error {
	wrapped := func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
		counting := &blobCountingSaver{saver: uploader, blobCountingRepo: repo}
		return fn(ctx, counting)
	}
	return repo.archiverRepo.WithBlobUploader(ctx, wrapped)
}
// blobCountingSaver wraps a BlobSaverWithAsync and records every newly
// stored blob in the associated blobCountingRepo.
type blobCountingSaver struct {
	saver            restic.BlobSaverWithAsync
	blobCountingRepo *blobCountingRepo
}
// count records that blob h was newly stored; blobs that already existed in
// the repository are ignored.
func (repo *blobCountingSaver) count(exists bool, h restic.BlobHandle) {
	if exists {
		return
	}
	r := repo.blobCountingRepo
	r.m.Lock()
	defer r.m.Unlock()
	r.saved[h]++
}
// SaveBlob stores the blob via the wrapped saver and counts it when it was
// not already known.
func (repo *blobCountingSaver) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error) {
	newID, known, size, err := repo.saver.SaveBlob(ctx, t, buf, id, storeDuplicate)
	repo.count(known, restic.BlobHandle{ID: newID, Type: t})
	return newID, known, size, err
}
// SaveBlobAsync stores the blob asynchronously, counting new blobs before
// forwarding the result to the caller-provided callback.
func (repo *blobCountingSaver) SaveBlobAsync(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool, cb func(newID restic.ID, known bool, size int, err error)) {
	countingCB := func(newID restic.ID, known bool, size int, err error) {
		repo.count(known, restic.BlobHandle{ID: newID, Type: t})
		cb(newID, known, size, err)
	}
	repo.saver.SaveBlobAsync(ctx, t, buf, id, storeDuplicate, countingCB)
}
func appendToFile(t testing.TB, filename string, data []byte) {
f, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
t.Fatal(err)
}
_, err = f.Write(data)
if err != nil {
_ = f.Close()
t.Fatal(err)
}
err = f.Close()
if err != nil {
t.Fatal(err)
}
}
// TestArchiverSaveFileIncremental repeatedly grows a file and archives it,
// then checks that no blob was uploaded more than once.
func TestArchiverSaveFileIncremental(t *testing.T) {
	tempdir := rtest.TempDir(t)
	repo := &blobCountingRepo{
		archiverRepo: repository.TestRepository(t),
		saved:        make(map[restic.BlobHandle]uint),
	}
	data := rtest.Random(23, 512*1024+887898)
	testfile := filepath.Join(tempdir, "testfile")
	for i := 0; i < 3; i++ {
		// Append the same chunk again and re-archive; already stored blobs
		// must be deduplicated, so every counter has to stay at 1.
		appendToFile(t, testfile, data)
		node, _ := saveFile(t, repo, testfile, fs.Track{FS: fs.Local{}})
		t.Logf("node blobs: %v", node.Content)
		for h, n := range repo.saved {
			if n > 1 {
				t.Errorf("iteration %v: blob %v saved more than once (%d times)", i, h, n)
			}
		}
	}
}
func save(t testing.TB, filename string, data []byte) {
f, err := os.Create(filename)
if err != nil {
t.Fatal(err)
}
_, err = f.Write(data)
if err != nil {
t.Fatal(err)
}
err = f.Sync()
if err != nil {
t.Fatal(err)
}
err = f.Close()
if err != nil {
t.Fatal(err)
}
}
// chmodTwice flips the file mode from 0700 to 0600 with a pause in between
// so that the file's ctime is guaranteed to change.
func chmodTwice(t testing.TB, name string) {
	// POSIX says that ctime is updated "even if the file status does not
	// change", but let's make sure it does change, just in case.
	rtest.OK(t, os.Chmod(name, 0700))
	sleep()
	rtest.OK(t, os.Chmod(name, 0600))
}
// lstat wraps os.Lstat, failing the test on error and returning the extended
// file info.
func lstat(t testing.TB, name string) *fs.ExtendedFileInfo {
	info, err := os.Lstat(name)
	if err != nil {
		t.Fatal(err)
	}
	return fs.ExtendedStat(info)
}
func setTimestamp(t testing.TB, filename string, atime, mtime time.Time) {
var utimes = [...]syscall.Timespec{
syscall.NsecToTimespec(atime.UnixNano()),
syscall.NsecToTimespec(mtime.UnixNano()),
}
err := syscall.UtimesNano(filename, utimes[:])
if err != nil {
t.Fatal(err)
}
}
func remove(t testing.TB, filename string) {
err := os.Remove(filename)
if err != nil {
t.Fatal(err)
}
}
func rename(t testing.TB, oldname, newname string) {
err := os.Rename(oldname, newname)
if err != nil {
t.Fatal(err)
}
}
// nodeFromFile opens filename without following symlinks and converts its
// metadata into a data.Node, failing the test on any error.
func nodeFromFile(t testing.TB, localFs fs.FS, filename string) *data.Node {
	meta, err := localFs.OpenFile(filename, fs.O_NOFOLLOW, true)
	rtest.OK(t, err)
	node, err := meta.ToNode(false, t.Logf)
	rtest.OK(t, err)
	rtest.OK(t, meta.Close())
	return node
}
// sleep sleeps long enough to ensure a timestamp change.
func sleep() {
d := 50 * time.Millisecond
if runtime.GOOS == "darwin" {
// On older Darwin instances, the file system only supports one second
// granularity.
d = 1500 * time.Millisecond
}
time.Sleep(d)
}
// TestFileChanged exercises fileChanged with various ways of modifying (or
// not modifying) a file and checks that change detection reports the
// expected result. Fixes over the previous version: the redundant
// "&& !test.SameFile" in the else branch was dropped (the branch only runs
// when SameFile is false), and the local variable shadowing the fs package
// was renamed.
func TestFileChanged(t *testing.T) {
	var defaultContent = []byte("foobar")

	var tests = []struct {
		Name           string
		SkipForWindows bool
		Content        []byte
		Modify         func(t testing.TB, filename string)
		ChangeIgnore   uint
		SameFile       bool
	}{
		{
			Name: "same-content-new-file",
			Modify: func(t testing.TB, filename string) {
				remove(t, filename)
				sleep()
				save(t, filename, defaultContent)
			},
		},
		{
			Name: "same-content-new-timestamp",
			Modify: func(t testing.TB, filename string) {
				sleep()
				save(t, filename, defaultContent)
			},
		},
		{
			Name: "new-content-same-timestamp",
			// on Windows, there's no "create time" field users cannot modify,
			// so we're unable to detect if a file has been modified when the
			// timestamps are reset, so we skip this test for Windows
			SkipForWindows: true,
			Modify: func(t testing.TB, filename string) {
				fi, err := os.Stat(filename)
				if err != nil {
					t.Fatal(err)
				}
				extFI := fs.ExtendedStat(fi)
				save(t, filename, bytes.ToUpper(defaultContent))
				sleep()
				setTimestamp(t, filename, extFI.AccessTime, extFI.ModTime)
			},
		},
		{
			Name: "other-content",
			Modify: func(t testing.TB, filename string) {
				remove(t, filename)
				sleep()
				save(t, filename, []byte("xxxxxx"))
			},
		},
		{
			Name: "longer-content",
			Modify: func(t testing.TB, filename string) {
				save(t, filename, []byte("xxxxxxxxxxxxxxxxxxxxxx"))
			},
		},
		{
			Name: "new-file",
			Modify: func(t testing.TB, filename string) {
				remove(t, filename)
				sleep()
				save(t, filename, defaultContent)
			},
		},
		{
			Name:           "ctime-change",
			Modify:         chmodTwice,
			SameFile:       false,
			SkipForWindows: true, // No ctime on Windows, so this test would fail.
		},
		{
			Name:           "ignore-ctime-change",
			Modify:         chmodTwice,
			ChangeIgnore:   ChangeIgnoreCtime,
			SameFile:       true,
			SkipForWindows: true, // No ctime on Windows, so this test is meaningless.
		},
		{
			Name: "ignore-inode",
			Modify: func(t testing.TB, filename string) {
				fi := lstat(t, filename)
				// First create the new file, then remove the old one,
				// so that the old file retains its inode number.
				tempname := filename + ".old"
				rename(t, filename, tempname)
				save(t, filename, defaultContent)
				remove(t, tempname)
				setTimestamp(t, filename, fi.ModTime, fi.ModTime)
			},
			ChangeIgnore: ChangeIgnoreCtime | ChangeIgnoreInode,
			SameFile:     true,
		},
	}

	for _, test := range tests {
		t.Run(test.Name, func(t *testing.T) {
			if runtime.GOOS == "windows" && test.SkipForWindows {
				t.Skip("don't run test on Windows")
			}

			tempdir := rtest.TempDir(t)
			filename := filepath.Join(tempdir, "file")
			content := defaultContent
			if test.Content != nil {
				content = test.Content
			}
			save(t, filename, content)

			// named localFS to avoid shadowing the fs package
			localFS := &fs.Local{}
			fiBefore, err := localFS.Lstat(filename)
			rtest.OK(t, err)
			node := nodeFromFile(t, localFS, filename)

			if fileChanged(fiBefore, node, 0) {
				t.Fatalf("unchanged file detected as changed")
			}

			test.Modify(t, filename)

			fiAfter := lstat(t, filename)

			if test.SameFile {
				// file should be detected as unchanged
				if fileChanged(fiAfter, node, test.ChangeIgnore) {
					t.Fatalf("unmodified file detected as changed")
				}
			} else {
				// file should be detected as changed
				if !fileChanged(fiAfter, node, test.ChangeIgnore) {
					t.Fatalf("modified file detected as unchanged")
				}
			}
		})
	}
}
// TestFilChangedSpecialCases covers fileChanged edge cases: a nil node and a
// node whose type differs from the on-disk file.
// NOTE(review): the function name is missing an "e" ("TestFilChanged…");
// left unchanged here to keep the recorded test name stable.
func TestFilChangedSpecialCases(t *testing.T) {
	tempdir := rtest.TempDir(t)
	filename := filepath.Join(tempdir, "file")
	content := []byte("foobar")
	save(t, filename, content)
	t.Run("nil-node", func(t *testing.T) {
		// A missing node must always be reported as changed.
		fi := lstat(t, filename)
		if !fileChanged(fi, nil, 0) {
			t.Fatal("nil node detected as unchanged")
		}
	})
	t.Run("type-change", func(t *testing.T) {
		// A node whose recorded type no longer matches the file must be
		// reported as changed.
		fi := lstat(t, filename)
		node := nodeFromFile(t, &fs.Local{}, filename)
		node.Type = data.NodeTypeSymlink
		if !fileChanged(fi, node, 0) {
			t.Fatal("node with changed type detected as unchanged")
		}
	})
}
// TestArchiverSaveDir archives directories via Archiver.saveDir from various
// working directories and target paths, then restores the resulting tree and
// compares it against the expected layout. The saved directory is always
// stored under the fixed node name "targetdir".
func TestArchiverSaveDir(t *testing.T) {
	const targetNodeName = "targetdir"
	var tests = []struct {
		src    TestDir
		chdir  string
		target string
		want   TestDir
	}{
		{
			src: TestDir{
				"targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))},
			},
			target: ".",
			want: TestDir{
				"targetdir": TestDir{
					"targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))},
				},
			},
		},
		{
			src: TestDir{
				"targetdir": TestDir{
					"foo":        TestFile{Content: "foo"},
					"emptyfile":  TestFile{Content: ""},
					"bar":        TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"},
					"largefile":  TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))},
					"largerfile": TestFile{Content: string(rtest.Random(234, 5*1024*1024+5000))},
				},
			},
			target: "targetdir",
		},
		{
			src: TestDir{
				"foo":       TestFile{Content: "foo"},
				"emptyfile": TestFile{Content: ""},
				"bar":       TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"},
			},
			target: ".",
			want: TestDir{
				"targetdir": TestDir{
					"foo":       TestFile{Content: "foo"},
					"emptyfile": TestFile{Content: ""},
					"bar":       TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"},
				},
			},
		},
		{
			src: TestDir{
				"foo": TestDir{
					"subdir": TestDir{
						"x": TestFile{Content: "xxx"},
						"y": TestFile{Content: "yyyyyyyyyyyyyyyy"},
						"z": TestFile{Content: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"},
					},
					"file": TestFile{Content: "just a test"},
				},
			},
			chdir:  "foo/subdir",
			target: "../../",
			want: TestDir{
				"targetdir": TestDir{
					"foo": TestDir{
						"subdir": TestDir{
							"x": TestFile{Content: "xxx"},
							"y": TestFile{Content: "yyyyyyyyyyyyyyyy"},
							"z": TestFile{Content: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"},
						},
						"file": TestFile{Content: "just a test"},
					},
				},
			},
		},
		{
			src: TestDir{
				"foo": TestDir{
					"file":  TestFile{Content: "just a test"},
					"file2": TestFile{Content: "again"},
				},
			},
			target: "./foo",
			want: TestDir{
				"targetdir": TestDir{
					"file":  TestFile{Content: "just a test"},
					"file2": TestFile{Content: "again"},
				},
			},
		},
	}
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			tempdir, repo := prepareTempdirRepoSrc(t, test.src)
			testFS := fs.Track{FS: fs.Local{}}
			arch := New(repo, testFS, Options{})
			arch.summary = &Summary{}
			chdir := tempdir
			if test.chdir != "" {
				chdir = filepath.Join(chdir, test.chdir)
			}
			back := rtest.Chdir(t, chdir)
			defer back()
			var treeID restic.ID
			err := repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
				wg, ctx := errgroup.WithContext(ctx)
				arch.runWorkers(ctx, wg, uploader)
				meta, err := testFS.OpenFile(test.target, fs.O_NOFOLLOW, true)
				rtest.OK(t, err)
				ft, err := arch.saveDir(ctx, "/", test.target, meta, nil, nil)
				rtest.OK(t, err)
				rtest.OK(t, meta.Close())
				fnr := ft.take(ctx)
				node, stats := fnr.node, fnr.stats
				t.Logf("stats: %v", stats)
				// Saving only directories must produce tree data, no file data.
				if stats.DataSize != 0 {
					t.Errorf("wrong stats returned in DataSize, want 0, got %d", stats.DataSize)
				}
				if stats.DataBlobs != 0 {
					t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs)
				}
				if stats.TreeSize == 0 {
					t.Errorf("wrong stats returned in TreeSize, want > 0, got %d", stats.TreeSize)
				}
				if stats.TreeBlobs <= 0 {
					t.Errorf("wrong stats returned in TreeBlobs, want > 0, got %d", stats.TreeBlobs)
				}
				// Wrap the saved directory under a fixed name so the check
				// below can compare against test.want.
				node.Name = targetNodeName
				tree := &data.Tree{Nodes: []*data.Node{node}}
				treeID, err = data.SaveTree(ctx, uploader, tree)
				if err != nil {
					t.Fatal(err)
				}
				arch.stopWorkers()
				return wg.Wait()
			})
			if err != nil {
				t.Fatal(err)
			}
			want := test.want
			if want == nil {
				want = test.src
			}
			TestEnsureTree(context.TODO(), t, "/", repo, treeID, want)
		})
	}
}
// TestArchiverSaveDirIncremental saves the same directory several times in a
// row and verifies that only the first run adds tree data and that no blob is
// ever uploaded more than once.
func TestArchiverSaveDirIncremental(t *testing.T) {
	tempdir := rtest.TempDir(t)
	repo := &blobCountingRepo{
		archiverRepo: repository.TestRepository(t),
		saved:        make(map[restic.BlobHandle]uint),
	}
	appendToFile(t, filepath.Join(tempdir, "testfile"), []byte("foobar"))
	// save the empty directory several times in a row, then have a look if the
	// archiver did save the same tree several times
	for i := 0; i < 5; i++ {
		testFS := fs.Track{FS: fs.Local{}}
		arch := New(repo, testFS, Options{})
		arch.summary = &Summary{}
		var fnr futureNodeResult
		err := repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
			wg, ctx := errgroup.WithContext(ctx)
			arch.runWorkers(ctx, wg, uploader)
			meta, err := testFS.OpenFile(tempdir, fs.O_NOFOLLOW, true)
			rtest.OK(t, err)
			ft, err := arch.saveDir(ctx, "/", tempdir, meta, nil, nil)
			rtest.OK(t, err)
			rtest.OK(t, meta.Close())
			fnr = ft.take(ctx)
			arch.stopWorkers()
			return wg.Wait()
		})
		if err != nil {
			t.Fatal(err)
		}
		node, stats := fnr.node, fnr.stats
		if i == 0 {
			// operation must have added new tree data
			if stats.DataSize != 0 {
				t.Errorf("wrong stats returned in DataSize, want 0, got %d", stats.DataSize)
			}
			if stats.DataBlobs != 0 {
				t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs)
			}
			if stats.TreeSize == 0 {
				t.Errorf("wrong stats returned in TreeSize, want > 0, got %d", stats.TreeSize)
			}
			if stats.TreeBlobs <= 0 {
				t.Errorf("wrong stats returned in TreeBlobs, want > 0, got %d", stats.TreeBlobs)
			}
		} else {
			// operation must not have added any new data
			if stats.DataSize != 0 {
				t.Errorf("wrong stats returned in DataSize, want 0, got %d", stats.DataSize)
			}
			if stats.DataBlobs != 0 {
				t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs)
			}
			if stats.TreeSize != 0 {
				t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize)
			}
			if stats.TreeBlobs != 0 {
				t.Errorf("wrong stats returned in TreeBlobs, want 0, got %d", stats.TreeBlobs)
			}
		}
		t.Logf("node subtree %v", node.Subtree)
		for h, n := range repo.saved {
			if n > 1 {
				t.Errorf("iteration %v: blob %v saved more than once (%d times)", i, h, n)
			}
		}
	}
}
// bothZeroOrNeither fails the test if exactly one of exp, act is zero.
func bothZeroOrNeither(tb testing.TB, exp, act uint64) {
	tb.Helper()
	// XOR on "is zero": a mismatch means one value is zero while the other
	// is not, which is the only case treated as a failure.
	if (exp == 0) != (act == 0) {
		rtest.Equals(tb, exp, act)
	}
}
// TestArchiverSaveTree archives sets of targets (files, symlinks, and paths
// that traverse symlinks) via Archiver.saveTree, restores the resulting tree
// and checks it, and compares the collected summary statistics against the
// expected values.
func TestArchiverSaveTree(t *testing.T) {
	// symlink returns a prepare function that creates a symlink from -> to.
	symlink := func(from, to string) func(t testing.TB) {
		return func(t testing.TB) {
			err := os.Symlink(from, to)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
	// The toplevel directory is not counted in the ItemStats
	var tests = []struct {
		src     TestDir
		prepare func(t testing.TB)
		targets []string
		want    TestDir
		stat    Summary
	}{
		{
			src: TestDir{
				"targetfile": TestFile{Content: "foobar"},
			},
			targets: []string{"targetfile"},
			want: TestDir{
				"targetfile": TestFile{Content: "foobar"},
			},
			stat: Summary{
				ItemStats:      ItemStats{1, 6, 32 + 6, 0, 0, 0},
				ProcessedBytes: 6,
				Files:          ChangeStats{1, 0, 0},
				Dirs:           ChangeStats{0, 0, 0},
			},
		},
		{
			src: TestDir{
				"targetfile": TestFile{Content: "foobar"},
			},
			prepare: symlink("targetfile", "filesymlink"),
			targets: []string{"targetfile", "filesymlink"},
			want: TestDir{
				"targetfile":  TestFile{Content: "foobar"},
				"filesymlink": TestSymlink{Target: "targetfile"},
			},
			stat: Summary{
				ItemStats:      ItemStats{1, 6, 32 + 6, 0, 0, 0},
				ProcessedBytes: 6,
				Files:          ChangeStats{1, 0, 0},
				Dirs:           ChangeStats{0, 0, 0},
			},
		},
		{
			src: TestDir{
				"dir": TestDir{
					"subdir": TestDir{
						"subsubdir": TestDir{
							"targetfile": TestFile{Content: "foobar"},
						},
					},
					"otherfile": TestFile{Content: "xxx"},
				},
			},
			prepare: symlink("subdir", filepath.FromSlash("dir/symlink")),
			targets: []string{filepath.FromSlash("dir/symlink")},
			want: TestDir{
				"dir": TestDir{
					"symlink": TestSymlink{Target: "subdir"},
				},
			},
			stat: Summary{
				ItemStats:      ItemStats{0, 0, 0, 1, 0x154, 0x16a},
				ProcessedBytes: 0,
				Files:          ChangeStats{0, 0, 0},
				Dirs:           ChangeStats{1, 0, 0},
			},
		},
		{
			src: TestDir{
				"dir": TestDir{
					"subdir": TestDir{
						"subsubdir": TestDir{
							"targetfile": TestFile{Content: "foobar"},
						},
					},
					"otherfile": TestFile{Content: "xxx"},
				},
			},
			prepare: symlink("subdir", filepath.FromSlash("dir/symlink")),
			targets: []string{filepath.FromSlash("dir/symlink/subsubdir")},
			want: TestDir{
				"dir": TestDir{
					"symlink": TestDir{
						"subsubdir": TestDir{
							"targetfile": TestFile{Content: "foobar"},
						},
					},
				},
			},
			stat: Summary{
				ItemStats:      ItemStats{1, 6, 32 + 6, 3, 0x47f, 0x4c1},
				ProcessedBytes: 6,
				Files:          ChangeStats{1, 0, 0},
				Dirs:           ChangeStats{3, 0, 0},
			},
		},
	}
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			tempdir, repo := prepareTempdirRepoSrc(t, test.src)
			testFS := fs.Track{FS: fs.Local{}}
			arch := New(repo, testFS, Options{})
			arch.summary = &Summary{}
			back := rtest.Chdir(t, tempdir)
			defer back()
			if test.prepare != nil {
				test.prepare(t)
			}
			var treeID restic.ID
			err := repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
				wg, ctx := errgroup.WithContext(ctx)
				arch.runWorkers(ctx, wg, uploader)
				atree, err := newTree(testFS, test.targets)
				if err != nil {
					t.Fatal(err)
				}
				fn, _, err := arch.saveTree(ctx, "/", atree, nil, nil)
				if err != nil {
					t.Fatal(err)
				}
				fnr := fn.take(ctx)
				if fnr.err != nil {
					t.Fatal(fnr.err)
				}
				treeID = *fnr.node.Subtree
				arch.stopWorkers()
				return wg.Wait()
			})
			if err != nil {
				t.Fatal(err)
			}
			want := test.want
			if want == nil {
				want = test.src
			}
			TestEnsureTree(context.TODO(), t, "/", repo, treeID, want)
			// Sizes depend on metadata and platform, so only the
			// zero/non-zero property of most stats is compared.
			stat := arch.summary
			bothZeroOrNeither(t, uint64(test.stat.DataBlobs), uint64(stat.DataBlobs))
			bothZeroOrNeither(t, uint64(test.stat.TreeBlobs), uint64(stat.TreeBlobs))
			bothZeroOrNeither(t, test.stat.DataSize, stat.DataSize)
			bothZeroOrNeither(t, test.stat.DataSizeInRepo, stat.DataSizeInRepo)
			bothZeroOrNeither(t, test.stat.TreeSizeInRepo, stat.TreeSizeInRepo)
			rtest.Equals(t, test.stat.ProcessedBytes, stat.ProcessedBytes)
			rtest.Equals(t, test.stat.Files, stat.Files)
			rtest.Equals(t, test.stat.Dirs, stat.Dirs)
		})
	}
}
func TestArchiverSnapshot(t *testing.T) {
var tests = []struct {
name string
src TestDir
want TestDir
chdir string
targets []string
}{
{
name: "single-file",
src: TestDir{
"foo": TestFile{Content: "foo"},
},
targets: []string{"foo"},
},
{
name: "file-current-dir",
src: TestDir{
"foo": TestFile{Content: "foo"},
},
targets: []string{"./foo"},
},
{
name: "dir",
src: TestDir{
"target": TestDir{
"foo": TestFile{Content: "foo"},
},
},
targets: []string{"target"},
},
{
name: "dir-current-dir",
src: TestDir{
"target": TestDir{
"foo": TestFile{Content: "foo"},
},
},
targets: []string{"./target"},
},
{
name: "content-dir-current-dir",
src: TestDir{
"target": TestDir{
"foo": TestFile{Content: "foo"},
},
},
targets: []string{"./target/."},
},
{
name: "current-dir",
src: TestDir{
"target": TestDir{
"foo": TestFile{Content: "foo"},
},
},
targets: []string{"."},
},
{
name: "subdir",
src: TestDir{
"subdir": TestDir{
"foo": TestFile{Content: "foo"},
"subsubdir": TestDir{
"foo": TestFile{Content: "foo in subsubdir"},
},
},
"other": TestFile{Content: "another file"},
},
targets: []string{"subdir"},
want: TestDir{
"subdir": TestDir{
"foo": TestFile{Content: "foo"},
"subsubdir": TestDir{
"foo": TestFile{Content: "foo in subsubdir"},
},
},
},
},
{
name: "subsubdir",
src: TestDir{
"subdir": TestDir{
"foo": TestFile{Content: "foo"},
"subsubdir": TestDir{
"foo": TestFile{Content: "foo in subsubdir"},
},
},
"other": TestFile{Content: "another file"},
},
targets: []string{"subdir/subsubdir"},
want: TestDir{
"subdir": TestDir{
"subsubdir": TestDir{
"foo": TestFile{Content: "foo in subsubdir"},
},
},
},
},
{
name: "parent-dir",
src: TestDir{
"subdir": TestDir{
"foo": TestFile{Content: "foo"},
},
"other": TestFile{Content: "another file"},
},
chdir: "subdir",
targets: []string{".."},
},
{
name: "parent-parent-dir",
src: TestDir{
"subdir": TestDir{
"foo": TestFile{Content: "foo"},
"subsubdir": TestDir{
"empty": TestFile{Content: ""},
},
},
"other": TestFile{Content: "another file"},
},
chdir: "subdir/subsubdir",
targets: []string{"../.."},
},
{
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | true |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/buffer.go | internal/archiver/buffer.go | package archiver
import "sync"
// buffer is a reusable buffer. After the buffer has been used, Release should
// be called so the underlying slice is put back into the pool.
type buffer struct {
	Data []byte
	pool *bufferPool // pool the buffer belongs to; nil means the buffer is never pooled
}
// Release puts the buffer back into the pool it came from. Buffers without a
// pool, or buffers whose slice has grown beyond the pool's default size, are
// simply dropped.
func (b *buffer) Release() {
	if b.pool == nil {
		return
	}
	if cap(b.Data) <= b.pool.defaultSize {
		b.pool.pool.Put(b)
	}
}
// bufferPool implements a limited set of reusable buffers.
type bufferPool struct {
	pool        sync.Pool
	defaultSize int // size of newly allocated buffers
}
// newBufferPool initializes a new buffer pool. New buffers are created with
// defaultSize; buffers that have grown larger are not put back (see
// buffer.Release).
func newBufferPool(defaultSize int) *bufferPool {
	b := &bufferPool{
		defaultSize: defaultSize,
	}
	b.pool = sync.Pool{New: func() any {
		return &buffer{
			Data: make([]byte, defaultSize),
			pool: b,
		}
	}}
	return b
}
// Get returns a new buffer, either from the pool or newly allocated.
// The type assertion is safe as long as only *buffer values are put into
// the pool (the New func above returns *buffer).
func (pool *bufferPool) Get() *buffer {
	return pool.pool.Get().(*buffer)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/testing_test.go | internal/archiver/testing_test.go | package archiver
import (
"context"
"fmt"
"os"
"path/filepath"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository"
rtest "github.com/restic/restic/internal/test"
)
// MockT passes through all logging functions from T, but catches Fail(),
// Error/f() and Fatal/f(). It is used to test helper functions.
type MockT struct {
	*testing.T
	HasFailed bool // set when any of the failure methods is called
}
// Fail marks the function as having failed but continues execution.
func (t *MockT) Fail() {
	t.T.Log("MockT Fail() called")
	t.HasFailed = true
}
// Fatal logs its arguments and records the failure. Unlike testing.T.Fatal
// it does not stop execution, so the helper under test can be observed
// further.
func (t *MockT) Fatal(args ...interface{}) {
	t.T.Logf("MockT Fatal called with %v", args)
	t.HasFailed = true
}
// Fatalf logs the formatted message and records the failure. Unlike
// testing.T.Fatalf it does not stop execution.
func (t *MockT) Fatalf(msg string, args ...interface{}) {
	t.T.Logf("MockT Fatal called: "+msg, args...)
	t.HasFailed = true
}
// Error logs its arguments and records the failure without stopping
// execution.
func (t *MockT) Error(args ...interface{}) {
	t.T.Logf("MockT Error called with %v", args)
	t.HasFailed = true
}
// Errorf logs the formatted message and records the failure without stopping
// execution.
func (t *MockT) Errorf(msg string, args ...interface{}) {
	t.T.Logf("MockT Error called: "+msg, args...)
	t.HasFailed = true
}
// createFilesAt creates the files and symlinks listed in files below
// targetdir. Parent directories are created as needed; a TestDir value by
// itself only guarantees the path exists via MkdirAll for its children.
func createFilesAt(t testing.TB, targetdir string, files map[string]interface{}) {
	for name, item := range files {
		target := filepath.Join(targetdir, filepath.FromSlash(name))
		err := os.MkdirAll(filepath.Dir(target), 0700)
		if err != nil {
			t.Fatal(err)
		}
		switch it := item.(type) {
		case TestFile:
			err := os.WriteFile(target, []byte(it.Content), 0600)
			if err != nil {
				t.Fatal(err)
			}
		case TestSymlink:
			err := os.Symlink(filepath.FromSlash(it.Target), target)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}
// TestTestCreateFiles verifies that the TestCreateFiles helper materializes
// files, directories and symlinks on disk exactly as described by a TestDir.
func TestTestCreateFiles(t *testing.T) {
	var tests = []struct {
		dir   TestDir
		files map[string]interface{}
	}{
		{
			dir: TestDir{
				"foo": TestFile{Content: "foo"},
				"subdir": TestDir{
					"subfile": TestFile{Content: "bar"},
				},
				"sub": TestDir{
					"subsub": TestDir{
						"link": TestSymlink{Target: filepath.Clean("x/y/z")},
					},
				},
			},
			files: map[string]interface{}{
				"foo":             TestFile{Content: "foo"},
				"subdir":          TestDir{},
				"subdir/subfile":  TestFile{Content: "bar"},
				"sub/subsub/link": TestSymlink{Target: filepath.Clean("x/y/z")},
			},
		},
	}
	for i, test := range tests {
		tempdir := rtest.TempDir(t)
		t.Run("", func(t *testing.T) {
			// each table entry gets its own subdirectory
			tempdir := filepath.Join(tempdir, fmt.Sprintf("test-%d", i))
			err := os.MkdirAll(tempdir, 0700)
			if err != nil {
				t.Fatal(err)
			}
			TestCreateFiles(t, tempdir, test.dir)
			// verify each expected entry by type
			for name, item := range test.files {
				targetPath := filepath.Join(tempdir, filepath.FromSlash(name))
				fi, err := os.Lstat(targetPath)
				if err != nil {
					t.Error(err)
					continue
				}
				switch node := item.(type) {
				case TestFile:
					if !fi.Mode().IsRegular() {
						t.Errorf("is not regular file: %v", name)
						continue
					}
					content, err := os.ReadFile(targetPath)
					if err != nil {
						t.Error(err)
						continue
					}
					if string(content) != node.Content {
						t.Errorf("wrong content for %v: want %q, got %q", name, node.Content, content)
					}
				case TestSymlink:
					if fi.Mode()&os.ModeType != os.ModeSymlink {
						t.Errorf("is not symlink: %v, %o != %o", name, fi.Mode(), os.ModeSymlink)
						continue
					}
					target, err := os.Readlink(targetPath)
					if err != nil {
						t.Error(err)
						continue
					}
					if target != node.Target {
						t.Errorf("wrong target for %v: want %q, got %q", name, node.Target, target)
					}
				case TestDir:
					if !fi.IsDir() {
						t.Errorf("is not directory: %v", name)
					}
				}
			}
		})
	}
}
// TestTestWalkFiles checks that TestWalkFiles visits every entry of a
// TestDir exactly once and passes the matching item value to the callback.
func TestTestWalkFiles(t *testing.T) {
	var tests = []struct {
		dir  TestDir
		want map[string]string // relative path -> fmt.Sprint of the visited item
	}{
		{
			dir: TestDir{
				"foo": TestFile{Content: "foo"},
				"subdir": TestDir{
					"subfile": TestFile{Content: "bar"},
				},
				"x": TestDir{
					"y": TestDir{
						"link": TestSymlink{Target: filepath.FromSlash("../../foo")},
					},
				},
			},
			want: map[string]string{
				"foo":                                "<File>",
				"subdir":                             "<Dir>",
				filepath.FromSlash("subdir/subfile"): "<File>",
				"x":                                  "<Dir>",
				filepath.FromSlash("x/y"):            "<Dir>",
				filepath.FromSlash("x/y/link"):       "<Symlink>",
			},
		},
	}
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			tempdir := rtest.TempDir(t)
			got := make(map[string]string)
			TestCreateFiles(t, tempdir, test.dir)
			TestWalkFiles(t, tempdir, test.dir, func(path string, item interface{}) error {
				// record each visited path relative to the temp dir
				p, err := filepath.Rel(tempdir, path)
				if err != nil {
					return err
				}
				got[p] = fmt.Sprint(item)
				return nil
			})
			if !cmp.Equal(test.want, got) {
				t.Error(cmp.Diff(test.want, got))
			}
		})
	}
}
func TestTestEnsureFiles(t *testing.T) {
var tests = []struct {
expectFailure bool
files map[string]interface{}
want TestDir
}{
{
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
"subdir/subfile": TestFile{Content: "bar"},
"x/y/link": TestSymlink{Target: filepath.Clean("../../foo")},
},
want: TestDir{
"foo": TestFile{Content: "foo"},
"subdir": TestDir{
"subfile": TestFile{Content: "bar"},
},
"x": TestDir{
"y": TestDir{
"link": TestSymlink{Target: filepath.Clean("../../foo")},
},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
},
want: TestDir{
"foo": TestFile{Content: "foo"},
"subdir": TestDir{
"subfile": TestFile{Content: "bar"},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
"subdir/subfile": TestFile{Content: "bar"},
},
want: TestDir{
"foo": TestFile{Content: "foo"},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "xxx"},
},
want: TestDir{
"foo": TestFile{Content: "foo"},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestSymlink{Target: "/xxx"},
},
want: TestDir{
"foo": TestFile{Content: "foo"},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
},
want: TestDir{
"foo": TestSymlink{Target: "/xxx"},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestSymlink{Target: "xxx"},
},
want: TestDir{
"foo": TestSymlink{Target: "/yyy"},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestDir{
"foo": TestFile{Content: "foo"},
},
},
want: TestDir{
"foo": TestFile{Content: "foo"},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
},
want: TestDir{
"foo": TestDir{
"foo": TestFile{Content: "foo"},
},
},
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
tempdir := rtest.TempDir(t)
createFilesAt(t, tempdir, test.files)
subtestT := testing.TB(t)
if test.expectFailure {
subtestT = &MockT{T: t}
}
TestEnsureFiles(subtestT, tempdir, test.want)
if test.expectFailure && !subtestT.(*MockT).HasFailed {
t.Fatal("expected failure of TestEnsureFiles not found")
}
})
}
}
func TestTestEnsureSnapshot(t *testing.T) {
var tests = []struct {
expectFailure bool
files map[string]interface{}
want TestDir
}{
{
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
filepath.FromSlash("subdir/subfile"): TestFile{Content: "bar"},
filepath.FromSlash("x/y/link"): TestSymlink{Target: filepath.FromSlash("../../foo")},
},
want: TestDir{
"target": TestDir{
"foo": TestFile{Content: "foo"},
"subdir": TestDir{
"subfile": TestFile{Content: "bar"},
},
"x": TestDir{
"y": TestDir{
"link": TestSymlink{Target: filepath.FromSlash("../../foo")},
},
},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
},
want: TestDir{
"target": TestDir{
"bar": TestFile{Content: "foo"},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
"bar": TestFile{Content: "bar"},
},
want: TestDir{
"target": TestDir{
"foo": TestFile{Content: "foo"},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
},
want: TestDir{
"target": TestDir{
"foo": TestFile{Content: "foo"},
"bar": TestFile{Content: "bar"},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
},
want: TestDir{
"target": TestDir{
"foo": TestDir{
"foo": TestFile{Content: "foo"},
},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestSymlink{Target: filepath.FromSlash("x/y/z")},
},
want: TestDir{
"target": TestDir{
"foo": TestFile{Content: "foo"},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestSymlink{Target: filepath.FromSlash("x/y/z")},
},
want: TestDir{
"target": TestDir{
"foo": TestSymlink{Target: filepath.FromSlash("x/y/z2")},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
},
want: TestDir{
"target": TestDir{
"foo": TestFile{Content: "xxx"},
},
},
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempdir := rtest.TempDir(t)
targetDir := filepath.Join(tempdir, "target")
err := os.Mkdir(targetDir, 0700)
if err != nil {
t.Fatal(err)
}
createFilesAt(t, targetDir, test.files)
back := rtest.Chdir(t, tempdir)
defer back()
repo := repository.TestRepository(t)
arch := New(repo, fs.Local{}, Options{})
opts := SnapshotOptions{
Time: time.Now(),
Hostname: "localhost",
Tags: []string{"test"},
}
_, id, _, err := arch.Snapshot(ctx, []string{"."}, opts)
if err != nil {
t.Fatal(err)
}
t.Logf("snapshot saved as %v", id.Str())
subtestT := testing.TB(t)
if test.expectFailure {
subtestT = &MockT{T: t}
}
TestEnsureSnapshot(subtestT, repo, id, test.want)
if test.expectFailure && !subtestT.(*MockT).HasFailed {
t.Fatal("expected failure of TestEnsureSnapshot not found")
}
})
}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/buffer_test.go | internal/archiver/buffer_test.go | package archiver
import (
"testing"
)
// TestBufferPoolReuse checks that a released buffer is handed out again by
// the pool. Since sync.Pool may drop entries at any time (e.g. during GC),
// the check is retried and succeeds on the first observed reuse.
func TestBufferPoolReuse(t *testing.T) {
	reused := false
	for attempt := 0; attempt < 100 && !reused; attempt++ {
		pool := newBufferPool(1024)
		// take a buffer, mark it, remember its backing array, and release it
		first := pool.Get()
		first.Data[0] = 0xFF
		firstAddr := &first.Data[0]
		first.Release()
		// a subsequent Get should (usually) hand back the same slice
		second := pool.Get()
		if &second.Data[0] == firstAddr {
			reused = true
		} else {
			second.Release()
		}
	}
	if !reused {
		t.Error("buffer was not reused from pool")
	}
}
// TestBufferPoolLargeBuffers checks that a buffer grown beyond the pool's
// default size is not returned to the pool. Retried to compensate for
// sync.Pool's GC-dependent behavior.
func TestBufferPoolLargeBuffers(t *testing.T) {
	dropped := false
	for attempt := 0; attempt < 100 && !dropped; attempt++ {
		pool := newBufferPool(1024)
		// grow a buffer past the pool's default size, then release it
		grown := pool.Get()
		grown.Data = append(grown.Data, make([]byte, 2048)...)
		grownCap := cap(grown.Data)
		grown.Release()
		// a fresh Get must not hand back the oversized slice
		if cap(pool.Get().Data) != grownCap {
			dropped = true
		}
	}
	if !dropped {
		t.Error("large buffer was incorrectly returned to pool")
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/tree_saver.go | internal/archiver/tree_saver.go | package archiver
import (
"context"
"errors"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
"golang.org/x/sync/errgroup"
)
// treeSaver concurrently saves incoming trees to the repo.
type treeSaver struct {
	uploader restic.BlobSaverAsync // destination for the serialized tree blobs
	errFn    ErrorFunc             // callback that may downgrade an error to a warning (by returning nil)
	ch       chan<- saveTreeJob    // job queue consumed by the worker pool; closed by TriggerShutdown
}
// newTreeSaver returns a new tree saver. A worker pool with treeWorkers
// goroutines is started on wg; the workers stop when ctx is cancelled or
// when TriggerShutdown closes the job channel.
func newTreeSaver(ctx context.Context, wg *errgroup.Group, treeWorkers uint, uploader restic.BlobSaverAsync, errFn ErrorFunc) *treeSaver {
	jobs := make(chan saveTreeJob)
	saver := &treeSaver{
		ch:       jobs,
		uploader: uploader,
		errFn:    errFn,
	}
	// spawn the worker pool
	for worker := uint(0); worker < treeWorkers; worker++ {
		wg.Go(func() error {
			return saver.worker(ctx, jobs)
		})
	}
	return saver
}
// TriggerShutdown closes the job channel, signalling the workers to exit
// once all queued jobs have been processed. Callers must not invoke Save
// afterwards, as sending on the closed channel would panic.
func (s *treeSaver) TriggerShutdown() {
	close(s.ch)
}
// Save enqueues node (a directory) together with its already-submitted
// children for storage and returns a futureNode that yields the result once
// the tree has been written.
func (s *treeSaver) Save(ctx context.Context, snPath string, target string, node *data.Node, nodes []futureNode, complete fileCompleteFunc) futureNode {
	fn, resultCh := newFutureNode()
	select {
	case s.ch <- saveTreeJob{
		snPath:   snPath,
		target:   target,
		node:     node,
		nodes:    nodes,
		ch:       resultCh,
		complete: complete,
	}:
	case <-ctx.Done():
		// the saver is shutting down; close the result channel so waiters
		// observe an empty result instead of blocking forever
		debug.Log("not saving tree, context is cancelled")
		close(resultCh)
	}
	return fn
}
// saveTreeJob is a single queued request to serialize and store one tree.
type saveTreeJob struct {
	snPath   string                  // path of the tree within the snapshot
	target   string                  // path in the local filesystem, used for error reporting
	node     *data.Node              // node describing the directory itself
	nodes    []futureNode            // children; resolved before the tree is serialized
	ch       chan<- futureNodeResult // receives the finished node; closed afterwards
	complete fileCompleteFunc        // optional callback invoked with the resulting stats
}
// save resolves all child futureNodes of the job, serializes them into a
// tree blob, uploads it, and returns the directory node with its Subtree ID
// set, together with the accumulated stats. Errors may be suppressed via
// s.errFn, except for context cancellation which always aborts.
func (s *treeSaver) save(ctx context.Context, job *saveTreeJob) (*data.Node, ItemStats, error) {
	var stats ItemStats
	node := job.node
	nodes := job.nodes
	// allow GC of nodes array once the loop is finished
	job.nodes = nil
	builder := data.NewTreeJSONBuilder()
	var lastNode *data.Node
	for i, fn := range nodes {
		// fn is a copy, so clear the original value explicitly
		nodes[i] = futureNode{}
		fnr := fn.take(ctx)
		// return the error if it wasn't ignored
		if fnr.err != nil {
			debug.Log("err for %v: %v", fnr.snPath, fnr.err)
			if fnr.err == context.Canceled {
				return nil, stats, fnr.err
			}
			fnr.err = s.errFn(fnr.target, fnr.err)
			if fnr.err == nil {
				// ignore error
				continue
			}
			return nil, stats, fnr.err
		}
		// when the error is ignored, the node could not be saved, so ignore it
		if fnr.node == nil {
			debug.Log("%v excluded: %v", fnr.snPath, fnr.target)
			continue
		}
		err := builder.AddNode(fnr.node)
		if err != nil && errors.Is(err, data.ErrTreeNotOrdered) && lastNode != nil && fnr.node.Equals(*lastNode) {
			debug.Log("insert %v failed: %v", fnr.node.Name, err)
			// ignore error if an _identical_ node already exists, but nevertheless issue a warning
			_ = s.errFn(fnr.target, err)
			err = nil
		}
		if err != nil {
			debug.Log("insert %v failed: %v", fnr.node.Name, err)
			return nil, stats, err
		}
		lastNode = fnr.node
	}
	buf, err := builder.Finalize()
	if err != nil {
		return nil, stats, err
	}
	var (
		known      bool
		length     int
		sizeInRepo int
		id         restic.ID
	)
	// The callback runs on the uploader's goroutine; the buffered channel
	// hands the results back (and establishes happens-before for the
	// captured variables) once the upload has finished.
	ch := make(chan struct{}, 1)
	s.uploader.SaveBlobAsync(ctx, restic.TreeBlob, buf, restic.ID{}, false, func(newID restic.ID, cbKnown bool, cbSizeInRepo int, cbErr error) {
		known = cbKnown
		length = len(buf)
		sizeInRepo = cbSizeInRepo
		id = newID
		err = cbErr
		ch <- struct{}{}
	})
	select {
	case <-ch:
		if err != nil {
			return nil, stats, err
		}
		// only count the tree blob if it was previously unknown
		if !known {
			stats.TreeBlobs++
			stats.TreeSize += uint64(length)
			stats.TreeSizeInRepo += uint64(sizeInRepo)
		}
		node.Subtree = &id
		return node, stats, nil
	case <-ctx.Done():
		return nil, stats, ctx.Err()
	}
}
// worker processes jobs until the queue is closed or ctx is cancelled. An
// error from save aborts the worker (after closing the job's result channel)
// and is returned to the errgroup.
func (s *treeSaver) worker(ctx context.Context, jobs <-chan saveTreeJob) error {
	for {
		select {
		case <-ctx.Done():
			return nil
		case job, ok := <-jobs:
			if !ok {
				// queue was closed via TriggerShutdown
				return nil
			}
			node, stats, err := s.save(ctx, &job)
			if err != nil {
				debug.Log("error saving tree blob: %v", err)
				close(job.ch)
				return err
			}
			if job.complete != nil {
				job.complete(node, stats)
			}
			// deliver the result and signal completion
			job.ch <- futureNodeResult{
				snPath: job.snPath,
				target: job.target,
				node:   node,
				stats:  stats,
			}
			close(job.ch)
		}
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/scanner.go | internal/archiver/scanner.go | package archiver
import (
"context"
"sort"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/fs"
)
// Scanner traverses the targets and calls the function Result with cumulated
// stats concerning the files and folders found. Select is used to decide which
// items should be included. Error is called when an error occurs.
type Scanner struct {
	FS           fs.FS            // filesystem implementation used for traversal
	SelectByName SelectByNameFunc // cheap path-only filter, checked before Lstat
	Select       SelectFunc       // filter that may additionally inspect file metadata
	Error        ErrorFunc        // error callback; returning nil suppresses the error
	Result       func(item string, s ScanStats) // progress callback, invoked with cumulative stats
}
// NewScanner initializes a new Scanner for the given filesystem with
// permissive defaults: everything is selected, all errors are propagated,
// and results are discarded.
func NewScanner(filesystem fs.FS) *Scanner {
	s := &Scanner{FS: filesystem}
	s.SelectByName = func(_ string) bool { return true }
	s.Select = func(_ string, _ *fs.ExtendedFileInfo, _ fs.FS) bool { return true }
	s.Error = func(_ string, err error) error { return err }
	s.Result = func(_ string, _ ScanStats) {}
	return s
}
// ScanStats collect statistics.
type ScanStats struct {
	Files, Dirs, Others uint   // number of items encountered, by kind
	Bytes               uint64 // accumulated size of all regular files
}
// scanTree walks the archiver tree representation, scanning the filesystem
// at every leaf and accumulating the statistics. Cancellation stops the
// traversal early without reporting an error.
func (s *Scanner) scanTree(ctx context.Context, stats ScanStats, tree tree) (ScanStats, error) {
	// leaves correspond to actual filesystem paths
	if tree.Leaf() {
		abs, err := s.FS.Abs(tree.Path)
		if err != nil {
			return ScanStats{}, err
		}
		if stats, err = s.scan(ctx, stats, abs); err != nil {
			return ScanStats{}, err
		}
		return stats, nil
	}
	// recurse into the child nodes in a deterministic order
	for _, name := range tree.NodeNames() {
		sub, err := s.scanTree(ctx, stats, tree.Nodes[name])
		if err != nil {
			return ScanStats{}, err
		}
		stats = sub
		if ctx.Err() != nil {
			return stats, nil
		}
	}
	return stats, nil
}
// Scan traverses the targets and reports the cumulated statistics. Result is
// called for each item as it is found; a final call with an empty item name
// carries the complete totals.
func (s *Scanner) Scan(ctx context.Context, targets []string) error {
	debug.Log("start scan for %v", targets)
	resolved, err := resolveRelativeTargets(s.FS, targets)
	if err != nil {
		return err
	}
	debug.Log("clean targets %v", resolved)
	// build the same tree representation the archiver uses, so that the
	// scan statistics match what will actually be backed up
	root, err := newTree(s.FS, resolved)
	if err != nil {
		return err
	}
	total, err := s.scanTree(ctx, ScanStats{}, *root)
	if err != nil {
		return err
	}
	s.Result("", total)
	debug.Log("result: %+v", total)
	return nil
}
// scan recursively accumulates statistics for target and everything below
// it, returning the updated cumulative stats. Errors are routed through
// s.Error, which may suppress them by returning nil.
func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (ScanStats, error) {
	// stop quietly once the context is cancelled
	if ctx.Err() != nil {
		return stats, nil
	}
	// exclude files by path before running stat to reduce number of lstat calls
	if !s.SelectByName(target) {
		return stats, nil
	}
	// get file information
	fi, err := s.FS.Lstat(target)
	if err != nil {
		return stats, s.Error(target, err)
	}
	// run remaining select functions that require file information
	if !s.Select(target, fi, s.FS) {
		return stats, nil
	}
	switch {
	case fi.Mode.IsRegular():
		stats.Files++
		stats.Bytes += uint64(fi.Size)
	case fi.Mode.IsDir():
		names, err := fs.Readdirnames(s.FS, target, fs.O_NOFOLLOW)
		if err != nil {
			return stats, s.Error(target, err)
		}
		// sort for a deterministic traversal order
		sort.Strings(names)
		for _, name := range names {
			stats, err = s.scan(ctx, stats, s.FS.Join(target, name))
			if err != nil {
				return stats, err
			}
		}
		stats.Dirs++
	default:
		stats.Others++
	}
	// report progress with the statistics accumulated so far
	s.Result(target, stats)
	return stats, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/tree_saver_test.go | internal/archiver/tree_saver_test.go | package archiver
import (
"context"
"fmt"
"runtime"
"sync"
"testing"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
"golang.org/x/sync/errgroup"
)
// mockSaver is a test double for the blob uploader. It counts how often each
// blob payload was saved; the map is guarded by mutex because callbacks run
// on separate goroutines.
type mockSaver struct {
	saved map[string]int
	mutex sync.Mutex
}
// SaveBlobAsync pretends to store buf and asynchronously invokes cb with the
// hash of buf as the new ID. The blob is always reported as previously
// unknown and error-free.
func (m *mockSaver) SaveBlobAsync(_ context.Context, _ restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool, cb func(newID restic.ID, known bool, sizeInRepo int, err error)) {
	// Fake async operation
	go func() {
		m.mutex.Lock()
		m.saved[string(buf)]++
		m.mutex.Unlock()
		cb(restic.Hash(buf), false, len(buf), nil)
	}()
}
// setupTreeSaver constructs a treeSaver backed by a mockSaver with one
// worker per CPU. It returns the context, its cancel function, the saver,
// and a shutdown function that drains the worker pool and reports its error.
func setupTreeSaver() (context.Context, context.CancelFunc, *treeSaver, func() error) {
	ctx, cancel := context.WithCancel(context.Background())
	wg, ctx := errgroup.WithContext(ctx)
	// the error callback simply propagates every error
	saver := newTreeSaver(ctx, wg, uint(runtime.NumCPU()), &mockSaver{saved: make(map[string]int)}, func(snPath string, err error) error {
		return err
	})
	shutdown := func() error {
		saver.TriggerShutdown()
		return wg.Wait()
	}
	return ctx, cancel, saver, shutdown
}
// TestTreeSaver submits a number of independent trees and waits for all of
// them to complete, expecting a clean shutdown.
func TestTreeSaver(t *testing.T) {
	ctx, cancel, saver, shutdown := setupTreeSaver()
	defer cancel()
	futures := make([]futureNode, 0, 20)
	for i := 0; i < 20; i++ {
		name := fmt.Sprintf("file-%d", i)
		node := &data.Node{Name: name}
		futures = append(futures, saver.Save(ctx, join("/", name), name, node, nil, nil))
	}
	// wait for every tree to be stored
	for _, fut := range futures {
		fut.take(ctx)
	}
	if err := shutdown(); err != nil {
		t.Fatal(err)
	}
}
// TestTreeSaverError checks that an error reported for a child node is
// propagated out of the worker pool and returned by shutdown.
func TestTreeSaverError(t *testing.T) {
	var tests = []struct {
		trees  int // number of trees to queue
		failAt int // 1-based index of the tree whose child fails
	}{
		{1, 1},
		{20, 2},
		{20, 5},
		{20, 15},
		{200, 150},
	}
	errTest := errors.New("test error")
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			ctx, cancel, b, shutdown := setupTreeSaver()
			defer cancel()
			var results []futureNode
			for i := 0; i < test.trees; i++ {
				node := &data.Node{
					Name: fmt.Sprintf("file-%d", i),
				}
				nodes := []futureNode{
					newFutureNodeWithResult(futureNodeResult{node: &data.Node{
						Name: fmt.Sprintf("child-%d", i),
					}}),
				}
				// inject a failing child into exactly one of the trees
				if (i + 1) == test.failAt {
					nodes = append(nodes, newFutureNodeWithResult(futureNodeResult{
						err: errTest,
					}))
				}
				fb := b.Save(ctx, join("/", node.Name), node.Name, node, nodes, nil)
				results = append(results, fb)
			}
			for _, tree := range results {
				tree.take(ctx)
			}
			// the injected error must surface unchanged from the worker pool
			err := shutdown()
			if err == nil {
				t.Errorf("expected error not found")
			}
			if err != errTest {
				t.Fatalf("unexpected error found: %v", err)
			}
		})
	}
}
// TestTreeSaverDuplicates checks the handling of duplicate child names: two
// truly identical children are tolerated, while two children that share a
// name but differ otherwise must fail the save.
func TestTreeSaverDuplicates(t *testing.T) {
	for _, identicalNodes := range []bool{true, false} {
		t.Run("", func(t *testing.T) {
			ctx, cancel, b, shutdown := setupTreeSaver()
			defer cancel()
			node := &data.Node{
				Name: "file",
			}
			nodes := []futureNode{
				newFutureNodeWithResult(futureNodeResult{node: &data.Node{
					Name: "child",
				}}),
			}
			if identicalNodes {
				// same name, identical node -> the duplicate is ignored
				nodes = append(nodes, newFutureNodeWithResult(futureNodeResult{node: &data.Node{
					Name: "child",
				}}))
			} else {
				// same name but different size -> must be rejected
				nodes = append(nodes, newFutureNodeWithResult(futureNodeResult{node: &data.Node{
					Name: "child",
					Size: 42,
				}}))
			}
			fb := b.Save(ctx, join("/", node.Name), node.Name, node, nodes, nil)
			fb.take(ctx)
			err := shutdown()
			if identicalNodes {
				test.Assert(t, err == nil, "unexpected error found: %v", err)
			} else {
				test.Assert(t, err != nil, "expected error not found")
			}
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/testing.go | internal/archiver/testing.go | package archiver
import (
"context"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strings"
"testing"
"time"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
)
// TestSnapshot creates a new snapshot of path in repo. If parent is non-nil,
// the referenced snapshot is loaded and used as the parent snapshot. Any
// failure aborts the test.
func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *restic.ID) *data.Snapshot {
	opts := SnapshotOptions{
		Time:     time.Now(),
		Hostname: "localhost",
		Tags:     []string{"test"},
	}
	if parent != nil {
		parentSn, err := data.LoadSnapshot(context.TODO(), repo, *parent)
		if err != nil {
			t.Fatal(err)
		}
		opts.ParentSnapshot = parentSn
	}
	sn, _, _, err := New(repo, fs.Local{}, Options{}).Snapshot(context.TODO(), []string{path}, opts)
	if err != nil {
		t.Fatal(err)
	}
	return sn
}
// TestDir describes a directory structure to create for a test.
type TestDir map[string]interface{}
// String returns the fixed marker "<Dir>"; the markers are compared against
// fmt.Sprint output in tests that walk directory trees.
func (d TestDir) String() string {
	return "<Dir>"
}
// TestFile describes a file created for a test.
type TestFile struct {
	Content string // the file's complete content
}
// String returns the fixed marker "<File>".
func (f TestFile) String() string {
	return "<File>"
}
// TestSymlink describes a symlink created for a test.
type TestSymlink struct {
	Target string // link target, in slash notation
}
// String returns the fixed marker "<Symlink>".
func (s TestSymlink) String() string {
	return "<Symlink>"
}
// TestHardlink describes a hardlink created for a test.
type TestHardlink struct {
	Target string // existing entry to link to, relative to the test root
}
// String returns the fixed marker "<Hardlink>".
func (s TestHardlink) String() string {
	return "<Hardlink>"
}
// TestCreateFiles creates the directory structure described by dir below
// target, which must already exist. Entries are processed in sorted name
// order so that a hardlink's target is created before the link itself. Any
// creation error (e.g. a symlink failure on a platform without symlink
// support) aborts the test.
func TestCreateFiles(t testing.TB, target string, dir TestDir) {
	t.Helper()
	names := make([]string, 0, len(dir))
	for name := range dir {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		dest := filepath.Join(target, name)
		switch entry := dir[name].(type) {
		case TestFile:
			if err := os.WriteFile(dest, []byte(entry.Content), 0644); err != nil {
				t.Fatal(err)
			}
		case TestSymlink:
			if err := os.Symlink(filepath.FromSlash(entry.Target), dest); err != nil {
				t.Fatal(err)
			}
		case TestHardlink:
			if err := os.Link(filepath.Join(target, filepath.FromSlash(entry.Target)), dest); err != nil {
				t.Fatal(err)
			}
		case TestDir:
			if err := os.Mkdir(dest, 0755); err != nil {
				t.Fatal(err)
			}
			TestCreateFiles(t, dest, entry)
		}
	}
}
// TestWalkFunc is used by TestWalkFiles to traverse the dir. When an error is
// returned, traversal stops and the surrounding test is marked as failed.
type TestWalkFunc func(path string, item interface{}) error
// TestWalkFiles invokes fn for every entry of dir and, recursively, for the
// contents of every nested TestDir. The path passed to fn is built from
// target plus the entry name.
func TestWalkFiles(t testing.TB, target string, dir TestDir, fn TestWalkFunc) {
	t.Helper()
	for name, item := range dir {
		itemPath := filepath.Join(target, name)
		if err := fn(itemPath, item); err != nil {
			t.Fatalf("TestWalkFunc returned error for %v: %v", itemPath, err)
			return
		}
		if subdir, ok := item.(TestDir); ok {
			TestWalkFiles(t, itemPath, subdir, fn)
		}
	}
}
// fixpath strips the Windows extended-length path prefix (`\\?\`) from item.
// On all other operating systems it returns item unchanged.
func fixpath(item string) string {
	const extPrefix = `\\?` // the full prefix is `\\?\`; the cut below includes the trailing separator
	if runtime.GOOS == "windows" && strings.HasPrefix(item, extPrefix) {
		return item[len(extPrefix)+1:]
	}
	return item
}
// TestEnsureFiles tests if the directory structure at target is the same as
// described in dir. Both mismatching entries and additional, undescribed
// items are reported as test failures.
func TestEnsureFiles(t testing.TB, target string, dir TestDir) {
	t.Helper()
	pathsChecked := make(map[string]struct{})
	// first, test that all items are there
	TestWalkFiles(t, target, dir, func(path string, item interface{}) error {
		fi, err := os.Lstat(path)
		if err != nil {
			return err
		}
		switch node := item.(type) {
		case TestDir:
			if !fi.IsDir() {
				t.Errorf("is not a directory: %v", path)
			}
			return nil
		case TestFile:
			if !fi.Mode().IsRegular() {
				t.Errorf("is not a regular file: %v", path)
				return nil
			}
			content, err := os.ReadFile(path)
			if err != nil {
				return err
			}
			if string(content) != node.Content {
				t.Errorf("wrong content for %v, want %q, got %q", path, node.Content, content)
			}
		case TestSymlink:
			if fi.Mode()&os.ModeType != os.ModeSymlink {
				t.Errorf("is not a symlink: %v", path)
				return nil
			}
			target, err := os.Readlink(path)
			if err != nil {
				return err
			}
			if target != node.Target {
				t.Errorf("wrong target for %v, want %v, got %v", path, node.Target, target)
			}
		}
		// remember the path and all of its parents as expected entries
		pathsChecked[path] = struct{}{}
		for parent := filepath.Dir(path); parent != target; parent = filepath.Dir(parent) {
			pathsChecked[parent] = struct{}{}
		}
		return nil
	})
	// then, traverse the directory again, looking for additional files
	err := filepath.Walk(target, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		path = fixpath(path)
		if path == target {
			return nil
		}
		_, ok := pathsChecked[path]
		if !ok {
			t.Errorf("additional item found: %v %v", path, fi.Mode())
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// TestEnsureFileContent checks that the file content stored in the repo for
// node matches file. filename is only used in error messages. The `return`
// statements after t.Fatalf matter because MockT's Fatalf implementation
// does not stop execution.
func TestEnsureFileContent(ctx context.Context, t testing.TB, repo restic.BlobLoader, filename string, node *data.Node, file TestFile) {
	// file.Content is the expectation, node.Size the actual value from the
	// repo; the message arguments were previously swapped.
	if int(node.Size) != len(file.Content) {
		t.Fatalf("%v: wrong node size: want %d, got %d", filename, len(file.Content), node.Size)
		return
	}
	// reassemble the file from its content blobs
	content := make([]byte, len(file.Content))
	pos := 0
	for _, id := range node.Content {
		part, err := repo.LoadBlob(ctx, restic.DataBlob, id, content[pos:])
		if err != nil {
			t.Fatalf("error loading blob %v: %v", id.Str(), err)
			return
		}
		copy(content[pos:pos+len(part)], part)
		pos += len(part)
	}
	content = content[:pos]
	if string(content) != file.Content {
		t.Fatalf("%v: wrong content returned, want %q, got %q", filename, file.Content, content)
	}
}
// TestEnsureTree checks that the tree with the given ID in the repo matches
// dir, recursing into subtrees.
//
// NOTE(review): an earlier comment claimed symlinks are ignored on Windows,
// but no platform-specific skip is visible here — symlink entries are
// compared on all platforms.
func TestEnsureTree(ctx context.Context, t testing.TB, prefix string, repo restic.BlobLoader, treeID restic.ID, dir TestDir) {
	t.Helper()
	tree, err := data.LoadTree(ctx, repo, treeID)
	if err != nil {
		t.Fatal(err)
		return
	}
	var nodeNames []string
	for _, node := range tree.Nodes {
		nodeNames = append(nodeNames, node.Name)
	}
	debug.Log("%v (%v) %v", prefix, treeID.Str(), nodeNames)
	checked := make(map[string]struct{})
	for _, node := range tree.Nodes {
		nodePrefix := path.Join(prefix, node.Name)
		entry, ok := dir[node.Name]
		if !ok {
			t.Errorf("unexpected tree node %q found, want: %#v", node.Name, dir)
			return
		}
		checked[node.Name] = struct{}{}
		switch e := entry.(type) {
		case TestDir:
			if node.Type != data.NodeTypeDir {
				t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "dir")
				return
			}
			if node.Subtree == nil {
				t.Errorf("tree node %v has nil subtree", nodePrefix)
				return
			}
			// recurse into the subtree
			TestEnsureTree(ctx, t, path.Join(prefix, node.Name), repo, *node.Subtree, e)
		case TestFile:
			if node.Type != data.NodeTypeFile {
				t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "file")
			}
			TestEnsureFileContent(ctx, t, repo, nodePrefix, node, e)
		case TestSymlink:
			if node.Type != data.NodeTypeSymlink {
				t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "symlink")
			}
			if e.Target != node.LinkTarget {
				t.Errorf("symlink %v has wrong target, want %q, got %q", nodePrefix, e.Target, node.LinkTarget)
			}
		}
	}
	// finally, check that no expected entry is missing from the tree
	for name := range dir {
		_, ok := checked[name]
		if !ok {
			t.Errorf("tree %v: expected node %q not found, has: %v", prefix, name, nodeNames)
		}
	}
}
// TestEnsureSnapshot tests if the snapshot in the repo has exactly the same
// structure as dir.
func TestEnsureSnapshot(t testing.TB, repo restic.Repository, snapshotID restic.ID, dir TestDir) {
	t.Helper()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	snapshot, err := data.LoadSnapshot(ctx, repo, snapshotID)
	switch {
	case err != nil:
		t.Fatal(err)
	case snapshot.Tree == nil:
		t.Fatal("snapshot has nil tree ID")
	default:
		TestEnsureTree(ctx, t, "/", repo, *snapshot.Tree, dir)
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/doc.go | internal/archiver/doc.go | // Package archiver contains the code which reads files, splits them into
// chunks and saves the data to the repository.
package archiver
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/archiver/tree_test.go | internal/archiver/tree_test.go | package archiver
import (
"fmt"
"path/filepath"
"runtime"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/restic/restic/internal/fs"
rtest "github.com/restic/restic/internal/test"
)
// debug.Log requires Tree.String.
var _ fmt.Stringer = tree{}
func TestPathComponents(t *testing.T) {
var tests = []struct {
p string
c []string
virtual bool
rel bool
win bool
}{
{
p: "/foo/bar/baz",
c: []string{"foo", "bar", "baz"},
},
{
p: "/foo/bar/baz",
c: []string{"foo", "bar", "baz"},
rel: true,
},
{
p: "foo/bar/baz",
c: []string{"foo", "bar", "baz"},
},
{
p: "foo/bar/baz",
c: []string{"foo", "bar", "baz"},
rel: true,
},
{
p: "../foo/bar/baz",
c: []string{"foo", "bar", "baz"},
},
{
p: "../foo/bar/baz",
c: []string{"..", "foo", "bar", "baz"},
rel: true,
},
{
p: "c:/foo/bar/baz",
c: []string{"c", "foo", "bar", "baz"},
virtual: true,
rel: true,
win: true,
},
{
p: "c:/foo/../bar/baz",
c: []string{"c", "bar", "baz"},
virtual: true,
win: true,
},
{
p: `c:\foo\..\bar\baz`,
c: []string{"c", "bar", "baz"},
virtual: true,
win: true,
},
{
p: "c:/foo/../bar/baz",
c: []string{"c", "bar", "baz"},
virtual: true,
rel: true,
win: true,
},
{
p: `c:\foo\..\bar\baz`,
c: []string{"c", "bar", "baz"},
virtual: true,
rel: true,
win: true,
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
if test.win && runtime.GOOS != "windows" {
t.Skip("skip test on unix")
}
c, v := pathComponents(fs.Local{}, filepath.FromSlash(test.p), test.rel)
if !cmp.Equal(test.c, c) {
t.Error(test.c, c)
}
if v != test.virtual {
t.Errorf("unexpected virtual prefix count returned, want %v, got %v", test.virtual, v)
}
})
}
}
func TestRootDirectory(t *testing.T) {
var tests = []struct {
target string
root string
unix bool
win bool
}{
{target: ".", root: "."},
{target: "foo/bar/baz", root: "."},
{target: "../foo/bar/baz", root: ".."},
{target: "..", root: ".."},
{target: "../../..", root: "../../.."},
{target: "/home/foo", root: "/", unix: true},
{target: "c:/home/foo", root: "c:/", win: true},
{target: `c:\home\foo`, root: `c:\`, win: true},
{target: "//host/share/foo", root: "//host/share/", win: true},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
if test.unix && runtime.GOOS == "windows" {
t.Skip("skip test on windows")
}
if test.win && runtime.GOOS != "windows" {
t.Skip("skip test on unix")
}
root := rootDirectory(fs.Local{}, filepath.FromSlash(test.target))
want := filepath.FromSlash(test.root)
if root != want {
t.Fatalf("wrong root directory, want %v, got %v", want, root)
}
})
}
}
// TestTree checks that newTree builds the expected restore tree from a list of
// target paths. Each case lists the targets handed to newTree and the tree
// that must result; src describes files that are created on disk beforehand
// for cases where tree construction needs to stat real paths.
func TestTree(t *testing.T) {
	var tests = []struct {
		targets   []string // paths passed to newTree
		src       TestDir  // directory structure created in a tempdir first
		want      tree     // expected resulting tree
		unix      bool     // run only on unix
		win       bool     // run only on windows
		mustError bool     // newTree must return an error
	}{
		{
			targets: []string{"foo"},
			want: tree{Nodes: map[string]tree{
				"foo": {Path: "foo", Root: "."},
			}},
		},
		{
			targets: []string{"foo", "bar", "baz"},
			want: tree{Nodes: map[string]tree{
				"foo": {Path: "foo", Root: "."},
				"bar": {Path: "bar", Root: "."},
				"baz": {Path: "baz", Root: "."},
			}},
		},
		{
			targets: []string{"foo/user1", "foo/user2", "foo/other"},
			want: tree{Nodes: map[string]tree{
				"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
					"user1": {Path: filepath.FromSlash("foo/user1")},
					"user2": {Path: filepath.FromSlash("foo/user2")},
					"other": {Path: filepath.FromSlash("foo/other")},
				}},
			}},
		},
		{
			targets: []string{"foo/work/user1", "foo/work/user2"},
			want: tree{Nodes: map[string]tree{
				"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
					"work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{
						"user1": {Path: filepath.FromSlash("foo/work/user1")},
						"user2": {Path: filepath.FromSlash("foo/work/user2")},
					}},
				}},
			}},
		},
		{
			targets: []string{"foo/user1", "bar/user1", "foo/other"},
			want: tree{Nodes: map[string]tree{
				"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
					"user1": {Path: filepath.FromSlash("foo/user1")},
					"other": {Path: filepath.FromSlash("foo/other")},
				}},
				"bar": {Root: ".", FileInfoPath: "bar", Nodes: map[string]tree{
					"user1": {Path: filepath.FromSlash("bar/user1")},
				}},
			}},
		},
		{
			targets: []string{"../work"},
			want: tree{Nodes: map[string]tree{
				"work": {Root: "..", Path: filepath.FromSlash("../work")},
			}},
		},
		{
			targets: []string{"../work/other"},
			want: tree{Nodes: map[string]tree{
				"work": {Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]tree{
					"other": {Path: filepath.FromSlash("../work/other")},
				}},
			}},
		},
		{
			targets: []string{"foo/user1", "../work/other", "foo/user2"},
			want: tree{Nodes: map[string]tree{
				"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
					"user1": {Path: filepath.FromSlash("foo/user1")},
					"user2": {Path: filepath.FromSlash("foo/user2")},
				}},
				"work": {Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]tree{
					"other": {Path: filepath.FromSlash("../work/other")},
				}},
			}},
		},
		{
			// two different directories both named "foo": the second one is
			// disambiguated as "foo-1"
			targets: []string{"foo/user1", "../foo/other", "foo/user2"},
			want: tree{Nodes: map[string]tree{
				"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
					"user1": {Path: filepath.FromSlash("foo/user1")},
					"user2": {Path: filepath.FromSlash("foo/user2")},
				}},
				"foo-1": {Root: "..", FileInfoPath: filepath.FromSlash("../foo"), Nodes: map[string]tree{
					"other": {Path: filepath.FromSlash("../foo/other")},
				}},
			}},
		},
		{
			src: TestDir{
				"foo": TestDir{
					"file": TestFile{Content: "file content"},
					"work": TestFile{Content: "work file content"},
				},
			},
			targets: []string{"foo", "foo/work"},
			want: tree{Nodes: map[string]tree{
				"foo": {
					Root:         ".",
					FileInfoPath: "foo",
					Nodes: map[string]tree{
						"file": {Path: filepath.FromSlash("foo/file")},
						"work": {Path: filepath.FromSlash("foo/work")},
					},
				},
			}},
		},
		{
			src: TestDir{
				"foo": TestDir{
					"file": TestFile{Content: "file content"},
					"work": TestDir{
						"other": TestFile{Content: "other file content"},
					},
				},
			},
			targets: []string{"foo/work", "foo"},
			want: tree{Nodes: map[string]tree{
				"foo": {
					Root:         ".",
					FileInfoPath: "foo",
					Nodes: map[string]tree{
						"file": {Path: filepath.FromSlash("foo/file")},
						"work": {Path: filepath.FromSlash("foo/work")},
					},
				},
			}},
		},
		{
			src: TestDir{
				"foo": TestDir{
					"work": TestDir{
						"user1": TestFile{Content: "file content"},
						"user2": TestFile{Content: "other file content"},
					},
				},
			},
			targets: []string{"foo/work", "foo/work/user2"},
			want: tree{Nodes: map[string]tree{
				"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
					"work": {
						FileInfoPath: filepath.FromSlash("foo/work"),
						Nodes: map[string]tree{
							"user1": {Path: filepath.FromSlash("foo/work/user1")},
							"user2": {Path: filepath.FromSlash("foo/work/user2")},
						},
					},
				}},
			}},
		},
		{
			src: TestDir{
				"foo": TestDir{
					"work": TestDir{
						"user1": TestFile{Content: "file content"},
						"user2": TestFile{Content: "other file content"},
					},
				},
			},
			targets: []string{"foo/work/user2", "foo/work"},
			want: tree{Nodes: map[string]tree{
				"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
					"work": {FileInfoPath: filepath.FromSlash("foo/work"),
						Nodes: map[string]tree{
							"user1": {Path: filepath.FromSlash("foo/work/user1")},
							"user2": {Path: filepath.FromSlash("foo/work/user2")},
						},
					},
				}},
			}},
		},
		{
			src: TestDir{
				"foo": TestDir{
					"other": TestFile{Content: "file content"},
					"work": TestDir{
						"user2": TestDir{
							"data": TestDir{
								"secret": TestFile{Content: "secret file content"},
							},
						},
						"user3": TestDir{
							"important.txt": TestFile{Content: "important work"},
						},
					},
				},
			},
			targets: []string{"foo/work/user2/data/secret", "foo"},
			want: tree{Nodes: map[string]tree{
				"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
					"other": {Path: filepath.FromSlash("foo/other")},
					"work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{
						"user2": {FileInfoPath: filepath.FromSlash("foo/work/user2"), Nodes: map[string]tree{
							"data": {FileInfoPath: filepath.FromSlash("foo/work/user2/data"), Nodes: map[string]tree{
								"secret": {
									Path: filepath.FromSlash("foo/work/user2/data/secret"),
								},
							}},
						}},
						"user3": {Path: filepath.FromSlash("foo/work/user3")},
					}},
				}},
			}},
		},
		{
			src: TestDir{
				"mnt": TestDir{
					"driveA": TestDir{
						"work": TestDir{
							"driveB": TestDir{
								"secret": TestFile{Content: "secret file content"},
							},
							"test1": TestDir{
								"important.txt": TestFile{Content: "important work"},
							},
						},
						"test2": TestDir{
							"important.txt": TestFile{Content: "other important work"},
						},
					},
				},
			},
			unix:    true,
			targets: []string{"mnt/driveA", "mnt/driveA/work/driveB"},
			want: tree{Nodes: map[string]tree{
				"mnt": {Root: ".", FileInfoPath: filepath.FromSlash("mnt"), Nodes: map[string]tree{
					"driveA": {FileInfoPath: filepath.FromSlash("mnt/driveA"), Nodes: map[string]tree{
						"work": {FileInfoPath: filepath.FromSlash("mnt/driveA/work"), Nodes: map[string]tree{
							"driveB": {
								Path: filepath.FromSlash("mnt/driveA/work/driveB"),
							},
							"test1": {Path: filepath.FromSlash("mnt/driveA/work/test1")},
						}},
						"test2": {Path: filepath.FromSlash("mnt/driveA/test2")},
					}},
				}},
			}},
		},
		{
			// duplicate targets collapse into a single node
			targets: []string{"foo/work/user", "foo/work/user"},
			want: tree{Nodes: map[string]tree{
				"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
					"work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{
						"user": {Path: filepath.FromSlash("foo/work/user")},
					}},
				}},
			}},
		},
		{
			targets: []string{"./foo/work/user", "foo/work/user"},
			want: tree{Nodes: map[string]tree{
				"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
					"work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{
						"user": {Path: filepath.FromSlash("foo/work/user")},
					}},
				}},
			}},
		},
		{
			win:     true,
			targets: []string{`c:\users\foobar\temp`},
			want: tree{Nodes: map[string]tree{
				"c": {Root: `c:\`, FileInfoPath: `c:\`, Nodes: map[string]tree{
					"users": {FileInfoPath: `c:\users`, Nodes: map[string]tree{
						"foobar": {FileInfoPath: `c:\users\foobar`, Nodes: map[string]tree{
							"temp": {Path: `c:\users\foobar\temp`},
						}},
					}},
				}},
			}},
		},
		{
			// targets that resolve to the current or a parent directory are rejected
			targets:   []string{"."},
			mustError: true,
		},
		{
			targets:   []string{".."},
			mustError: true,
		},
		{
			targets:   []string{"../.."},
			mustError: true,
		},
	}

	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			if test.unix && runtime.GOOS == "windows" {
				t.Skip("skip test on windows")
			}

			if test.win && runtime.GOOS != "windows" {
				t.Skip("skip test on unix")
			}

			// run each case inside a fresh temporary working directory
			tempdir := rtest.TempDir(t)
			TestCreateFiles(t, tempdir, test.src)

			back := rtest.Chdir(t, tempdir)
			defer back()

			tree, err := newTree(fs.Local{}, test.targets)
			if test.mustError {
				if err == nil {
					t.Fatal("expected error, got nil")
				}
				t.Logf("found expected error: %v", err)
				return
			}

			if err != nil {
				t.Fatal(err)
			}

			if !cmp.Equal(&test.want, tree) {
				t.Error(cmp.Diff(&test.want, tree))
			}
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/repository_internal_test.go | internal/repository/repository_internal_test.go | package repository
import (
"bytes"
"context"
"encoding/json"
"io"
"math/rand"
"sort"
"strings"
"testing"
"github.com/cenkalti/backoff/v4"
"github.com/google/go-cmp/cmp"
"github.com/klauspost/compress/zstd"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository/index"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
// mapcache is a minimal in-memory stand-in for the pack file cache used by
// sortCachedPacksFirst: it simply records which handles count as cached.
type mapcache map[backend.Handle]bool

// Has reports whether the handle is present in the cache.
func (c mapcache) Has(h backend.Handle) bool { return c[h] }
// TestSortCachedPacksFirst checks that sortCachedPacksFirst moves blobs whose
// pack files are cached to the front while keeping the relative order of all
// other blobs, i.e. it behaves like a stable sort on "is cached".
func TestSortCachedPacksFirst(t *testing.T) {
	var (
		blobs, sorted [100]restic.PackedBlob

		cache = make(mapcache)
		r     = rand.New(rand.NewSource(1261)) // fixed seed for deterministic input
	)

	for i := 0; i < len(blobs); i++ {
		var id restic.ID
		r.Read(id[:])
		blobs[i] = restic.PackedBlob{PackID: id}

		// mark every third pack as cached
		if i%3 == 0 {
			h := backend.Handle{Name: id.String(), Type: backend.PackFile}
			cache[h] = true
		}
	}

	// compute the expected result with a stable sort on the cached property
	copy(sorted[:], blobs[:])
	sort.SliceStable(sorted[:], func(i, j int) bool {
		hi := backend.Handle{Type: backend.PackFile, Name: sorted[i].PackID.String()}
		hj := backend.Handle{Type: backend.PackFile, Name: sorted[j].PackID.String()}
		return cache.Has(hi) && !cache.Has(hj)
	})

	sortCachedPacksFirst(cache, blobs[:])
	rtest.Equals(t, sorted, blobs)
}
// BenchmarkSortCachedPacksFirst measures sortCachedPacksFirst on a blob list
// sized like the blob list of a large file, with every third pack cached.
func BenchmarkSortCachedPacksFirst(b *testing.B) {
	const nblobs = 512 // Corresponds to a file of ca. 2GB.

	var (
		blobs [nblobs]restic.PackedBlob

		cache = make(mapcache)
		r     = rand.New(rand.NewSource(1261)) // fixed seed for deterministic input
	)

	for i := 0; i < nblobs; i++ {
		var id restic.ID
		r.Read(id[:])
		blobs[i] = restic.PackedBlob{PackID: id}

		// mark every third pack as cached
		if i%3 == 0 {
			h := backend.Handle{Name: id.String(), Type: backend.PackFile}
			cache[h] = true
		}
	}

	var cpy [nblobs]restic.PackedBlob
	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		// sort a fresh copy each iteration so the input is always unsorted
		copy(cpy[:], blobs[:])
		sortCachedPacksFirst(cache, cpy[:])
	}
}
// BenchmarkLoadIndex runs benchmarkLoadIndex against all supported repository
// versions.
func BenchmarkLoadIndex(b *testing.B) {
	BenchmarkAllVersions(b, benchmarkLoadIndex)
}
// benchmarkLoadIndex measures loading and decoding an index file that
// references 5000 packs with one blob each.
func benchmarkLoadIndex(b *testing.B, version uint) {
	TestUseLowSecurityKDFParameters(b)

	repo, _, be := TestRepositoryWithVersion(b, version)
	idx := index.NewIndex()

	// build an index with 5000 packs of one blob each
	for i := 0; i < 5000; i++ {
		idx.StorePack(restic.NewRandomID(), []restic.Blob{
			{
				BlobHandle: restic.NewRandomBlobHandle(),
				Length:     1234,
				Offset:     1235,
			},
		})
	}

	idx.Finalize()
	id, err := idx.SaveIndex(context.TODO(), &internalRepository{repo})
	rtest.OK(b, err)

	b.Logf("index saved as %v", id.Str())
	fi, err := be.Stat(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: id.String()})
	rtest.OK(b, err)
	b.Logf("filesize is %v", fi.Size)

	b.ResetTimer()

	// benchmark only the load + decode step
	for i := 0; i < b.N; i++ {
		_, err := loadIndex(context.TODO(), repo, id)
		rtest.OK(b, err)
	}
}
// loadIndex fetches the index file with the given ID from the repository and
// decodes it into an in-memory index.
func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (*index.Index, error) {
	buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)
	if err != nil {
		return nil, err
	}

	idx, err := index.DecodeIndex(buf, id)
	if err != nil {
		return nil, err
	}
	return idx, nil
}
// buildPackfileWithoutHeader returns a manually built pack file without a
// header. Blob contents are derived from fixed seeds and sealed with a
// deterministic nonce, so the returned pack file is fully reproducible. The
// returned blob list records type, ID, offset and (ciphertext) length of each
// blob within the pack file.
func buildPackfileWithoutHeader(blobSizes []int, key *crypto.Key, compress bool) (blobs []restic.Blob, packfile []byte) {
	opts := []zstd.EOption{
		// Set the compression level configured.
		zstd.WithEncoderLevel(zstd.SpeedDefault),
		// Disable CRC, we have enough checks in place, makes the
		// compressed data four bytes shorter.
		zstd.WithEncoderCRC(false),
		// Set a window of 512kbyte, so we have good lookbehind for usual
		// blob sizes.
		zstd.WithWindowSize(512 * 1024),
	}
	enc, err := zstd.NewWriter(nil, opts...)
	if err != nil {
		panic(err)
	}

	var offset uint
	for i, size := range blobSizes {
		plaintext := rtest.Random(800+i, size)
		id := restic.Hash(plaintext)
		uncompressedLength := uint(0)
		if compress {
			// record the plaintext length before replacing it with the
			// compressed representation
			uncompressedLength = uint(len(plaintext))
			plaintext = enc.EncodeAll(plaintext, nil)
		}

		// we use a deterministic nonce here so the whole process is
		// deterministic, last byte is the blob index
		var nonce = []byte{
			0x15, 0x98, 0xc0, 0xf7, 0xb9, 0x65, 0x97, 0x74,
			0x12, 0xdc, 0xd3, 0x62, 0xa9, 0x6e, 0x20, byte(i),
		}

		before := len(packfile)
		packfile = append(packfile, nonce...)
		packfile = key.Seal(packfile, nonce, plaintext, nil)
		after := len(packfile)

		// the stored length covers nonce plus sealed ciphertext
		ciphertextLength := after - before

		blobs = append(blobs, restic.Blob{
			BlobHandle: restic.BlobHandle{
				Type: restic.DataBlob,
				ID:   id,
			},
			Length:             uint(ciphertextLength),
			UncompressedLength: uncompressedLength,
			Offset:             offset,
		})

		offset = uint(len(packfile))
	}

	return blobs, packfile
}
// TestStreamPack runs testStreamPack for all supported repository versions.
func TestStreamPack(t *testing.T) {
	TestAllVersions(t, testStreamPack)
}
// testStreamPack exercises streamPack for the given repository version. It
// builds a synthetic pack file without a header, streams various blob subsets
// out of it while counting backend load calls, and finally checks that
// invalid blob lists are rejected with the expected errors.
func testStreamPack(t *testing.T, version uint) {
	dec, err := zstd.NewReader(nil)
	if err != nil {
		// report the actual error; previously this panicked with the (nil)
		// reader instead of the error value
		panic(err)
	}
	defer dec.Close()

	// always use the same key for deterministic output
	key := testKey(t)

	blobSizes := []int{
		5522811,
		10,
		5231,
		18812,
		123123,
		13522811,
		12301,
		892242,
		28616,
		13351,
		252287,
		188883,
		3522811,
		18883,
	}

	var compress bool
	switch version {
	case 1:
		compress = false
	case 2:
		compress = true
	default:
		t.Fatal("test does not support repository version", version)
	}

	packfileBlobs, packfile := buildPackfileWithoutHeader(blobSizes, &key, compress)

	loadCalls := 0
	shortFirstLoad := false

	// loadBytes returns the requested slice of the pack file, clamped to the
	// file size. If shortFirstLoad is set, it returns only half the requested
	// length once, simulating a truncated read.
	loadBytes := func(length int, offset int64) []byte {
		data := packfile

		if offset > int64(len(data)) {
			offset = 0
			length = 0
		}
		data = data[offset:]

		if length > len(data) {
			length = len(data)
		}

		if shortFirstLoad {
			length /= 2
			shortFirstLoad = false
		}

		return data[:length]
	}

	// load serves pack data via loadBytes, counts calls and retries once on a
	// non-permanent error, mirroring the retrying behavior of real backends.
	load := func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
		// note: loadBytes consumes and resets shortFirstLoad itself, so no
		// additional truncation is needed here (a second, unreachable
		// truncation branch was removed)
		data := loadBytes(length, offset)
		loadCalls++

		err := fn(bytes.NewReader(data))
		if err == nil {
			return nil
		}
		var permanent *backoff.PermanentError
		if errors.As(err, &permanent) {
			return err
		}

		// retry loading once
		return fn(bytes.NewReader(loadBytes(length, offset)))
	}

	// first, test regular usage
	t.Run("regular", func(t *testing.T) {
		tests := []struct {
			blobs          []restic.Blob
			calls          int
			shortFirstLoad bool
		}{
			{packfileBlobs[1:2], 1, false},
			{packfileBlobs[2:5], 1, false},
			{packfileBlobs[2:8], 1, false},
			{[]restic.Blob{
				packfileBlobs[0],
				packfileBlobs[4],
				packfileBlobs[2],
			}, 1, false},
			{[]restic.Blob{
				packfileBlobs[0],
				packfileBlobs[len(packfileBlobs)-1],
			}, 2, false},
			{packfileBlobs[:], 1, true},
		}

		for _, test := range tests {
			t.Run("", func(t *testing.T) {
				ctx, cancel := context.WithCancel(context.Background())
				defer cancel()

				gotBlobs := make(map[restic.ID]int)

				// verify that each returned blob matches its ID
				handleBlob := func(blob restic.BlobHandle, buf []byte, err error) error {
					gotBlobs[blob.ID]++

					id := restic.Hash(buf)
					if !id.Equal(blob.ID) {
						t.Fatalf("wrong id %v for blob %s returned", id, blob.ID)
					}

					return err
				}

				wantBlobs := make(map[restic.ID]int)
				for _, blob := range test.blobs {
					wantBlobs[blob.ID] = 1
				}

				loadCalls = 0
				shortFirstLoad = test.shortFirstLoad
				err := streamPack(ctx, load, nil, dec, &key, restic.ID{}, test.blobs, handleBlob)
				if err != nil {
					t.Fatal(err)
				}

				if !cmp.Equal(wantBlobs, gotBlobs) {
					t.Fatal(cmp.Diff(wantBlobs, gotBlobs))
				}
				rtest.Equals(t, test.calls, loadCalls)
			})
		}
	})
	shortFirstLoad = false

	// next, test invalid uses, which should return an error
	t.Run("invalid", func(t *testing.T) {
		tests := []struct {
			blobs []restic.Blob
			err   string
		}{
			{
				// pass one blob several times
				blobs: []restic.Blob{
					packfileBlobs[3],
					packfileBlobs[8],
					packfileBlobs[3],
					packfileBlobs[4],
				},
				err: "overlapping blobs in pack",
			},
			{
				// pass something that's not a valid blob in the current pack file
				blobs: []restic.Blob{
					{
						Offset: 123,
						Length: 20000,
					},
				},
				err: "ciphertext verification failed",
			},
			{
				// pass a blob that's too small
				blobs: []restic.Blob{
					{
						Offset: 123,
						Length: 10,
					},
				},
				err: "invalid blob length",
			},
		}

		for _, test := range tests {
			t.Run("", func(t *testing.T) {
				ctx, cancel := context.WithCancel(context.Background())
				defer cancel()

				handleBlob := func(blob restic.BlobHandle, buf []byte, err error) error {
					return err
				}

				err := streamPack(ctx, load, nil, dec, &key, restic.ID{}, test.blobs, handleBlob)
				if err == nil {
					t.Fatalf("wanted error %v, got nil", test.err)
				}

				if !strings.Contains(err.Error(), test.err) {
					t.Fatalf("wrong error returned, it should contain %q but was %q", test.err, err)
				}
			})
		}
	})
}
// TestBlobVerification checks that verifyCiphertext detects corruption that
// was introduced at different stages of creating a blob: in the plaintext, in
// the compressed data and in the sealed ciphertext.
func TestBlobVerification(t *testing.T) {
	repo := TestRepository(t)

	type DamageType string
	const (
		damageData       DamageType = "data"
		damageCompressed DamageType = "compressed"
		damageCiphertext DamageType = "ciphertext"
	)

	for _, test := range []struct {
		damage DamageType
		msg    string // expected error substring; empty means no error
	}{
		{"", ""},
		{damageData, "hash mismatch"},
		{damageCompressed, "decompression failed"},
		{damageCiphertext, "ciphertext verification failed"},
	} {
		plaintext := rtest.Random(800, 1234)
		id := restic.Hash(plaintext)

		// flip a bit in the plaintext after hashing, so the content no longer
		// matches the ID
		if test.damage == damageData {
			plaintext[42] ^= 0x42
		}

		uncompressedLength := uint(len(plaintext))
		plaintext = repo.getZstdEncoder().EncodeAll(plaintext, nil)

		// truncate the compressed data
		if test.damage == damageCompressed {
			plaintext = plaintext[:len(plaintext)-8]
		}

		nonce := crypto.NewRandomNonce()
		ciphertext := append([]byte{}, nonce...)
		ciphertext = repo.Key().Seal(ciphertext, nonce, plaintext, nil)

		// flip a bit in the sealed ciphertext
		if test.damage == damageCiphertext {
			ciphertext[42] ^= 0x42
		}

		err := repo.verifyCiphertext(ciphertext, int(uncompressedLength), id)
		if test.msg == "" {
			rtest.Assert(t, err == nil, "expected no error, got %v", err)
		} else {
			rtest.Assert(t, strings.Contains(err.Error(), test.msg), "expected error to contain %q, got %q", test.msg, err)
		}
	}
}
// TestUnpackedVerification checks that verifyUnpacked detects corruption that
// was introduced at different stages of creating an unpacked file: in the
// plaintext, in the compressed data and in the sealed ciphertext.
func TestUnpackedVerification(t *testing.T) {
	repo := TestRepository(t)

	type DamageType string
	const (
		damageData       DamageType = "data"
		damageCompressed DamageType = "compressed"
		damageCiphertext DamageType = "ciphertext"
	)

	for _, test := range []struct {
		damage DamageType
		msg    string // expected error substring; empty means no error
	}{
		{"", ""},
		{damageData, "data mismatch"},
		{damageCompressed, "decompression failed"},
		{damageCiphertext, "ciphertext verification failed"},
	} {
		plaintext := rtest.Random(800, 1234)
		// keep a copy of the original data for the final comparison
		orig := append([]byte{}, plaintext...)

		// flip a bit so the data no longer matches the original
		if test.damage == damageData {
			plaintext[42] ^= 0x42
		}

		// leading byte 2 appears to be the compression marker of the
		// unpacked file format -- see the repository encoding code
		compressed := []byte{2}
		compressed = repo.getZstdEncoder().EncodeAll(plaintext, compressed)

		// truncate the compressed data
		if test.damage == damageCompressed {
			compressed = compressed[:len(compressed)-8]
		}

		nonce := crypto.NewRandomNonce()
		ciphertext := append([]byte{}, nonce...)
		ciphertext = repo.Key().Seal(ciphertext, nonce, compressed, nil)

		// flip a bit in the sealed ciphertext
		if test.damage == damageCiphertext {
			ciphertext[42] ^= 0x42
		}

		err := repo.verifyUnpacked(ciphertext, restic.IndexFile, orig)
		if test.msg == "" {
			rtest.Assert(t, err == nil, "expected no error, got %v", err)
		} else {
			rtest.Assert(t, strings.Contains(err.Error(), test.msg), "expected error to contain %q, got %q", test.msg, err)
		}
	}
}
// testKey returns a fixed crypto key so that tests depending on it produce
// deterministic output.
func testKey(t *testing.T) crypto.Key {
	const jsonKey = `{"mac":{"k":"eQenuI8adktfzZMuC8rwdA==","r":"k8cfAly2qQSky48CQK7SBA=="},"encrypt":"MKO9gZnRiQFl8mDUurSDa9NMjiu9MUifUrODTHS05wo="}`

	var key crypto.Key
	if err := json.Unmarshal([]byte(jsonKey), &key); err != nil {
		t.Fatal(err)
	}
	return key
}
// TestStreamPackFallback checks that streamPack falls back to loading
// individual blobs via the loadBlob callback when the pack file either cannot
// be loaded at all or returns corrupted data.
func TestStreamPackFallback(t *testing.T) {
	dec, err := zstd.NewReader(nil)
	if err != nil {
		// report the actual error; previously this panicked with the (nil)
		// reader instead of the error value
		panic(err)
	}
	defer dec.Close()

	test := func(t *testing.T, failLoad bool) {
		key := testKey(t)
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		plaintext := rtest.Random(800, 42)
		blobID := restic.Hash(plaintext)
		blobs := []restic.Blob{
			{
				Length: uint(crypto.CiphertextLength(len(plaintext))),
				Offset: 0,
				BlobHandle: restic.BlobHandle{
					ID:   blobID,
					Type: restic.DataBlob,
				},
			},
		}

		var loadPack backendLoadFn
		if failLoad {
			// pack download fails outright
			loadPack = func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
				return errors.New("load error")
			}
		} else {
			loadPack = func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
				// just return an empty array to provoke an error
				data := make([]byte, length)
				return fn(bytes.NewReader(data))
			}
		}

		// fallback loader that only knows the one blob
		loadBlob := func(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) ([]byte, error) {
			if id == blobID {
				return plaintext, nil
			}
			return nil, errors.New("unknown blob")
		}

		blobOK := false
		handleBlob := func(blob restic.BlobHandle, buf []byte, err error) error {
			rtest.OK(t, err)
			rtest.Equals(t, blobID, blob.ID)
			rtest.Equals(t, plaintext, buf)
			blobOK = true
			return err
		}

		err := streamPack(ctx, loadPack, loadBlob, dec, &key, restic.ID{}, blobs, handleBlob)
		rtest.OK(t, err)
		rtest.Assert(t, blobOK, "blob failed to load")
	}

	t.Run("corrupted blob", func(t *testing.T) {
		test(t, false)
	})

	// test fallback for failed pack loading
	t.Run("failed load", func(t *testing.T) {
		test(t, true)
	})
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/raw_test.go | internal/repository/raw_test.go | package repository_test
import (
"bytes"
"context"
"io"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/cache"
"github.com/restic/restic/internal/backend/mem"
"github.com/restic/restic/internal/backend/mock"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
const KiB = 1 << 10
const MiB = 1 << 20
// TestLoadRaw stores several random pack files in an in-memory backend and
// checks that LoadRaw returns their contents unmodified.
func TestLoadRaw(t *testing.T) {
	be := mem.New()
	repo, err := repository.New(be, repository.Options{})
	rtest.OK(t, err)

	for i := 0; i < 5; i++ {
		data := rtest.Random(23+i, 500*KiB)

		id := restic.Hash(data)
		h := backend.Handle{Name: id.String(), Type: backend.PackFile}
		rtest.OK(t, be.Save(context.TODO(), h, backend.NewByteReader(data, be.Hasher())))

		buf, err := repo.LoadRaw(context.TODO(), backend.PackFile, id)
		rtest.OK(t, err)

		// check length first, then content
		switch {
		case len(buf) != len(data):
			t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf))
		case !bytes.Equal(buf, data):
			t.Errorf("wrong data returned")
		}
	}
}
// TestLoadRawBroken checks the error handling of LoadRaw for corrupted data:
// the damaged bytes must still be returned together with ErrInvalidData, and
// a second download attempt must be made when the first read returns broken
// data.
func TestLoadRawBroken(t *testing.T) {
	b := mock.NewBackend()
	repo, err := repository.New(b, repository.Options{})
	rtest.OK(t, err)

	data := rtest.Random(23, 10*KiB)
	id := restic.Hash(data)
	// damage buffer
	data[0] ^= 0xff

	b.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
		return io.NopCloser(bytes.NewReader(data)), nil
	}

	// must detect but still return corrupt data
	buf, err := repo.LoadRaw(context.TODO(), backend.PackFile, id)
	rtest.Assert(t, bytes.Equal(buf, data), "wrong data returned")
	rtest.Assert(t, errors.Is(err, restic.ErrInvalidData), "missing expected ErrInvalidData error, got %v", err)

	// cause the first access to fail, but repair the data for the second access
	data[0] ^= 0xff
	loadCtr := 0
	b.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
		// each read toggles the first byte: the first read returns broken
		// data, the second read returns the original data again
		data[0] ^= 0xff
		loadCtr++
		return io.NopCloser(bytes.NewReader(data)), nil
	}

	// must retry load of corrupted data
	buf, err = repo.LoadRaw(context.TODO(), backend.PackFile, id)
	rtest.OK(t, err)
	rtest.Assert(t, bytes.Equal(buf, data), "wrong data returned")
	rtest.Equals(t, 2, loadCtr, "missing retry on broken data")
}
// TestLoadRawBrokenWithCache checks that LoadRaw retries a corrupted download
// also when a backend cache is in use.
func TestLoadRawBrokenWithCache(t *testing.T) {
	b := mock.NewBackend()
	c := cache.TestNewCache(t)
	repo, err := repository.New(b, repository.Options{})
	rtest.OK(t, err)
	repo.UseCache(c, t.Logf)

	data := rtest.Random(23, 10*KiB)
	id := restic.Hash(data)

	loadCtr := 0
	// cause the first access to fail, but repair the data for the second access
	b.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
		// each read toggles the first byte: the first read returns broken
		// data, the second read returns valid data again
		data[0] ^= 0xff
		loadCtr++
		return io.NopCloser(bytes.NewReader(data)), nil
	}

	// must retry load of corrupted data
	buf, err := repo.LoadRaw(context.TODO(), backend.SnapshotFile, id)
	rtest.OK(t, err)
	rtest.Assert(t, bytes.Equal(buf, data), "wrong data returned")
	rtest.Equals(t, 2, loadCtr, "missing retry on broken data")
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/repair_index.go | internal/repository/repair_index.go | package repository
import (
"context"
"github.com/restic/restic/internal/repository/index"
"github.com/restic/restic/internal/repository/pack"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
)
// RepairIndexOptions collects the options for RepairIndex.
type RepairIndexOptions struct {
	// ReadAllPacks forces rebuilding the index from scratch by reading every
	// pack file instead of reusing the existing index where possible.
	ReadAllPacks bool
}
// RepairIndex rebuilds the repository index. With opts.ReadAllPacks set, the
// index is rebuilt from scratch by reading every pack file. Otherwise the
// existing index is loaded and only pack files that are missing from the
// index, have an unexpected size, or no longer exist are reindexed. Obsolete
// and invalid index files are removed afterwards.
func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, printer progress.Printer) error {
	var obsoleteIndexes restic.IDs
	packSizeFromList := make(map[restic.ID]int64)
	packSizeFromIndex := make(map[restic.ID]int64)
	removePacks := restic.NewIDSet()

	if opts.ReadAllPacks {
		// get list of old index files but start with empty index
		err := repo.List(ctx, restic.IndexFile, func(id restic.ID, _ int64) error {
			obsoleteIndexes = append(obsoleteIndexes, id)
			return nil
		})
		if err != nil {
			return err
		}
		repo.clearIndex()
	} else {
		printer.P("loading indexes...\n")
		// keep valid index files, mark broken ones for removal
		err := repo.loadIndexWithCallback(ctx, nil, func(id restic.ID, _ *index.Index, err error) error {
			if err != nil {
				printer.E("removing invalid index %v: %v\n", id, err)
				obsoleteIndexes = append(obsoleteIndexes, id)
				return nil
			}
			return nil
		})
		if err != nil {
			return err
		}

		packSizeFromIndex, err = pack.Size(ctx, repo, false)
		if err != nil {
			return err
		}
	}
	oldIndexes := repo.idx.IDs()

	printer.P("getting pack files to read...\n")
	// compare the pack files in the backend with what the index references
	err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error {
		size, ok := packSizeFromIndex[id]
		if !ok || size != packSize {
			// Pack was not referenced in index or size does not match
			packSizeFromList[id] = packSize
			removePacks.Insert(id)
		}
		if !ok {
			printer.E("adding pack file to index %v\n", id)
		} else if size != packSize {
			printer.E("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size)
		}
		// entries still left in packSizeFromIndex afterwards are indexed
		// packs that do not exist in the backend
		delete(packSizeFromIndex, id)
		return nil
	})
	if err != nil {
		return err
	}
	for id := range packSizeFromIndex {
		// forget pack files that are referenced in the index but do not exist
		// when rebuilding the index
		removePacks.Insert(id)
		printer.E("removing not found pack file %v\n", id)
	}

	if len(packSizeFromList) > 0 {
		printer.P("reading pack files\n")
		bar := printer.NewCounter("packs")
		bar.SetMax(uint64(len(packSizeFromList)))
		invalidFiles, err := repo.createIndexFromPacks(ctx, packSizeFromList, bar)
		bar.Done()
		if err != nil {
			return err
		}

		for _, id := range invalidFiles {
			printer.V("skipped incomplete pack file: %v\n", id)
		}
	}

	err = rewriteIndexFiles(ctx, repo, removePacks, oldIndexes, obsoleteIndexes, printer)
	if err != nil {
		return err
	}

	// drop outdated in-memory index
	repo.clearIndex()
	return nil
}
// rewriteIndexFiles rewrites the index so that it no longer references the
// packs in removePacks, replacing the index files in oldIndexes and
// additionally deleting the index files in extraObsolete. Progress is
// reported via printer.
func rewriteIndexFiles(ctx context.Context, repo *Repository, removePacks restic.IDSet, oldIndexes restic.IDSet, extraObsolete restic.IDs, printer progress.Printer) error {
	printer.P("rebuilding index\n")

	bar := printer.NewCounter("indexes processed")
	return repo.idx.Rewrite(ctx, &internalRepository{repo}, removePacks, oldIndexes, extraObsolete, index.MasterIndexRewriteOpts{
		SaveProgress: bar,
		DeleteProgress: func() *progress.Counter {
			return printer.NewCounter("old indexes deleted")
		},
		DeleteReport: func(id restic.ID, err error) {
			if err != nil {
				printer.VV("failed to remove index %v: %v\n", id.String(), err)
			} else {
				printer.VV("removed index %v\n", id.String())
			}
		},
	})
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/upgrade_repo_test.go | internal/repository/upgrade_repo_test.go | package repository
import (
"context"
"os"
"path/filepath"
"sync"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
rtest "github.com/restic/restic/internal/test"
)
// TestUpgradeRepoV2 checks that a version 1 repository can be upgraded
// successfully.
func TestUpgradeRepoV2(t *testing.T) {
	repo, _, _ := TestRepositoryWithVersion(t, 1)
	if v := repo.Config().Version; v != 1 {
		t.Fatal("test repo has wrong version")
	}

	rtest.OK(t, UpgradeRepo(context.Background(), repo))
}
// failBackend wraps a backend and lets a configurable number of config file
// saves succeed before making all further config file saves fail. Saves of
// other file types always pass through unchanged.
type failBackend struct {
	backend.Backend

	mu sync.Mutex // protects ConfigFileSavesUntilError
	// ConfigFileSavesUntilError is the number of config file saves that are
	// still allowed to succeed.
	ConfigFileSavesUntilError uint
}
// Save passes all non-config files through to the wrapped backend. Config
// file saves succeed until the configured budget is exhausted, after which
// they fail with an induced error.
func (be *failBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
	if h.Type != backend.ConfigFile {
		return be.Backend.Save(ctx, h, rd)
	}

	// consume one save token under the lock, then save without holding it
	be.mu.Lock()
	remaining := be.ConfigFileSavesUntilError
	if remaining > 0 {
		be.ConfigFileSavesUntilError--
	}
	be.mu.Unlock()

	if remaining == 0 {
		return errors.New("failure induced for testing")
	}
	return be.Backend.Save(ctx, h, rd)
}
// TestUpgradeRepoV2Failure checks that a failed repository upgrade returns a
// detailed *upgradeRepoV2Error and leaves a backup copy of the old config
// behind, which the test cleans up at the end.
func TestUpgradeRepoV2Failure(t *testing.T) {
	be := TestBackend(t)

	// wrap backend so that it fails upgrading the config after the initial write
	be = &failBackend{
		ConfigFileSavesUntilError: 1,
		Backend:                   be,
	}

	repo, _ := TestRepositoryWithBackend(t, be, 1, Options{})
	if repo.Config().Version != 1 {
		t.Fatal("test repo has wrong version")
	}

	err := UpgradeRepo(context.Background(), repo)
	if err == nil {
		t.Fatal("expected error returned from Apply(), got nil")
	}

	// use errors.As instead of a bare type assertion so an unexpected error
	// type fails the test instead of panicking
	var upgradeErr *upgradeRepoV2Error
	if !errors.As(err, &upgradeErr) {
		t.Fatalf("expected error of type *upgradeRepoV2Error, got %v", err)
	}

	if upgradeErr.UploadNewConfigError == nil {
		t.Fatal("expected upload error, got nil")
	}

	if upgradeErr.ReuploadOldConfigError == nil {
		t.Fatal("expected reupload error, got nil")
	}

	if upgradeErr.BackupFilePath == "" {
		t.Fatal("no backup file path found")
	}

	// remove the backup file and its (now empty) directory
	rtest.OK(t, os.Remove(upgradeErr.BackupFilePath))
	rtest.OK(t, os.Remove(filepath.Dir(upgradeErr.BackupFilePath)))
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/packer_manager.go | internal/repository/packer_manager.go | package repository
import (
"bufio"
"context"
"crypto/rand"
"crypto/sha256"
"io"
"math/big"
"os"
"sync"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository/hashing"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository/pack"
)
// packer holds a pack.Packer together with the temporary file the pack data
// is written to and the buffered writer on top of that file.
type packer struct {
	*pack.Packer

	tmpfile *os.File
	bufWr   *bufio.Writer
}
// packerManager keeps a list of open packs and creates new ones on demand.
type packerManager struct {
	tpe     restic.BlobType
	key     *crypto.Key
	queueFn func(ctx context.Context, t restic.BlobType, p *packer) error

	pm       sync.Mutex // protects packers
	packers  []*packer
	packSize uint
}

// defaultPackerCount is the default number of concurrently open packs.
const defaultPackerCount = 2
// newPackerManager returns a new packer manager which writes temporary files
// to a temporary directory. Finished packs are handed to queueFn.
func newPackerManager(key *crypto.Key, tpe restic.BlobType, packSize uint, packerCount int, queueFn func(ctx context.Context, t restic.BlobType, p *packer) error) *packerManager {
	pm := &packerManager{
		key:      key,
		tpe:      tpe,
		packSize: packSize,
		queueFn:  queueFn,
		packers:  make([]*packer, packerCount),
	}
	return pm
}
// Flush merges the remaining partially filled packers where possible and
// hands each resulting pack to queueFn for upload.
func (r *packerManager) Flush(ctx context.Context) error {
	r.pm.Lock()
	defer r.pm.Unlock()

	pendingPackers, err := r.mergePackers()
	if err != nil {
		return err
	}

	for _, packer := range pendingPackers {
		debug.Log("manually flushing pending pack")
		err := r.queueFn(ctx, r.tpe, packer)
		if err != nil {
			return err
		}
	}
	return nil
}
// mergePackers merges small pack files before those are uploaded by Flush(). The main
// purpose of this method is to reduce information leaks if a small file is backed up
// and the blobs end up in separate pack files. If the file only consists of two blobs
// this would leak the size of the individual blobs.
// The method removes every packer from r.packers and returns the merged set.
func (r *packerManager) mergePackers() ([]*packer, error) {
	pendingPackers := []*packer{}
	// p accumulates the pack that further packs are merged into
	var p *packer
	for i, packer := range r.packers {
		if packer == nil {
			// slot currently has no open pack
			continue
		}
		r.packers[i] = nil
		if p == nil {
			p = packer
		} else if p.Size()+packer.Size() < r.packSize {
			// merge if the result stays below the target pack size
			err := packer.bufWr.Flush()
			if err != nil {
				return nil, err
			}
			// rewind the temp file so its contents can be copied into p
			_, err = packer.tmpfile.Seek(0, io.SeekStart)
			if err != nil {
				return nil, err
			}
			err = p.Merge(packer.Packer, packer.tmpfile)
			if err != nil {
				return nil, err
			}
		} else {
			// merged pack would get too large: emit the accumulated pack and
			// continue accumulating with the current one
			pendingPackers = append(pendingPackers, p)
			p = packer
		}
	}
	if p != nil {
		pendingPackers = append(pendingPackers, p)
	}
	return pendingPackers, nil
}
// SaveBlob adds the already encrypted blob to a pack file. Full packs are
// handed to queueFn while the manager lock is held. It returns the number of
// bytes written, including the pack header overhead once the pack is queued.
func (r *packerManager) SaveBlob(ctx context.Context, t restic.BlobType, id restic.ID, ciphertext []byte, uncompressedLength int) (int, error) {
	r.pm.Lock()
	defer r.pm.Unlock()

	packer, err := r.pickPacker(len(ciphertext))
	if err != nil {
		return 0, err
	}

	// save ciphertext
	// Add only appends bytes in memory to avoid being a scaling bottleneck
	size, err := packer.Add(t, id, ciphertext, uncompressedLength)
	if err != nil {
		return 0, err
	}

	// if the pack and header is not full enough, put back to the list
	if packer.Size() < r.packSize && !packer.HeaderFull() {
		debug.Log("pack is not full enough (%d bytes)", packer.Size())
		return size, nil
	}
	// forget full packer
	r.forgetPacker(packer)

	// call while holding lock to prevent findPacker from creating new packers if the uploaders are busy
	// else write the pack to the backend
	err = r.queueFn(ctx, t, packer)
	if err != nil {
		return 0, err
	}
	return size + packer.HeaderOverhead(), nil
}
// randomInt returns a cryptographically random integer in [0, max).
func randomInt(max int) (int, error) {
	n, err := rand.Int(rand.Reader, big.NewInt(int64(max)))
	if err != nil {
		return 0, err
	}
	return int(n.Int64()), nil
}
// pickPacker returns or creates a randomly selected packer into which the blob should be stored. If the
// ciphertext is larger than the packSize, a new packer is returned.
// The caller must hold r.pm.
func (r *packerManager) pickPacker(ciphertextLen int) (*packer, error) {
	// use separate packer if compressed length is larger than the packsize
	// this speeds up the garbage collection of oversized blobs and reduces the cache size
	// as the oversize blobs are only downloaded if necessary
	if ciphertextLen >= int(r.packSize) {
		return r.newPacker()
	}

	// randomly distribute blobs onto multiple packer instances. This makes it harder for
	// an attacker to learn at which points a file was chunked and therefore mitigates the attack described in
	// https://www.daemonology.net/blog/chunking-attacks.pdf .
	// See https://github.com/restic/restic/issues/5291#issuecomment-2746146193 for details on the mitigation.
	idx, err := randomInt(len(r.packers))
	if err != nil {
		return nil, err
	}

	// retrieve packer or get a new one
	packer := r.packers[idx]
	if packer == nil {
		packer, err = r.newPacker()
		if err != nil {
			return nil, err
		}
		r.packers[idx] = packer
	}
	return packer, nil
}
// forgetPacker drops the given packer from the internal list, freeing its
// slot. This is used to forget full packers.
func (r *packerManager) forgetPacker(packer *packer) {
	for i := range r.packers {
		if r.packers[i] == packer {
			r.packers[i] = nil
		}
	}
}
// newPacker creates a fresh packer that writes to a new temporary file via a
// buffered writer.
func (r *packerManager) newPacker() (pck *packer, err error) {
	debug.Log("create new pack")
	tmpfile, err := fs.TempFile("", "restic-temp-pack-")
	if err != nil {
		return nil, errors.WithStack(err)
	}

	bufWr := bufio.NewWriter(tmpfile)
	p := pack.NewPacker(r.key, bufWr)
	pck = &packer{
		Packer:  p,
		tmpfile: tmpfile,
		bufWr:   bufWr,
	}
	return pck, nil
}
// savePacker stores p in the backend. It finalizes the pack, computes its
// content hash (which becomes the pack ID) in a second read pass, uploads the
// temporary file and records the contained blobs in the index.
func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *packer) error {
	debug.Log("save packer for %v with %d blobs (%d bytes)\n", t, p.Packer.Count(), p.Packer.Size())
	err := p.Packer.Finalize()
	if err != nil {
		return err
	}
	// flush the buffered writer so the temp file contains the complete pack
	err = p.bufWr.Flush()
	if err != nil {
		return err
	}

	// calculate sha256 hash in a second pass
	var rd io.Reader
	rd, err = backend.NewFileReader(p.tmpfile, nil)
	if err != nil {
		return err
	}
	// also feed the data through the backend-specific hash, if any
	beHasher := r.be.Hasher()
	var beHr *hashing.Reader
	if beHasher != nil {
		beHr = hashing.NewReader(rd, beHasher)
		rd = beHr
	}

	hr := hashing.NewReader(rd, sha256.New())
	_, err = io.Copy(io.Discard, hr)
	if err != nil {
		return err
	}

	id := restic.IDFromHash(hr.Sum(nil))
	h := backend.Handle{Type: backend.PackFile, Name: id.String(), IsMetadata: t.IsMetadata()}
	var beHash []byte
	if beHr != nil {
		beHash = beHr.Sum(nil)
	}
	// re-open the temp file for the actual upload
	rrd, err := backend.NewFileReader(p.tmpfile, beHash)
	if err != nil {
		return err
	}

	err = r.be.Save(ctx, h, rrd)
	if err != nil {
		debug.Log("Save(%v) error: %v", h, err)
		return err
	}

	debug.Log("saved as %v", h)

	err = p.tmpfile.Close()
	if err != nil {
		return errors.Wrap(err, "close tempfile")
	}

	// update blobs in the index
	debug.Log("  updating blobs %v to pack %v", p.Packer.Blobs(), id)
	return r.idx.StorePack(ctx, id, p.Packer.Blobs(), &internalRepository{r})
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/checker.go | internal/repository/checker.go | package repository
import (
"bufio"
"context"
"fmt"
"github.com/klauspost/compress/zstd"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository/index"
"github.com/restic/restic/internal/repository/pack"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
"golang.org/x/sync/errgroup"
)
const maxStreamBufferSize = 4 * 1024 * 1024
// ErrDuplicatePacks is returned when a pack is found in more than one index.
type ErrDuplicatePacks struct {
	PackID  restic.ID     // the affected pack
	Indexes restic.IDSet  // the indexes referencing it
}

// Error implements the error interface.
func (e *ErrDuplicatePacks) Error() string {
	return fmt.Sprintf("pack %v contained in several indexes: %v", e.PackID, e.Indexes)
}
// ErrMixedPack is returned when a pack is found that contains both tree and data blobs.
type ErrMixedPack struct {
	PackID restic.ID
}

// Error implements the error interface.
func (e *ErrMixedPack) Error() string {
	return fmt.Sprintf("pack %v contains a mix of tree and data blobs", e.PackID.Str())
}
// PackError describes an error with a specific pack.
type PackError struct {
	ID        restic.ID // the affected pack
	Orphaned  bool      // pack exists in the repo but is not referenced by any index
	Truncated bool      // on-disk size differs from the size recorded in the index
	Err       error     // underlying cause
}

// Error implements the error interface.
func (e *PackError) Error() string {
	return "pack " + e.ID.String() + ": " + e.Err.Error()
}
// Checker handles index-related operations for repository checking.
type Checker struct {
	repo *Repository // repository under inspection
}
// NewChecker creates a new Checker for the given repository.
func NewChecker(repo *Repository) *Checker {
	return &Checker{repo: repo}
}
// computePackTypes maps each pack ID to the blob type stored in it. A pack
// that holds more than one blob type is marked as restic.InvalidBlob.
func computePackTypes(ctx context.Context, idx restic.ListBlobser) (map[restic.ID]restic.BlobType, error) {
	packs := make(map[restic.ID]restic.BlobType)
	err := idx.ListBlobs(ctx, func(pb restic.PackedBlob) {
		tpe, seen := packs[pb.PackID]
		switch {
		case !seen:
			packs[pb.PackID] = pb.Type
		case tpe != pb.Type:
			packs[pb.PackID] = restic.InvalidBlob
		default:
			packs[pb.PackID] = tpe
		}
	})
	return packs, err
}
// LoadIndex loads all index files. Per-index load failures are collected in
// errs; suspicious but non-fatal findings (duplicate or mixed packs) are
// returned as hints.
func (c *Checker) LoadIndex(ctx context.Context, p restic.TerminalCounterFactory) (hints []error, errs []error) {
	debug.Log("Start")

	// packToIndex records, for every pack, the set of indexes referencing it
	packToIndex := make(map[restic.ID]restic.IDSet)

	// Use the repository's internal loadIndexWithCallback to handle per-index errors
	err := c.repo.loadIndexWithCallback(ctx, p, func(id restic.ID, idx *index.Index, err error) error {
		debug.Log("process index %v, err %v", id, err)
		err = errors.Wrapf(err, "error loading index %v", id)
		if err != nil {
			// remember the failure but keep loading the remaining indexes
			errs = append(errs, err)
			return nil
		}

		debug.Log("process blobs")
		cnt := 0
		for blob := range idx.Values() {
			if ctx.Err() != nil {
				return ctx.Err()
			}
			cnt++

			if _, ok := packToIndex[blob.PackID]; !ok {
				packToIndex[blob.PackID] = restic.NewIDSet()
			}
			packToIndex[blob.PackID].Insert(id)
		}

		debug.Log("%d blobs processed", cnt)
		return nil
	})
	if err != nil {
		// failed to load the index
		return hints, append(errs, err)
	}

	packTypes, err := computePackTypes(ctx, c.repo)
	if err != nil {
		return hints, append(errs, err)
	}

	debug.Log("checking for duplicate packs")
	for packID := range packTypes {
		debug.Log("  check pack %v: contained in %d indexes", packID, len(packToIndex[packID]))
		if len(packToIndex[packID]) > 1 {
			hints = append(hints, &ErrDuplicatePacks{
				PackID:  packID,
				Indexes: packToIndex[packID],
			})
		}
		if packTypes[packID] == restic.InvalidBlob {
			hints = append(hints, &ErrMixedPack{
				PackID: packID,
			})
		}
	}

	return hints, errs
}
// Packs checks that all packs referenced in the index are still available and
// there are no packs that aren't in an index. errChan is closed after all
// packs have been checked.
func (c *Checker) Packs(ctx context.Context, errChan chan<- error) {
	defer close(errChan)

	// compute pack size using index entries
	packs, err := pack.Size(ctx, c.repo, false)
	if err != nil {
		errChan <- err
		return
	}

	debug.Log("checking for %d packs", len(packs))
	debug.Log("listing repository packs")
	repoPacks := make(map[restic.ID]int64)

	err = c.repo.List(ctx, restic.PackFile, func(id restic.ID, size int64) error {
		repoPacks[id] = size
		return nil
	})
	if err != nil {
		// NOTE(review): no return here — the checks below run on a possibly
		// incomplete repoPacks listing; confirm this is intentional.
		errChan <- err
	}

	for id, size := range packs {
		reposize, ok := repoPacks[id]
		// remove from repoPacks so we can find orphaned packs
		delete(repoPacks, id)

		// missing: present in c.packs but not in the repo
		if !ok {
			select {
			case <-ctx.Done():
				return
			case errChan <- &PackError{ID: id, Err: errors.New("does not exist")}:
			}
			continue
		}

		// size not matching: present in c.packs and in the repo, but sizes do not match
		if size != reposize {
			select {
			case <-ctx.Done():
				return
			case errChan <- &PackError{ID: id, Truncated: true, Err: errors.Errorf("unexpected file size: got %d, expected %d", reposize, size)}:
			}
		}
	}

	// orphaned: present in the repo but not in c.packs
	for orphanID := range repoPacks {
		select {
		case <-ctx.Done():
			return
		case errChan <- &PackError{ID: orphanID, Orphaned: true, Err: errors.New("not referenced in any index")}:
		}
	}
}
// ReadPacks loads data from the packs selected by filter and checks their
// integrity. Problems are reported via errChan, which is closed after all
// selected packs have been processed.
func (c *Checker) ReadPacks(ctx context.Context, filter func(packs map[restic.ID]int64) map[restic.ID]int64, p *progress.Counter, errChan chan<- error) {
	defer close(errChan)

	// compute pack size using index entries
	packs, err := pack.Size(ctx, c.repo, false)
	if err != nil {
		errChan <- err
		return
	}
	packs = filter(packs)
	p.SetMax(uint64(len(packs)))

	g, ctx := errgroup.WithContext(ctx)
	type checkTask struct {
		id    restic.ID
		size  int64
		blobs []restic.Blob
	}
	ch := make(chan checkTask)

	// as packs are streamed the concurrency is limited by IO
	workerCount := int(c.repo.Connections())
	// run workers
	for i := 0; i < workerCount; i++ {
		g.Go(func() error {
			// per-worker read buffer and zstd decoder, reused across packs
			bufRd := bufio.NewReaderSize(nil, maxStreamBufferSize)
			dec, err := zstd.NewReader(nil)
			if err != nil {
				// creating a decoder without options must not fail; panic with
				// the error (previously this panicked with the nil decoder,
				// hiding the cause)
				panic(err)
			}
			defer dec.Close()
			for {
				var ps checkTask
				var ok bool
				select {
				case <-ctx.Done():
					return nil
				case ps, ok = <-ch:
					if !ok {
						return nil
					}
				}

				err := CheckPack(ctx, c.repo, ps.id, ps.blobs, ps.size, bufRd, dec)
				p.Add(1)
				if err == nil {
					continue
				}
				select {
				case <-ctx.Done():
					return nil
				case errChan <- err:
				}
			}
		})
	}

	packSet := restic.NewIDSet()
	for pack := range packs {
		packSet.Insert(pack)
	}

	// push packs to ch
	for pbs := range c.repo.ListPacksFromIndex(ctx, packSet) {
		size := packs[pbs.PackID]
		debug.Log("listed %v", pbs.PackID)
		select {
		case ch <- checkTask{id: pbs.PackID, size: size, blobs: pbs.Blobs}:
		case <-ctx.Done():
		}
	}
	close(ch)

	err = g.Wait()
	if err != nil {
		select {
		case <-ctx.Done():
			return
		case errChan <- err:
		}
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/repack_test.go | internal/repository/repack_test.go | package repository_test
import (
"context"
"math/rand"
"testing"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui/progress"
)
func randomSize(random *rand.Rand, min, max int) int {
return random.Intn(max-min) + min
}
// createRandomBlobs stores `blobs` random blobs in repo: a data blob with
// probability pData, a tree blob otherwise. Uploader sessions are ended at
// random points so that multiple pack files get created. All randomness is
// drawn from the supplied seeded source so runs are reproducible from the
// logged seed.
func createRandomBlobs(t testing.TB, random *rand.Rand, repo restic.Repository, blobs int, pData float32, smallBlobs bool) {
	// two loops to allow creating multiple pack files
	for blobs > 0 {
		rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
			for blobs > 0 {
				blobs--
				var (
					tpe    restic.BlobType
					length int
				)

				if random.Float32() < pData {
					tpe = restic.DataBlob
					if smallBlobs {
						length = randomSize(random, 1*1024, 20*1024) // 1KiB to 20KiB of data
					} else {
						length = randomSize(random, 10*1024, 1024*1024) // 10KiB to 1MiB of data
					}
				} else {
					tpe = restic.TreeBlob
					length = randomSize(random, 1*1024, 20*1024) // 1KiB to 20KiB
				}

				buf := make([]byte, length)
				random.Read(buf)

				id, exists, _, err := uploader.SaveBlob(ctx, tpe, buf, restic.ID{}, false)
				if err != nil {
					t.Fatalf("SaveFrom() error %v", err)
				}

				if exists {
					t.Errorf("duplicate blob %v/%v ignored", id, restic.DataBlob)
					continue
				}

				// use the seeded source here as well; the global rand would make
				// the helper non-reproducible even though the seed is logged
				if random.Float32() < 0.2 {
					break
				}
			}
			return nil
		}))
	}
}
// createRandomWrongBlob stores a blob whose content does NOT match its
// recorded ID: the ID is computed first, then the first byte is inverted
// before saving. Returns the handle of the (corrupt) blob.
func createRandomWrongBlob(t testing.TB, random *rand.Rand, repo restic.Repository) restic.BlobHandle {
	length := randomSize(random, 10*1024, 1024*1024) // 10KiB to 1MiB of data
	buf := make([]byte, length)
	random.Read(buf)
	id := restic.Hash(buf)
	// invert first data byte
	buf[0] ^= 0xff

	rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
		_, _, _, err := uploader.SaveBlob(ctx, restic.DataBlob, buf, id, false)
		return err
	}))
	return restic.BlobHandle{ID: id, Type: restic.DataBlob}
}
// selectBlobs splits the list of all blobs randomly into two lists. A blob
// will be contained in the first one with probability p.
func selectBlobs(t *testing.T, random *rand.Rand, repo restic.Repository, p float32) (list1, list2 restic.BlobSet) {
	list1 = restic.NewBlobSet()
	list2 = restic.NewBlobSet()

	blobs := restic.NewBlobSet()

	err := repo.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
		entries, _, err := repo.ListPack(context.TODO(), id, size)
		if err != nil {
			t.Fatalf("error listing pack %v: %v", id, err)
		}

		for _, entry := range entries {
			h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
			// each blob must only be assigned once
			if blobs.Has(h) {
				t.Errorf("ignoring duplicate blob %v", h)
				return nil
			}
			blobs.Insert(h)

			if random.Float32() <= p {
				list1.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type})
			} else {
				list2.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type})
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	return list1, list2
}
// listPacks returns the IDs of all pack files in the repository.
func listPacks(t *testing.T, repo restic.Lister) restic.IDSet {
	return listFiles(t, repo, restic.PackFile)
}
// listFiles returns the set of IDs of all repository files of the given type.
func listFiles(t *testing.T, repo restic.Lister, tpe backend.FileType) restic.IDSet {
	ids := restic.NewIDSet()
	err := repo.List(context.TODO(), tpe, func(id restic.ID, _ int64) error {
		ids.Insert(id)
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	return ids
}
// findPacksForBlobs returns the set of all packs containing any of the given
// blobs, failing the test if a blob cannot be found in the index.
func findPacksForBlobs(t *testing.T, repo restic.Repository, blobs restic.BlobSet) restic.IDSet {
	packs := restic.NewIDSet()

	for h := range blobs {
		list := repo.LookupBlob(h.Type, h.ID)
		if len(list) == 0 {
			t.Fatal("Failed to find blob", h.ID.Str(), "with type", h.Type)
		}

		for _, pb := range list {
			packs.Insert(pb.PackID)
		}
	}

	return packs
}
// repack copies the given blobs out of the given packs into new packs, then
// removes the old pack files directly from the backend.
func repack(t *testing.T, repo restic.Repository, be backend.Backend, packs restic.IDSet, blobs restic.BlobSet) {
	rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
		return repository.CopyBlobs(ctx, repo, repo, uploader, packs, blobs, nil, nil)
	}))

	for id := range packs {
		rtest.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()}))
	}
}
// rebuildAndReloadIndex rebuilds the repository index from the pack files and
// reloads it into memory.
func rebuildAndReloadIndex(t *testing.T, repo *repository.Repository) {
	rtest.OK(t, repository.RepairIndex(context.TODO(), repo, repository.RepairIndexOptions{
		ReadAllPacks: true,
	}, &progress.NoopPrinter{}))

	rtest.OK(t, repo.LoadIndex(context.TODO(), nil))
}
// TestRepack runs testRepack against all supported repository versions.
func TestRepack(t *testing.T) {
	repository.TestAllVersions(t, testRepack)
}
// testRepack verifies that repacking removes the selected packs while all
// kept blobs remain retrievable and all removed blobs are gone.
func testRepack(t *testing.T, version uint) {
	repo, _, be := repository.TestRepositoryWithVersion(t, version)

	seed := time.Now().UnixNano()
	random := rand.New(rand.NewSource(seed))
	t.Logf("rand seed is %v", seed)

	// add a small amount of blobs twice to create multiple pack files
	createRandomBlobs(t, random, repo, 10, 0.7, false)
	createRandomBlobs(t, random, repo, 10, 0.7, false)

	packsBefore := listPacks(t, repo)

	// Running repack on empty ID sets should not do anything at all.
	repack(t, repo, be, nil, nil)

	packsAfter := listPacks(t, repo)

	if !packsAfter.Equals(packsBefore) {
		t.Fatalf("packs are not equal, Repack modified something. Before:\n  %v\nAfter:\n  %v",
			packsBefore, packsAfter)
	}

	removeBlobs, keepBlobs := selectBlobs(t, random, repo, 0.2)

	removePacks := findPacksForBlobs(t, repo, removeBlobs)

	repack(t, repo, be, removePacks, keepBlobs)
	rebuildAndReloadIndex(t, repo)

	packsAfter = listPacks(t, repo)
	for id := range removePacks {
		if packsAfter.Has(id) {
			t.Errorf("pack %v still present although it should have been repacked and removed", id.Str())
		}
	}

	// every kept blob must now live in exactly one pack that was not removed
	for h := range keepBlobs {
		list := repo.LookupBlob(h.Type, h.ID)
		if len(list) == 0 {
			t.Errorf("unable to find blob %v in repo", h.ID.Str())
			continue
		}

		if len(list) != 1 {
			t.Errorf("expected one pack in the list, got: %v", list)
			continue
		}

		pb := list[0]
		if removePacks.Has(pb.PackID) {
			t.Errorf("lookup returned pack ID %v that should've been removed", pb.PackID)
		}
	}

	for h := range removeBlobs {
		if _, found := repo.LookupBlobSize(h.Type, h.ID); found {
			t.Errorf("blob %v still contained in the repo", h)
		}
	}
}
// TestRepackCopy runs testRepackCopy against all supported repository versions.
func TestRepackCopy(t *testing.T) {
	repository.TestAllVersions(t, testRepackCopy)
}
// oneConnectionRepo wraps a repository and reports only a single backend
// connection, to exercise the minimal-concurrency code paths.
type oneConnectionRepo struct {
	restic.Repository
}

// Connections always returns 1.
func (r oneConnectionRepo) Connections() uint {
	return 1
}
// testRepackCopy verifies that CopyBlobs can copy blobs between two
// repositories, using the minimal possible connection count.
func testRepackCopy(t *testing.T, version uint) {
	repo, _, _ := repository.TestRepositoryWithVersion(t, version)
	dstRepo, _, _ := repository.TestRepositoryWithVersion(t, version)

	// test with minimal possible connection count
	repoWrapped := &oneConnectionRepo{repo}
	dstRepoWrapped := &oneConnectionRepo{dstRepo}

	seed := time.Now().UnixNano()
	random := rand.New(rand.NewSource(seed))
	t.Logf("rand seed is %v", seed)

	// add a small amount of blobs twice to create multiple pack files
	createRandomBlobs(t, random, repo, 10, 0.7, false)
	createRandomBlobs(t, random, repo, 10, 0.7, false)

	_, keepBlobs := selectBlobs(t, random, repo, 0.2)
	copyPacks := findPacksForBlobs(t, repo, keepBlobs)

	rtest.OK(t, repoWrapped.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
		return repository.CopyBlobs(ctx, repoWrapped, dstRepoWrapped, uploader, copyPacks, keepBlobs, nil, nil)
	}))
	rebuildAndReloadIndex(t, dstRepo)

	// every copied blob must be present exactly once in the destination
	for h := range keepBlobs {
		list := dstRepo.LookupBlob(h.Type, h.ID)
		if len(list) == 0 {
			t.Errorf("unable to find blob %v in repo", h.ID.Str())
			continue
		}

		if len(list) != 1 {
			t.Errorf("expected one pack in the list, got: %v", list)
			continue
		}
	}
}
// TestRepackWrongBlob runs testRepackWrongBlob against all supported repository versions.
func TestRepackWrongBlob(t *testing.T) {
	repository.TestAllVersions(t, testRepackWrongBlob)
}
// testRepackWrongBlob verifies that repacking fails when a pack contains a
// blob whose content does not match its ID.
func testRepackWrongBlob(t *testing.T, version uint) {
	// disable verification to allow adding corrupted blobs to the repository
	repo, _ := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true})

	seed := time.Now().UnixNano()
	random := rand.New(rand.NewSource(seed))
	t.Logf("rand seed is %v", seed)

	createRandomBlobs(t, random, repo, 5, 0.7, false)
	createRandomWrongBlob(t, random, repo)

	// just keep all blobs, but also rewrite every pack
	_, keepBlobs := selectBlobs(t, random, repo, 0)
	rewritePacks := findPacksForBlobs(t, repo, keepBlobs)

	err := repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
		return repository.CopyBlobs(ctx, repo, repo, uploader, rewritePacks, keepBlobs, nil, nil)
	})
	if err == nil {
		t.Fatal("expected repack to fail but got no error")
	}
	t.Logf("found expected error: %v", err)
}
// TestRepackBlobFallback runs testRepackBlobFallback against all supported repository versions.
func TestRepackBlobFallback(t *testing.T) {
	repository.TestAllVersions(t, testRepackBlobFallback)
}
// testRepackBlobFallback verifies that when a blob exists both in a corrupted
// and in a valid copy, repacking falls back to the valid copy.
func testRepackBlobFallback(t *testing.T, version uint) {
	// disable verification to allow adding corrupted blobs to the repository
	repo, _ := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true})

	seed := time.Now().UnixNano()
	random := rand.New(rand.NewSource(seed))
	t.Logf("rand seed is %v", seed)

	length := randomSize(random, 10*1024, 1024*1024) // 10KiB to 1MiB of data
	buf := make([]byte, length)
	random.Read(buf)
	id := restic.Hash(buf)

	// corrupted copy
	modbuf := make([]byte, len(buf))
	copy(modbuf, buf)
	// invert first data byte
	modbuf[0] ^= 0xff

	// create pack with broken copy
	rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
		_, _, _, err := uploader.SaveBlob(ctx, restic.DataBlob, modbuf, id, false)
		return err
	}))

	// find pack with damaged blob
	keepBlobs := restic.NewBlobSet(restic.BlobHandle{Type: restic.DataBlob, ID: id})
	rewritePacks := findPacksForBlobs(t, repo, keepBlobs)

	// create pack with valid copy
	rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
		_, _, _, err := uploader.SaveBlob(ctx, restic.DataBlob, buf, id, true)
		return err
	}))

	// repack must fallback to valid copy
	rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
		return repository.CopyBlobs(ctx, repo, repo, uploader, rewritePacks, keepBlobs, nil, nil)
	}))

	// broken copy + valid copy + repacked copy
	keepBlobs = restic.NewBlobSet(restic.BlobHandle{Type: restic.DataBlob, ID: id})
	packs := findPacksForBlobs(t, repo, keepBlobs)
	rtest.Assert(t, len(packs) == 3, "unexpected number of copies: %v", len(packs))
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/fuzz_test.go | internal/repository/fuzz_test.go | package repository
import (
"context"
"testing"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
// Test saving a blob and loading it again, with varying buffer sizes.
// Also a regression test for #3783.
func FuzzSaveLoadBlob(f *testing.F) {
	f.Fuzz(func(t *testing.T, blob []byte, buflen uint) {
		if buflen > 64<<20 {
			// Don't allocate enormous buffers. We're not testing the allocator.
			t.Skip()
		}

		id := restic.Hash(blob)
		repo, _, _ := TestRepositoryWithVersion(t, 2)

		rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
			_, _, _, err := uploader.SaveBlob(ctx, restic.DataBlob, blob, id, false)
			return err
		}))

		// reload and verify the content round-trips regardless of buffer size
		buf, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, make([]byte, buflen))
		if err != nil {
			t.Fatal(err)
		}
		if restic.Hash(buf) != id {
			t.Fatal("mismatch")
		}
	})
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/packer_manager_test.go | internal/repository/packer_manager_test.go | package repository
import (
"context"
"io"
"math/rand"
"sync"
"testing"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
)
// randomID fills an ID with bytes read from rd, panicking on read errors.
func randomID(rd io.Reader) restic.ID {
	var id restic.ID
	if _, err := io.ReadFull(rd, id[:]); err != nil {
		panic(err)
	}
	return id
}
const maxBlobSize = 1 << 20
// fillPacks saves a fixed number of random-length blobs through pm, flushes
// it and returns the total number of bytes SaveBlob reported.
func fillPacks(t testing.TB, rnd *rand.Rand, pm *packerManager, buf []byte) (bytes int) {
	for i := 0; i < 102; i++ {
		l := rnd.Intn(maxBlobSize)
		id := randomID(rnd)
		buf = buf[:l]
		// Only change a few bytes so we know we're not benchmarking the RNG.
		rnd.Read(buf[:min(l, 4)])

		n, err := pm.SaveBlob(context.TODO(), restic.DataBlob, id, buf, 0)
		if err != nil {
			t.Fatal(err)
		}
		// NOTE(review): 37 and 36 look like the per-blob and per-pack header
		// overheads added by SaveBlob — confirm against the pack format.
		if n != l+37 && n != l+37+36 {
			t.Errorf("Add() returned invalid number of bytes: want %v, got %v", l, n)
		}
		bytes += n
	}
	err := pm.Flush(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	return bytes
}
// randomSeed is the fixed seed shared by test and benchmark so both generate
// the same blob sequence.
const randomSeed = 23

var (
	once      sync.Once
	totalSize int64 // bytes written by testPackerManager, reused by the benchmark
)
// TestPackerManager exercises the packer manager and records the number of
// written bytes for BenchmarkPackerManager.
func TestPackerManager(t *testing.T) {
	bytes := testPackerManager(t)
	once.Do(func() { totalSize = bytes })
}
// testPackerManager fills packs with random blobs and checks that the number
// of bytes reported by SaveBlob matches the finalized pack sizes.
func testPackerManager(t testing.TB) int64 {
	rnd := rand.New(rand.NewSource(randomSeed))

	savedBytes := 0
	pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, DefaultPackSize, defaultPackerCount, func(ctx context.Context, tp restic.BlobType, p *packer) error {
		err := p.Finalize()
		if err != nil {
			return err
		}
		savedBytes += int(p.Size())
		return nil
	})

	blobBuf := make([]byte, maxBlobSize)

	bytes := fillPacks(t, rnd, pm, blobBuf)
	// bytes does not include the last pack headers
	test.Assert(t, savedBytes == (bytes+36) || savedBytes == (bytes+72), "unexpected number of saved bytes, got %v, expected %v", savedBytes, bytes)

	t.Logf("saved %d bytes", bytes)
	return int64(bytes)
}
// TestPackerManagerWithOversizeBlob checks that a blob at least as large as
// the pack size limit is written to its own dedicated pack file.
func TestPackerManagerWithOversizeBlob(t *testing.T) {
	packFiles := 0
	sizeLimit := uint(512 * 1024)
	pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, sizeLimit, defaultPackerCount, func(ctx context.Context, tp restic.BlobType, p *packer) error {
		packFiles++
		return nil
	})

	for _, i := range []uint{sizeLimit / 2, sizeLimit, sizeLimit / 3} {
		_, err := pm.SaveBlob(context.TODO(), restic.DataBlob, restic.ID{}, make([]byte, i), 0)
		test.OK(t, err)
	}
	test.OK(t, pm.Flush(context.TODO()))

	// oversized blob must be stored in a separate packfile
	test.Assert(t, packFiles == 2, "unexpected number of packfiles %v, expected 2", packFiles)
}
// BenchmarkPackerManager benchmarks filling packs with the same deterministic
// blob sequence used by TestPackerManager.
func BenchmarkPackerManager(t *testing.B) {
	// Run testPackerManager if it hasn't run already, to set totalSize.
	once.Do(func() {
		totalSize = testPackerManager(t)
	})

	rnd := rand.New(rand.NewSource(randomSeed))
	blobBuf := make([]byte, maxBlobSize)

	t.ReportAllocs()
	t.SetBytes(totalSize)
	t.ResetTimer()

	for i := 0; i < t.N; i++ {
		rnd.Seed(randomSeed)
		pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, DefaultPackSize, defaultPackerCount, func(ctx context.Context, t restic.BlobType, p *packer) error {
			return nil
		})
		fillPacks(t, rnd, pm, blobBuf)
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/lock.go | internal/repository/lock.go | package repository
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
)
// lockContext bundles a repository lock with the means to cancel and wait for
// its two background refresh goroutines.
type lockContext struct {
	lock      *restic.Lock
	cancel    context.CancelFunc // cancels the wrapped context and stops refreshing
	refreshWG sync.WaitGroup     // counts the two refresh/monitor goroutines
}
// locker holds the timing parameters for acquiring and refreshing locks;
// tests use instances with shortened durations.
type locker struct {
	retrySleepStart       time.Duration // initial delay between lock attempts
	retrySleepMax         time.Duration // upper bound for the exponential retry delay
	refreshInterval       time.Duration // interval between lock refreshes
	refreshabilityTimeout time.Duration // max age of a lock before it counts as failed
}
// defaultRefreshInterval is how often a held lock is refreshed.
const defaultRefreshInterval = 5 * time.Minute

// lockerInst is the locker configuration used by Lock.
var lockerInst = &locker{
	retrySleepStart: 5 * time.Second,
	retrySleepMax:   60 * time.Second,
	refreshInterval: defaultRefreshInterval,
	// consider a lock refresh failed a bit before the lock actually becomes stale
	// the difference allows to compensate for a small time drift between clients.
	refreshabilityTimeout: restic.StaleLockTimeout - defaultRefreshInterval*3/2,
}
// Lock acquires a repository lock using the default locker configuration.
// See (*locker).Lock for the semantics of the returned values.
func Lock(ctx context.Context, repo *Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) {
	return lockerInst.Lock(ctx, repo, exclusive, retryLock, printRetry, logger)
}
// Lock wraps the ctx such that it is cancelled when the repository is unlocked
// cancelling the original context also stops the lock refresh.
// While the repository is already locked, acquisition is retried with
// exponential backoff for at most retryLock.
func (l *locker) Lock(ctx context.Context, r *Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) {

	var lock *restic.Lock
	var err error

	retrySleep := minDuration(l.retrySleepStart, retryLock)
	retryMessagePrinted := false
	retryTimeout := time.After(retryLock)

	repo := &internalRepository{r}

retryLoop:
	for {
		lock, err = restic.NewLock(ctx, repo, exclusive)
		if err != nil && restic.IsAlreadyLocked(err) {

			if !retryMessagePrinted {
				printRetry(fmt.Sprintf("repo already locked, waiting up to %s for the lock\n", retryLock))
				retryMessagePrinted = true
			}

			debug.Log("repo already locked, retrying in %v", retrySleep)
			retrySleepCh := time.After(retrySleep)

			select {
			case <-ctx.Done():
				return nil, ctx, ctx.Err()
			case <-retryTimeout:
				debug.Log("repo already locked, timeout expired")
				// Last lock attempt
				lock, err = restic.NewLock(ctx, repo, exclusive)
				break retryLoop
			case <-retrySleepCh:
				// exponential backoff, capped at retrySleepMax
				retrySleep = minDuration(retrySleep*2, l.retrySleepMax)
			}
		} else {
			// anything else, either a successful lock or another error
			break retryLoop
		}
	}
	if restic.IsInvalidLock(err) {
		return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err)
	}
	if err != nil {
		return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err)
	}
	debug.Log("create lock %p (exclusive %v)", lock, exclusive)

	ctx, cancel := context.WithCancel(ctx)
	lockInfo := &lockContext{
		lock:   lock,
		cancel: cancel,
	}
	lockInfo.refreshWG.Add(2)
	refreshChan := make(chan struct{})
	forceRefreshChan := make(chan refreshLockRequest)

	// one goroutine performs refreshes, the other watches that they happen in time
	go l.refreshLocks(ctx, repo.be, lockInfo, refreshChan, forceRefreshChan, logger)
	go l.monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger)

	return &Unlocker{lockInfo}, ctx, nil
}
func minDuration(a, b time.Duration) time.Duration {
if a <= b {
return a
}
return b
}
// refreshLockRequest asks the refresh goroutine to refresh a stale lock; the
// outcome is reported on result.
type refreshLockRequest struct {
	result chan bool
}
// refreshLocks periodically refreshes the lock and reports successful
// refreshes on the refreshed channel. Forced refreshes of a stale lock are
// requested via forceRefresh. On termination it removes the lock from the
// repository.
func (l *locker) refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest, logger func(format string, args ...interface{})) {
	debug.Log("start")
	lock := lockInfo.lock
	ticker := time.NewTicker(l.refreshInterval)
	lastRefresh := lock.Time

	defer func() {
		ticker.Stop()
		// ensure that the context was cancelled before removing the lock
		lockInfo.cancel()

		// remove the lock from the repo
		debug.Log("unlocking repository with lock %v", lock)
		if err := lock.Unlock(ctx); err != nil {
			debug.Log("error while unlocking: %v", err)
			logger("error while unlocking: %v", err)
		}

		lockInfo.refreshWG.Done()
	}()

	for {
		select {
		case <-ctx.Done():
			debug.Log("terminate")
			return

		case req := <-forceRefresh:
			debug.Log("trying to refresh stale lock")
			// keep on going if our current lock still exists
			success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel, logger)
			// inform refresh goroutine about forced refresh
			select {
			case <-ctx.Done():
			case req.result <- success:
			}

			if success {
				// update lock refresh time
				lastRefresh = lock.Time
			}

		case <-ticker.C:
			if time.Since(lastRefresh) > l.refreshabilityTimeout {
				// the lock is too old, wait until the expiry monitor cancels the context
				continue
			}

			debug.Log("refreshing locks")
			// NOTE(review): uses context.TODO() rather than ctx — presumably so
			// an in-flight refresh is not torn down by cancellation; confirm.
			err := lock.Refresh(context.TODO())
			if err != nil {
				logger("unable to refresh lock: %v\n", err)
			} else {
				lastRefresh = lock.Time
				// inform monitor goroutine about successful refresh
				select {
				case <-ctx.Done():
				case refreshed <- struct{}{}:
				}
			}
		}
	}
}
// monitorLockRefresh watches that lock refreshes happen in time. If the lock
// becomes too old it requests a forced refresh; when that also fails it
// cancels the context, aborting all operations that rely on the lock.
func (l *locker) monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest, logger func(format string, args ...interface{})) {
	// time.Now() might use a monotonic timer which is paused during standby
	// convert to unix time to ensure we compare real time values
	lastRefresh := time.Now().UnixNano()
	pollDuration := 1 * time.Second
	if l.refreshInterval < pollDuration {
		// required for TestLockFailedRefresh
		pollDuration = l.refreshInterval / 5
	}
	// timers are paused during standby, which is a problem as the refresh timeout
	// _must_ expire if the host was too long in standby. Thus fall back to periodic checks
	// https://github.com/golang/go/issues/35012
	ticker := time.NewTicker(pollDuration)
	defer func() {
		ticker.Stop()
		lockInfo.cancel()
		lockInfo.refreshWG.Done()
	}()

	// non-nil while a forced stale-lock refresh is in flight
	var refreshStaleLockResult chan bool

	for {
		select {
		case <-ctx.Done():
			debug.Log("terminate expiry monitoring")
			return
		case <-refreshed:
			if refreshStaleLockResult != nil {
				// ignore delayed refresh notifications while the stale lock is refreshed
				continue
			}
			lastRefresh = time.Now().UnixNano()
		case <-ticker.C:
			if time.Now().UnixNano()-lastRefresh < l.refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil {
				continue
			}

			debug.Log("trying to refreshStaleLock")
			// keep on going if our current lock still exists
			refreshReq := refreshLockRequest{
				result: make(chan bool),
			}
			refreshStaleLockResult = refreshReq.result

			// inform refresh goroutine about forced refresh
			select {
			case <-ctx.Done():
			case forceRefresh <- refreshReq:
			}
		case success := <-refreshStaleLockResult:
			if success {
				lastRefresh = time.Now().UnixNano()
				refreshStaleLockResult = nil
				continue
			}

			logger("Fatal: failed to refresh lock in time\n")
			return
		}
	}
}
// tryRefreshStaleLock attempts to refresh a lock that was not refreshed in
// time. While the refresh runs, the backend is frozen (when it supports
// freezing) so that no other modifications can slip in. It returns true on
// success; on failure it cancels the lock context while the backend is still
// frozen and returns false.
func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc, logger func(format string, args ...interface{})) bool {
	if freeze := backend.AsBackend[backend.FreezeBackend](be); freeze != nil {
		debug.Log("freezing backend")
		freeze.Freeze()
		defer freeze.Unfreeze()
	}

	if err := lock.RefreshStaleLock(ctx); err != nil {
		logger("failed to refresh stale lock: %v\n", err)
		// cancel context while the backend is still frozen to prevent accidental modifications
		cancel()
		return false
	}

	return true
}
// Unlocker releases a repository lock that is kept refreshed by background
// goroutines until Unlock is called.
type Unlocker struct {
	info *lockContext
}

// Unlock cancels the lock's context and waits until the background lock
// goroutines have finished, which removes the lock file from the repository.
func (l *Unlocker) Unlock() {
	l.info.cancel()
	l.info.refreshWG.Wait()
}
// RemoveStaleLocks deletes all locks detected as stale from the repository.
// It returns the number of locks that were successfully removed. Locks that
// cannot be loaded are skipped silently.
func RemoveStaleLocks(ctx context.Context, repo *Repository) (uint, error) {
	var removed uint

	visit := func(id restic.ID, lock *restic.Lock, err error) error {
		if err != nil {
			// ignore locks that cannot be loaded
			debug.Log("ignore lock %v: %v", id, err)
			return nil
		}
		if !lock.Stale() {
			return nil
		}
		if err := (&internalRepository{repo}).RemoveUnpacked(ctx, restic.LockFile, id); err != nil {
			return err
		}
		removed++
		return nil
	}

	err := restic.ForAllLocks(ctx, repo, nil, visit)
	return removed, err
}
// RemoveAllLocks removes all locks forcefully. It returns the number of lock
// files that were successfully deleted; removals run in parallel, so the
// counter is maintained atomically.
func RemoveAllLocks(ctx context.Context, repo *Repository) (uint, error) {
	var removed atomic.Uint32

	removeOne := func(ctx context.Context, id restic.ID, _ int64) error {
		if err := (&internalRepository{repo}).RemoveUnpacked(ctx, restic.LockFile, id); err != nil {
			return err
		}
		removed.Add(1)
		return nil
	}

	err := restic.ParallelList(ctx, repo, restic.LockFile, repo.Connections(), removeOne)
	return uint(removed.Load()), err
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/check.go | internal/repository/check.go | package repository
import (
"bufio"
"bytes"
"context"
"crypto/sha256"
"fmt"
"io"
"sort"
"github.com/klauspost/compress/zstd"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository/hashing"
"github.com/restic/restic/internal/repository/pack"
"github.com/restic/restic/internal/restic"
)
// ErrPackData is returned if errors are discovered while verifying a packfile
type ErrPackData struct {
	PackID restic.ID // ID of the pack file the errors were found in
	errs   []error   // individual problems discovered within the pack
}

// Error implements the error interface; it summarizes how many problems were
// found in the pack and lists them all.
func (e *ErrPackData) Error() string {
	return fmt.Sprintf("pack %v contains %v errors: %v", e.PackID, len(e.errs), e.errs)
}
// partialReadError wraps an error raised while streaming a pack file. It is
// used to distinguish failures that occurred after part of the file was read
// from failures where the download never worked at all (see checkPackInner).
type partialReadError struct {
	err error
}

// Error implements the error interface by delegating to the wrapped error.
func (e *partialReadError) Error() string {
	return e.err.Error()
}
// CheckPack reads a pack and checks the integrity of all blobs.
//
// If the first verification pass fails, a potentially corrupted cached copy
// of the pack is dropped and the verification is retried once. A failure of
// the retry is reported as-is; if the retry succeeds, the original error is
// returned wrapped, flagging it as likely transient.
func CheckPack(ctx context.Context, r *Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error {
	firstErr := checkPackInner(ctx, r, id, blobs, size, bufRd, dec)
	if firstErr == nil {
		return nil
	}

	if r.cache != nil {
		// ignore error as there's not much we can do here
		_ = r.cache.Forget(backend.Handle{Type: restic.PackFile, Name: id.String()})
	}

	// retry pack verification to detect transient errors
	if retryErr := checkPackInner(ctx, r, id, blobs, size, bufRd, dec); retryErr != nil {
		return retryErr
	}
	return fmt.Errorf("check successful on second attempt, original error %w", firstErr)
}
// checkPackInner performs a single verification pass over pack file id: it
// streams the file from the backend, decrypts and verifies every blob listed
// in `blobs` (the index's view of the pack), recomputes the file hash, and
// compares the on-disk pack header against the index. All problems found are
// collected into a single *ErrPackData; a failure to download the file at all
// is returned as a plain wrapped error instead, so callers do not suggest
// repairing a file that was never read.
func checkPackInner(ctx context.Context, r *Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error {
	debug.Log("checking pack %v", id.String())

	if len(blobs) == 0 {
		return &ErrPackData{PackID: id, errs: []error{errors.New("pack is empty or not indexed")}}
	}

	// sanity check blobs in index: after sorting by offset, consecutive blobs
	// must tile the data section of the pack without gaps or overlaps
	sort.Slice(blobs, func(i, j int) bool {
		return blobs[i].Offset < blobs[j].Offset
	})
	idxHdrSize := pack.CalculateHeaderSize(blobs)
	lastBlobEnd := 0
	nonContinuousPack := false
	for _, blob := range blobs {
		if lastBlobEnd != int(blob.Offset) {
			nonContinuousPack = true
		}
		lastBlobEnd = int(blob.Offset + blob.Length)
	}
	// size was calculated by masterindex.PackSize, thus there's no need to recalculate it here

	var errs []error
	if nonContinuousPack {
		debug.Log("Index for pack contains gaps / overlaps, blobs: %v", blobs)
		errs = append(errs, errors.New("index for pack contains gaps / overlapping blobs"))
	}

	// calculate hash on-the-fly while reading the pack and capture pack header
	var hash restic.ID
	var hdrBuf []byte
	// must use a separate slice from `errs` here as we're only interested in the last retry
	var blobErrors []error
	h := backend.Handle{Type: backend.PackFile, Name: id.String()}
	err := r.be.Load(ctx, h, int(size), 0, func(rd io.Reader) error {
		// hash everything that flows through rd so the pack ID can be
		// verified once the whole file was read
		hrd := hashing.NewReader(rd, sha256.New())
		bufRd.Reset(hrd)

		// reset blob errors for each retry
		blobErrors = nil

		// iterate over and verify all blobs the index claims are in this pack
		it := newPackBlobIterator(id, newBufReader(bufRd), 0, blobs, r.Key(), dec)
		for {
			if ctx.Err() != nil {
				return ctx.Err()
			}

			val, err := it.Next()
			if err == errPackEOF {
				break
			} else if err != nil {
				// stream broke mid-file: mark as partial read
				return &partialReadError{err}
			}
			debug.Log(" check blob %v: %v", val.Handle.ID, val.Handle)
			if val.Err != nil {
				// a broken blob is recorded but does not stop the pass
				debug.Log(" error verifying blob %v: %v", val.Handle.ID, val.Err)
				blobErrors = append(blobErrors, errors.Errorf("blob %v: %v", val.Handle.ID, val.Err))
			}
		}

		// skip enough bytes until we reach the possible header start
		curPos := lastBlobEnd
		minHdrStart := int(size) - pack.MaxHeaderSize
		if minHdrStart > curPos {
			_, err := bufRd.Discard(minHdrStart - curPos)
			if err != nil {
				return &partialReadError{err}
			}
			curPos += minHdrStart - curPos
		}

		// read remainder, which should be the pack header
		var err error
		hdrBuf = make([]byte, int(size-int64(curPos)))
		_, err = io.ReadFull(bufRd, hdrBuf)
		if err != nil {
			return &partialReadError{err}
		}

		hash = restic.IDFromHash(hrd.Sum(nil))
		return nil
	})
	errs = append(errs, blobErrors...)
	if err != nil {
		var e *partialReadError
		isPartialReadError := errors.As(err, &e)
		// failed to load the pack file, return as further checks cannot succeed anyways
		debug.Log(" error streaming pack (partial %v): %v", isPartialReadError, err)
		if isPartialReadError {
			return &ErrPackData{PackID: id, errs: append(errs, fmt.Errorf("partial download error: %w", err))}
		}

		// The check command suggests to repair files for which a `ErrPackData` is returned. However, this file
		// completely failed to download such that there's no point in repairing anything.
		return fmt.Errorf("download error: %w", err)
	}
	// the SHA-256 over the whole file must equal the pack's ID
	if !hash.Equal(id) {
		debug.Log("pack ID does not match, want %v, got %v", id, hash)
		return &ErrPackData{PackID: id, errs: append(errs, errors.Errorf("unexpected pack id %v", hash))}
	}

	// parse the on-disk header and cross-check it against the index
	blobs, hdrSize, err := pack.List(r.Key(), bytes.NewReader(hdrBuf), int64(len(hdrBuf)))
	if err != nil {
		return &ErrPackData{PackID: id, errs: append(errs, err)}
	}

	if uint32(idxHdrSize) != hdrSize {
		debug.Log("Pack header size does not match, want %v, got %v", idxHdrSize, hdrSize)
		errs = append(errs, errors.Errorf("pack header size does not match, want %v, got %v", idxHdrSize, hdrSize))
	}

	for _, blob := range blobs {
		// Check if blob is contained in index and position is correct
		idxHas := false
		for _, pb := range r.LookupBlob(blob.BlobHandle.Type, blob.BlobHandle.ID) {
			if pb.PackID == id && pb.Blob == blob {
				idxHas = true
				break
			}
		}
		if !idxHas {
			errs = append(errs, errors.Errorf("blob %v is not contained in index or position is incorrect", blob.ID))
			continue
		}
	}

	if len(errs) > 0 {
		return &ErrPackData{PackID: id, errs: errs}
	}

	return nil
}
type bufReader struct {
rd *bufio.Reader
buf []byte
}
func newBufReader(rd *bufio.Reader) *bufReader {
return &bufReader{
rd: rd,
}
}
func (b *bufReader) Discard(n int) (discarded int, err error) {
return b.rd.Discard(n)
}
func (b *bufReader) ReadFull(n int) (buf []byte, err error) {
if cap(b.buf) < n {
b.buf = make([]byte, n)
}
b.buf = b.buf[:n]
_, err = io.ReadFull(b.rd, b.buf)
if err != nil {
return nil, err
}
return b.buf, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/repository/repair_index_test.go | internal/repository/repair_index_test.go | package repository_test
import (
"context"
"math/rand"
"testing"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui/progress"
)
// listIndex returns the set of index file IDs currently stored in repo.
func listIndex(t *testing.T, repo restic.Lister) restic.IDSet {
	return listFiles(t, repo, restic.IndexFile)
}
// testRebuildIndex creates a small randomly-populated test repository, lets
// the caller-provided damage function break it through the backend, and then
// checks that RepairIndex brings the repository back to a verifiable state.
// readAllPacks selects the repair strategy.
func testRebuildIndex(t *testing.T, readAllPacks bool, damage func(t *testing.T, repo *repository.Repository, be backend.Backend)) {
	// log the seed so failing runs can be reproduced
	seed := time.Now().UnixNano()
	rng := rand.New(rand.NewSource(seed))
	t.Logf("rand initialized with seed %d", seed)

	repo, _, be := repository.TestRepositoryWithVersion(t, 0)
	// fill the repository with some random blobs
	createRandomBlobs(t, rng, repo, 4, 0.5, true)
	createRandomBlobs(t, rng, repo, 5, 0.5, true)
	t.Logf("old indexes %v", listIndex(t, repo))

	damage(t, repo, be)

	// reopen the repository before repairing, so no in-memory state survives
	repo = repository.TestOpenBackend(t, be)
	opts := repository.RepairIndexOptions{ReadAllPacks: readAllPacks}
	rtest.OK(t, repository.RepairIndex(context.TODO(), repo, opts, &progress.NoopPrinter{}))

	repository.TestCheckRepo(t, repo)
}
// TestRebuildIndex exercises index repair against several kinds of repository
// damage. Each case supplies a damage function that manipulates the
// repository through its backend; every scenario is run twice, with and
// without reading all pack files.
func TestRebuildIndex(t *testing.T) {
	for _, test := range []struct {
		name   string
		damage func(t *testing.T, repo *repository.Repository, be backend.Backend)
	}{
		{
			// no damage: repairing a healthy repository must still succeed
			"valid index",
			func(t *testing.T, repo *repository.Repository, be backend.Backend) {},
		},
		{
			// flip a byte in one index file to corrupt it
			"damaged index",
			func(t *testing.T, repo *repository.Repository, be backend.Backend) {
				index := listIndex(t, repo).List()[0]
				replaceFile(t, be, backend.Handle{Type: restic.IndexFile, Name: index.String()}, func(b []byte) []byte {
					b[0] ^= 0xff
					return b
				})
			},
		},
		{
			// delete one index file entirely
			"missing index",
			func(t *testing.T, repo *repository.Repository, be backend.Backend) {
				index := listIndex(t, repo).List()[0]
				rtest.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: index.String()}))
			},
		},
		{
			// delete a pack file that the index still references
			"missing pack",
			func(t *testing.T, repo *repository.Repository, be backend.Backend) {
				pack := listPacks(t, repo).List()[0]
				rtest.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: pack.String()}))
			},
		},
	} {
		t.Run(test.name, func(t *testing.T) {
			// run each scenario with both repair strategies
			testRebuildIndex(t, false, test.damage)
			testRebuildIndex(t, true, test.damage)
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.