repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/node_windows.go | internal/data/node_windows.go | package data
import (
"encoding/json"
"reflect"
"runtime"
"syscall"
)
// WindowsAttributes are the genericAttributes for Windows OS.
// All fields are pointers, so a nil field distinguishes "attribute not
// present" from an attribute with a zero value. The `generic` struct tags
// carry the key names used in the serialized generic-attributes map.
type WindowsAttributes struct {
	// CreationTime is used for storing creation time for windows files.
	CreationTime *syscall.Filetime `generic:"creation_time"`
	// FileAttributes is used for storing file attributes for windows files.
	FileAttributes *uint32 `generic:"file_attributes"`
	// SecurityDescriptor is used for storing security descriptors which includes
	// owner, group, discretionary access control list (DACL), system access control list (SACL)
	SecurityDescriptor *[]byte `generic:"security_descriptor"`
}
// WindowsAttrsToGenericAttributes converts the WindowsAttributes to a generic
// attributes map using reflection. The current runtime.GOOS is passed through
// so the shared converter can label the attributes with their OS of origin.
func WindowsAttrsToGenericAttributes(windowsAttributes WindowsAttributes) (attrs map[GenericAttributeType]json.RawMessage, err error) {
	typ := reflect.TypeOf(windowsAttributes)
	val := reflect.ValueOf(windowsAttributes)
	return OSAttrsToGenericAttributes(typ, &val, runtime.GOOS)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/duration_test.go | internal/data/duration_test.go | package data
import (
"testing"
"github.com/google/go-cmp/cmp"
)
// TestNextNumber checks that nextNumber splits a leading (possibly negative)
// decimal number off a duration string and returns the remainder verbatim.
func TestNextNumber(t *testing.T) {
	var tests = []struct {
		input string
		num   int
		rest  string
		err   bool // true if nextNumber is expected to fail on input
	}{
		{
			input: "12h", num: 12, rest: "h",
		},
		{
			input: "3d", num: 3, rest: "d",
		},
		{
			input: "4d9h", num: 4, rest: "d9h",
		},
		{
			input: "7m5d", num: 7, rest: "m5d",
		},
		{
			input: "-23y7m5d", num: -23, rest: "y7m5d",
		},
		{
			input: "-13y5m11d12h", num: -13, rest: "y5m11d12h",
		},
		{
			// leading whitespace is not consumed: no number found
			input: " 5d", num: 0, rest: " 5d", err: true,
		},
		{
			input: "5d ", num: 5, rest: "d ",
		},
		{
			input: "5", num: 5, rest: "",
		},
	}
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			num, rest, err := nextNumber(test.input)
			if err != nil && !test.err {
				t.Fatal(err)
			}
			if num != test.num {
				t.Errorf("wrong num, want %d, got %d", test.num, num)
			}
			if rest != test.rest {
				t.Errorf("wrong rest, want %q, got %q", test.rest, rest)
			}
		})
	}
}

// TestParseDuration checks round-tripping through ParseDuration and
// Duration.String, including normalization of unit order (e.g. "5d7m" prints
// as "7m5d") and rejection of unknown units and non-ASCII digits.
func TestParseDuration(t *testing.T) {
	var tests = []struct {
		input  string
		d      Duration
		output string // expected String() result; "" for error cases (zero Duration)
		err    bool
	}{
		{input: "9h", d: Duration{Hours: 9}, output: "9h"},
		{input: "3d", d: Duration{Days: 3}, output: "3d"},
		{input: "4d2h", d: Duration{Days: 4, Hours: 2}, output: "4d2h"},
		{input: "7m5d", d: Duration{Months: 7, Days: 5}, output: "7m5d"},
		{input: "6m4d8h", d: Duration{Months: 6, Days: 4, Hours: 8}, output: "6m4d8h"},
		{input: "5d7m", d: Duration{Months: 7, Days: 5}, output: "7m5d"},
		{input: "4h3d9m", d: Duration{Months: 9, Days: 3, Hours: 4}, output: "9m3d4h"},
		{input: "-7m5d", d: Duration{Months: -7, Days: 5}, output: "-7m5d"},
		{input: "1y4m-5d-3h", d: Duration{Years: 1, Months: 4, Days: -5, Hours: -3}, output: "1y4m-5d-3h"},
		{input: "2y7m-5d", d: Duration{Years: 2, Months: 7, Days: -5}, output: "2y7m-5d"},
		{input: "2w", err: true},
		{input: "1y4m3w1d", err: true},
		{input: "s", err: true},
		{input: "\xdf\x80", err: true}, // NKO DIGIT ZERO; we want ASCII digits
	}
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			d, err := ParseDuration(test.input)
			if test.err {
				if err == nil {
					t.Fatalf("Missing error for %v", test.input)
				}
			} else {
				if err != nil {
					t.Fatal(err)
				}
			}
			if !cmp.Equal(d, test.d) {
				t.Error(cmp.Diff(test.d, d))
			}
			// String() is checked even in error cases: d is then the zero
			// Duration, which must print as the empty string.
			s := d.String()
			if s != test.output {
				t.Errorf("unexpected return of String(), want %q, got %q", test.output, s)
			}
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/testing.go | internal/data/testing.go | package data
import (
"context"
"fmt"
"io"
"math/rand"
"testing"
"time"
"github.com/restic/chunker"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
)
// fakeFile returns a reader which yields deterministic pseudo-random data.
// The same seed always produces the same byte stream; size limits the total
// number of bytes that can be read.
func fakeFile(seed, size int64) io.Reader {
	src := rand.New(rand.NewSource(seed))
	return io.LimitReader(src, size)
}
// fakeFileSystem generates deterministic fake files and directory trees and
// stores them in a repository. It is the workhorse behind TestCreateSnapshot.
type fakeFileSystem struct {
	t       testing.TB
	repo    restic.Repository
	buf     []byte           // scratch buffer for chunking, lazily sized to chunker.MaxSize
	chunker *chunker.Chunker // reused across files via Reset to avoid reallocation
	rand    *rand.Rand
}

// saveFile reads from rd and saves the blobs in the repository. The list of
// IDs is returned.
func (fs *fakeFileSystem) saveFile(ctx context.Context, uploader restic.BlobSaver, rd io.Reader) (blobs restic.IDs) {
	if fs.buf == nil {
		fs.buf = make([]byte, chunker.MaxSize)
	}
	// Create the chunker on first use, then reuse it for subsequent files.
	if fs.chunker == nil {
		fs.chunker = chunker.New(rd, fs.repo.Config().ChunkerPolynomial)
	} else {
		fs.chunker.Reset(rd, fs.repo.Config().ChunkerPolynomial)
	}
	blobs = restic.IDs{}
	for {
		chunk, err := fs.chunker.Next(fs.buf)
		if err == io.EOF {
			break
		}
		if err != nil {
			fs.t.Fatalf("unable to save chunk in repo: %v", err)
		}
		id, _, _, err := uploader.SaveBlob(ctx, restic.DataBlob, chunk.Data, restic.ID{}, false)
		if err != nil {
			fs.t.Fatalf("error saving chunk: %v", err)
		}
		blobs = append(blobs, id)
	}
	return blobs
}

// Bounds for the generated fake data; seeds are reduced modulo maxSeed so
// identical file/tree content recurs deterministically.
const (
	maxFileSize = 20000
	maxSeed     = 32
	maxNodes    = 15
)
// saveTree saves a tree of fake files in the repo and returns the ID.
// The tree layout is derived deterministically from seed: each node is either
// a subtree (recursing with depth-1) or a file whose content comes from
// fakeFile. Errors are reported via fs.t.Fatalf.
func (fs *fakeFileSystem) saveTree(ctx context.Context, uploader restic.BlobSaver, seed int64, depth int) restic.ID {
	rnd := rand.NewSource(seed)
	numNodes := int(rnd.Int63() % maxNodes)
	var tree Tree
	for i := 0; i < numNodes; i++ {
		// randomly select the type of the node, either tree (p = 1/4) or file (p = 3/4).
		if depth > 1 && rnd.Int63()%4 == 0 {
			treeSeed := rnd.Int63() % maxSeed
			id := fs.saveTree(ctx, uploader, treeSeed, depth-1)
			node := &Node{
				Name:    fmt.Sprintf("dir-%v", treeSeed),
				Type:    NodeTypeDir,
				Mode:    0755,
				Subtree: &id,
			}
			tree.Nodes = append(tree.Nodes, node)
			continue
		}
		fileSeed := rnd.Int63() % maxSeed
		// File size is proportional to the seed, so the same seed always
		// yields the same file name, size, and content.
		fileSize := (maxFileSize / maxSeed) * fileSeed
		node := &Node{
			Name: fmt.Sprintf("file-%v", fileSeed),
			Type: NodeTypeFile,
			Mode: 0644,
			Size: uint64(fileSize),
		}
		node.Content = fs.saveFile(ctx, uploader, fakeFile(fileSeed, fileSize))
		tree.Nodes = append(tree.Nodes, node)
	}
	// Trees must be sorted before serialization.
	tree.Sort()
	id, err := SaveTree(ctx, uploader, &tree)
	if err != nil {
		fs.t.Fatalf("SaveTree returned error: %v", err)
	}
	return id
}
// TestCreateSnapshot creates a snapshot filled with fake data. The fake data
// is generated deterministically from the timestamp `at`, which is also used
// as the snapshot's timestamp. The tree's depth can be specified with the
// parameter depth.
func TestCreateSnapshot(t testing.TB, repo restic.Repository, at time.Time, depth int) *Snapshot {
	// The Unix timestamp doubles as the RNG seed, making snapshots
	// reproducible for a given `at`.
	seed := at.Unix()
	t.Logf("create fake snapshot at %s with seed %d", at, seed)
	fakedir := fmt.Sprintf("fakedir-at-%v", at.Format("2006-01-02 15:04:05"))
	snapshot, err := NewSnapshot([]string{fakedir}, []string{"test"}, "foo", at)
	if err != nil {
		t.Fatal(err)
	}
	fs := fakeFileSystem{
		t:    t,
		repo: repo,
		rand: rand.New(rand.NewSource(seed)),
	}
	var treeID restic.ID
	test.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
		treeID = fs.saveTree(ctx, uploader, seed, depth)
		return nil
	}))
	snapshot.Tree = &treeID
	id, err := SaveSnapshot(context.TODO(), repo, snapshot)
	if err != nil {
		t.Fatal(err)
	}
	snapshot.id = &id
	t.Logf("saved snapshot %v", id.Str())
	return snapshot
}

// TestSetSnapshotID sets the snapshot's ID.
func TestSetSnapshotID(_ testing.TB, sn *Snapshot, id restic.ID) {
	sn.id = &id
}
// ParseDurationOrPanic parses a duration from a string or panics if string is invalid.
// The format is `6y5m234d37h`. Intended for tests and other contexts where
// the input is a compile-time constant.
func ParseDurationOrPanic(s string) Duration {
	d, err := ParseDuration(s)
	if err != nil {
		panic(err)
	}
	return d
}

// TestLoadAllSnapshots returns a list of all snapshots in the repo.
// If a snapshot ID is in excludeIDs, it will not be included in the result.
func TestLoadAllSnapshots(ctx context.Context, repo restic.ListerLoaderUnpacked, excludeIDs restic.IDSet) (snapshots Snapshots, err error) {
	err = ForAllSnapshots(ctx, repo, repo, excludeIDs, func(id restic.ID, sn *Snapshot, err error) error {
		// Propagate the per-snapshot load error to abort iteration.
		if err != nil {
			return err
		}
		snapshots = append(snapshots, sn)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return snapshots, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/snapshot_group_test.go | internal/data/snapshot_group_test.go | package data_test
import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/test"
)
// TestGroupByOptions checks parsing and normalization of snapshot group-by
// options: singular and plural spellings are accepted, and String() always
// renders the canonical "host,paths,tags" forms.
func TestGroupByOptions(t *testing.T) {
	for _, exp := range []struct {
		from       string
		opts       data.SnapshotGroupByOptions
		normalized string // canonical form expected from String()
	}{
		{
			from:       "",
			opts:       data.SnapshotGroupByOptions{},
			normalized: "",
		},
		{
			from:       "host,paths",
			opts:       data.SnapshotGroupByOptions{Host: true, Path: true},
			normalized: "host,paths",
		},
		{
			from:       "host,path,tag",
			opts:       data.SnapshotGroupByOptions{Host: true, Path: true, Tag: true},
			normalized: "host,paths,tags",
		},
		{
			from:       "hosts,paths,tags",
			opts:       data.SnapshotGroupByOptions{Host: true, Path: true, Tag: true},
			normalized: "host,paths,tags",
		},
	} {
		var opts data.SnapshotGroupByOptions
		test.OK(t, opts.Set(exp.from))
		if !cmp.Equal(opts, exp.opts) {
			t.Errorf("unexpected opts %s", cmp.Diff(opts, exp.opts))
		}
		test.Equals(t, opts.String(), exp.normalized)
	}
	// An invalid option must fail and leave the receiver untouched.
	var opts data.SnapshotGroupByOptions
	err := opts.Set("tags,invalid")
	test.Assert(t, err != nil, "missing error on invalid tags")
	test.Assert(t, !opts.Host && !opts.Path && !opts.Tag, "unexpected opts %s %s %s", opts.Host, opts.Path, opts.Tag)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/snapshot_test.go | internal/data/snapshot_test.go | package data_test
import (
"context"
"testing"
"time"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/repository"
rtest "github.com/restic/restic/internal/test"
)
// TestNewSnapshot checks that a snapshot can be constructed from a path list.
func TestNewSnapshot(t *testing.T) {
	paths := []string{"/home/foobar"}
	_, err := data.NewSnapshot(paths, nil, "foo", time.Now())
	rtest.OK(t, err)
}

// TestTagList checks that a snapshot without tags matches a tag list
// containing the empty string.
func TestTagList(t *testing.T) {
	paths := []string{"/home/foobar"}
	tags := []string{""}
	sn, _ := data.NewSnapshot(paths, nil, "foo", time.Now())
	r := sn.HasTags(tags)
	rtest.Assert(t, r, "Failed to match untagged snapshot")
}

// TestLoadJSONUnpacked runs testLoadJSONUnpacked against every repository version.
func TestLoadJSONUnpacked(t *testing.T) {
	repository.TestAllVersions(t, testLoadJSONUnpacked)
}

// testLoadJSONUnpacked saves a snapshot and checks that loading it back
// preserves its fields.
func testLoadJSONUnpacked(t *testing.T, version uint) {
	repo, _, _ := repository.TestRepositoryWithVersion(t, version)
	// archive a snapshot
	sn := data.Snapshot{}
	sn.Hostname = "foobar"
	sn.Username = "test!"
	id, err := data.SaveSnapshot(context.TODO(), repo, &sn)
	rtest.OK(t, err)
	// restore
	sn2, err := data.LoadSnapshot(context.TODO(), repo, id)
	rtest.OK(t, err)
	rtest.Equals(t, sn.Hostname, sn2.Hostname)
	rtest.Equals(t, sn.Username, sn2.Username)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/duration.go | internal/data/duration.go | package data
import (
"fmt"
"strconv"
"strings"
"github.com/restic/restic/internal/errors"
)
// Duration is similar to time.Duration, except it only supports larger ranges
// like hours, days, months, and years. Individual components may be negative
// (e.g. "1y-5d"); see ParseDuration.
type Duration struct {
	Hours, Days, Months, Years int
}
// String renders the duration in the canonical order years, months, days,
// hours, omitting zero components. The zero Duration prints as "".
func (d Duration) String() string {
	var b strings.Builder
	for _, c := range []struct {
		value int
		unit  string
	}{
		{d.Years, "y"},
		{d.Months, "m"},
		{d.Days, "d"},
		{d.Hours, "h"},
	} {
		if c.value != 0 {
			fmt.Fprintf(&b, "%d%s", c.value, c.unit)
		}
	}
	return b.String()
}
// nextNumber splits a leading decimal number (optionally preceded by a single
// '-') off input. It returns the number, the unconsumed remainder, and an
// error if no ASCII digit follows. An empty input yields (0, "", nil).
func nextNumber(input string) (num int, rest string, err error) {
	if input == "" {
		return 0, "", nil
	}
	negative := false
	if input[0] == '-' {
		negative = true
		input = input[1:]
	}
	// Find the end of the run of ASCII digits. Non-ASCII bytes (including
	// non-ASCII "digits") terminate the run immediately.
	end := len(input)
	for i := 0; i < len(input); i++ {
		if input[i] < '0' || input[i] > '9' {
			end = i
			break
		}
	}
	if end == 0 {
		return 0, input, errors.New("no number found")
	}
	num, err = strconv.Atoi(input[:end])
	if err != nil {
		// Cannot happen: input[:end] is a non-empty ASCII digit string.
		panic(err)
	}
	if negative {
		num = -num
	}
	return num, input[end:], nil
}
// ParseDuration parses a duration from a string. The format is `6y5m234d37h`.
// Components may appear in any order and may individually be negative; an
// unknown unit or a number without a trailing unit is an error.
func ParseDuration(s string) (Duration, error) {
	var d Duration
	rest := strings.TrimSpace(s)
	for len(rest) > 0 {
		var (
			n   int
			err error
		)
		n, rest, err = nextNumber(rest)
		if err != nil {
			return Duration{}, err
		}
		if len(rest) == 0 {
			return Duration{}, errors.Errorf("no unit found after number %d", n)
		}
		unit := rest[0]
		rest = rest[1:]
		switch unit {
		case 'y':
			d.Years = n
		case 'm':
			d.Months = n
		case 'd':
			d.Days = n
		case 'h':
			d.Hours = n
		default:
			return Duration{}, errors.Errorf("invalid unit %q found after number %d", unit, n)
		}
	}
	return d, nil
}
// Set calls ParseDuration and updates d. On parse failure d is left unchanged.
// Implements the pflag.Value interface.
func (d *Duration) Set(s string) error {
	v, err := ParseDuration(s)
	if err != nil {
		return err
	}
	*d = v
	return nil
}

// Type returns the type of Duration, usable within github.com/spf13/pflag and
// in help texts.
func (d Duration) Type() string {
	return "duration"
}

// Zero returns true if the duration is empty (all values are set to zero).
func (d Duration) Zero() bool {
	return d.Years == 0 && d.Months == 0 && d.Days == 0 && d.Hours == 0
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/snapshot_group.go | internal/data/snapshot_group.go | package data
import (
"encoding/json"
"fmt"
"sort"
"strings"
)
// SnapshotGroupByOptions selects which snapshot properties are used as
// grouping criteria by GroupSnapshots. It implements the pflag.Value
// interface via Set/String/Type.
type SnapshotGroupByOptions struct {
	Tag  bool
	Host bool
	Path bool
}
// splitSnapshotGroupBy parses a comma-separated group-by specification.
// Both singular and plural spellings are accepted; empty segments are
// ignored; any other word is an error.
func splitSnapshotGroupBy(s string) (SnapshotGroupByOptions, error) {
	var opts SnapshotGroupByOptions
	for _, word := range strings.Split(s, ",") {
		switch word {
		case "host", "hosts":
			opts.Host = true
		case "path", "paths":
			opts.Path = true
		case "tag", "tags":
			opts.Tag = true
		case "":
			// tolerate empty segments, e.g. from trailing commas
		default:
			return SnapshotGroupByOptions{}, fmt.Errorf("unknown grouping option: %q", word)
		}
	}
	return opts, nil
}
// String renders the options in canonical order as "host,paths,tags",
// including only the enabled criteria.
func (l SnapshotGroupByOptions) String() string {
	parts := make([]string, 0, 3)
	if l.Host {
		parts = append(parts, "host")
	}
	if l.Path {
		parts = append(parts, "paths")
	}
	if l.Tag {
		parts = append(parts, "tags")
	}
	return strings.Join(parts, ",")
}
// Set parses s and updates l. On error l is left unchanged.
// Implements the pflag.Value interface.
func (l *SnapshotGroupByOptions) Set(s string) error {
	parts, err := splitSnapshotGroupBy(s)
	if err != nil {
		return err
	}
	*l = parts
	return nil
}

// Type returns the flag type name for help texts (pflag.Value interface).
func (l *SnapshotGroupByOptions) Type() string {
	return "group"
}

// SnapshotGroupKey is the structure for identifying groups in a grouped
// snapshot list. This is used by GroupSnapshots()
type SnapshotGroupKey struct {
	Hostname string   `json:"hostname"`
	Paths    []string `json:"paths"`
	Tags     []string `json:"tags"`
}

// String returns a human-readable description of the group key, listing only
// the fields that are set, e.g. "host foo, path [/home], tags [x]".
func (s *SnapshotGroupKey) String() string {
	var parts []string
	if s.Hostname != "" {
		parts = append(parts, fmt.Sprintf("host %v", s.Hostname))
	}
	if len(s.Paths) != 0 {
		parts = append(parts, fmt.Sprintf("path %v", s.Paths))
	}
	if len(s.Tags) != 0 {
		parts = append(parts, fmt.Sprintf("tags %v", s.Tags))
	}
	return strings.Join(parts, ", ")
}
// GroupSnapshots takes a list of snapshots and a grouping criteria and creates
// a grouped list of snapshots. The map key is the JSON encoding of the
// group's SnapshotGroupKey. The second return value reports whether any
// grouping criterion was active at all.
//
// NOTE(review): this sorts sn.Tags (when grouping by tag) and sn.Paths
// (always) in place, mutating the input snapshots — presumably acceptable
// for its callers, but confirm before reusing snapshots afterwards.
func GroupSnapshots(snapshots Snapshots, groupBy SnapshotGroupByOptions) (map[string]Snapshots, bool, error) {
	// group by hostname and dirs
	snapshotGroups := make(map[string]Snapshots)
	for _, sn := range snapshots {
		// Determining grouping-keys
		var tags []string
		var hostname string
		var paths []string
		if groupBy.Tag {
			tags = sn.Tags
			sort.Strings(tags)
		}
		if groupBy.Host {
			hostname = sn.Hostname
		}
		if groupBy.Path {
			paths = sn.Paths
		}
		sort.Strings(sn.Paths)
		// JSON-encode the key struct to get a stable, comparable map key.
		var k []byte
		var err error
		k, err = json.Marshal(SnapshotGroupKey{Tags: tags, Hostname: hostname, Paths: paths})
		if err != nil {
			return nil, false, err
		}
		snapshotGroups[string(k)] = append(snapshotGroups[string(k)], sn)
	}
	return snapshotGroups, groupBy.Tag || groupBy.Host || groupBy.Path, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/tree_test.go | internal/data/tree_test.go | package data_test
import (
"context"
"encoding/json"
"errors"
"os"
"path/filepath"
"strconv"
"testing"
"github.com/restic/restic/internal/archiver"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
// testFiles describes the fixture tree written by createTempDir: relative
// path (slash-separated) and file content.
var testFiles = []struct {
	name    string
	content []byte
}{
	{"foo", []byte("bar")},
	{"bar/foo2", []byte("bar2")},
	{"bar/bla/blubb", []byte("This is just a test!\n")},
}
// createTempDir creates a temporary directory populated with the files from
// testFiles and returns its path. Cleanup is left to the caller.
func createTempDir(t *testing.T) string {
	tempdir, err := os.MkdirTemp(rtest.TestTempDir, "restic-test-")
	rtest.OK(t, err)
	for _, test := range testFiles {
		file := filepath.Join(tempdir, test.name)
		dir := filepath.Dir(file)
		if dir != "." {
			rtest.OK(t, os.MkdirAll(dir, 0755))
		}
		// Create, write and close each file within the loop iteration.
		// The previous code registered `defer f.Close()` inside the loop
		// (so all files stayed open until the function returned) and did
		// so before checking the error from os.Create — on a Create
		// failure the Fatal-triggered defer would have called Close on a
		// nil *os.File.
		f, err := os.Create(file)
		rtest.OK(t, err)
		_, err = f.Write(test.content)
		rtest.OK(t, err)
		rtest.OK(t, f.Close())
	}
	return tempdir
}
// TestTree exercises createTempDir; it currently performs no further
// assertions beyond fixture creation succeeding.
func TestTree(t *testing.T) {
	dir := createTempDir(t)
	defer func() {
		if rtest.TestCleanupTempDirs {
			rtest.RemoveAll(t, dir)
		}
	}()
}

// testNodes contains node names with edge-case characters: backslashes,
// UTF-8, control bytes, and Latin-1 (invalid UTF-8) sequences.
var testNodes = []data.Node{
	{Name: "normal"},
	{Name: "with backslashes \\zzz"},
	{Name: "test utf-8 föbärß"},
	{Name: "test invalid \x00\x01\x02\x03\x04"},
	{Name: "test latin1 \x75\x6d\x6c\xe4\xfc\x74\xf6\x6e\xdf\x6e\x6c\x6c"},
}

// TestNodeMarshal checks that node names survive a JSON round trip
// unchanged, including non-UTF-8 byte sequences.
func TestNodeMarshal(t *testing.T) {
	for i, n := range testNodes {
		nodeData, err := json.Marshal(&n)
		rtest.OK(t, err)
		var node data.Node
		err = json.Unmarshal(nodeData, &node)
		rtest.OK(t, err)
		if n.Name != node.Name {
			t.Fatalf("Node %d: Names are not equal, want: %q got: %q", i, n.Name, node.Name)
		}
	}
}
func nodeForFile(t *testing.T, name string) *data.Node {
f, err := (&fs.Local{}).OpenFile(name, fs.O_NOFOLLOW, true)
rtest.OK(t, err)
node, err := f.ToNode(false, t.Logf)
rtest.OK(t, err)
rtest.OK(t, f.Close())
return node
}
func TestNodeComparison(t *testing.T) {
node := nodeForFile(t, "tree_test.go")
n2 := *node
rtest.Assert(t, node.Equals(n2), "nodes aren't equal")
n2.Size--
rtest.Assert(t, !node.Equals(n2), "nodes are equal")
}
func TestEmptyLoadTree(t *testing.T) {
repo := repository.TestRepository(t)
tree := data.NewTree(0)
var id restic.ID
rtest.OK(t, repo.WithBlobUploader(context.TODO(), func(ctx context.Context, uploader restic.BlobSaverWithAsync) error {
var err error
// save tree
id, err = data.SaveTree(ctx, uploader, tree)
return err
}))
// load tree again
tree2, err := data.LoadTree(context.TODO(), repo, id)
rtest.OK(t, err)
rtest.Assert(t, tree.Equals(tree2),
"trees are not equal: want %v, got %v",
tree, tree2)
}
func TestTreeEqualSerialization(t *testing.T) {
files := []string{"node.go", "tree.go", "tree_test.go"}
for i := 1; i <= len(files); i++ {
tree := data.NewTree(i)
builder := data.NewTreeJSONBuilder()
for _, fn := range files[:i] {
node := nodeForFile(t, fn)
rtest.OK(t, tree.Insert(node))
rtest.OK(t, builder.AddNode(node))
rtest.Assert(t, tree.Insert(node) != nil, "no error on duplicate node")
rtest.Assert(t, builder.AddNode(node) != nil, "no error on duplicate node")
rtest.Assert(t, errors.Is(builder.AddNode(node), data.ErrTreeNotOrdered), "wrong error returned")
}
treeBytes, err := json.Marshal(tree)
treeBytes = append(treeBytes, '\n')
rtest.OK(t, err)
stiBytes, err := builder.Finalize()
rtest.OK(t, err)
// compare serialization of an individual node and the SaveTreeIterator
rtest.Equals(t, treeBytes, stiBytes)
}
}
// BenchmarkBuildTree measures inserting pre-sorted nodes into a Tree.
func BenchmarkBuildTree(b *testing.B) {
	const size = 100 // Directories of this size are not uncommon.
	nodes := make([]data.Node, size)
	for i := range nodes {
		// Archiver.SaveTree inputs in sorted order, so do that here too.
		nodes[i].Name = strconv.Itoa(i)
	}
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		t := data.NewTree(size)
		for i := range nodes {
			_ = t.Insert(&nodes[i])
		}
	}
}

// TestLoadTree runs testLoadTree against every repository version.
func TestLoadTree(t *testing.T) {
	repository.TestAllVersions(t, testLoadTree)
}

// testLoadTree archives a directory and checks the snapshot's root tree
// loads. Skipped unless the benchmark archive directory is configured.
func testLoadTree(t *testing.T, version uint) {
	if rtest.BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping")
	}
	// archive a few files
	repo, _, _ := repository.TestRepositoryWithVersion(t, version)
	sn := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil)
	_, err := data.LoadTree(context.TODO(), repo, *sn.Tree)
	rtest.OK(t, err)
}

// BenchmarkLoadTree runs benchmarkLoadTree against every repository version.
func BenchmarkLoadTree(t *testing.B) {
	repository.BenchmarkAllVersions(t, benchmarkLoadTree)
}

// benchmarkLoadTree measures loading a snapshot's root tree repeatedly.
func benchmarkLoadTree(t *testing.B, version uint) {
	if rtest.BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping")
	}
	// archive a few files
	repo, _, _ := repository.TestRepositoryWithVersion(t, version)
	sn := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil)
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		_, err := data.LoadTree(context.TODO(), repo, *sn.Tree)
		rtest.OK(t, err)
	}
}
// TestFindTreeDirectory resolves subfolder paths inside a deterministic fake
// snapshot and checks the returned subtree IDs. The hard-coded IDs depend on
// TestCreateSnapshot's deterministic generation for the fixed timestamp.
func TestFindTreeDirectory(t *testing.T) {
	repo := repository.TestRepository(t)
	sn := data.TestCreateSnapshot(t, repo, parseTimeUTC("2017-07-07 07:07:08"), 3)
	for _, exp := range []struct {
		subfolder string
		id        restic.ID
		err       error
	}{
		// "", "/" and "." all resolve to the snapshot root.
		{"", restic.TestParseID("c25199703a67455b34cc0c6e49a8ac8861b268a5dd09dc5b2e31e7380973fc97"), nil},
		{"/", restic.TestParseID("c25199703a67455b34cc0c6e49a8ac8861b268a5dd09dc5b2e31e7380973fc97"), nil},
		{".", restic.TestParseID("c25199703a67455b34cc0c6e49a8ac8861b268a5dd09dc5b2e31e7380973fc97"), nil},
		{"..", restic.ID{}, errors.New("path ..: not found")},
		{"file-1", restic.ID{}, errors.New("path file-1: not a directory")},
		{"dir-21", restic.TestParseID("76172f9dec15d7e4cb98d2993032e99f06b73b2f02ffea3b7cfd9e6b4d762712"), nil},
		{"/dir-21", restic.TestParseID("76172f9dec15d7e4cb98d2993032e99f06b73b2f02ffea3b7cfd9e6b4d762712"), nil},
		{"dir-21/", restic.TestParseID("76172f9dec15d7e4cb98d2993032e99f06b73b2f02ffea3b7cfd9e6b4d762712"), nil},
		{"dir-21/dir-24", restic.TestParseID("74626b3fb2bd4b3e572b81a4059b3e912bcf2a8f69fecd9c187613b7173f13b1"), nil},
	} {
		t.Run("", func(t *testing.T) {
			id, err := data.FindTreeDirectory(context.TODO(), repo, sn.Tree, exp.subfolder)
			if exp.err == nil {
				rtest.OK(t, err)
				rtest.Assert(t, exp.id == *id, "unexpected id, expected %v, got %v", exp.id, id)
			} else {
				rtest.Assert(t, exp.err.Error() == err.Error(), "unexpected err, expected %v, got %v", exp.err, err)
			}
		})
	}
	// A nil tree ID must be rejected.
	_, err := data.FindTreeDirectory(context.TODO(), repo, nil, "")
	rtest.Assert(t, err != nil, "missing error on null tree id")
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/progress.go | internal/ui/progress.go | package ui
import (
"fmt"
"os"
"strconv"
"time"
"github.com/restic/restic/internal/ui/progress"
)
// CalculateProgressInterval returns the interval configured via RESTIC_PROGRESS_FPS
// or if unset returns an interval for 60fps on interactive terminals and 0 (=disabled)
// for non-interactive terminals or when run using the --quiet flag
func CalculateProgressInterval(show bool, json bool, canUpdateStatus bool) time.Duration {
	fps, err := strconv.ParseFloat(os.Getenv("RESTIC_PROGRESS_FPS"), 64)
	switch {
	case err == nil && fps > 0:
		// Explicit override via environment; cap at 60 fps.
		if fps > 60 {
			fps = 60
		}
		return time.Duration(float64(time.Second) / fps)
	case !show, !json && !canUpdateStatus:
		// Progress disabled: quiet mode, or a plain-text non-interactive
		// terminal without JSON output.
		return 0
	default:
		return time.Second / 10
	}
}
// newProgressMax returns a progress.Counter that prints to terminal if provided.
// Returns nil (a no-op counter) when show is false. With max == 0 only the
// elapsed time and current value are printed; otherwise a percentage and
// "v / max" are included. On the final update the status line is cleared and
// the summary is printed permanently.
func newProgressMax(show bool, max uint64, description string, term Terminal) *progress.Counter {
	if !show {
		return nil
	}
	interval := CalculateProgressInterval(show, false, term.CanUpdateStatus())
	return progress.NewCounter(interval, max, func(v uint64, max uint64, d time.Duration, final bool) {
		var status string
		if max == 0 {
			status = fmt.Sprintf("[%s] %d %s",
				FormatDuration(d), v, description)
		} else {
			status = fmt.Sprintf("[%s] %s %d / %d %s",
				FormatDuration(d), FormatPercent(v, max), v, max, description)
		}
		if final {
			// Clear the transient status line, then print the final state.
			term.SetStatus(nil)
			term.Print(status)
		} else {
			term.SetStatus([]string{status})
		}
	})
}
// progressPrinter implements progress.Printer on top of a Terminal,
// embedding Message for plain text output.
type progressPrinter struct {
	term Terminal
	Message
	show bool // false suppresses all counters (quiet / JSON mode)
}

// NewCounter returns an unbounded counter (no max value).
func (t *progressPrinter) NewCounter(description string) *progress.Counter {
	return newProgressMax(t.show, 0, description, t.term)
}

// NewCounterTerminalOnly returns a counter that is only active when output
// goes to an actual terminal.
func (t *progressPrinter) NewCounterTerminalOnly(description string) *progress.Counter {
	return newProgressMax(t.show && t.term.OutputIsTerminal(), 0, description, t.term)
}

// NewProgressPrinter builds a progress.Printer for the given terminal.
// JSON mode forces verbosity to 0, disabling all progress output.
func NewProgressPrinter(json bool, verbosity uint, term Terminal) progress.Printer {
	if json {
		verbosity = 0
	}
	return &progressPrinter{
		term:    term,
		Message: *NewMessage(term, verbosity),
		show:    verbosity > 0,
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/mock.go | internal/ui/mock.go | package ui
import (
"context"
"io"
)
// compile-time check that MockTerminal satisfies the Terminal interface
var _ Terminal = &MockTerminal{}

// MockTerminal is a Terminal implementation for tests that records printed
// lines and errors in memory.
type MockTerminal struct {
	Output []string
	Errors []string
}

func (m *MockTerminal) Print(line string) {
	m.Output = append(m.Output, line)
}

func (m *MockTerminal) Error(line string) {
	m.Errors = append(m.Errors, line)
}

// SetStatus records the current status lines.
// NOTE(review): this REPLACES Output with the given lines rather than
// appending — presumably intentional so tests see only the latest status,
// but confirm against callers before relying on accumulated Output.
func (m *MockTerminal) SetStatus(lines []string) {
	m.Output = append([]string{}, lines...)
}

func (m *MockTerminal) CanUpdateStatus() bool {
	return true
}

// InputRaw returns nil; the mock provides no raw input stream.
func (m *MockTerminal) InputRaw() io.ReadCloser {
	return nil
}

func (m *MockTerminal) InputIsTerminal() bool {
	return true
}

// ReadPassword always returns the fixed string "password".
func (m *MockTerminal) ReadPassword(_ context.Context, _ string) (string, error) {
	return "password", nil
}

// OutputWriter returns nil; the mock provides no writer.
func (m *MockTerminal) OutputWriter() io.Writer {
	return nil
}

// OutputRaw returns nil; the mock provides no raw writer.
func (m *MockTerminal) OutputRaw() io.Writer {
	return nil
}

func (m *MockTerminal) OutputIsTerminal() bool {
	return true
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/terminal.go | internal/ui/terminal.go | package ui
import (
"context"
"io"
)
// Terminal is used to write messages and display status lines which can be
// updated. See termstatus.Terminal for a concrete implementation, and
// MockTerminal for a test double.
type Terminal interface {
	// Print writes a line to the terminal. Appends a newline if not present.
	Print(line string)
	// Error writes an error to the terminal. Appends a newline if not present.
	Error(line string)
	// SetStatus sets the status lines to the terminal, replacing any
	// previously set status.
	SetStatus(lines []string)
	// CanUpdateStatus returns true if the terminal can update the status lines.
	CanUpdateStatus() bool
	// InputRaw returns the input reader.
	InputRaw() io.ReadCloser
	// InputIsTerminal returns true if the input is a terminal.
	InputIsTerminal() bool
	// ReadPassword reads the password from the terminal.
	ReadPassword(ctx context.Context, prompt string) (string, error)
	// OutputWriter returns a output writer that is safe for concurrent use with
	// other output methods. Output is only shown after a line break.
	OutputWriter() io.Writer
	// OutputRaw returns the raw output writer. Should only be used if there is no
	// other option. Must not be used in combination with Print, Error, SetStatus
	// or any other method that writes to the terminal.
	OutputRaw() io.Writer
	// OutputIsTerminal returns true if the output is a terminal.
	OutputIsTerminal() bool
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/format.go | internal/ui/format.go | package ui
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"math/bits"
"strconv"
"time"
"unicode"
"golang.org/x/text/width"
)
// FormatBytes renders a byte count in binary units (KiB/MiB/GiB/TiB) with
// three decimal places, or as a plain "N B" below 1 KiB.
func FormatBytes(c uint64) string {
	units := []struct {
		limit uint64
		name  string
	}{
		{1 << 40, "TiB"},
		{1 << 30, "GiB"},
		{1 << 20, "MiB"},
		{1 << 10, "KiB"},
	}
	for _, u := range units {
		if c >= u.limit {
			return fmt.Sprintf("%.3f %s", float64(c)/float64(u.limit), u.name)
		}
	}
	return fmt.Sprintf("%d B", c)
}
// FormatPercent formats numerator/denominator as a percentage with two
// decimal places, clamped to at most 100%. Returns "" when the denominator
// is zero.
func FormatPercent(numerator uint64, denominator uint64) string {
	if denominator == 0 {
		return ""
	}
	pct := float64(numerator) / float64(denominator) * 100.0
	if pct > 100 {
		pct = 100
	}
	return fmt.Sprintf("%3.2f%%", pct)
}
// FormatDuration formats d as FormatSeconds would, truncating to whole
// seconds first.
func FormatDuration(d time.Duration) string {
	sec := uint64(d / time.Second)
	return FormatSeconds(sec)
}
// FormatSeconds formats sec as MM:SS, or HH:MM:SS if sec seconds
// is at least an hour.
func FormatSeconds(sec uint64) string {
	hours, rem := sec/3600, sec%3600
	mins, secs := rem/60, rem%60
	if hours == 0 {
		return fmt.Sprintf("%d:%02d", mins, secs)
	}
	return fmt.Sprintf("%d:%02d:%02d", hours, mins, secs)
}
// ParseBytes parses a size in bytes from s. It understands the suffixes
// B, K, M, G and T (upper or lower case) for powers of 1024; a string
// without a suffix is taken as a plain byte count. Negative results and
// multiplications that overflow int64 are rejected with strconv.ErrRange.
func ParseBytes(s string) (int64, error) {
	if s == "" {
		return 0, errors.New("expected size, got empty string")
	}
	numStr := s[:len(s)-1]
	var unit uint64
	switch s[len(s)-1] {
	case 'b', 'B':
		unit = 1
	case 'k', 'K':
		unit = 1 << 10
	case 'm', 'M':
		unit = 1 << 20
	case 'g', 'G':
		unit = 1 << 30
	case 't', 'T':
		unit = 1 << 40
	default:
		// No recognized suffix: the whole string is the number.
		numStr = s
		unit = 1
	}
	value, err := strconv.ParseInt(numStr, 10, 64)
	if err != nil {
		return 0, err
	}
	// Detect overflow via the full 128-bit product: any high bits or a
	// negative low word mean the result does not fit in int64.
	hi, lo := bits.Mul64(uint64(value), unit)
	if res := int64(lo); hi == 0 && res >= 0 {
		return res, nil
	}
	return 0, fmt.Errorf("ParseSize: %q: %w", numStr, strconv.ErrRange)
}
// ToJSONString encodes status as JSON followed by a newline. It panics on
// encoding errors, so it must only be used with values known to be
// JSON-serializable.
func ToJSONString(status interface{}) string {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(status); err != nil {
		panic(err)
	}
	return buf.String()
}
// DisplayWidth returns the number of terminal cells needed to display s
func DisplayWidth(s string) int {
	width := 0
	for _, r := range s {
		width += displayRuneWidth(r)
	}
	return width
}

// displayRuneWidth returns the terminal cell width of a single rune based on
// its East Asian Width property: wide/fullwidth runes take two cells,
// narrow/halfwidth/ambiguous/neutral take one, everything else zero.
func displayRuneWidth(r rune) int {
	switch width.LookupRune(r).Kind() {
	case width.EastAsianWide, width.EastAsianFullwidth:
		return 2
	case width.EastAsianNarrow, width.EastAsianHalfwidth, width.EastAsianAmbiguous, width.Neutral:
		return 1
	default:
		return 0
	}
}
// Quote lines with funny characters in them, meaning control chars, newlines,
// tabs, anything else non-printable and invalid UTF-8.
//
// This is intended to produce a string that does not mess up the terminal
// rather than produce an unambiguous quoted string.
func Quote(line string) string {
	clean := true
	for _, r := range line {
		// The replacement character usually means the input is not UTF-8.
		if r == unicode.ReplacementChar || !unicode.IsPrint(r) {
			clean = false
			break
		}
	}
	if clean {
		return line
	}
	return strconv.Quote(line)
}
// Truncate s to fit in width (number of terminal cells) w.
// If w is negative, returns the empty string.
func Truncate(s string, w int) string {
	if len(s) < w {
		// Since the display width of a character is at most 2
		// and all of ASCII (single byte per rune) has width 1,
		// no character takes more bytes to encode than its width.
		return s
	}
	// Walk the string byte-wise, charging one cell per rune and a second
	// cell for wide runes, and cut as soon as the budget w is exhausted.
	for i := uint(0); i < uint(len(s)); {
		utfsize := uint(1) // UTF-8 encoding size of first rune in s.
		w--
		if s[i] > unicode.MaxASCII {
			// Non-ASCII lead byte: ask wideRune for the rune's cell
			// width and its actual UTF-8 size.
			var wide bool
			if wide, utfsize = wideRune(s[i:]); wide {
				w--
			}
		}
		if w < 0 {
			// The current rune no longer fits; return everything before it.
			return s[:i]
		}
		i += utfsize
	}
	return s
}
// Guess whether the first rune in s would occupy two terminal cells
// instead of one. This cannot be determined exactly without knowing
// the terminal font, so we treat all ambiguous runes as full-width,
// i.e., two cells.
func wideRune(s string) (wide bool, utfsize uint) {
	prop, size := width.LookupString(s)
	switch prop.Kind() {
	case width.Neutral, width.EastAsianNarrow:
		return false, uint(size)
	default:
		return true, uint(size)
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/format_test.go | internal/ui/format_test.go | package ui
import (
"strconv"
"testing"
rtest "github.com/restic/restic/internal/test"
)
// TestFormatBytes checks human-readable byte formatting at unit boundaries.
func TestFormatBytes(t *testing.T) {
	cases := []struct {
		size uint64
		want string
	}{
		{0, "0 B"},
		{1023, "1023 B"},
		{1024, "1.000 KiB"},
		{5<<20 + 1<<19, "5.500 MiB"},
		{1 << 30, "1.000 GiB"},
		{2 << 30, "2.000 GiB"},
		{1<<40 - 1<<36, "960.000 GiB"},
		{1 << 40, "1.000 TiB"},
	}
	for _, tc := range cases {
		got := FormatBytes(tc.size)
		if got != tc.want {
			t.Errorf("want %q, got %q", tc.want, got)
		}
	}
}
// TestFormatPercent checks percentage formatting, including 0% and 100%.
func TestFormatPercent(t *testing.T) {
	cases := []struct {
		num, denom uint64
		want       string
	}{
		{0, 5, "0.00%"},
		{3, 7, "42.86%"},
		{99, 99, "100.00%"},
	}
	for _, tc := range cases {
		got := FormatPercent(tc.num, tc.denom)
		if got != tc.want {
			t.Errorf("want %q, got %q", tc.want, got)
		}
	}
}
// TestParseBytes checks parsing of byte sizes with optional b/k/m/g/t
// suffixes (case-insensitive, powers of 1024).
func TestParseBytes(t *testing.T) {
	for _, tt := range []struct {
		in       string
		expected int64
	}{
		{"1024", 1024},
		{"1024b", 1024},
		{"1024B", 1024},
		{"1k", 1024},
		{"100k", 102400},
		{"100K", 102400},
		{"10M", 10485760},
		{"100m", 104857600},
		{"20G", 21474836480},
		{"10g", 10737418240},
		{"2T", 2199023255552},
		{"2t", 2199023255552},
		{"9223372036854775807", 1<<63 - 1}, // math.MaxInt64 without a suffix
	} {
		actual, err := ParseBytes(tt.in)
		rtest.OK(t, err)
		rtest.Equals(t, tt.expected, actual)
	}
}
// TestParseBytesInvalid checks that malformed and overflowing inputs are
// rejected with an error and a zero result.
func TestParseBytesInvalid(t *testing.T) {
	for _, s := range []string{
		"",
		" ",
		"foobar",
		"zzz",
		"18446744073709551615", // 1<<64-1.
		"9223372036854775807k", // 1<<63-1 kiB.
		"9999999999999M",
		"99999999999999999999",
	} {
		v, err := ParseBytes(s)
		if err == nil {
			t.Errorf("wanted error for invalid value %q, got nil", s)
		}
		rtest.Equals(t, int64(0), v)
	}
}
// TestDisplayWidth checks the terminal cell width of strings containing
// plain ASCII, combining/narrow, and East Asian wide runes.
func TestDisplayWidth(t *testing.T) {
	for _, c := range []struct {
		input string
		want  int
	}{
		{"foo", 3},
		{"aéb", 3},
		// NOTE(review): this case appears to be two visible runes with an
		// expected width of 3 — the literal likely contains an invisible
		// rune that was lost in transit; verify against upstream.
		{"ab", 3},
		{"a’b", 3},
		{"aあb", 4}, // あ is East Asian wide: 2 cells
	} {
		if got := DisplayWidth(c.input); got != c.want {
			t.Errorf("wrong display width for '%s', want %d, got %d", c.input, c.want, got)
		}
	}
}
// TestQuote checks that only strings containing non-printable runes or
// invalid UTF-8 are quoted; everything else passes through unchanged.
func TestQuote(t *testing.T) {
	for _, c := range []struct {
		in        string
		needQuote bool
	}{
		{"foo.bar/baz", false},
		{"föó_bàŕ-bãẑ", false},
		{" foo ", false},
		{"foo bar", false},
		{"foo\nbar", true},
		{"foo\rbar", true},
		{"foo\abar", true},
		{"\xff", true}, // invalid UTF-8
		{`c:\foo\bar`, false},
		// Issue #2260: terminal control characters.
		{"\x1bm_red_is_beautiful", true},
	} {
		if c.needQuote {
			rtest.Equals(t, strconv.Quote(c.in), Quote(c.in))
		} else {
			rtest.Equals(t, c.in, Quote(c.in))
		}
	}
}
// TestTruncate checks truncation to a terminal cell budget, including
// negative widths, multi-byte runes, and wide (2-cell) runes.
func TestTruncate(t *testing.T) {
	var tests = []struct {
		input  string
		width  int
		output string
	}{
		{"", 80, ""},
		{"", 0, ""},
		{"", -1, ""},
		{"foo", 80, "foo"},
		{"foo", 4, "foo"},
		{"foo", 3, "foo"},
		{"foo", 2, "fo"},
		{"foo", 1, "f"},
		{"foo", 0, ""},
		{"foo", -1, ""},
		{"Löwen", 4, "Löwe"},
		// あ occupies two cells, so width 7 fits only three of them.
		{"あああああ/data", 7, "あああ"},
		{"あああああ/data", 10, "あああああ"},
		{"あああああ/data", 11, "あああああ/"},
	}
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			out := Truncate(test.input, test.width)
			if out != test.output {
				t.Fatalf("wrong output for input %v, width %d: want %q, got %q",
					test.input, test.width, test.output, out)
			}
		})
	}
}
// benchmarkTruncate is the shared driver for the Truncate benchmarks.
func benchmarkTruncate(b *testing.B, s string, w int) {
	for i := 0; i < b.N; i++ {
		Truncate(s, w)
	}
}
// BenchmarkTruncateASCII measures Truncate on an ASCII-only message that
// needs exactly one byte cut off.
func BenchmarkTruncateASCII(b *testing.B) {
	const s = "This is an ASCII-only status message...\r\n"
	benchmarkTruncate(b, s, len(s)-1)
}
// BenchmarkTruncateUnicode measures Truncate on mixed-width text. The
// setup computes the exact display width of s so that the benchmarked
// call has to truncate by one cell.
func BenchmarkTruncateUnicode(b *testing.B) {
	s := "Hello World or Καλημέρα κόσμε or こんにちは 世界"
	w := 0
	for i := 0; i < len(s); {
		w++
		wide, utfsize := wideRune(s[i:])
		if wide {
			w++
		}
		i += int(utfsize)
	}
	b.ResetTimer() // exclude the width computation from the measurement
	benchmarkTruncate(b, s, w-1)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/message.go | internal/ui/message.go | package ui
import (
"fmt"
)
// Message reports progress with messages of different verbosity.
type Message struct {
	term Terminal // destination for all output
	v    uint     // verbosity level; higher prints more
}
// NewMessage returns a message progress reporter that writes to term,
// filtering output by the given verbosity level.
func NewMessage(term Terminal, verbosity uint) *Message {
	m := &Message{term: term, v: verbosity}
	return m
}
// E reports an error. This message is always printed to stderr.
// msg and args follow fmt.Sprintf semantics.
func (m *Message) E(msg string, args ...interface{}) {
	m.term.Error(fmt.Sprintf(msg, args...))
}
// S prints a message; this should only be used for very important messages
// that are not errors. The message is even printed if --quiet is specified.
func (m *Message) S(msg string, args ...interface{}) {
	m.term.Print(fmt.Sprintf(msg, args...))
}
// PT prints a message if verbosity >= 1 (neither --quiet nor --verbose is specified)
// and stdout points to a terminal.
// This is used for informational messages.
func (m *Message) PT(msg string, args ...interface{}) {
	if !m.term.OutputIsTerminal() || m.v < 1 {
		return
	}
	m.term.Print(fmt.Sprintf(msg, args...))
}
// P prints a message if verbosity >= 1 (neither --quiet nor --verbose is specified).
// This is used for normal messages which are not errors.
func (m *Message) P(msg string, args ...interface{}) {
	if m.v < 1 {
		return
	}
	m.term.Print(fmt.Sprintf(msg, args...))
}
// V prints a message if verbosity >= 2 (equivalent to --verbose), this is used for
// verbose messages.
func (m *Message) V(msg string, args ...interface{}) {
	if m.v < 2 {
		return
	}
	m.term.Print(fmt.Sprintf(msg, args...))
}
// VV prints a message if verbosity >= 3 (equivalent to --verbose=2), this is used for
// debug messages.
func (m *Message) VV(msg string, args ...interface{}) {
	if m.v < 3 {
		return
	}
	m.term.Print(fmt.Sprintf(msg, args...))
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/restore/json.go | internal/ui/restore/json.go | package restore
import (
"time"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/progress"
)
// jsonPrinter reports restore progress as JSON documents, one per line.
type jsonPrinter struct {
	progress.Printer              // embedded generic message printer (JSON mode)
	terminal         ui.Terminal  // destination for status and error output
	verbosity        uint         // controls emission of per-item messages
}
// NewJSONProgress returns a restore progress printer that emits
// line-delimited JSON to the given terminal.
func NewJSONProgress(terminal ui.Terminal, verbosity uint) ProgressPrinter {
	p := &jsonPrinter{
		Printer:   ui.NewProgressPrinter(true, verbosity, terminal),
		terminal:  terminal,
		verbosity: verbosity,
	}
	return p
}
// print emits status as one JSON line on the terminal's regular output.
func (t *jsonPrinter) print(status interface{}) {
	t.terminal.Print(ui.ToJSONString(status))
}
// error emits status as one JSON line on the terminal's error output.
func (t *jsonPrinter) error(status interface{}) {
	t.terminal.Error(ui.ToJSONString(status))
}
// Update emits a "status" message describing the current restore state.
func (t *jsonPrinter) Update(p State, duration time.Duration) {
	status := statusUpdate{
		MessageType:    "status",
		SecondsElapsed: uint64(duration / time.Second),
		TotalFiles:     p.FilesTotal,
		FilesRestored:  p.FilesFinished,
		FilesSkipped:   p.FilesSkipped,
		FilesDeleted:   p.FilesDeleted,
		TotalBytes:     p.AllBytesTotal,
		BytesRestored:  p.AllBytesWritten,
		BytesSkipped:   p.AllBytesSkipped,
	}
	// Guard against division by zero when nothing is to be restored.
	if p.AllBytesTotal > 0 {
		status.PercentDone = float64(p.AllBytesWritten) / float64(p.AllBytesTotal)
	}
	t.print(status)
}
// Error emits an "error" message for item on the error stream. The error
// is reported but never propagated, so the restore continues.
func (t *jsonPrinter) Error(item string, err error) error {
	msg := errorUpdate{
		MessageType: "error",
		Error:       errorObject{Message: err.Error()},
		During:      "restore",
		Item:        item,
	}
	t.error(msg)
	return nil
}
// CompleteItem emits a "verbose_status" message for a finished item.
// Per-item messages are only produced at verbosity 3 (--verbose=2).
func (t *jsonPrinter) CompleteItem(messageType ItemAction, item string, size uint64) {
	if t.verbosity < 3 {
		return
	}
	var action string
	switch messageType {
	case ActionDirRestored, ActionFileRestored, ActionOtherRestored:
		action = "restored"
	case ActionFileUpdated:
		action = "updated"
	case ActionFileUnchanged:
		action = "unchanged"
	case ActionDeleted:
		action = "deleted"
	default:
		panic("unknown message type")
	}
	t.print(verboseUpdate{
		MessageType: "verbose_status",
		Action:      action,
		Item:        item,
		Size:        size,
	})
}
// Finish emits the final "summary" message once the restore is complete.
func (t *jsonPrinter) Finish(p State, duration time.Duration) {
	status := summaryOutput{
		MessageType:    "summary",
		SecondsElapsed: uint64(duration / time.Second),
		TotalFiles:     p.FilesTotal,
		FilesRestored:  p.FilesFinished,
		FilesSkipped:   p.FilesSkipped,
		FilesDeleted:   p.FilesDeleted,
		TotalBytes:     p.AllBytesTotal,
		BytesRestored:  p.AllBytesWritten,
		BytesSkipped:   p.AllBytesSkipped,
	}
	t.print(status)
}
// statusUpdate is the JSON document for periodic progress reports.
type statusUpdate struct {
	MessageType    string  `json:"message_type"` // "status"
	SecondsElapsed uint64  `json:"seconds_elapsed,omitempty"`
	PercentDone    float64 `json:"percent_done"`
	TotalFiles     uint64  `json:"total_files,omitempty"`
	FilesRestored  uint64  `json:"files_restored,omitempty"`
	FilesSkipped   uint64  `json:"files_skipped,omitempty"`
	FilesDeleted   uint64  `json:"files_deleted,omitempty"`
	TotalBytes     uint64  `json:"total_bytes,omitempty"`
	BytesRestored  uint64  `json:"bytes_restored,omitempty"`
	BytesSkipped   uint64  `json:"bytes_skipped,omitempty"`
}
// errorObject wraps an error message for JSON output.
type errorObject struct {
	Message string `json:"message"`
}
// errorUpdate is the JSON document for an error encountered while restoring.
type errorUpdate struct {
	MessageType string      `json:"message_type"` // "error"
	Error       errorObject `json:"error"`
	During      string      `json:"during"`
	Item        string      `json:"item"`
}
// verboseUpdate is the JSON document for a single completed item
// (only emitted at high verbosity).
type verboseUpdate struct {
	MessageType string `json:"message_type"` // "verbose_status"
	Action      string `json:"action"`
	Item        string `json:"item"`
	Size        uint64 `json:"size"`
}
// summaryOutput is the JSON document emitted once at the end of a restore.
type summaryOutput struct {
	MessageType    string `json:"message_type"` // "summary"
	SecondsElapsed uint64 `json:"seconds_elapsed,omitempty"`
	TotalFiles     uint64 `json:"total_files,omitempty"`
	FilesRestored  uint64 `json:"files_restored,omitempty"`
	FilesSkipped   uint64 `json:"files_skipped,omitempty"`
	FilesDeleted   uint64 `json:"files_deleted,omitempty"`
	TotalBytes     uint64 `json:"total_bytes,omitempty"`
	BytesRestored  uint64 `json:"bytes_restored,omitempty"`
	BytesSkipped   uint64 `json:"bytes_skipped,omitempty"`
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/restore/text.go | internal/ui/restore/text.go | package restore
import (
"fmt"
"time"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/progress"
)
// textPrinter reports restore progress as human-readable text.
type textPrinter struct {
	progress.Printer             // embedded generic message printer (text mode)
	terminal         ui.Terminal // destination for status lines and the summary
}
// NewTextProgress returns a restore progress printer that writes
// human-readable status lines to the given terminal.
func NewTextProgress(terminal ui.Terminal, verbosity uint) ProgressPrinter {
	p := &textPrinter{
		Printer:  ui.NewProgressPrinter(false, verbosity, terminal),
		terminal: terminal,
	}
	return p
}
// Update replaces the terminal status line with the current restore state.
func (t *textPrinter) Update(p State, duration time.Duration) {
	elapsed := ui.FormatDuration(duration)
	written := ui.FormatBytes(p.AllBytesWritten)
	total := ui.FormatBytes(p.AllBytesTotal)
	percent := ui.FormatPercent(p.AllBytesWritten, p.AllBytesTotal)
	line := fmt.Sprintf("[%s] %s %v files/dirs %s, total %v files/dirs %v",
		elapsed, percent, p.FilesFinished, written, p.FilesTotal, total)
	if p.FilesSkipped > 0 {
		line += fmt.Sprintf(", skipped %v files/dirs %v", p.FilesSkipped, ui.FormatBytes(p.AllBytesSkipped))
	}
	if p.FilesDeleted > 0 {
		line += fmt.Sprintf(", deleted %v files/dirs", p.FilesDeleted)
	}
	t.terminal.SetStatus([]string{line})
}
// Error reports an error for item but always returns nil, so the
// restore continues after logging.
func (t *textPrinter) Error(item string, err error) error {
	t.E("ignoring error for %s: %s\n", item, err)
	return nil
}
// CompleteItem logs a finished item at debug verbosity. File-level
// actions include the item size; directories, special files, and
// deletions are logged without one.
func (t *textPrinter) CompleteItem(messageType ItemAction, item string, size uint64) {
	var action string
	showSize := true
	switch messageType {
	case ActionDirRestored, ActionOtherRestored:
		action, showSize = "restored", false
	case ActionFileRestored:
		action = "restored"
	case ActionFileUpdated:
		action = "updated"
	case ActionFileUnchanged:
		action = "unchanged"
	case ActionDeleted:
		action, showSize = "deleted", false
	default:
		panic("unknown message type")
	}
	if showSize {
		t.VV("%-9v %v with size %v", action, item, ui.FormatBytes(size))
	} else {
		t.VV("%-9v %v", action, item)
	}
}
// Finish clears the status line and prints the final summary. A short
// form is used when everything was restored; otherwise restored/total
// counts and byte amounts are shown.
func (t *textPrinter) Finish(p State, duration time.Duration) {
	t.terminal.SetStatus(nil)
	elapsed := ui.FormatDuration(duration)
	total := ui.FormatBytes(p.AllBytesTotal)
	var summary string
	if p.FilesFinished == p.FilesTotal && p.AllBytesWritten == p.AllBytesTotal {
		summary = fmt.Sprintf("Summary: Restored %d files/dirs (%s) in %s", p.FilesTotal, total, elapsed)
	} else {
		summary = fmt.Sprintf("Summary: Restored %d / %d files/dirs (%s / %s) in %s",
			p.FilesFinished, p.FilesTotal, ui.FormatBytes(p.AllBytesWritten), total, elapsed)
	}
	if p.FilesSkipped > 0 {
		summary += fmt.Sprintf(", skipped %v files/dirs %v", p.FilesSkipped, ui.FormatBytes(p.AllBytesSkipped))
	}
	if p.FilesDeleted > 0 {
		summary += fmt.Sprintf(", deleted %v files/dirs", p.FilesDeleted)
	}
	t.terminal.Print(summary)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/restore/progress.go | internal/ui/restore/progress.go | package restore
import (
"sync"
"time"
"github.com/restic/restic/internal/ui/progress"
)
// State collects the counters that describe restore progress.
type State struct {
	FilesFinished   uint64
	FilesTotal      uint64
	FilesSkipped    uint64
	FilesDeleted    uint64
	AllBytesWritten uint64
	AllBytesTotal   uint64
	AllBytesSkipped uint64
}
// Progress tracks restore state and periodically reports it to a printer.
// All state is guarded by m; methods are safe for concurrent use.
type Progress struct {
	updater progress.Updater
	m       sync.Mutex
	progressInfoMap map[string]progressInfoEntry // per-file in-flight progress, keyed by name
	s               State
	started         time.Time
	printer         ProgressPrinter
}
// progressInfoEntry tracks how much of a single file has been restored.
type progressInfoEntry struct {
	bytesWritten uint64
	bytesTotal   uint64
}
// ProgressPrinter is implemented by the text and JSON restore printers.
type ProgressPrinter interface {
	Update(progress State, duration time.Duration)
	Error(item string, err error) error
	CompleteItem(action ItemAction, item string, size uint64)
	Finish(progress State, duration time.Duration)
	progress.Printer
}
// ItemAction describes what happened to a single item during restore.
type ItemAction string
// Constants for the different CompleteItem actions.
const (
	ActionDirRestored   ItemAction = "dir restored"
	ActionFileRestored  ItemAction = "file restored"
	ActionFileUpdated   ItemAction = "file updated"
	ActionFileUnchanged ItemAction = "file unchanged"
	ActionOtherRestored ItemAction = "other restored"
	ActionDeleted       ItemAction = "deleted"
)
// NewProgress returns a restore progress tracker that reports to printer
// every interval via progress.NewUpdater.
func NewProgress(printer ProgressPrinter, interval time.Duration) *Progress {
	p := &Progress{
		progressInfoMap: make(map[string]progressInfoEntry),
		started:         time.Now(),
		printer:         printer,
	}
	// The Updater value is copied into p; it invokes p.update periodically
	// (see progress.NewUpdater for the exact scheduling semantics).
	p.updater = *progress.NewUpdater(interval, p.update)
	return p
}
// update is the Updater callback: it snapshots the state under the lock
// and forwards it, choosing Finish for the final invocation.
func (p *Progress) update(runtime time.Duration, final bool) {
	p.m.Lock()
	defer p.m.Unlock()
	if final {
		p.printer.Finish(p.s, runtime)
		return
	}
	p.printer.Update(p.s, runtime)
}
// AddFile starts tracking a new file with the given size.
// Calling it on a nil *Progress is a no-op.
func (p *Progress) AddFile(size uint64) {
	if p == nil {
		return
	}
	p.m.Lock()
	p.s.FilesTotal++
	p.s.AllBytesTotal += size
	p.m.Unlock()
}
// AddProgress accumulates the number of bytes written for a file
func (p *Progress) AddProgress(name string, action ItemAction, bytesWrittenPortion uint64, bytesTotal uint64) {
	if p == nil {
		return
	}
	p.m.Lock()
	defer p.m.Unlock()
	// The first call for a given name records the file's total size.
	entry, exists := p.progressInfoMap[name]
	if !exists {
		entry.bytesTotal = bytesTotal
	}
	entry.bytesWritten += bytesWrittenPortion
	p.progressInfoMap[name] = entry
	p.s.AllBytesWritten += bytesWrittenPortion
	// Once all bytes have arrived the file is complete and its entry is
	// dropped. Note that zero-size files complete on the first call (0 == 0).
	if entry.bytesWritten == entry.bytesTotal {
		delete(p.progressInfoMap, name)
		p.s.FilesFinished++
		p.printer.CompleteItem(action, name, bytesTotal)
	}
}
// AddSkippedFile records a file that was left untouched because it was
// already up to date, and reports it as unchanged.
func (p *Progress) AddSkippedFile(name string, size uint64) {
	if p == nil {
		return
	}
	p.m.Lock()
	defer p.m.Unlock()
	p.s.FilesSkipped++
	p.s.AllBytesSkipped += size
	p.printer.CompleteItem(ActionFileUnchanged, name, size)
}
// ReportDeletion records that the file with the given name was deleted
// during the restore and reports it to the printer.
func (p *Progress) ReportDeletion(name string) {
	if p == nil {
		return
	}
	p.m.Lock()
	defer p.m.Unlock()
	// Fix: FilesDeleted was previously incremented before acquiring the
	// mutex, which raced with concurrent readers/writers of p.s (update,
	// AddFile, etc.). All state mutation now happens under the lock, in
	// line with the other methods.
	p.s.FilesDeleted++
	p.printer.CompleteItem(ActionDeleted, name, 0)
}
// Error forwards an error for item to the printer under the lock; the
// printer decides whether the error aborts the restore.
func (p *Progress) Error(item string, err error) error {
	if p == nil {
		return nil
	}
	p.m.Lock()
	defer p.m.Unlock()
	return p.printer.Error(item, err)
}
// Finish stops the periodic updater, which triggers the final report.
func (p *Progress) Finish() {
	p.updater.Done()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/restore/json_test.go | internal/ui/restore/json_test.go | package restore
import (
"testing"
"time"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui"
)
// createJSONProgress returns a mock terminal together with a JSON
// printer at full verbosity (3), so all message types are emitted.
func createJSONProgress() (*ui.MockTerminal, ProgressPrinter) {
	term := &ui.MockTerminal{}
	printer := NewJSONProgress(term, 3)
	return term, printer
}
// TestJSONPrintUpdate checks the "status" document for a partial restore.
func TestJSONPrintUpdate(t *testing.T) {
	term, printer := createJSONProgress()
	printer.Update(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second)
	test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.Output)
}
// TestJSONPrintUpdateWithSkipped checks that skipped counters appear in the status document.
func TestJSONPrintUpdateWithSkipped(t *testing.T) {
	term, printer := createJSONProgress()
	printer.Update(State{3, 11, 2, 0, 29, 47, 59}, 5*time.Second)
	test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":29,\"bytes_skipped\":59}\n"}, term.Output)
}
// TestJSONPrintSummaryOnSuccess checks the "summary" document for a full restore.
func TestJSONPrintSummaryOnSuccess(t *testing.T) {
	term, printer := createJSONProgress()
	printer.Finish(State{11, 11, 0, 0, 47, 47, 0}, 5*time.Second)
	test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"total_bytes\":47,\"bytes_restored\":47}\n"}, term.Output)
}
// TestJSONPrintSummaryOnErrors checks the summary when not everything was restored.
func TestJSONPrintSummaryOnErrors(t *testing.T) {
	term, printer := createJSONProgress()
	printer.Finish(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second)
	test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.Output)
}
// TestJSONPrintSummaryOnSuccessWithSkipped checks the summary with skipped files present.
func TestJSONPrintSummaryOnSuccessWithSkipped(t *testing.T) {
	term, printer := createJSONProgress()
	printer.Finish(State{11, 11, 2, 0, 47, 47, 59}, 5*time.Second)
	test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":47,\"bytes_skipped\":59}\n"}, term.Output)
}
// TestJSONPrintCompleteItem checks the "verbose_status" document for
// each item action and its expected action string.
func TestJSONPrintCompleteItem(t *testing.T) {
	for _, data := range []struct {
		action   ItemAction
		size     uint64
		expected string
	}{
		{ActionDirRestored, 0, "{\"message_type\":\"verbose_status\",\"action\":\"restored\",\"item\":\"test\",\"size\":0}\n"},
		{ActionFileRestored, 123, "{\"message_type\":\"verbose_status\",\"action\":\"restored\",\"item\":\"test\",\"size\":123}\n"},
		{ActionFileUpdated, 123, "{\"message_type\":\"verbose_status\",\"action\":\"updated\",\"item\":\"test\",\"size\":123}\n"},
		{ActionFileUnchanged, 123, "{\"message_type\":\"verbose_status\",\"action\":\"unchanged\",\"item\":\"test\",\"size\":123}\n"},
		{ActionDeleted, 0, "{\"message_type\":\"verbose_status\",\"action\":\"deleted\",\"item\":\"test\",\"size\":0}\n"},
	} {
		term, printer := createJSONProgress()
		printer.CompleteItem(data.action, "test", data.size)
		test.Equals(t, []string{data.expected}, term.Output)
	}
}
// TestJSONError checks that errors are emitted on the error stream with
// proper JSON escaping of quotes in the message.
func TestJSONError(t *testing.T) {
	term, printer := createJSONProgress()
	test.Equals(t, printer.Error("/path", errors.New("error \"message\"")), nil)
	test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":{\"message\":\"error \\\"message\\\"\"},\"during\":\"restore\",\"item\":\"/path\"}\n"}, term.Errors)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/restore/text_test.go | internal/ui/restore/text_test.go | package restore
import (
"testing"
"time"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui"
)
// createTextProgress returns a mock terminal together with a text
// printer at full verbosity (3).
func createTextProgress() (*ui.MockTerminal, ProgressPrinter) {
	term := &ui.MockTerminal{}
	printer := NewTextProgress(term, 3)
	return term, printer
}
// TestPrintUpdate checks the status line for a partial restore.
func TestPrintUpdate(t *testing.T) {
	term, printer := createTextProgress()
	printer.Update(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second)
	test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B"}, term.Output)
}
// TestPrintUpdateWithSkipped checks that skipped counts are appended to the status line.
func TestPrintUpdateWithSkipped(t *testing.T) {
	term, printer := createTextProgress()
	printer.Update(State{3, 11, 2, 0, 29, 47, 59}, 5*time.Second)
	test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B, skipped 2 files/dirs 59 B"}, term.Output)
}
// TestPrintSummaryOnSuccess checks the short summary form used when everything was restored.
func TestPrintSummaryOnSuccess(t *testing.T) {
	term, printer := createTextProgress()
	printer.Finish(State{11, 11, 0, 0, 47, 47, 0}, 5*time.Second)
	test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05"}, term.Output)
}
// TestPrintSummaryOnErrors checks the long summary form with restored/total counts.
func TestPrintSummaryOnErrors(t *testing.T) {
	term, printer := createTextProgress()
	printer.Finish(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second)
	test.Equals(t, []string{"Summary: Restored 3 / 11 files/dirs (29 B / 47 B) in 0:05"}, term.Output)
}
// TestPrintSummaryOnSuccessWithSkipped checks that skipped counts appear in the summary.
func TestPrintSummaryOnSuccessWithSkipped(t *testing.T) {
	term, printer := createTextProgress()
	printer.Finish(State{11, 11, 2, 0, 47, 47, 59}, 5*time.Second)
	test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05, skipped 2 files/dirs 59 B"}, term.Output)
}
// TestPrintCompleteItem checks the per-item log lines: file actions
// include the size, directories/other/deletions do not.
func TestPrintCompleteItem(t *testing.T) {
	for _, data := range []struct {
		action   ItemAction
		size     uint64
		expected string
	}{
		{ActionDirRestored, 0, "restored test"},
		{ActionFileRestored, 123, "restored test with size 123 B"},
		{ActionOtherRestored, 0, "restored test"},
		{ActionFileUpdated, 123, "updated test with size 123 B"},
		{ActionFileUnchanged, 123, "unchanged test with size 123 B"},
		{ActionDeleted, 0, "deleted test"},
	} {
		term, printer := createTextProgress()
		printer.CompleteItem(data.action, "test", data.size)
		test.Equals(t, []string{data.expected}, term.Output)
	}
}
// TestError checks that errors are logged to the error stream and
// swallowed (nil is returned).
func TestError(t *testing.T) {
	term, printer := createTextProgress()
	test.Equals(t, printer.Error("/path", errors.New("error \"message\"")), nil)
	test.Equals(t, []string{"ignoring error for /path: error \"message\"\n"}, term.Errors)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/restore/progress_test.go | internal/ui/restore/progress_test.go | package restore
import (
"testing"
"time"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui/progress"
)
// printerTraceEntry records one Update/Finish call made on the mock printer.
type printerTraceEntry struct {
	progress State
	duration time.Duration
	isFinished bool
}
type printerTrace []printerTraceEntry
// itemTraceEntry records one CompleteItem call.
type itemTraceEntry struct {
	action ItemAction
	item string
	size uint64
}
type itemTrace []itemTraceEntry
// errorTraceEntry records one Error call.
type errorTraceEntry struct {
	item string
	err error
}
type errorTrace []errorTraceEntry
// mockPrinter implements ProgressPrinter and records every call for inspection.
type mockPrinter struct {
	trace printerTrace
	items itemTrace
	errors errorTrace
	progress.NoopPrinter
}
// mockFinishDuration is the fixed duration stamped on Finish entries.
const mockFinishDuration = 42 * time.Second
// Update records a non-final trace entry.
func (p *mockPrinter) Update(progress State, duration time.Duration) {
	p.trace = append(p.trace, printerTraceEntry{progress, duration, false})
}
// Error records the reported error and returns nil.
func (p *mockPrinter) Error(item string, err error) error {
	p.errors = append(p.errors, errorTraceEntry{item, err})
	return nil
}
// CompleteItem records the completed item.
func (p *mockPrinter) CompleteItem(action ItemAction, item string, size uint64) {
	p.items = append(p.items, itemTraceEntry{action, item, size})
}
// Finish records a final trace entry with the fixed mockFinishDuration.
func (p *mockPrinter) Finish(progress State, _ time.Duration) {
	p.trace = append(p.trace, printerTraceEntry{progress, mockFinishDuration, true})
}
// testProgress runs fn against a fresh Progress backed by a mockPrinter,
// forces one update (final when fn returns true), and returns copies of
// the recorded traces.
func testProgress(fn func(progress *Progress) bool) (printerTrace, itemTrace, errorTrace) {
	printer := &mockPrinter{}
	progress := NewProgress(printer, 0)
	final := fn(progress)
	progress.update(0, final)
	trace := append(printerTrace{}, printer.trace...)
	items := append(itemTrace{}, printer.items...)
	errors := append(errorTrace{}, printer.errors...)
	// cleanup to avoid goroutine leak, but copy trace first
	progress.Finish()
	return trace, items, errors
}
// TestNew checks that a fresh Progress reports an all-zero state.
func TestNew(t *testing.T) {
	result, items, _ := testProgress(func(progress *Progress) bool {
		return false
	})
	test.Equals(t, printerTrace{
		printerTraceEntry{State{0, 0, 0, 0, 0, 0, 0}, 0, false},
	}, result)
	test.Equals(t, itemTrace{}, items)
}
// TestAddFile checks that AddFile updates the totals without completing anything.
func TestAddFile(t *testing.T) {
	fileSize := uint64(100)
	result, items, _ := testProgress(func(progress *Progress) bool {
		progress.AddFile(fileSize)
		return false
	})
	test.Equals(t, printerTrace{
		printerTraceEntry{State{0, 1, 0, 0, 0, fileSize, 0}, 0, false},
	}, result)
	test.Equals(t, itemTrace{}, items)
}
// TestFirstProgressOnAFile checks that a partial write counts bytes but
// does not complete the file.
func TestFirstProgressOnAFile(t *testing.T) {
	expectedBytesWritten := uint64(5)
	expectedBytesTotal := uint64(100)
	result, items, _ := testProgress(func(progress *Progress) bool {
		progress.AddFile(expectedBytesTotal)
		progress.AddProgress("test", ActionFileUpdated, expectedBytesWritten, expectedBytesTotal)
		return false
	})
	test.Equals(t, printerTrace{
		printerTraceEntry{State{0, 1, 0, 0, expectedBytesWritten, expectedBytesTotal, 0}, 0, false},
	}, result)
	test.Equals(t, itemTrace{}, items)
}
// TestLastProgressOnAFile checks that a file completes exactly when the
// accumulated portions reach its total size.
func TestLastProgressOnAFile(t *testing.T) {
	fileSize := uint64(100)
	result, items, _ := testProgress(func(progress *Progress) bool {
		progress.AddFile(fileSize)
		progress.AddProgress("test", ActionFileUpdated, 30, fileSize)
		progress.AddProgress("test", ActionFileUpdated, 35, fileSize)
		progress.AddProgress("test", ActionFileUpdated, 35, fileSize)
		return false
	})
	test.Equals(t, printerTrace{
		printerTraceEntry{State{1, 1, 0, 0, fileSize, fileSize, 0}, 0, false},
	}, result)
	test.Equals(t, itemTrace{
		itemTraceEntry{action: ActionFileUpdated, item: "test", size: fileSize},
	}, items)
}
// TestLastProgressOnLastFile checks completion accounting across two
// files that finish in sequence.
func TestLastProgressOnLastFile(t *testing.T) {
	fileSize := uint64(100)
	result, items, _ := testProgress(func(progress *Progress) bool {
		progress.AddFile(fileSize)
		progress.AddFile(50)
		progress.AddProgress("test1", ActionFileUpdated, 50, 50)
		progress.AddProgress("test2", ActionFileUpdated, 50, fileSize)
		progress.AddProgress("test2", ActionFileUpdated, 50, fileSize)
		return false
	})
	test.Equals(t, printerTrace{
		printerTraceEntry{State{2, 2, 0, 0, 50 + fileSize, 50 + fileSize, 0}, 0, false},
	}, result)
	test.Equals(t, itemTrace{
		itemTraceEntry{action: ActionFileUpdated, item: "test1", size: 50},
		itemTraceEntry{action: ActionFileUpdated, item: "test2", size: fileSize},
	}, items)
}
// TestSummaryOnSuccess checks the final (Finish) trace entry when every
// file was restored completely.
func TestSummaryOnSuccess(t *testing.T) {
	fileSize := uint64(100)
	result, _, _ := testProgress(func(progress *Progress) bool {
		progress.AddFile(fileSize)
		progress.AddFile(50)
		progress.AddProgress("test1", ActionFileUpdated, 50, 50)
		progress.AddProgress("test2", ActionFileUpdated, fileSize, fileSize)
		return true
	})
	test.Equals(t, printerTrace{
		printerTraceEntry{State{2, 2, 0, 0, 50 + fileSize, 50 + fileSize, 0}, mockFinishDuration, true},
	}, result)
}
// TestSummaryOnErrors checks the final trace entry when one file remains incomplete.
func TestSummaryOnErrors(t *testing.T) {
	fileSize := uint64(100)
	result, _, _ := testProgress(func(progress *Progress) bool {
		progress.AddFile(fileSize)
		progress.AddFile(50)
		progress.AddProgress("test1", ActionFileUpdated, 50, 50)
		progress.AddProgress("test2", ActionFileUpdated, fileSize/2, fileSize)
		return true
	})
	test.Equals(t, printerTrace{
		printerTraceEntry{State{1, 2, 0, 0, 50 + fileSize/2, 50 + fileSize, 0}, mockFinishDuration, true},
	}, result)
}
// TestSkipFile checks that skipped files are counted separately and
// reported as unchanged.
func TestSkipFile(t *testing.T) {
	fileSize := uint64(100)
	result, items, _ := testProgress(func(progress *Progress) bool {
		progress.AddSkippedFile("test", fileSize)
		return true
	})
	test.Equals(t, printerTrace{
		printerTraceEntry{State{0, 0, 1, 0, 0, 0, fileSize}, mockFinishDuration, true},
	}, result)
	test.Equals(t, itemTrace{
		itemTraceEntry{ActionFileUnchanged, "test", fileSize},
	}, items)
}
// TestProgressTypes checks that dir restores, zero-size files, and
// deletions each produce the expected item trace entries.
func TestProgressTypes(t *testing.T) {
	fileSize := uint64(100)
	_, items, _ := testProgress(func(progress *Progress) bool {
		progress.AddFile(fileSize)
		progress.AddFile(0)
		progress.AddProgress("dir", ActionDirRestored, fileSize, fileSize)
		progress.AddProgress("new", ActionFileRestored, 0, 0)
		progress.ReportDeletion("del")
		return true
	})
	test.Equals(t, itemTrace{
		itemTraceEntry{ActionDirRestored, "dir", fileSize},
		itemTraceEntry{ActionFileRestored, "new", 0},
		itemTraceEntry{ActionDeleted, "del", 0},
	}, items)
}
// TestProgressError checks that errors are forwarded to the printer in
// order and that Progress.Error itself returns nil.
func TestProgressError(t *testing.T) {
	err1 := errors.New("err1")
	err2 := errors.New("err2")
	_, _, errors := testProgress(func(progress *Progress) bool {
		test.Equals(t, progress.Error("first", err1), nil)
		test.Equals(t, progress.Error("second", err2), nil)
		return true
	})
	test.Equals(t, errorTrace{
		errorTraceEntry{"first", err1},
		errorTraceEntry{"second", err2},
	}, errors)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/progress/counter_test.go | internal/ui/progress/counter_test.go | package progress_test
import (
"testing"
"time"
"github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui/progress"
)
// TestCounter exercises a Counter end to end: N increments from a
// goroutine plus one SetMax, then verifies the report callback saw a
// final call, monotonically increasing values, the exact end value, and
// exactly one change of the maximum.
func TestCounter(t *testing.T) {
	const N = 100
	const startTotal = uint64(12345)
	var (
		finalSeen  = false
		increasing = true
		last       uint64
		lastTotal  = startTotal
		ncalls     int
		nmaxChange int
	)
	// report collects invariants about the callback invocations.
	report := func(value uint64, total uint64, d time.Duration, final bool) {
		if final {
			finalSeen = true
		}
		if value < last {
			increasing = false
		}
		last = value
		if total != lastTotal {
			nmaxChange++
		}
		lastTotal = total
		ncalls++
	}
	c := progress.NewCounter(10*time.Millisecond, startTotal, report)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for i := 0; i < N; i++ {
			time.Sleep(time.Millisecond)
			c.Add(1)
		}
		c.SetMax(42)
	}()
	<-done
	c.Done()
	test.Assert(t, finalSeen, "final call did not happen")
	test.Assert(t, increasing, "values not increasing")
	test.Equals(t, uint64(N), last)
	test.Equals(t, uint64(42), lastTotal)
	test.Equals(t, 1, nmaxChange)
	t.Log("number of calls:", ncalls)
}
// TestCounterNil checks that all Counter methods are nil-safe.
func TestCounterNil(_ *testing.T) {
	// Shouldn't panic.
	var c *progress.Counter
	c.Add(1)
	c.SetMax(42)
	c.Done()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/progress/counter.go | internal/ui/progress/counter.go | package progress
import (
"sync/atomic"
"time"
)
// A Func is a callback for a Counter.
//
// The final argument is true if Counter.Done has been called,
// which means that the current call will be the last.
type Func func(value uint64, total uint64, runtime time.Duration, final bool)
// A Counter tracks a running count and controls a goroutine that passes its
// value periodically to a Func.
//
// The Func is also called when SIGUSR1 (or SIGINFO, on BSD) is received.
type Counter struct {
	Updater
	// value is the running count; max is the expected final value.
	// Both are atomics, so Add/SetMax/Get need no lock.
	value, max atomic.Uint64
}
// NewCounter starts a new Counter that reports value/total via report
// every interval.
func NewCounter(interval time.Duration, total uint64, report Func) *Counter {
	c := new(Counter)
	c.max.Store(total)
	// The Updater value is copied into c; its callback snapshots the
	// atomics and forwards them to report.
	c.Updater = *NewUpdater(interval, func(runtime time.Duration, final bool) {
		v, maxV := c.Get()
		report(v, maxV, runtime, final)
	})
	return c
}
// Add v to the Counter. This method is concurrency-safe and a no-op on
// a nil receiver.
func (c *Counter) Add(v uint64) {
	if c == nil {
		return
	}
	c.value.Add(v)
}
// SetMax sets the maximum expected counter value. This method is
// concurrency-safe and a no-op on a nil receiver.
func (c *Counter) SetMax(max uint64) {
	if c == nil {
		return
	}
	c.max.Store(max)
}
// Get returns the current value and the maximum of c.
// This method is concurrency-safe.
func (c *Counter) Get() (v, max uint64) {
	return c.value.Load(), c.max.Load()
}
// Done stops the reporting goroutine and waits for the final report.
// A nil *Counter is tolerated and ignored.
func (c *Counter) Done() {
	if c == nil {
		return
	}
	c.Updater.Done()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/progress/printer.go | internal/ui/progress/printer.go | package progress
import "testing"
// A Printer can return a new counter or print messages
// at different log levels.
// It must be safe to call its methods from concurrent goroutines.
type Printer interface {
	// NewCounter returns a new progress counter. It is not shown if --quiet or --json is specified.
	NewCounter(description string) *Counter

	// NewCounterTerminalOnly returns a new progress counter that is only shown if stdout points to a
	// terminal. It is not shown if --quiet or --json is specified.
	NewCounterTerminalOnly(description string) *Counter

	// E reports an error. This message is always printed to stderr.
	// Appends a newline if not present.
	E(msg string, args ...interface{})
	// S prints a message, this is should only be used for very important messages
	// that are not errors. The message is even printed if --quiet is specified.
	// Appends a newline if not present.
	S(msg string, args ...interface{})
	// PT prints a message if verbosity >= 1 (neither --quiet nor --verbose is specified)
	// and stdout points to a terminal.
	// This is used for informational messages.
	PT(msg string, args ...interface{})
	// P prints a message if verbosity >= 1 (neither --quiet nor --verbose is specified),
	// this is used for normal messages which are not errors. Appends a newline if not present.
	P(msg string, args ...interface{})
	// V prints a message if verbosity >= 2 (equivalent to --verbose), this is used for
	// verbose messages. Appends a newline if not present.
	V(msg string, args ...interface{})
	// VV prints a message if verbosity >= 3 (equivalent to --verbose=2), this is used for
	// debug messages. Appends a newline if not present.
	VV(msg string, args ...interface{})
}
// NoopPrinter discards all messages.
type NoopPrinter struct{}

var _ Printer = (*NoopPrinter)(nil)

// NewCounter returns a nil counter; a nil *Counter safely ignores all updates.
func (*NoopPrinter) NewCounter(_ string) *Counter {
	return nil
}

// NewCounterTerminalOnly returns a nil counter; a nil *Counter safely ignores all updates.
func (*NoopPrinter) NewCounterTerminalOnly(_ string) *Counter {
	return nil
}

func (*NoopPrinter) E(_ string, _ ...interface{}) {}

func (*NoopPrinter) S(_ string, _ ...interface{}) {}

func (*NoopPrinter) PT(_ string, _ ...interface{}) {}

func (*NoopPrinter) P(_ string, _ ...interface{}) {}

func (*NoopPrinter) V(_ string, _ ...interface{}) {}

func (*NoopPrinter) VV(_ string, _ ...interface{}) {}
// TestPrinter prints messages during testing by forwarding every message to
// the test log, prefixed with its log level.
type TestPrinter struct {
	t testing.TB
}

// NewTestPrinter returns a Printer that logs via t.
func NewTestPrinter(t testing.TB) *TestPrinter {
	return &TestPrinter{
		t: t,
	}
}

var _ Printer = (*TestPrinter)(nil)

// NewCounter returns a nil counter; progress bars are not rendered in tests.
func (p *TestPrinter) NewCounter(_ string) *Counter {
	return nil
}

// NewCounterTerminalOnly returns a nil counter; progress bars are not rendered in tests.
func (p *TestPrinter) NewCounterTerminalOnly(_ string) *Counter {
	return nil
}

func (p *TestPrinter) E(msg string, args ...interface{}) {
	p.t.Logf("error: "+msg, args...)
}

func (p *TestPrinter) S(msg string, args ...interface{}) {
	p.t.Logf("stdout: "+msg, args...)
}

func (p *TestPrinter) PT(msg string, args ...interface{}) {
	p.t.Logf("stdout(terminal): "+msg, args...)
}

func (p *TestPrinter) P(msg string, args ...interface{}) {
	p.t.Logf("print: "+msg, args...)
}

func (p *TestPrinter) V(msg string, args ...interface{}) {
	p.t.Logf("verbose: "+msg, args...)
}

func (p *TestPrinter) VV(msg string, args ...interface{}) {
	p.t.Logf("verbose2: "+msg, args...)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/progress/updater_test.go | internal/ui/progress/updater_test.go | package progress_test
import (
"testing"
"time"
"github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui/progress"
)
// TestUpdater checks that a ticking Updater reports progress repeatedly and
// delivers a final report with a positive runtime when Done is called.
func TestUpdater(t *testing.T) {
	var (
		finalSeen = false
		ncalls    = 0
		dur       time.Duration
	)

	report := func(d time.Duration, final bool) {
		if final {
			finalSeen = true
		}
		dur = d
		ncalls++
	}
	c := progress.NewUpdater(10*time.Millisecond, report)
	time.Sleep(100 * time.Millisecond)
	c.Done()

	test.Assert(t, finalSeen, "final call did not happen")
	test.Assert(t, ncalls > 0, "no progress was reported")
	test.Assert(t, dur > 0, "duration must be positive")
}
// TestUpdaterStopTwice verifies that calling Done a second time is a no-op
// rather than a double channel close.
func TestUpdaterStopTwice(_ *testing.T) {
	// must not panic
	c := progress.NewUpdater(0, func(runtime time.Duration, final bool) {})
	c.Done()
	c.Done()
}
// TestUpdaterNoTick checks that with an interval of 0 no periodic updates are
// emitted and only the final report fires.
func TestUpdaterNoTick(t *testing.T) {
	finalSeen := false
	otherSeen := false
	report := func(d time.Duration, final bool) {
		if final {
			finalSeen = true
		} else {
			otherSeen = true
		}
	}
	c := progress.NewUpdater(0, report)
	time.Sleep(time.Millisecond)
	c.Done()

	test.Assert(t, finalSeen, "final call did not happen")
	test.Assert(t, !otherSeen, "unexpected status update")
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/progress/updater.go | internal/ui/progress/updater.go | package progress
import (
"time"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/ui/signals"
)
// An UpdateFunc is a callback for a (progress) Updater.
//
// The final argument is true if Updater.Done has been called,
// which means that the current call will be the last.
type UpdateFunc func(runtime time.Duration, final bool)

// An Updater controls a goroutine that periodically calls an UpdateFunc.
//
// The UpdateFunc is also called when SIGUSR1 (or SIGINFO, on BSD) is received.
type Updater struct {
	report  UpdateFunc
	start   time.Time     // when the Updater was created; runtime is measured from here
	stopped chan struct{} // Closed by run.
	stop    chan struct{} // Close to stop run.
	tick    *time.Ticker  // nil if the interval was 0 (no periodic updates)
}
// NewUpdater starts a new Updater that invokes report every interval.
// An interval of 0 disables periodic reporting; the callback then only fires
// on progress signals and on Done.
func NewUpdater(interval time.Duration, report UpdateFunc) *Updater {
	u := &Updater{
		report:  report,
		start:   time.Now(),
		stopped: make(chan struct{}),
		stop:    make(chan struct{}),
	}

	if interval > 0 {
		u.tick = time.NewTicker(interval)
	}

	go u.run()
	return u
}
// Done tells an Updater to stop and waits for it to report its final value.
// Later calls do nothing.
func (c *Updater) Done() {
	// c.stop == nil marks an Updater that was already stopped.
	if c == nil || c.stop == nil {
		return
	}
	if c.tick != nil {
		c.tick.Stop()
	}
	close(c.stop)
	<-c.stopped // Wait for last progress report.
	// Clear stop so a second Done returns early instead of closing twice.
	c.stop = nil
}
// run is the Updater's background goroutine. It calls report on every ticker
// tick, on every progress signal, and one final time when Done closes stop.
func (c *Updater) run() {
	defer close(c.stopped) // signals Done that the final report happened

	// A nil channel blocks forever in select, so with interval 0 only
	// signals and Done trigger reports.
	var tick <-chan time.Time
	if c.tick != nil {
		tick = c.tick.C
	}

	signalsCh := signals.GetProgressChannel()

	for final := false; !final; {
		var now time.Time
		select {
		case now = <-tick:
		case sig := <-signalsCh:
			debug.Log("Signal received: %v\n", sig)
			now = time.Now()
		case <-c.stop:
			final, now = true, time.Now()
		}
		c.report(now.Sub(c.start), final)
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/signals/signals_sysv.go | internal/ui/signals/signals_sysv.go | //go:build aix || linux || solaris
package signals
import (
"os/signal"
"syscall"
)
// setupSignals subscribes the package-global channel to SIGUSR1, the
// "report progress" signal on these platforms.
func setupSignals() {
	signal.Notify(signals.ch, syscall.SIGUSR1)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/signals/signals_bsd.go | internal/ui/signals/signals_bsd.go | //go:build darwin || dragonfly || freebsd || netbsd || openbsd
package signals
import (
"os/signal"
"syscall"
)
// setupSignals subscribes the package-global channel to SIGINFO (the BSD
// status-request signal, typically bound to Ctrl-T) as well as SIGUSR1.
func setupSignals() {
	signal.Notify(signals.ch, syscall.SIGINFO, syscall.SIGUSR1)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/signals/signals.go | internal/ui/signals/signals.go | package signals
import (
"os"
"sync"
)
// GetProgressChannel returns a channel with which a single listener
// receives each incoming signal. The channel and the platform-specific
// signal registration are initialized lazily, exactly once.
func GetProgressChannel() <-chan os.Signal {
	signals.once.Do(func() {
		// Buffer of 1 so a signal delivered while no listener is ready
		// is not lost (os/signal does not block on full channels).
		signals.ch = make(chan os.Signal, 1)
		setupSignals()
	})
	return signals.ch
}
// XXX The fact that signals is a single global variable means that only one
// listener receives each incoming signal.
var signals struct {
	ch   chan os.Signal // lazily created by GetProgressChannel
	once sync.Once      // guards one-time channel creation and setupSignals
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/signals/signals_windows.go | internal/ui/signals/signals_windows.go | package signals
// setupSignals is a no-op on Windows, which has no user-defined signals
// comparable to SIGUSR1/SIGINFO.
func setupSignals() {}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/termstatus/status.go | internal/ui/termstatus/status.go | package termstatus
import (
"bufio"
"context"
"fmt"
"io"
"slices"
"strings"
"sync"
"github.com/restic/restic/internal/terminal"
"github.com/restic/restic/internal/ui"
)
var _ ui.Terminal = &Terminal{}
// Terminal is used to write messages and display status lines which can be
// updated. When the output is redirected to a file, the status lines are not
// printed.
//
// All terminal writes funnel through the single Run goroutine via the msg
// and status channels, which makes the public methods concurrency-safe.
type Terminal struct {
	rd               io.ReadCloser
	inFd             uintptr // fd of rd; only meaningful if inputIsTerminal
	wr               io.Writer
	fd               uintptr // fd of wr; only meaningful if canUpdateStatus
	errWriter        io.Writer
	msg              chan message // regular output lines, consumed by Run
	status           chan status  // status-line updates, consumed by Run
	lastStatusLen    int          // number of status lines written last time
	inputIsTerminal  bool
	outputIsTerminal bool
	canUpdateStatus  bool // true when wr supports in-place status updates
	outputWriter     io.WriteCloser
	outputWriterOnce sync.Once // guards lazy creation of outputWriter
	// will be closed when the goroutine which runs Run() terminates, so it'll
	// yield a default value immediately
	closed           chan struct{}
	clearCurrentLine func(io.Writer, uintptr) error
	moveCursorUp     func(io.Writer, uintptr, int) error
}
// message is an output line sent to the Run goroutine. If barrier is non-nil
// the message carries no output; Run acknowledges on the channel instead
// (used by Flush to wait for pending output).
type message struct {
	line    string
	err     bool // write to errWriter instead of wr
	barrier chan struct{}
}

// status carries a full replacement set of status lines.
type status struct {
	lines []string
}

// fder is implemented by writers backed by a file descriptor (e.g. *os.File).
type fder interface {
	Fd() uintptr
}
// Setup creates a new termstatus.
// The returned function must be called to shut down the termstatus,
//
// Expected usage:
// ```
// term, cancel := termstatus.Setup(os.Stdin, os.Stdout, os.Stderr, false)
// defer cancel()
// // do stuff
// ```
func Setup(stdin io.ReadCloser, stdout, stderr io.Writer, quiet bool) (*Terminal, func()) {
	var wg sync.WaitGroup
	// only shutdown once cancel is called to ensure that no output is lost
	cancelCtx, cancel := context.WithCancel(context.Background())

	term := New(stdin, stdout, stderr, quiet)
	wg.Add(1)
	go func() {
		defer wg.Done()
		term.Run(cancelCtx)
	}()
	return term, func() {
		// flush any partial line buffered by OutputWriter before shutdown
		if term.outputWriter != nil {
			_ = term.outputWriter.Close()
		}
		term.Flush()
		// shutdown termstatus
		cancel()
		wg.Wait()
	}
}
// New returns a new Terminal for wr. A goroutine is started to update the
// terminal. It is terminated when ctx is cancelled. When wr is redirected to
// a file (e.g. via shell output redirection) or is just an io.Writer (not the
// open *os.File for stdout), no status lines are printed. The status lines and
// normal output (via Print/Printf) are written to wr, error messages are
// written to errWriter. If disableStatus is set to true, no status messages
// are printed even if the terminal supports it.
func New(rd io.ReadCloser, wr io.Writer, errWriter io.Writer, disableStatus bool) *Terminal {
	t := &Terminal{
		rd:        rd,
		wr:        wr,
		errWriter: errWriter,
		msg:       make(chan message),
		status:    make(chan status),
		closed:    make(chan struct{}),
	}

	if disableStatus {
		// leave all *IsTerminal/canUpdateStatus flags false
		return t
	}

	// Terminal detection requires a file descriptor, hence the fder check.
	if d, ok := rd.(fder); ok {
		if terminal.InputIsTerminal(d.Fd()) {
			t.inFd = d.Fd()
			t.inputIsTerminal = true
		}
	}

	if d, ok := wr.(fder); ok {
		if terminal.CanUpdateStatus(d.Fd()) {
			// only use the fancy status code when we're running on a real terminal.
			t.canUpdateStatus = true
			t.fd = d.Fd()
			t.clearCurrentLine = terminal.ClearCurrentLine(t.fd)
			t.moveCursorUp = terminal.MoveCursorUp(t.fd)
		}
		if terminal.OutputIsTerminal(d.Fd()) {
			t.outputIsTerminal = true
		}
	}

	return t
}
// InputIsTerminal returns whether the input is a terminal.
func (t *Terminal) InputIsTerminal() bool {
	return t.inputIsTerminal
}

// InputRaw returns the input reader.
func (t *Terminal) InputRaw() io.ReadCloser {
	return t.rd
}
// ReadPassword prompts for and reads a password. On a terminal, input echo is
// handled by the terminal package; otherwise the password is read as a plain
// line from the input reader.
func (t *Terminal) ReadPassword(ctx context.Context, prompt string) (string, error) {
	if t.InputIsTerminal() {
		// Flush pending output so the prompt appears after it.
		t.Flush()
		return terminal.ReadPassword(ctx, int(t.inFd), t.errWriter, prompt)
	}
	if t.OutputIsTerminal() {
		t.Print("reading repository password from stdin")
	}
	return readPassword(t.rd)
}
// readPassword reads the password from the given reader directly.
func readPassword(in io.Reader) (password string, err error) {
sc := bufio.NewScanner(in)
sc.Scan()
if sc.Err() != nil {
return "", fmt.Errorf("readPassword: %w", sc.Err())
}
return sc.Text(), nil
}
// CanUpdateStatus return whether the status output is updated in place.
func (t *Terminal) CanUpdateStatus() bool {
	return t.canUpdateStatus
}

// OutputWriter returns a output writer that is safe for concurrent use with
// other output methods. Output is only shown after a line break.
func (t *Terminal) OutputWriter() io.Writer {
	// lazily create a single shared line-buffering writer
	t.outputWriterOnce.Do(func() {
		t.outputWriter = newLineWriter(t.Print)
	})
	return t.outputWriter
}

// OutputRaw returns the raw output writer. Should only be used if there is no
// other option. Must not be used in combination with Print, Error, SetStatus
// or any other method that writes to the terminal.
func (t *Terminal) OutputRaw() io.Writer {
	// drain pending messages first so raw output is not interleaved
	t.Flush()
	return t.wr
}

// OutputIsTerminal returns whether the output is a terminal.
func (t *Terminal) OutputIsTerminal() bool {
	return t.outputIsTerminal
}
// Run updates the screen. It should be run in a separate goroutine. When
// ctx is cancelled, the status lines are cleanly removed.
func (t *Terminal) Run(ctx context.Context) {
	// closing t.closed lets the public methods fall through instead of
	// blocking on the channels once this goroutine exits
	defer close(t.closed)
	if t.canUpdateStatus {
		t.run(ctx)
		return
	}

	t.runWithoutStatus(ctx)
}
// run listens on the channels and updates the terminal screen.
// It keeps the current status lines below regular output: each printed
// message first clears the status, writes the line, then repaints the status.
func (t *Terminal) run(ctx context.Context) {
	var status []string
	var lastWrittenStatus []string
	for {
		select {
		case <-ctx.Done():
			// clear the status lines on shutdown, unless we are a
			// background process group (writes would stop us via SIGTTOU)
			if !terminal.IsProcessBackground(t.fd) {
				t.writeStatus([]string{})
			}
			return

		case msg := <-t.msg:
			if msg.barrier != nil {
				// Flush synchronization point, no output to write.
				msg.barrier <- struct{}{}
				continue
			}
			if terminal.IsProcessBackground(t.fd) {
				// ignore all messages, do nothing, we are in the background process group
				continue
			}
			if err := t.clearCurrentLine(t.wr, t.fd); err != nil {
				_, _ = fmt.Fprintf(t.errWriter, "write failed: %v\n", err)
				continue
			}

			var dst io.Writer
			if msg.err {
				dst = t.errWriter
			} else {
				dst = t.wr
			}

			if _, err := io.WriteString(dst, msg.line); err != nil {
				_, _ = fmt.Fprintf(t.errWriter, "write failed: %v\n", err)
				continue
			}

			// repaint the status below the newly printed line
			t.writeStatus(status)
			lastWrittenStatus = append([]string{}, status...)

		case stat := <-t.status:
			status = append(status[:0], stat.lines...)
			if terminal.IsProcessBackground(t.fd) {
				// ignore all messages, do nothing, we are in the background process group
				continue
			}

			// skip the repaint if nothing changed
			if !slices.Equal(status, lastWrittenStatus) {
				t.writeStatus(status)
				// Copy the status slice to avoid aliasing
				lastWrittenStatus = append([]string{}, status...)
			}
		}
	}
}
// writeStatus paints the given status lines and moves the cursor back up to
// the first status line, so the next regular output overwrites the status.
// Lines left over from a previously longer status are cleared.
func (t *Terminal) writeStatus(status []string) {
	statusLen := len(status)
	// work on a copy; extra blank lines may be appended below
	status = append([]string{}, status...)
	for i := len(status); i < t.lastStatusLen; i++ {
		// clear no longer used status lines
		status = append(status, "")
		if i > 0 {
			// all lines except the last one must have a line break
			status[i-1] = status[i-1] + "\n"
		}
	}
	// remember the painted line count for the next clearing pass
	t.lastStatusLen = statusLen

	for _, line := range status {
		if err := t.clearCurrentLine(t.wr, t.fd); err != nil {
			_, _ = fmt.Fprintf(t.errWriter, "write failed: %v\n", err)
		}

		_, err := t.wr.Write([]byte(line))
		if err != nil {
			_, _ = fmt.Fprintf(t.errWriter, "write failed: %v\n", err)
		}
	}

	if len(status) > 0 {
		// move back to the first status line (len-1 newlines were printed)
		if err := t.moveCursorUp(t.wr, t.fd, len(status)-1); err != nil {
			_, _ = fmt.Fprintf(t.errWriter, "write failed: %v\n", err)
		}
	}
}
// runWithoutStatus listens on the channels and just prints out the messages,
// without status lines. Status updates are printed as plain lines, but only
// when they differ from the previously printed status.
func (t *Terminal) runWithoutStatus(ctx context.Context) {
	var lastStatus []string
	for {
		select {
		case <-ctx.Done():
			return
		case msg := <-t.msg:
			if msg.barrier != nil {
				// Flush synchronization point, no output to write.
				msg.barrier <- struct{}{}
				continue
			}

			var dst io.Writer
			if msg.err {
				dst = t.errWriter
			} else {
				dst = t.wr
			}

			if _, err := io.WriteString(dst, msg.line); err != nil {
				_, _ = fmt.Fprintf(t.errWriter, "write failed: %v\n", err)
			}

		case stat := <-t.status:
			if !slices.Equal(stat.lines, lastStatus) {
				for _, line := range stat.lines {
					// Ensure that each message ends with exactly one newline.
					if _, err := fmt.Fprintln(t.wr, strings.TrimRight(line, "\n")); err != nil {
						_, _ = fmt.Fprintf(t.errWriter, "write failed: %v\n", err)
					}
				}
				// Copy the status slice to avoid aliasing
				lastStatus = append([]string{}, stat.lines...)
			}
		}
	}
}
// Flush waits for all pending messages to be printed. It sends a barrier
// message through the same channel as regular output and waits for the Run
// goroutine to acknowledge it; if Run has already terminated (t.closed is
// closed), Flush returns immediately.
func (t *Terminal) Flush() {
	ch := make(chan struct{})
	defer close(ch)
	select {
	case t.msg <- message{barrier: ch}:
	case <-t.closed:
	}
	select {
	case <-ch:
	case <-t.closed:
	}
}
// print forwards line to the terminal goroutine, appending a newline if the
// line does not already end in one. If the goroutine has terminated, the
// message is dropped silently.
func (t *Terminal) print(line string, isErr bool) {
	if n := len(line); n == 0 || line[n-1] != '\n' {
		line += "\n"
	}

	m := message{line: line, err: isErr}
	select {
	case t.msg <- m:
	case <-t.closed:
	}
}

// Print writes a line to the terminal's standard output.
func (t *Terminal) Print(line string) {
	t.print(line, false)
}

// Error writes a line to the terminal's error output.
func (t *Terminal) Error(line string) {
	t.print(line, true)
}
// sanitizeLines quotes unsafe characters in each line and truncates lines to
// the given width (if width > 0). Every line except the last gets a trailing
// newline. The input slice is modified in place and returned.
func sanitizeLines(lines []string, width int) []string {
	// Sanitize lines and truncate them if they're too long.
	for i, line := range lines {
		line = ui.Quote(line)
		if width > 0 {
			// -2 leaves room so the cursor does not wrap at the edge
			line = ui.Truncate(line, width-2)
		}
		if i < len(lines)-1 { // Last line gets no line break.
			line += "\n"
		}
		lines[i] = line
	}
	return lines
}
// SetStatus updates the status lines.
// The lines should not contain newlines; this method adds them.
// Pass nil or an empty array to remove the status lines.
func (t *Terminal) SetStatus(lines []string) {
	// only truncate interactive status output
	var width int
	if t.canUpdateStatus {
		width = terminal.Width(t.fd)
		if width <= 0 {
			// use 80 columns by default
			width = 80
		}
	}

	sanitizeLines(lines, width)

	// drop the update if the Run goroutine has already terminated
	select {
	case t.status <- status{lines: lines}:
	case <-t.closed:
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/termstatus/status_test.go | internal/ui/termstatus/status_test.go | package termstatus
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"strings"
"testing"
"github.com/restic/restic/internal/terminal"
rtest "github.com/restic/restic/internal/test"
)
// TestSetStatus checks the exact escape-sequence output produced by repeated
// status updates, including clearing of stale lines and the final cleanup.
func TestSetStatus(t *testing.T) {
	buf, term, cancel := setupStatusTest()

	const (
		cl   = terminal.PosixControlClearLine
		home = terminal.PosixControlMoveCursorHome
		up   = terminal.PosixControlMoveCursorUp
	)

	term.SetStatus([]string{"first"})
	exp := home + cl + "first" + home

	term.SetStatus([]string{""})
	exp += home + cl + "" + home

	term.SetStatus([]string{})
	exp += home + cl + "" + home

	// already empty status
	term.SetStatus([]string{})

	term.SetStatus([]string{"foo", "bar", "baz"})
	exp += home + cl + "foo\n" + home + cl + "bar\n" +
		home + cl + "baz" + home + up + up

	term.SetStatus([]string{"quux", "needs\nquote"})
	exp += home + cl + "quux\n" +
		home + cl + "\"needs\\nquote\"\n" +
		home + cl + home + up + up // Clear third line

	cancel()
	exp += home + cl + "\n" + home + cl + home + up // Status cleared

	<-term.closed
	rtest.Equals(t, exp, buf.String())
}
// setupStatusTest creates a Terminal writing into a buffer with the in-place
// status machinery forced on (using the POSIX escape helpers and a fake fd),
// and starts its Run goroutine.
func setupStatusTest() (*bytes.Buffer, *Terminal, context.CancelFunc) {
	buf := &bytes.Buffer{}
	term := New(nil, buf, buf, false)
	term.canUpdateStatus = true
	term.fd = ^uintptr(0) // invalid fd, never passed to real terminal calls here
	term.clearCurrentLine = terminal.PosixClearCurrentLine
	term.moveCursorUp = terminal.PosixMoveCursorUp
	ctx, cancel := context.WithCancel(context.Background())
	go term.Run(ctx)
	return buf, term, cancel
}
// TestPrint checks that Print and Error clear the current line and append a
// newline before writing.
func TestPrint(t *testing.T) {
	buf, term, cancel := setupStatusTest()

	const (
		cl   = terminal.PosixControlClearLine
		home = terminal.PosixControlMoveCursorHome
	)

	term.Print("test")
	exp := home + cl + "test\n"
	term.Error("error")
	exp += home + cl + "error\n"

	cancel()
	<-term.closed
	rtest.Equals(t, exp, buf.String())
}
// TestSanitizeLines checks truncation to the terminal width and the placement
// of newlines on all lines but the last.
func TestSanitizeLines(t *testing.T) {
	var tests = []struct {
		input  []string
		width  int
		output []string
	}{
		{[]string{""}, 80, []string{""}},
		{[]string{"too long test line"}, 10, []string{"too long"}},
		{[]string{"too long test line", "text"}, 10, []string{"too long\n", "text"}},
		{[]string{"too long test line", "second long test line"}, 10, []string{"too long\n", "second l"}},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("%s %d", test.input, test.width), func(t *testing.T) {
			out := sanitizeLines(test.input, test.width)
			rtest.Equals(t, test.output, out)
		})
	}
}
// errorReader is an io.Reader that always fails with the configured error.
type errorReader struct{ err error }

func (r *errorReader) Read([]byte) (int, error) { return 0, r.err }

// TestReadPassword checks that a reader error is wrapped and propagated.
func TestReadPassword(t *testing.T) {
	want := errors.New("foo")
	_, err := readPassword(&errorReader{want})
	rtest.Assert(t, errors.Is(err, want), "wrong error %v", err)
}
// TestReadPasswordTerminal checks the non-terminal fallback path of
// Terminal.ReadPassword, which reads a plain line from the input reader.
func TestReadPasswordTerminal(t *testing.T) {
	expected := "password"
	term := New(io.NopCloser(strings.NewReader(expected)), io.Discard, io.Discard, false)
	pw, err := term.ReadPassword(context.Background(), "test")
	rtest.OK(t, err)
	rtest.Equals(t, expected, pw)
}
// TestRawInputOutput checks that the raw accessors return the original
// reader/writer and that non-file streams are not detected as terminals.
func TestRawInputOutput(t *testing.T) {
	input := io.NopCloser(strings.NewReader("password"))
	var output bytes.Buffer
	term, cancel := Setup(input, &output, io.Discard, false)
	defer cancel()
	rtest.Equals(t, input, term.InputRaw())
	rtest.Equals(t, false, term.InputIsTerminal())
	rtest.Equals(t, &output, term.OutputRaw())
	rtest.Equals(t, false, term.OutputIsTerminal())
	rtest.Equals(t, false, term.CanUpdateStatus())
}
// TestDisableStatus checks that with disableStatus set, messages and status
// lines are printed as plain output lines.
func TestDisableStatus(t *testing.T) {
	var output bytes.Buffer

	term, cancel := Setup(nil, &output, &output, true)
	rtest.Equals(t, false, term.CanUpdateStatus())

	term.Print("test")
	term.Error("error")
	term.SetStatus([]string{"status"})

	cancel()
	rtest.Equals(t, "test\nerror\nstatus\n", output.String())
}
// TestOutputWriter checks that OutputWriter buffers partial lines until
// either a newline arrives or the terminal is shut down.
func TestOutputWriter(t *testing.T) {
	var output bytes.Buffer

	term, cancel := Setup(nil, &output, &output, true)
	_, err := term.OutputWriter().Write([]byte("output\npartial"))
	rtest.OK(t, err)
	term.Print("test")
	term.Error("error")

	cancel()
	// "partial" is flushed last, by the writer's Close during shutdown
	rtest.Equals(t, "output\ntest\nerror\npartial\n", output.String())
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/termstatus/stdio_wrapper.go | internal/ui/termstatus/stdio_wrapper.go | package termstatus
import (
"bytes"
"io"
"sync"
)
type lineWriter struct {
m sync.Mutex
buf bytes.Buffer
print func(string)
}
var _ io.WriteCloser = &lineWriter{}
func newLineWriter(print func(string)) *lineWriter {
return &lineWriter{print: print}
}
func (w *lineWriter) Write(data []byte) (n int, err error) {
w.m.Lock()
defer w.m.Unlock()
n, err = w.buf.Write(data)
if err != nil {
return n, err
}
// look for line breaks
buf := w.buf.Bytes()
i := bytes.LastIndexByte(buf, '\n')
if i != -1 {
w.print(string(buf[:i+1]))
w.buf.Next(i + 1)
}
return n, err
}
func (w *lineWriter) Close() error {
w.m.Lock()
defer w.m.Unlock()
if w.buf.Len() > 0 {
w.print(string(append(w.buf.Bytes(), '\n')))
}
return nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/termstatus/stdio_wrapper_test.go | internal/ui/termstatus/stdio_wrapper_test.go | package termstatus
import (
"context"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
rtest "github.com/restic/restic/internal/test"
"golang.org/x/sync/errgroup"
)
// TestStdioWrapper feeds various write patterns into a lineWriter and checks
// that output is forwarded line by line, with the trailing partial line
// flushed (newline-terminated) on Close.
func TestStdioWrapper(t *testing.T) {
	var tests = []struct {
		inputs [][]byte
		output string
	}{
		{
			inputs: [][]byte{
				[]byte("foo"),
			},
			output: "foo\n",
		},
		{
			inputs: [][]byte{
				[]byte("foo"),
				[]byte("bar"),
				[]byte("\n"),
				[]byte("baz"),
			},
			output: "foobar\n" +
				"baz\n",
		},
		{
			inputs: [][]byte{
				[]byte("foo"),
				[]byte("bar\nbaz\n"),
				[]byte("bump\n"),
			},
			output: "foobar\n" +
				"baz\n" +
				"bump\n",
		},
		{
			inputs: [][]byte{
				[]byte("foo"),
				[]byte("bar\nbaz\n"),
				[]byte("bum"),
				[]byte("p\nx"),
				[]byte("x"),
				[]byte("x"),
				[]byte("z"),
			},
			output: "foobar\n" +
				"baz\n" +
				"bump\n" +
				"xxxz\n",
		},
	}

	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			var output strings.Builder
			w := newLineWriter(func(s string) { output.WriteString(s) })

			for _, data := range test.inputs {
				n, err := w.Write(data)
				if err != nil {
					t.Fatal(err)
				}

				if n != len(data) {
					t.Errorf("invalid length returned by Write, want %d, got %d", len(data), n)
				}
			}

			err := w.Close()
			if err != nil {
				t.Fatal(err)
			}

			if outstr := output.String(); outstr != test.output {
				t.Error(cmp.Diff(test.output, outstr))
			}
		})
	}
}
// TestStdioWrapperConcurrentWrites exercises concurrent Write calls; it only
// asserts absence of errors and relies on the race detector for data races.
func TestStdioWrapperConcurrentWrites(t *testing.T) {
	// tests for race conditions when run with `go test -race ./internal/ui/termstatus`
	w := newLineWriter(func(_ string) {})

	wg, _ := errgroup.WithContext(context.TODO())
	for range 5 {
		wg.Go(func() error {
			_, err := w.Write([]byte("test\n"))
			return err
		})
	}
	rtest.OK(t, wg.Wait())
	rtest.OK(t, w.Close())
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/backup/json.go | internal/ui/backup/json.go | package backup
import (
"sort"
"time"
"github.com/restic/restic/internal/archiver"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/progress"
)
// JSONProgress reports progress for the `backup` command in JSON.
type JSONProgress struct {
	progress.Printer             // JSON-aware message printer
	term             ui.Terminal // destination for the JSON lines
	v                uint        // verbosity level; gates verbose_status messages
}
// assert that Backup implements the ProgressPrinter interface
var _ ProgressPrinter = &JSONProgress{}

// NewJSONProgress returns a new backup progress reporter that writes one JSON
// object per line with the given verbosity.
func NewJSONProgress(term ui.Terminal, verbosity uint) *JSONProgress {
	return &JSONProgress{
		Printer: ui.NewProgressPrinter(true, verbosity, term),
		term:    term,
		v:       verbosity,
	}
}
// print serializes status to JSON and writes it to standard output.
func (b *JSONProgress) print(status interface{}) {
	b.term.Print(ui.ToJSONString(status))
}

// error serializes status to JSON and writes it to the error output.
func (b *JSONProgress) error(status interface{}) {
	b.term.Error(ui.ToJSONString(status))
}
// Update emits a "status" JSON message describing the current progress:
// elapsed/remaining time, file and byte counts, error count and the set of
// files currently being processed (sorted for deterministic output).
func (b *JSONProgress) Update(total, processed Counter, errors uint, currentFiles map[string]struct{}, start time.Time, secs uint64) {
	status := statusUpdate{
		MessageType:      "status",
		SecondsElapsed:   uint64(time.Since(start) / time.Second),
		SecondsRemaining: secs,
		TotalFiles:       total.Files,
		FilesDone:        processed.Files,
		TotalBytes:       total.Bytes,
		BytesDone:        processed.Bytes,
		ErrorCount:       errors,
	}

	// PercentDone stays 0 while the total is unknown to avoid division by zero.
	if total.Bytes > 0 {
		status.PercentDone = float64(processed.Bytes) / float64(total.Bytes)
	}

	// Preallocate with the known size instead of growing by repeated append.
	// An empty slice is still omitted from the JSON output via omitempty.
	status.CurrentFiles = make([]string, 0, len(currentFiles))
	for filename := range currentFiles {
		status.CurrentFiles = append(status.CurrentFiles, filename)
	}
	sort.Strings(status.CurrentFiles)

	b.print(status)
}
// ScannerError is the error callback function for the scanner, it prints the
// error in verbose mode and returns nil.
func (b *JSONProgress) ScannerError(item string, err error) error {
	b.error(errorUpdate{
		MessageType: "error",
		Error:       errorObject{err.Error()},
		During:      "scan",
		Item:        item,
	})
	// returning nil tells the scanner to continue despite the error
	return nil
}

// Error is the error callback function for the archiver, it prints the error and returns nil.
func (b *JSONProgress) Error(item string, err error) error {
	b.error(errorUpdate{
		MessageType: "error",
		Error:       errorObject{err.Error()},
		During:      "archival",
		Item:        item,
	})
	// returning nil tells the archiver to continue despite the error
	return nil
}
// CompleteItem is the status callback function for the archiver when a
// file/dir has been saved successfully. It emits a "verbose_status" JSON
// message; nothing is printed below verbosity level 2.
//
// messageType is one of "dir new", "dir unchanged", "dir modified",
// "file new", "file unchanged" or "file modified"; other values are ignored.
func (b *JSONProgress) CompleteItem(messageType, item string, s archiver.ItemStats, d time.Duration) {
	if b.v < 2 {
		return
	}

	update := verboseUpdate{
		MessageType: "verbose_status",
		Item:        item,
	}

	switch messageType {
	case "dir unchanged", "file unchanged":
		// Unchanged items carry no duration or size statistics.
		update.Action = "unchanged"
		b.print(update)
		return
	case "dir new", "file new":
		update.Action = "new"
	case "dir modified", "file modified":
		update.Action = "modified"
	default:
		// Unknown message types produce no output (same as before).
		return
	}

	// New and modified items report duration and data sizes; directories
	// additionally report tree (metadata) sizes.
	update.Duration = d.Seconds()
	update.DataSize = s.DataSize
	update.DataSizeInRepo = s.DataSizeInRepo
	if messageType == "dir new" || messageType == "dir modified" {
		update.MetadataSize = s.TreeSize
		update.MetadataSizeInRepo = s.TreeSizeInRepo
	}
	b.print(update)
}
// ReportTotal emits a "verbose_status" JSON message summarizing the scanner
// results so far; nothing is printed below verbosity level 2.
func (b *JSONProgress) ReportTotal(start time.Time, s archiver.ScanStats) {
	if b.v < 2 {
		return
	}

	b.print(verboseUpdate{
		MessageType: "verbose_status",
		Action:      "scan_finished",
		Duration:    time.Since(start).Seconds(),
		DataSize:    s.Bytes,
		TotalFiles:  s.Files,
	})
}
// Finish prints the finishing messages: a single "summary" JSON object with
// all counters from the completed (or dry-run) backup.
func (b *JSONProgress) Finish(snapshotID restic.ID, summary *archiver.Summary, dryRun bool) {
	id := ""
	// empty if snapshot creation was skipped
	if !snapshotID.IsNull() {
		id = snapshotID.String()
	}
	b.print(summaryOutput{
		MessageType:         "summary",
		FilesNew:            summary.Files.New,
		FilesChanged:        summary.Files.Changed,
		FilesUnmodified:     summary.Files.Unchanged,
		DirsNew:             summary.Dirs.New,
		DirsChanged:         summary.Dirs.Changed,
		DirsUnmodified:      summary.Dirs.Unchanged,
		DataBlobs:           summary.ItemStats.DataBlobs,
		TreeBlobs:           summary.ItemStats.TreeBlobs,
		DataAdded:           summary.ItemStats.DataSize + summary.ItemStats.TreeSize,
		DataAddedPacked:     summary.ItemStats.DataSizeInRepo + summary.ItemStats.TreeSizeInRepo,
		TotalFilesProcessed: summary.Files.New + summary.Files.Changed + summary.Files.Unchanged,
		TotalBytesProcessed: summary.ProcessedBytes,
		BackupStart:         summary.BackupStart,
		BackupEnd:           summary.BackupEnd,
		TotalDuration:       summary.BackupEnd.Sub(summary.BackupStart).Seconds(),
		SnapshotID:          id,
		DryRun:              dryRun,
	})
}
// Reset is a no-op for JSON output: there are no status lines to clear.
func (b *JSONProgress) Reset() {
}
// statusUpdate is the wire format of the periodic "status" message.
type statusUpdate struct {
	MessageType      string   `json:"message_type"` // "status"
	SecondsElapsed   uint64   `json:"seconds_elapsed,omitempty"`
	SecondsRemaining uint64   `json:"seconds_remaining,omitempty"`
	PercentDone      float64  `json:"percent_done"`
	TotalFiles       uint64   `json:"total_files,omitempty"`
	FilesDone        uint64   `json:"files_done,omitempty"`
	TotalBytes       uint64   `json:"total_bytes,omitempty"`
	BytesDone        uint64   `json:"bytes_done,omitempty"`
	ErrorCount       uint     `json:"error_count,omitempty"`
	CurrentFiles     []string `json:"current_files,omitempty"`
}

// errorObject wraps an error message for JSON output.
type errorObject struct {
	Message string `json:"message"`
}

// errorUpdate is the wire format of an "error" message.
type errorUpdate struct {
	MessageType string      `json:"message_type"` // "error"
	Error       errorObject `json:"error"`
	During      string      `json:"during"`
	Item        string      `json:"item"`
}

// verboseUpdate is the wire format of a "verbose_status" message emitted for
// completed items and the finished scan.
type verboseUpdate struct {
	MessageType        string  `json:"message_type"` // "verbose_status"
	Action             string  `json:"action"`
	Item               string  `json:"item"`
	Duration           float64 `json:"duration"` // in seconds
	DataSize           uint64  `json:"data_size"`
	DataSizeInRepo     uint64  `json:"data_size_in_repo"`
	MetadataSize       uint64  `json:"metadata_size"`
	MetadataSizeInRepo uint64  `json:"metadata_size_in_repo"`
	TotalFiles         uint    `json:"total_files"`
}

// summaryOutput is the wire format of the final "summary" message.
type summaryOutput struct {
	MessageType         string    `json:"message_type"` // "summary"
	FilesNew            uint      `json:"files_new"`
	FilesChanged        uint      `json:"files_changed"`
	FilesUnmodified     uint      `json:"files_unmodified"`
	DirsNew             uint      `json:"dirs_new"`
	DirsChanged         uint      `json:"dirs_changed"`
	DirsUnmodified      uint      `json:"dirs_unmodified"`
	DataBlobs           int       `json:"data_blobs"`
	TreeBlobs           int       `json:"tree_blobs"`
	DataAdded           uint64    `json:"data_added"`
	DataAddedPacked     uint64    `json:"data_added_packed"`
	TotalFilesProcessed uint      `json:"total_files_processed"`
	TotalBytesProcessed uint64    `json:"total_bytes_processed"`
	TotalDuration       float64   `json:"total_duration"` // in seconds
	BackupStart         time.Time `json:"backup_start"`
	BackupEnd           time.Time `json:"backup_end"`
	SnapshotID          string    `json:"snapshot_id,omitempty"`
	DryRun              bool      `json:"dry_run,omitempty"`
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/backup/text.go | internal/ui/backup/text.go | package backup
import (
"fmt"
"sort"
"time"
"github.com/restic/restic/internal/archiver"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/progress"
)
// TextProgress reports progress for the `backup` command in a
// human-readable text format.
type TextProgress struct {
	progress.Printer // embedded printer provides the P/V/VV/E output methods

	term      ui.Terminal // terminal used for the updatable status lines
	verbosity uint        // verbosity level; >= 2 enables scanner error output
}

// assert that TextProgress implements the ProgressPrinter interface
var _ ProgressPrinter = &TextProgress{}
// NewTextProgress returns a new backup progress reporter that writes
// human-readable output to term at the given verbosity level.
func NewTextProgress(term ui.Terminal, verbosity uint) *TextProgress {
	tp := &TextProgress{}
	tp.Printer = ui.NewProgressPrinter(false, verbosity, term)
	tp.term = term
	tp.verbosity = verbosity
	return tp
}
// Update updates the status lines shown on the terminal. When no totals
// are known yet (scan still starting) only elapsed time and processed
// counts are shown; otherwise percentage and ETA are included. The names
// of the files currently being processed are listed, sorted, below the
// status line.
func (b *TextProgress) Update(total, processed Counter, errors uint, currentFiles map[string]struct{}, start time.Time, secs uint64) {
	var status string
	if total.Files == 0 && total.Dirs == 0 {
		// no total count available yet
		status = fmt.Sprintf("[%s] %v files, %s, %d errors",
			ui.FormatDuration(time.Since(start)),
			processed.Files, ui.FormatBytes(processed.Bytes), errors,
		)
	} else {
		var eta, percent string

		// only show ETA/percentage while there is still work left
		if secs > 0 && processed.Bytes < total.Bytes {
			eta = fmt.Sprintf(" ETA %s", ui.FormatSeconds(secs))
			percent = ui.FormatPercent(processed.Bytes, total.Bytes)
			percent += "  "
		}

		// include totals
		status = fmt.Sprintf("[%s] %s%v files %s, total %v files %v, %d errors%s",
			ui.FormatDuration(time.Since(start)),
			percent,
			processed.Files,
			ui.FormatBytes(processed.Bytes),
			total.Files,
			ui.FormatBytes(total.Bytes),
			errors,
			eta,
		)
	}

	// first line is the status, the remaining lines are the sorted filenames
	lines := make([]string, 1, len(currentFiles)+1)
	lines[0] = status
	for filename := range currentFiles {
		lines = append(lines, filename)
	}
	sort.Strings(lines[1:])

	b.term.SetStatus(lines)
}
// ScannerError is the error callback function for the scanner. The error
// is printed only at verbosity level 2 or higher; nil is always returned
// so that scanning continues.
func (b *TextProgress) ScannerError(_ string, err error) error {
	if b.verbosity < 2 {
		return nil
	}
	b.E("scan: %v\n", err)
	return nil
}
// Error is the error callback function for the archiver, it prints the
// error and returns nil so that the backup continues.
func (b *TextProgress) Error(_ string, err error) error {
	b.E("error: %v\n", err)
	return nil
}
// CompleteItem is the status callback function for the archiver when a
// file/dir has been saved successfully. It prints a verbose (VV) line
// describing what happened to the item and how much data was added.
func (b *TextProgress) CompleteItem(messageType, item string, s archiver.ItemStats, d time.Duration) {
	item = ui.Quote(item)

	switch messageType {
	case "dir unchanged", "file unchanged":
		b.VV("unchanged %v", item)
	case "dir new":
		b.VV("new %v, saved in %.3fs (%v added, %v stored, %v metadata)",
			item, d.Seconds(), ui.FormatBytes(s.DataSize),
			ui.FormatBytes(s.DataSizeInRepo), ui.FormatBytes(s.TreeSizeInRepo))
	case "dir modified":
		b.VV("modified %v, saved in %.3fs (%v added, %v stored, %v metadata)",
			item, d.Seconds(), ui.FormatBytes(s.DataSize),
			ui.FormatBytes(s.DataSizeInRepo), ui.FormatBytes(s.TreeSizeInRepo))
	case "file new":
		b.VV("new %v, saved in %.3fs (%v added)", item,
			d.Seconds(), ui.FormatBytes(s.DataSize))
	case "file modified":
		b.VV("modified %v, saved in %.3fs (%v added, %v stored)", item,
			d.Seconds(), ui.FormatBytes(s.DataSize), ui.FormatBytes(s.DataSizeInRepo))
	}
}
// ReportTotal prints the results of the completed scan: how long it took
// and how many files/bytes were found.
func (b *TextProgress) ReportTotal(start time.Time, s archiver.ScanStats) {
	b.V("scan finished in %.3fs: %v files, %s",
		time.Since(start).Seconds(),
		s.Files, ui.FormatBytes(s.Bytes),
	)
}
// Reset clears the status lines, but only on terminals that support
// in-place status updates.
func (b *TextProgress) Reset() {
	if !b.term.CanUpdateStatus() {
		return
	}
	b.term.SetStatus(nil)
}
// Finish prints the finishing messages: the per-category change counts,
// the amount of data added (or that would be added in dry-run mode), the
// processed totals, and the resulting snapshot ID.
func (b *TextProgress) Finish(id restic.ID, summary *archiver.Summary, dryRun bool) {
	b.P("\n")
	b.P("Files:       %5d new, %5d changed, %5d unmodified\n", summary.Files.New, summary.Files.Changed, summary.Files.Unchanged)
	b.P("Dirs:        %5d new, %5d changed, %5d unmodified\n", summary.Dirs.New, summary.Dirs.Changed, summary.Dirs.Unchanged)
	b.V("Data Blobs:  %5d new\n", summary.ItemStats.DataBlobs)
	b.V("Tree Blobs:  %5d new\n", summary.ItemStats.TreeBlobs)

	// in dry-run mode nothing is written, so adjust the verb accordingly
	verb := "Added"
	if dryRun {
		verb = "Would add"
	}
	b.P("%s to the repository: %-5s (%-5s stored)\n", verb,
		ui.FormatBytes(summary.ItemStats.DataSize+summary.ItemStats.TreeSize),
		ui.FormatBytes(summary.ItemStats.DataSizeInRepo+summary.ItemStats.TreeSizeInRepo))
	b.P("\n")
	b.P("processed %v files, %v in %s",
		summary.Files.New+summary.Files.Changed+summary.Files.Unchanged,
		ui.FormatBytes(summary.ProcessedBytes),
		ui.FormatDuration(summary.BackupEnd.Sub(summary.BackupStart)),
	)

	if !dryRun {
		// a null ID means no snapshot was created (e.g. nothing changed)
		if id.IsNull() {
			b.P("skipped creating snapshot\n")
		} else {
			b.P("snapshot %s saved\n", id.Str())
		}
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/backup/progress.go | internal/ui/backup/progress.go | package backup
import (
"sync"
"time"
"github.com/restic/restic/internal/archiver"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
)
// A ProgressPrinter can print various progress messages.
// It must be safe to call its methods from concurrent goroutines.
type ProgressPrinter interface {
	// Update renders the current progress state.
	Update(total, processed Counter, errors uint, currentFiles map[string]struct{}, start time.Time, secs uint64)
	// Error reports an error during archiving.
	Error(item string, err error) error
	// ScannerError reports an error during scanning.
	ScannerError(item string, err error) error
	// CompleteItem reports a successfully saved file or directory.
	CompleteItem(messageType string, item string, s archiver.ItemStats, d time.Duration)
	// ReportTotal reports the scan statistics.
	ReportTotal(start time.Time, s archiver.ScanStats)
	// Finish prints the final summary.
	Finish(snapshotID restic.ID, summary *archiver.Summary, dryRun bool)
	// Reset clears any transient status output.
	Reset()

	progress.Printer
}
// Counter tracks a number of files, directories and bytes.
type Counter struct {
	Files, Dirs, Bytes uint64
}
// Progress reports progress for the `backup` command.
type Progress struct {
	progress.Updater
	mu sync.Mutex // guards the mutable fields below

	start time.Time // time the backup was started

	estimator                 rateEstimator // transfer-rate estimate, used for the ETA
	scanStarted, scanFinished bool          // scan lifecycle flags

	currentFiles     map[string]struct{} // files currently being processed
	processed, total Counter
	errors           uint

	printer ProgressPrinter // destination for all output
}
// NewProgress returns a new backup progress tracker that reports to
// printer every interval. The periodic callback computes the remaining
// time from the rate estimator once the scan has finished; until then no
// ETA is reported (secondsRemaining stays 0).
func NewProgress(printer ProgressPrinter, interval time.Duration) *Progress {
	p := &Progress{
		start:        time.Now(),
		currentFiles: make(map[string]struct{}),
		printer:      printer,
		estimator:    *newRateEstimator(time.Now()),
	}
	p.Updater = *progress.NewUpdater(interval, func(_ time.Duration, final bool) {
		if final {
			// last invocation: clear the status lines
			p.printer.Reset()
		} else {
			p.mu.Lock()
			defer p.mu.Unlock()
			// suppress output until there is something to show
			if !p.scanStarted {
				return
			}

			var secondsRemaining uint64
			if p.scanFinished {
				rate := p.estimator.rate(time.Now())
				// below this rate the ETA would be meaningless, so omit it
				tooSlowCutoff := 1024.
				if rate <= tooSlowCutoff {
					secondsRemaining = 0
				} else {
					todo := float64(p.total.Bytes - p.processed.Bytes)
					secondsRemaining = uint64(todo / rate)
				}
			}

			p.printer.Update(p.total, p.processed, p.errors, p.currentFiles, p.start, secondsRemaining)
		}
	})
	return p
}
// Error is the error callback function for the archiver, it counts the
// error and forwards it to the printer. The mutex is released before the
// printer is invoked.
func (p *Progress) Error(item string, err error) error {
	func() {
		p.mu.Lock()
		defer p.mu.Unlock()
		p.errors++
		p.scanStarted = true
	}()
	return p.printer.Error(item, err)
}
// StartFile is called when a file is being processed by a worker; the
// filename is shown in the status output until the file completes.
func (p *Progress) StartFile(filename string) {
	p.mu.Lock()
	p.currentFiles[filename] = struct{}{}
	p.mu.Unlock()
}
// addProcessed adds c to the processed counters and feeds the byte count
// to the rate estimator. The caller must hold p.mu.
func (p *Progress) addProcessed(c Counter) {
	p.processed.Files += c.Files
	p.processed.Dirs += c.Dirs
	p.processed.Bytes += c.Bytes
	p.estimator.recordBytes(time.Now(), c.Bytes)
	p.scanStarted = true
}
// CompleteBlob is called for all saved blobs for files.
func (p *Progress) CompleteBlob(bytes uint64) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.addProcessed(Counter{Bytes: bytes})
}
// CompleteItem is the status callback function for the archiver when a
// file/dir has been saved successfully. A nil current node indicates an
// error for that item; the item is then only removed from the status
// display. Otherwise the counters are updated and the change kind
// (new/unchanged/modified) is reported to the printer, derived from
// comparing previous and current.
func (p *Progress) CompleteItem(item string, previous, current *data.Node, s archiver.ItemStats, d time.Duration) {
	if current == nil {
		// error occurred, tell the status display to remove the line
		p.mu.Lock()
		delete(p.currentFiles, item)
		p.mu.Unlock()
		return
	}

	switch current.Type {
	case data.NodeTypeDir:
		p.mu.Lock()
		p.addProcessed(Counter{Dirs: 1})
		p.mu.Unlock()

		// note: the printer is called outside the lock
		switch {
		case previous == nil:
			p.printer.CompleteItem("dir new", item, s, d)
		case previous.Equals(*current):
			p.printer.CompleteItem("dir unchanged", item, s, d)
		default:
			p.printer.CompleteItem("dir modified", item, s, d)
		}

	case data.NodeTypeFile:
		p.mu.Lock()
		p.addProcessed(Counter{Files: 1})
		delete(p.currentFiles, item)
		p.mu.Unlock()

		switch {
		case previous == nil:
			p.printer.CompleteItem("file new", item, s, d)
		case previous.Equals(*current):
			p.printer.CompleteItem("file unchanged", item, s, d)
		default:
			p.printer.CompleteItem("file modified", item, s, d)
		}
	}
}
// ReportTotal sets the total stats up to now. An empty item signals that
// the scan has finished, which enables ETA computation and forwards the
// final scan statistics to the printer.
func (p *Progress) ReportTotal(item string, s archiver.ScanStats) {
	p.mu.Lock()
	defer p.mu.Unlock()

	p.total = Counter{Files: uint64(s.Files), Dirs: uint64(s.Dirs), Bytes: s.Bytes}
	p.scanStarted = true

	if item == "" {
		// scan has finished
		p.scanFinished = true
		p.printer.ReportTotal(p.start, s)
	}
}
// Finish prints the finishing messages. It first stops the periodic
// status updates and waits for that goroutine to shut down.
func (p *Progress) Finish(snapshotID restic.ID, summary *archiver.Summary, dryrun bool) {
	// wait for the status update goroutine to shut down
	p.Updater.Done()
	p.printer.Finish(snapshotID, summary, dryrun)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/backup/json_test.go | internal/ui/backup/json_test.go | package backup
import (
"testing"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui"
)
// createJSONProgress returns a JSON progress printer wired to a mock
// terminal for inspecting its output in tests.
func createJSONProgress() (*ui.MockTerminal, ProgressPrinter) {
	term := &ui.MockTerminal{}
	printer := NewJSONProgress(term, 3)
	return term, printer
}
// TestJSONError checks that archiver errors are emitted as a single
// "error" JSON message with the quotes in the message properly escaped.
func TestJSONError(t *testing.T) {
	term, printer := createJSONProgress()
	test.Equals(t, printer.Error("/path", errors.New("error \"message\"")), nil)
	test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":{\"message\":\"error \\\"message\\\"\"},\"during\":\"archival\",\"item\":\"/path\"}\n"}, term.Errors)
}
// TestJSONScannerError checks that scanner errors are emitted as an
// "error" JSON message with during set to "scan".
func TestJSONScannerError(t *testing.T) {
	term, printer := createJSONProgress()
	test.Equals(t, printer.ScannerError("/path", errors.New("error \"message\"")), nil)
	test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":{\"message\":\"error \\\"message\\\"\"},\"during\":\"scan\",\"item\":\"/path\"}\n"}, term.Errors)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/backup/rate_estimator.go | internal/ui/backup/rate_estimator.go | package backup
import (
"container/list"
"time"
)
// rateBucket represents a one second window of recorded progress.
type rateBucket struct {
	totalBytes uint64    // bytes recorded within this window
	end        time.Time // the end of the time window, exclusive
}
// rateEstimator represents an estimate of the time to complete an operation.
type rateEstimator struct {
	buckets    *list.List // list of *rateBucket, ordered by increasing end time
	start      time.Time  // start of the retained measurement window
	totalBytes uint64     // sum of bytes across all retained buckets
}
// newRateEstimator returns an estimator initialized to a presumed start time.
func newRateEstimator(start time.Time) *rateEstimator {
	e := &rateEstimator{}
	e.buckets = list.New()
	e.start = start
	return e
}
// See trim(), below.
const (
	bucketWidth             = time.Second       // width of a single measurement bucket
	minRateEstimatorBytes   = 100 * 1000 * 1000 // minimum bytes kept while trimming
	minRateEstimatorBuckets = 20                // minimum buckets kept while trimming
	minRateEstimatorMinutes = 2                 // retention window in minutes
)
// trim removes the oldest history from the estimator assuming a given
// current time.
func (r *rateEstimator) trim(now time.Time) {
	// The estimator retains byte transfer counts over a two minute window.
	// However, to avoid removing too much history when transfer rates are
	// low, the estimator also retains a minimum number of processed bytes
	// across a minimum number of buckets. An operation that is processing a
	// significant number of bytes per second will typically retain only a
	// two minute window's worth of information. One that is making slow
	// progress, such as one being over a rate limited connection, typically
	// observes bursts of updates as infrequently as every ten or twenty
	// seconds, in which case the other limiters will kick in. This heuristic
	// avoids wildly fluctuating estimates over rate limited connections.
	start := now.Add(-minRateEstimatorMinutes * time.Minute)

	for e := r.buckets.Front(); e != nil; e = r.buckets.Front() {
		// keep at least minRateEstimatorBuckets buckets
		if r.buckets.Len() <= minRateEstimatorBuckets {
			break
		}
		b := e.Value.(*rateBucket)
		// stop once the remaining buckets are inside the retention window
		if b.end.After(start) {
			break
		}
		// keep at least minRateEstimatorBytes bytes of history
		total := r.totalBytes - b.totalBytes
		if total < minRateEstimatorBytes {
			break
		}
		// drop the bucket and advance the window start to its end
		r.start = b.end
		r.totalBytes = total
		r.buckets.Remove(e)
	}
}
// recordBytes records the transfer of a number of bytes at a given
// time. Times passed in successive calls should advance monotonically (as
// is the case with time.Now()).
func (r *rateEstimator) recordBytes(now time.Time, bytes uint64) {
	if bytes == 0 {
		return
	}
	var tail *rateBucket
	if r.buckets.Len() > 0 {
		tail = r.buckets.Back().Value.(*rateBucket)
	}
	// open a new bucket when there is none yet or now falls past the
	// current tail bucket's window
	if tail == nil || !tail.end.After(now) {
		// The new bucket holds measurements in the time range [now .. now+1sec).
		tail = &rateBucket{end: now.Add(bucketWidth)}
		r.buckets.PushBack(tail)
	}
	tail.totalBytes += bytes
	r.totalBytes += bytes
	r.trim(now)
}
// rate returns an estimated bytes per second rate at a given time, or zero
// if there is not enough data to compute a rate.
func (r *rateEstimator) rate(now time.Time) float64 {
	r.trim(now)
	if !r.start.Before(now) {
		// no elapsed time in the window; a rate is undefined
		return 0
	}
	return float64(r.totalBytes) / (float64(now.Sub(r.start)) / float64(time.Second))
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/backup/text_test.go | internal/ui/backup/text_test.go | package backup
import (
"testing"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui"
)
// createTextProgress returns a text progress printer wired to a mock
// terminal for inspecting its output in tests.
func createTextProgress() (*ui.MockTerminal, ProgressPrinter) {
	term := &ui.MockTerminal{}
	printer := NewTextProgress(term, 3)
	return term, printer
}
// TestError checks that archiver errors are printed verbatim with an
// "error: " prefix.
func TestError(t *testing.T) {
	term, printer := createTextProgress()
	test.Equals(t, printer.Error("/path", errors.New("error \"message\"")), nil)
	test.Equals(t, []string{"error: error \"message\"\n"}, term.Errors)
}
// TestScannerError checks that scanner errors are printed with a
// "scan: " prefix.
func TestScannerError(t *testing.T) {
	term, printer := createTextProgress()
	test.Equals(t, printer.ScannerError("/path", errors.New("error \"message\"")), nil)
	test.Equals(t, []string{"scan: error \"message\"\n"}, term.Errors)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/backup/progress_test.go | internal/ui/backup/progress_test.go | package backup
import (
"sync"
"testing"
"time"
"github.com/restic/restic/internal/archiver"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
)
// mockPrinter is a ProgressPrinter stub that records which events it saw.
type mockPrinter struct {
	sync.Mutex // guards the fields below
	progress.NoopPrinter

	dirUnchanged, fileNew bool      // set when the corresponding event is seen
	id                    restic.ID // snapshot ID passed to Finish
}

func (p *mockPrinter) Update(_, _ Counter, _ uint, _ map[string]struct{}, _ time.Time, _ uint64) {
}
func (p *mockPrinter) Error(_ string, err error) error        { return err }
func (p *mockPrinter) ScannerError(_ string, err error) error { return err }

// CompleteItem records whether a "dir unchanged" or "file new" event occurred.
func (p *mockPrinter) CompleteItem(messageType string, _ string, _ archiver.ItemStats, _ time.Duration) {
	p.Lock()
	defer p.Unlock()

	switch messageType {
	case "dir unchanged":
		p.dirUnchanged = true
	case "file new":
		p.fileNew = true
	}
}

func (p *mockPrinter) ReportTotal(_ time.Time, _ archiver.ScanStats) {}

// Finish records the snapshot ID it was handed.
func (p *mockPrinter) Finish(id restic.ID, _ *archiver.Summary, _ bool) {
	p.Lock()
	defer p.Unlock()

	p.id = id
}

func (p *mockPrinter) Reset() {}
// TestProgress drives a Progress through a small backup scenario and
// verifies that the expected events and snapshot ID reach the printer.
func TestProgress(t *testing.T) {
	t.Parallel()

	prnt := &mockPrinter{}
	prog := NewProgress(prnt, time.Millisecond)

	prog.StartFile("foo")
	prog.CompleteBlob(1024)

	// "dir unchanged": identical previous and current node
	node := data.Node{Type: data.NodeTypeDir}
	prog.CompleteItem("foo", &node, &node, archiver.ItemStats{}, 0)

	// "file new": no previous node
	node.Type = data.NodeTypeFile
	prog.CompleteItem("foo", nil, &node, archiver.ItemStats{}, 0)

	// allow the periodic updater to fire at least once
	time.Sleep(10 * time.Millisecond)
	id := restic.NewRandomID()
	prog.Finish(id, nil, false)

	if !prnt.dirUnchanged {
		t.Error(`"dir unchanged" event not seen`)
	}
	if !prnt.fileNew {
		t.Error(`"file new" event not seen`)
	}
	if prnt.id != id {
		t.Errorf("id not stored (has %v)", prnt.id)
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/backup/rate_estimator_test.go | internal/ui/backup/rate_estimator_test.go | package backup
import (
"fmt"
"math"
"testing"
"time"
rtest "github.com/restic/restic/internal/test"
)
const float64EqualityThreshold = 1e-6

// almostEqual reports whether a and b differ by at most
// float64EqualityThreshold. It panics if either value is NaN, because a
// NaN comparison would otherwise silently report "not equal".
func almostEqual(a, b float64) bool {
	if math.IsNaN(a) || math.IsNaN(b) {
		panic("almostEqual passed a NaN")
	}
	diff := math.Abs(a - b)
	return diff <= float64EqualityThreshold
}
// TestEstimatorDefault checks that a fresh estimator reports a zero rate,
// both immediately and after time has passed with no recorded bytes.
func TestEstimatorDefault(t *testing.T) {
	var start time.Time
	e := newRateEstimator(start)
	r := e.rate(start)
	rtest.Assert(t, r == 0, "e.Rate == %v, want zero", r)
	r = e.rate(start.Add(time.Hour))
	rtest.Assert(t, r == 0, "e.Rate == %v, want zero", r)
}
// TestEstimatorSimple records a single byte count one second in and
// verifies the rate observed at various later times.
func TestEstimatorSimple(t *testing.T) {
	var start time.Time

	type testcase struct {
		bytes uint64
		when  time.Duration // observation time relative to start
		rate  float64       // expected bytes/sec
	}

	cases := []testcase{
		{0, 0, 0},
		{1, time.Second, 1},
		{60, time.Second, 60},
		{60, time.Minute, 1},
	}

	for _, c := range cases {
		name := fmt.Sprintf("%+v", c)
		t.Run(name, func(t *testing.T) {
			e := newRateEstimator(start)
			e.recordBytes(start.Add(time.Second), c.bytes)
			rate := e.rate(start.Add(c.when))
			rtest.Assert(t, almostEqual(rate, c.rate), "e.Rate == %v, want %v", rate, c.rate)
		})
	}
}
// TestBucketWidth exercises the estimator's bucketing: bytes recorded
// within one bucketWidth share a bucket, a later timestamp opens a new
// bucket, and a long gap yields a sparse bucket list.
func TestBucketWidth(t *testing.T) {
	var when time.Time

	// Recording byte transfers within a bucket width's time window uses one
	// bucket.
	e := newRateEstimator(when)
	e.recordBytes(when, 1)
	e.recordBytes(when.Add(bucketWidth-time.Nanosecond), 1)
	rtest.Assert(t, e.buckets.Len() == 1, "e.buckets.Len() is %d, want 1", e.buckets.Len())

	b := e.buckets.Back().Value.(*rateBucket)
	rtest.Assert(t, b.totalBytes == 2, "b.totalBytes is %d, want 2", b.totalBytes)
	rtest.Assert(t, b.end.Equal(when.Add(bucketWidth)), "b.end is %v, want %v", b.end, when.Add(bucketWidth))

	// Recording a byte outside the bucket width causes another bucket.
	e.recordBytes(when.Add(bucketWidth), 1)
	rtest.Assert(t, e.buckets.Len() == 2, "e.buckets.Len() is %d, want 2", e.buckets.Len())

	b = e.buckets.Back().Value.(*rateBucket)
	rtest.Assert(t, b.totalBytes == 1, "b.totalBytes is %d, want 1", b.totalBytes)
	// Fix: the failure message previously printed when.Add(bucketWidth) as
	// the wanted value even though the assertion checks 2*bucketWidth.
	rtest.Assert(t, b.end.Equal(when.Add(2*bucketWidth)), "b.end is %v, want %v", b.end, when.Add(2*bucketWidth))

	// Recording a byte after a longer delay creates a sparse bucket list.
	e.recordBytes(when.Add(time.Hour+time.Millisecond), 7)
	rtest.Assert(t, e.buckets.Len() == 3, "e.buckets.Len() is %d, want 3", e.buckets.Len())

	b = e.buckets.Back().Value.(*rateBucket)
	rtest.Assert(t, b.totalBytes == 7, "b.totalBytes is %d, want 7", b.totalBytes)
	rtest.Equals(t, when.Add(time.Hour+time.Millisecond+time.Second), b.end)
}
// chunk describes a repeated per-second transfer for applyChunks.
type chunk struct {
	repetitions uint64 // repetition count
	bytes       uint64 // byte count (every second)
}
// applyChunks feeds every chunk to the estimator, advancing the simulated
// clock by one second per repetition, and returns the time after the final
// record.
func applyChunks(chunks []chunk, t time.Time, e *rateEstimator) time.Time {
	for i := range chunks {
		c := chunks[i]
		for rep := uint64(0); rep < c.repetitions; rep++ {
			e.recordBytes(t, c.bytes)
			t = t.Add(time.Second)
		}
	}
	return t
}
// TestEstimatorResponsiveness feeds synthetic transfer patterns to the
// estimator and checks the resulting rate, in particular how quickly old
// history rolls off when the transfer rate changes.
func TestEstimatorResponsiveness(t *testing.T) {
	type testcase struct {
		description string
		chunks      []chunk
		rate        float64
	}

	cases := []testcase{
		{
			"1000 bytes/sec over one second",
			[]chunk{
				{1, 1000},
			},
			1000,
		},
		{
			"1000 bytes/sec over one minute",
			[]chunk{
				{60, 1000},
			},
			1000,
		},
		{
			"1000 bytes/sec for 10 seconds, then 2000 bytes/sec for 10 seconds",
			[]chunk{
				{10, 1000},
				{10, 2000},
			},
			1500,
		},
		{
			"1000 bytes/sec for one minute, then 2000 bytes/sec for one minute",
			[]chunk{
				{60, 1000},
				{60, 2000},
			},
			1500,
		},
		{
			"rate doubles after 30 seconds",
			[]chunk{
				{30, minRateEstimatorBytes},
				{90, 2 * minRateEstimatorBytes},
			},
			minRateEstimatorBytes * 1.75,
		},
		{
			"rate doubles after 31 seconds",
			[]chunk{
				{31, minRateEstimatorBytes},
				{90, 2 * minRateEstimatorBytes},
			},
			// The expected rate is the same as the prior test case because the
			// first second has rolled off the estimator.
			minRateEstimatorBytes * 1.75,
		},
		{
			"rate doubles after 90 seconds",
			[]chunk{
				{90, minRateEstimatorBytes},
				{90, 2 * minRateEstimatorBytes},
			},
			// The expected rate is the same as the prior test case because the
			// first 60 seconds have rolled off the estimator.
			minRateEstimatorBytes * 1.75,
		},
		{
			"rate doubles for two full minutes",
			[]chunk{
				{60, minRateEstimatorBytes},
				{120, 2 * minRateEstimatorBytes},
			},
			2 * minRateEstimatorBytes,
		},
		{
			"rate falls to zero",
			[]chunk{
				{30, minRateEstimatorBytes},
				{30, 0},
			},
			minRateEstimatorBytes / 2,
		},
		{
			"rate falls to zero for extended time",
			[]chunk{
				{60, 1000},
				{300, 0},
			},
			1000 * 60 / (60 + 300.0),
		},
		{
			"rate falls to zero for extended time (from high rate)",
			[]chunk{
				{2 * minRateEstimatorBuckets, minRateEstimatorBytes},
				{300, 0},
			},
			// Expect that only minRateEstimatorBuckets buckets are used in the
			// rate estimate.
			minRateEstimatorBytes * minRateEstimatorBuckets /
				(minRateEstimatorBuckets + 300.0),
		},
	}

	for _, c := range cases {
		t.Run(c.description, func(t *testing.T) {
			var w time.Time
			e := newRateEstimator(w)
			w = applyChunks(c.chunks, w, e)
			r := e.rate(w)
			rtest.Assert(t, almostEqual(r, c.rate), "e.Rate == %f, want %f", r, c.rate)
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/table/table_test.go | internal/ui/table/table_test.go | package table
import (
"bytes"
"strings"
"testing"
)
// TestTable renders tables built by each subtest and compares the result
// against the expected text, covering: an empty table, single-column
// output, multi-line headers, padded headers, footers, template-based
// right-alignment, and multi-line cell values.
// NOTE(review): the expected-output strings are whitespace-sensitive;
// confirm the alignment spaces against a rendered run.
func TestTable(t *testing.T) {
	var tests = []struct {
		create func(t testing.TB) *Table
		output string
	}{
		{
			func(t testing.TB) *Table {
				return New()
			},
			"",
		},
		{
			func(t testing.TB) *Table {
				table := New()
				table.AddColumn("first column", "data: {{.First}}")
				table.AddRow(struct{ First string }{"first data field"})
				return table
			},
			`
first column
----------------------
data: first data field
----------------------
`,
		},
		{
			func(t testing.TB) *Table {
				table := New()
				table.AddColumn("first\ncolumn", "{{.First}}")
				table.AddRow(struct{ First string }{"data"})
				return table
			},
			`
first
column
------
data
------
`,
		},
		{
			func(t testing.TB) *Table {
				table := New()
				table.AddColumn("  first column  ", "data: {{.First}}")
				table.AddRow(struct{ First string }{"d"})
				return table
			},
			`
  first column
----------------
data: d
----------------
`,
		},
		{
			func(t testing.TB) *Table {
				table := New()
				table.AddColumn("first column", "data: {{.First}}")
				table.AddRow(struct{ First string }{"first data field"})
				table.AddRow(struct{ First string }{"second data field"})
				table.AddFooter("footer1")
				table.AddFooter("footer2")
				return table
			},
			`
first column
-----------------------
data: first data field
data: second data field
-----------------------
footer1
footer2
`,
		},
		{
			func(t testing.TB) *Table {
				table := New()
				table.AddColumn("  first name", `{{printf "%12s" .FirstName}}`)
				table.AddColumn("last name", "{{.LastName}}")
				table.AddRow(struct{ FirstName, LastName string }{"firstname", "lastname"})
				table.AddRow(struct{ FirstName, LastName string }{"John", "Doe"})
				table.AddRow(struct{ FirstName, LastName string }{"Johann", "van den Berjen"})
				return table
			},
			`
  first name  last name
----------------------------
   firstname  lastname
        John  Doe
      Johann  van den Berjen
----------------------------
`,
		},
		{
			func(t testing.TB) *Table {
				table := New()
				table.AddColumn("host name", `{{.Host}}`)
				table.AddColumn("time", `{{.Time}}`)
				table.AddColumn("zz", "xxx")
				table.AddColumn("tags", `{{join .Tags ","}}`)
				table.AddColumn("dirs", `{{join .Dirs ","}}`)

				type data struct {
					Host       string
					Time       string
					Tags, Dirs []string
				}
				table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"work"}, []string{"/home/user/work"}})
				table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"other"}, []string{"/home/user/other"}})
				table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"other"}, []string{"/home/user/other"}})
				return table
			},
			`
host name  time                 zz   tags   dirs
------------------------------------------------------------
foo        2018-08-19 22:22:22  xxx  work   /home/user/work
foo        2018-08-19 22:22:22  xxx  other  /home/user/other
foo        2018-08-19 22:22:22  xxx  other  /home/user/other
------------------------------------------------------------
`,
		},
		{
			func(t testing.TB) *Table {
				table := New()
				table.AddColumn("host name", `{{.Host}}`)
				table.AddColumn("time", `{{.Time}}`)
				table.AddColumn("zz", "xxx")
				table.AddColumn("tags", `{{join .Tags "\n"}}`)
				table.AddColumn("dirs", `{{join .Dirs "\n"}}`)

				type data struct {
					Host       string
					Time       string
					Tags, Dirs []string
				}
				table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"work", "go’s"}, []string{"/home/user/work", "/home/user/go"}})
				table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"other"}, []string{"/home/user/other"}})
				table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"other", "bar"}, []string{"/home/user/other"}})
				return table
			},
			`
host name  time                 zz   tags   dirs
------------------------------------------------------------
foo        2018-08-19 22:22:22  xxx  work   /home/user/work
                                     go’s   /home/user/go
foo        2018-08-19 22:22:22  xxx  other  /home/user/other
foo        2018-08-19 22:22:22  xxx  other  /home/user/other
                                     bar
------------------------------------------------------------
`,
		},
	}

	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			table := test.create(t)
			buf := bytes.NewBuffer(nil)
			err := table.Write(buf)
			if err != nil {
				t.Fatal(err)
			}

			want := strings.TrimLeft(test.output, "\n")
			if buf.String() != want {
				t.Errorf("wrong output\n---- want ---\n%s\n---- got ---\n%s\n-------\n", want, buf.Bytes())
			}
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/ui/table/table.go | internal/ui/table/table.go | package table
import (
"bytes"
"io"
"strings"
"text/template"
"github.com/restic/restic/internal/ui"
)
// Table contains data for a table to be printed.
type Table struct {
	columns   []string             // header text per column
	templates []*template.Template // per-column cell template
	data      []interface{}        // one entry per row, fed to the templates
	footer    []string             // lines printed after the table

	CellSeparator string // string printed between adjacent cells

	// Output hooks; each prints one rendered line. PrintData additionally
	// receives the row index.
	PrintHeader    func(io.Writer, string) error
	PrintSeparator func(io.Writer, string) error
	PrintData      func(io.Writer, int, string) error
	PrintFooter    func(io.Writer, string) error
}
// funcmap contains the helper functions available to all column templates.
var funcmap = template.FuncMap{
	"join": strings.Join,
}
// New initializes a new Table with a two-space cell separator and plain
// line printers that append a newline to each rendered line.
func New() *Table {
	writeLine := func(w io.Writer, s string) error {
		_, err := w.Write(append([]byte(s), '\n'))
		return err
	}

	t := &Table{CellSeparator: "  "}
	t.PrintHeader = writeLine
	t.PrintSeparator = writeLine
	t.PrintData = func(w io.Writer, _ int, s string) error {
		return writeLine(w, s)
	}
	t.PrintFooter = writeLine
	return t
}
// AddColumn adds a new header field with the header and format, which is
// expected to be template string compatible with text/template. When
// compiling the format fails, AddColumn panics.
func (t *Table) AddColumn(header, format string) {
	t.columns = append(t.columns, header)
	// template.Must panics with the parse error, matching the previous
	// explicit panic(err).
	tmpl := template.Must(template.New("template for " + header).Funcs(funcmap).Parse(format))
	t.templates = append(t.templates, tmpl)
}
// AddRow adds a new row to the table, which is filled with data.
func (t *Table) AddRow(data interface{}) {
	t.data = append(t.data, data)
}

// AddFooter adds a line to be printed after the table body.
func (t *Table) AddFooter(line string) {
	t.footer = append(t.footer, line)
}
// printLine renders one logical table row. Each entry of data may contain
// embedded newlines, in which case the row spans several physical lines;
// cells are padded to the given per-column widths (display width, not
// bytes), joined with sep, right-trimmed and passed to print one physical
// line at a time.
func printLine(w io.Writer, print func(io.Writer, string) error, sep string, data []string, widths []int) error {
	// split each cell into its physical lines and find the tallest cell
	var fields [][]string
	maxLines := 1
	for _, d := range data {
		lines := strings.Split(d, "\n")
		if len(lines) > maxLines {
			maxLines = len(lines)
		}
		fields = append(fields, lines)
	}

	for i := 0; i < maxLines; i++ {
		var s string
		for fieldNum, lines := range fields {
			var v string
			// cells shorter than maxLines yield empty (padded) segments
			if i < len(lines) {
				v += lines[i]
			}

			// apply padding
			pad := widths[fieldNum] - ui.DisplayWidth(v)
			if pad > 0 {
				v += strings.Repeat(" ", pad)
			}

			if fieldNum > 0 {
				v = sep + v
			}

			s += v
		}

		err := print(w, strings.TrimRight(s, " "))
		if err != nil {
			return err
		}
	}

	return nil
}
// Write prints the table to w: it renders every cell through its column
// template, computes per-column display widths, then emits the header,
// a separator line, the data rows, a closing separator, and any footer
// lines. A table without columns produces no output.
func (t *Table) Write(w io.Writer) error {
	columns := len(t.templates)
	if columns == 0 {
		return nil
	}

	// collect all data fields from all columns
	lines := make([][]string, 0, len(t.data))
	buf := bytes.NewBuffer(nil)

	for _, data := range t.data {
		row := make([]string, 0, len(t.templates))
		for _, tmpl := range t.templates {
			err := tmpl.Execute(buf, data)
			if err != nil {
				return err
			}

			row = append(row, buf.String())
			buf.Reset()
		}
		lines = append(lines, row)
	}

	// find max width for each cell, considering multi-line headers and cells
	columnWidths := make([]int, columns)
	for i, desc := range t.columns {
		for _, line := range strings.Split(desc, "\n") {
			width := ui.DisplayWidth(line)
			if columnWidths[i] < width {
				columnWidths[i] = width
			}
		}
	}
	for _, line := range lines {
		for i, content := range line {
			for _, l := range strings.Split(content, "\n") {
				width := ui.DisplayWidth(l)
				if columnWidths[i] < width {
					columnWidths[i] = width
				}
			}
		}
	}

	// calculate the total width of the table, including the separators
	totalWidth := 0
	for _, width := range columnWidths {
		totalWidth += width
	}
	totalWidth += (columns - 1) * ui.DisplayWidth(t.CellSeparator)

	// write header
	if len(t.columns) > 0 {
		err := printLine(w, t.PrintHeader, t.CellSeparator, t.columns, columnWidths)
		if err != nil {
			return err
		}

		// draw separation line
		err = t.PrintSeparator(w, strings.Repeat("-", totalWidth))
		if err != nil {
			return err
		}
	}

	// write all the lines
	for i, line := range lines {
		printer := func(w io.Writer, s string) error {
			return t.PrintData(w, i, s)
		}
		err := printLine(w, printer, t.CellSeparator, line, columnWidths)
		if err != nil {
			return err
		}
	}

	// draw separation line
	err := t.PrintSeparator(w, strings.Repeat("-", totalWidth))
	if err != nil {
		return err
	}

	if len(t.footer) > 0 {
		// write the footer
		for _, line := range t.footer {
			err := t.PrintFooter(w, line)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/crypto/crypto_test.go | internal/crypto/crypto_test.go | package crypto_test
import (
"bytes"
"crypto/rand"
"io"
"testing"
"github.com/restic/restic/internal/crypto"
rtest "github.com/restic/restic/internal/test"
"github.com/restic/chunker"
)
const testLargeCrypto = false
// TestEncryptDecrypt checks that Seal followed by Open is the identity for
// several plaintext sizes, and that the ciphertext length is exactly the
// plaintext length plus the key's overhead.
func TestEncryptDecrypt(t *testing.T) {
	k := crypto.NewRandomKey()

	// small and medium sizes; the very large case is gated behind
	// testLargeCrypto to keep the default test run fast
	tests := []int{5, 23, 2<<18 + 23, 1 << 20}
	if testLargeCrypto {
		tests = append(tests, 7<<20+123)
	}

	for _, size := range tests {
		data := rtest.Random(42, size)
		buf := make([]byte, 0, size+crypto.Extension)

		nonce := crypto.NewRandomNonce()
		ciphertext := k.Seal(buf[:0], nonce, data, nil)
		rtest.Assert(t, len(ciphertext) == len(data)+k.Overhead(),
			"ciphertext length does not match: want %d, got %d",
			len(data)+crypto.Extension, len(ciphertext))

		plaintext := make([]byte, 0, len(ciphertext))
		plaintext, err := k.Open(plaintext[:0], nonce, ciphertext, nil)
		rtest.OK(t, err)
		rtest.Assert(t, len(plaintext) == len(data),
			"plaintext length does not match: want %d, got %d",
			len(data), len(plaintext))

		rtest.Equals(t, plaintext, data)
	}
}
// TestSmallBuffer verifies that Seal grows a destination slice whose capacity
// is too small for the ciphertext instead of failing, and that the result
// still decrypts correctly.
func TestSmallBuffer(t *testing.T) {
	k := crypto.NewRandomKey()
	size := 600
	data := make([]byte, size)
	_, err := io.ReadFull(rand.Reader, data)
	rtest.OK(t, err)

	// deliberately undersized destination buffer
	ciphertext := make([]byte, 0, size/2)
	nonce := crypto.NewRandomNonce()
	ciphertext = k.Seal(ciphertext[:0], nonce, data, nil)

	// this must extend the slice
	rtest.Assert(t, cap(ciphertext) > size/2,
		"expected extended slice, but capacity is only %d bytes",
		cap(ciphertext))

	// check for the correct plaintext
	plaintext := make([]byte, len(ciphertext))
	plaintext, err = k.Open(plaintext[:0], nonce, ciphertext, nil)
	rtest.OK(t, err)
	rtest.Assert(t, bytes.Equal(plaintext, data),
		"wrong plaintext returned")
}
// TestSameBuffer checks in-place decryption: the ciphertext's own storage is
// reused as the destination for Open.
func TestSameBuffer(t *testing.T) {
	k := crypto.NewRandomKey()
	size := 600
	data := make([]byte, size)
	_, err := io.ReadFull(rand.Reader, data)
	rtest.OK(t, err)

	ciphertext := make([]byte, 0, size+crypto.Extension)
	nonce := crypto.NewRandomNonce()
	ciphertext = k.Seal(ciphertext, nonce, data, nil)

	// use the same buffer for decryption
	ciphertext, err = k.Open(ciphertext[:0], nonce, ciphertext, nil)
	rtest.OK(t, err)
	rtest.Assert(t, bytes.Equal(ciphertext, data),
		"wrong plaintext returned")
}
// encrypt seals data with k, appending the result to ciphertext, and fails
// the test when the output length is not prefix+plaintext+overhead.
func encrypt(t testing.TB, k *crypto.Key, data, ciphertext, nonce []byte) []byte {
	prefixLen := len(ciphertext)
	sealed := k.Seal(ciphertext, nonce, data, nil)
	if want := len(data) + k.Overhead() + prefixLen; len(sealed) != want {
		t.Fatalf("destination slice has wrong length, want %d, got %d",
			len(data)+k.Overhead(), len(sealed))
	}
	return sealed
}
// decryptNewSliceAndCompare decrypts ciphertext into a fresh buffer and
// verifies that the result equals data.
func decryptNewSliceAndCompare(t testing.TB, k *crypto.Key, data, ciphertext, nonce []byte) {
	dst := make([]byte, 0, len(ciphertext))
	decryptAndCompare(t, k, data, ciphertext, nonce, dst)
}
// decryptAndCompare decrypts ciphertext into dst and checks that the result
// consists of dst's previous content followed by data, i.e. that Open
// appends without clobbering the destination prefix.
func decryptAndCompare(t testing.TB, k *crypto.Key, data, ciphertext, nonce, dst []byte) {
	// remember the bytes already in dst, they must survive Open unchanged
	prefix := make([]byte, len(dst))
	copy(prefix, dst)

	plaintext, err := k.Open(dst, nonce, ciphertext, nil)
	if err != nil {
		t.Fatalf("unable to decrypt ciphertext: %v", err)
	}

	if len(data)+len(prefix) != len(plaintext) {
		t.Fatalf("wrong plaintext returned, want %d bytes, got %d", len(data)+len(prefix), len(plaintext))
	}

	if !bytes.Equal(plaintext[:len(prefix)], prefix) {
		t.Fatal("prefix is wrong")
	}

	if !bytes.Equal(plaintext[len(prefix):], data) {
		t.Fatal("wrong plaintext returned")
	}
}
// TestAppendOpen exercises Open with destination slices in several states
// (nil, empty, undersized, pre-filled) to verify the append semantics hold
// in every capacity situation.
func TestAppendOpen(t *testing.T) {
	k := crypto.NewRandomKey()
	nonce := crypto.NewRandomNonce()

	data := make([]byte, 600)
	_, err := io.ReadFull(rand.Reader, data)
	rtest.OK(t, err)
	ciphertext := encrypt(t, k, data, nil, nonce)

	// we need to test several different cases:
	// * destination slice is nil
	// * destination slice is empty and has enough capacity
	// * destination slice is empty and does not have enough capacity
	// * destination slice contains data and has enough capacity
	// * destination slice contains data and does not have enough capacity

	// destination slice is nil
	t.Run("nil", func(t *testing.T) {
		var plaintext []byte
		decryptAndCompare(t, k, data, ciphertext, nonce, plaintext)
	})

	// destination slice is empty and has enough capacity
	t.Run("empty-large", func(t *testing.T) {
		plaintext := make([]byte, 0, len(data)+100)
		decryptAndCompare(t, k, data, ciphertext, nonce, plaintext)
	})

	// destination slice is empty and does not have enough capacity
	t.Run("empty-small", func(t *testing.T) {
		plaintext := make([]byte, 0, len(data)/2)
		decryptAndCompare(t, k, data, ciphertext, nonce, plaintext)
	})

	// destination slice contains data and has enough capacity
	t.Run("prefix-large", func(t *testing.T) {
		plaintext := make([]byte, 0, len(data)+100)
		plaintext = append(plaintext, []byte("foobar")...)
		decryptAndCompare(t, k, data, ciphertext, nonce, plaintext)
	})

	// destination slice contains data and does not have enough capacity
	t.Run("prefix-small", func(t *testing.T) {
		plaintext := make([]byte, 0, len(data)/2)
		plaintext = append(plaintext, []byte("foobar")...)
		decryptAndCompare(t, k, data, ciphertext, nonce, plaintext)
	})
}
// TestAppendSeal exercises Seal with destination slices in several states
// (nil, empty, undersized, pre-filled) and checks that any existing prefix
// is preserved and the appended ciphertext decrypts correctly.
func TestAppendSeal(t *testing.T) {
	k := crypto.NewRandomKey()

	data := make([]byte, 600)
	_, err := io.ReadFull(rand.Reader, data)
	rtest.OK(t, err)

	// we need to test several different cases:
	// * destination slice is nil
	// * destination slice is empty and has enough capacity
	// * destination slice is empty and does not have enough capacity
	// * destination slice contains data and has enough capacity
	// * destination slice contains data and does not have enough capacity

	// destination slice is nil
	t.Run("nil", func(t *testing.T) {
		nonce := crypto.NewRandomNonce()
		var ciphertext []byte

		ciphertext = encrypt(t, k, data, ciphertext, nonce)
		decryptNewSliceAndCompare(t, k, data, ciphertext, nonce)
	})

	// destination slice is empty and has enough capacity
	t.Run("empty-large", func(t *testing.T) {
		nonce := crypto.NewRandomNonce()
		ciphertext := make([]byte, 0, len(data)+100)

		ciphertext = encrypt(t, k, data, ciphertext, nonce)
		decryptNewSliceAndCompare(t, k, data, ciphertext, nonce)
	})

	// destination slice is empty and does not have enough capacity
	t.Run("empty-small", func(t *testing.T) {
		nonce := crypto.NewRandomNonce()
		ciphertext := make([]byte, 0, len(data)/2)

		ciphertext = encrypt(t, k, data, ciphertext, nonce)
		decryptNewSliceAndCompare(t, k, data, ciphertext, nonce)
	})

	// destination slice contains data and has enough capacity
	t.Run("prefix-large", func(t *testing.T) {
		nonce := crypto.NewRandomNonce()
		ciphertext := make([]byte, 0, len(data)+100)
		ciphertext = append(ciphertext, []byte("foobar")...)

		ciphertext = encrypt(t, k, data, ciphertext, nonce)
		if string(ciphertext[:6]) != "foobar" {
			t.Errorf("prefix is missing")
		}
		decryptNewSliceAndCompare(t, k, data, ciphertext[6:], nonce)
	})

	// destination slice contains data and does not have enough capacity
	t.Run("prefix-small", func(t *testing.T) {
		nonce := crypto.NewRandomNonce()
		ciphertext := make([]byte, 0, len(data)/2)
		ciphertext = append(ciphertext, []byte("foobar")...)

		ciphertext = encrypt(t, k, data, ciphertext, nonce)
		if string(ciphertext[:6]) != "foobar" {
			t.Errorf("prefix is missing")
		}
		decryptNewSliceAndCompare(t, k, data, ciphertext[6:], nonce)
	})
}
// TestLargeEncrypt round-trips inputs around the maximum chunk size; it only
// runs when testLargeCrypto is enabled because of its memory cost.
func TestLargeEncrypt(t *testing.T) {
	if !testLargeCrypto {
		t.SkipNow()
	}

	k := crypto.NewRandomKey()

	for _, size := range []int{chunker.MaxSize, chunker.MaxSize + 1, chunker.MaxSize + 1<<20} {
		data := make([]byte, size)
		_, err := io.ReadFull(rand.Reader, data)
		rtest.OK(t, err)

		nonce := crypto.NewRandomNonce()
		ciphertext := k.Seal(make([]byte, size+k.Overhead()), nonce, data, nil)
		plaintext, err := k.Open([]byte{}, nonce, ciphertext, nil)
		rtest.OK(t, err)

		rtest.Equals(t, plaintext, data)
	}
}
// BenchmarkEncrypt measures Seal throughput on an 8 MiB buffer; the
// destination is preallocated so the loop does not measure allocation.
func BenchmarkEncrypt(b *testing.B) {
	size := 8 << 20 // 8MiB
	data := make([]byte, size)

	k := crypto.NewRandomKey()
	buf := make([]byte, len(data)+crypto.Extension)
	nonce := crypto.NewRandomNonce()

	b.ResetTimer()
	b.SetBytes(int64(size))

	for i := 0; i < b.N; i++ {
		_ = k.Seal(buf, nonce, data, nil)
	}
}
// BenchmarkDecrypt measures Open throughput on an 8 MiB ciphertext; both
// buffers are preallocated outside the timed loop.
func BenchmarkDecrypt(b *testing.B) {
	size := 8 << 20 // 8MiB
	data := make([]byte, size)

	k := crypto.NewRandomKey()
	plaintext := make([]byte, 0, size)
	ciphertext := make([]byte, 0, size+crypto.Extension)
	nonce := crypto.NewRandomNonce()
	ciphertext = k.Seal(ciphertext, nonce, data, nil)

	var err error

	b.ResetTimer()
	b.SetBytes(int64(size))

	for i := 0; i < b.N; i++ {
		_, err = k.Open(plaintext, nonce, ciphertext, nil)
		rtest.OK(b, err)
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/crypto/crypto_int_test.go | internal/crypto/crypto_int_test.go | package crypto
import (
"bytes"
"encoding/hex"
"testing"
)
// test vectors from http://cr.yp.to/mac/poly1305-20050329.pdf
var poly1305Tests = []struct {
	msg   []byte // message to authenticate
	r     []byte // Poly1305 "r" half of the key
	k     []byte // AES key used to encrypt the nonce
	nonce []byte // per-message nonce
	mac   []byte // expected 16-byte tag
}{
	{
		[]byte("\xf3\xf6"),
		[]byte("\x85\x1f\xc4\x0c\x34\x67\xac\x0b\xe0\x5c\xc2\x04\x04\xf3\xf7\x00"),
		[]byte("\xec\x07\x4c\x83\x55\x80\x74\x17\x01\x42\x5b\x62\x32\x35\xad\xd6"),
		[]byte("\xfb\x44\x73\x50\xc4\xe8\x68\xc5\x2a\xc3\x27\x5c\xf9\xd4\x32\x7e"),
		[]byte("\xf4\xc6\x33\xc3\x04\x4f\xc1\x45\xf8\x4f\x33\x5c\xb8\x19\x53\xde"),
	},
	{
		[]byte(""),
		[]byte("\xa0\xf3\x08\x00\x00\xf4\x64\x00\xd0\xc7\xe9\x07\x6c\x83\x44\x03"),
		[]byte("\x75\xde\xaa\x25\xc0\x9f\x20\x8e\x1d\xc4\xce\x6b\x5c\xad\x3f\xbf"),
		[]byte("\x61\xee\x09\x21\x8d\x29\xb0\xaa\xed\x7e\x15\x4a\x2c\x55\x09\xcc"),
		[]byte("\xdd\x3f\xab\x22\x51\xf1\x1a\xc7\x59\xf0\x88\x71\x29\xcc\x2e\xe7"),
	},
	{
		[]byte("\x66\x3c\xea\x19\x0f\xfb\x83\xd8\x95\x93\xf3\xf4\x76\xb6\xbc\x24\xd7\xe6\x79\x10\x7e\xa2\x6a\xdb\x8c\xaf\x66\x52\xd0\x65\x61\x36"),
		[]byte("\x48\x44\x3d\x0b\xb0\xd2\x11\x09\xc8\x9a\x10\x0b\x5c\xe2\xc2\x08"),
		[]byte("\x6a\xcb\x5f\x61\xa7\x17\x6d\xd3\x20\xc5\xc1\xeb\x2e\xdc\xdc\x74"),
		[]byte("\xae\x21\x2a\x55\x39\x97\x29\x59\x5d\xea\x45\x8b\xc6\x21\xff\x0e"),
		[]byte("\x0e\xe1\xc1\x6b\xb7\x3f\x0f\x4f\xd1\x98\x81\x75\x3c\x01\xcd\xbe"),
	},
	{
		[]byte("\xab\x08\x12\x72\x4a\x7f\x1e\x34\x27\x42\xcb\xed\x37\x4d\x94\xd1\x36\xc6\xb8\x79\x5d\x45\xb3\x81\x98\x30\xf2\xc0\x44\x91\xfa\xf0\x99\x0c\x62\xe4\x8b\x80\x18\xb2\xc3\xe4\xa0\xfa\x31\x34\xcb\x67\xfa\x83\xe1\x58\xc9\x94\xd9\x61\xc4\xcb\x21\x09\x5c\x1b\xf9"),
		[]byte("\x12\x97\x6a\x08\xc4\x42\x6d\x0c\xe8\xa8\x24\x07\xc4\xf4\x82\x07"),
		[]byte("\xe1\xa5\x66\x8a\x4d\x5b\x66\xa5\xf6\x8c\xc5\x42\x4e\xd5\x98\x2d"),
		[]byte("\x9a\xe8\x31\xe7\x43\x97\x8d\x3a\x23\x52\x7c\x71\x28\x14\x9e\x3a"),
		[]byte("\x51\x54\xad\x0d\x2c\xb2\x6e\x01\x27\x4f\xc5\x11\x48\x49\x1f\x1b"),
	},
}
// TestPoly1305 checks poly1305MAC and poly1305Verify against the reference
// vectors in poly1305Tests.
func TestPoly1305(t *testing.T) {
	for _, test := range poly1305Tests {
		key := &MACKey{}
		copy(key.K[:], test.k)
		copy(key.R[:], test.r)

		mac := poly1305MAC(test.msg, test.nonce, key)
		if !bytes.Equal(mac, test.mac) {
			t.Fatalf("wrong mac calculated, want: %02x, got: %02x", test.mac, mac)
		}

		if !poly1305Verify(test.msg, test.nonce, key, test.mac) {
			t.Fatalf("mac does not verify: mac: %02x", test.mac)
		}
	}
}
// testValues holds a fixed key pair together with a known
// plaintext/ciphertext pair for regression testing; the first 16 bytes of
// ciphertext are the nonce.
var testValues = []struct {
	ekey       EncryptionKey
	skey       MACKey
	ciphertext []byte
	plaintext  []byte
}{
	{
		ekey: decodeArray32("303e8687b1d7db18421bdc6bb8588ccadac4d59ee87b8ff70c44e635790cafef"),
		skey: MACKey{
			K: decodeArray16("ef4d8824cb80b2bcc5fbff8a9b12a42c"),
			R: decodeArray16("cc8d4b948ee0ebfe1d415de921d10353"),
		},
		ciphertext: decodeHex("69fb41c62d12def4593bd71757138606338f621aeaeb39da0fe4f99233f8037a54ea63338a813bcf3f75d8c3cc75dddf8750"),
		plaintext:  []byte("Dies ist ein Test!"),
	},
}
// decodeArray16 decodes the hex string s into a 16-byte array and panics
// when s does not encode exactly 16 bytes.
func decodeArray16(s string) (dst [16]byte) {
	raw := decodeHex(s)
	if len(raw) != 16 {
		panic("data has wrong length")
	}
	copy(dst[:], raw)
	return dst
}
// decodeArray32 decodes the hex string s into a 32-byte array and panics
// when s does not encode exactly 32 bytes.
func decodeArray32(s string) (dst [32]byte) {
	raw := decodeHex(s)
	if len(raw) != 32 {
		panic("data has wrong length")
	}
	copy(dst[:], raw)
	return dst
}
// decodeHex decodes the hex string s, panicking on malformed input. It is
// only intended for compile-time constant test data.
func decodeHex(s string) []byte {
	data, err := hex.DecodeString(s)
	if err != nil {
		panic(err)
	}
	return data
}
// TestCrypto performs an encrypt/decrypt round trip for each entry in
// testValues and verifies that tampering with the MAC, the nonce or the
// message body is detected. It also decrypts the stored reference
// ciphertext to guard against accidental format changes.
func TestCrypto(t *testing.T) {
	msg := make([]byte, 0, 8*1024*1024) // use 8MiB for now

	for _, tv := range testValues {
		// test encryption
		k := &Key{
			EncryptionKey: tv.ekey,
			MACKey:        tv.skey,
		}

		nonce := NewRandomNonce()
		ciphertext := k.Seal(msg[0:], nonce, tv.plaintext, nil)

		// decrypt message
		buf := make([]byte, 0, len(tv.plaintext))
		buf, err := k.Open(buf, nonce, ciphertext, nil)
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(buf, tv.plaintext) {
			t.Fatalf("wrong plaintext returned")
		}

		// change mac, this must fail
		ciphertext[len(ciphertext)-8] ^= 0x23

		if _, err = k.Open(buf[:0], nonce, ciphertext, nil); err != ErrUnauthenticated {
			t.Fatal("wrong MAC value not detected")
		}

		// reset mac
		ciphertext[len(ciphertext)-8] ^= 0x23

		// tamper with nonce, this must fail
		nonce[2] ^= 0x88
		if _, err = k.Open(buf[:0], nonce, ciphertext, nil); err != ErrUnauthenticated {
			t.Fatal("tampered nonce not detected")
		}

		// reset nonce
		nonce[2] ^= 0x88

		// tamper with message, this must fail
		ciphertext[16+5] ^= 0x85
		if _, err = k.Open(buf[:0], nonce, ciphertext, nil); err != ErrUnauthenticated {
			t.Fatal("tampered message not detected")
		}

		// test decryption of the stored reference ciphertext (nonce||ct)
		p := make([]byte, len(tv.ciphertext))
		nonce, ciphertext = tv.ciphertext[:16], tv.ciphertext[16:]
		p, err = k.Open(p[:0], nonce, ciphertext, nil)
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(p, tv.plaintext) {
			t.Fatalf("wrong plaintext: expected %q but got %q\n", tv.plaintext, p)
		}
	}
}
// TestNonceValid checks that the all-zero nonce is rejected and that freshly
// generated random nonces are accepted.
func TestNonceValid(t *testing.T) {
	nonce := make([]byte, ivSize)

	if validNonce(nonce) {
		t.Error("null nonce detected as valid")
	}

	for i := 0; i < 100; i++ {
		nonce = NewRandomNonce()
		if !validNonce(nonce) {
			t.Errorf("random nonce not detected as valid: %02x", nonce)
		}
	}
}
// BenchmarkNonceValid measures the cost of the nonce validity check.
func BenchmarkNonceValid(b *testing.B) {
	nonce := NewRandomNonce()

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		if !validNonce(nonce) {
			b.Fatal("nonce is invalid")
		}
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/crypto/buffer.go | internal/crypto/buffer.go | package crypto
// NewBlobBuffer returns a buffer of length size whose capacity additionally
// accommodates the crypto overhead, so sealing into it needs no reallocation.
func NewBlobBuffer(size int) []byte {
	buf := make([]byte, size, size+Extension)
	return buf
}
// PlaintextLength returns the number of plaintext bytes contained in an
// encrypted blob of ciphertextSize bytes.
func PlaintextLength(ciphertextSize int) int {
	plaintextSize := ciphertextSize - Extension
	return plaintextSize
}
// CiphertextLength returns the encrypted size of a blob holding
// plaintextSize bytes of plaintext.
func CiphertextLength(plaintextSize int) int {
	ciphertextSize := plaintextSize + Extension
	return ciphertextSize
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/crypto/kdf.go | internal/crypto/kdf.go | package crypto
import (
"crypto/rand"
"time"
"github.com/restic/restic/internal/errors"
sscrypt "github.com/elithrar/simple-scrypt"
"golang.org/x/crypto/scrypt"
)
// saltLength is the number of salt bytes generated by NewSalt and expected
// by KDF.
const saltLength = 64

// Params are the default parameters used for the key derivation function KDF().
type Params struct {
	N int // scrypt CPU/memory cost parameter
	R int // scrypt block size parameter
	P int // scrypt parallelism parameter
}

// DefaultKDFParams are the default parameters used for Calibrate and KDF().
var DefaultKDFParams = Params{
	N: sscrypt.DefaultParams.N,
	R: sscrypt.DefaultParams.R,
	P: sscrypt.DefaultParams.P,
}
// Calibrate determines new KDF parameters for the current hardware. It
// delegates to simple-scrypt's Calibrate with timeout and memory as the
// budget (see that package for the exact semantics); on failure the default
// parameters are returned together with the error.
func Calibrate(timeout time.Duration, memory int) (Params, error) {
	defaultParams := sscrypt.Params{
		N:       DefaultKDFParams.N,
		R:       DefaultKDFParams.R,
		P:       DefaultKDFParams.P,
		DKLen:   sscrypt.DefaultParams.DKLen,
		SaltLen: sscrypt.DefaultParams.SaltLen,
	}

	params, err := sscrypt.Calibrate(timeout, memory, defaultParams)
	if err != nil {
		return DefaultKDFParams, errors.Wrap(err, "scrypt.Calibrate")
	}

	return Params{
		N: params.N,
		R: params.R,
		P: params.P,
	}, nil
}
// KDF derives encryption and message authentication keys from the password
// using the supplied parameters N, R and P and the Salt. The salt must be
// exactly saltLength bytes long. The first 32 bytes of the scrypt output
// become the encryption key, the following 32 bytes the MAC key (k||r).
func KDF(p Params, salt []byte, password string) (*Key, error) {
	if len(salt) != saltLength {
		return nil, errors.Errorf("scrypt() called with invalid salt bytes (len %d)", len(salt))
	}

	// make sure we have valid parameters
	params := sscrypt.Params{
		N:       p.N,
		R:       p.R,
		P:       p.P,
		DKLen:   sscrypt.DefaultParams.DKLen,
		SaltLen: len(salt),
	}

	if err := params.Check(); err != nil {
		return nil, errors.Wrap(err, "Check")
	}

	derKeys := &Key{}

	keybytes := macKeySize + aesKeySize
	scryptKeys, err := scrypt.Key([]byte(password), salt, p.N, p.R, p.P, keybytes)
	if err != nil {
		return nil, errors.Wrap(err, "scrypt.Key")
	}

	if len(scryptKeys) != keybytes {
		return nil, errors.Errorf("invalid numbers of bytes expanded from scrypt(): %d", len(scryptKeys))
	}

	// first 32 byte of scrypt output is the encryption key
	copy(derKeys.EncryptionKey[:], scryptKeys[:aesKeySize])

	// next 32 byte of scrypt output is the mac key, in the form k||r
	macKeyFromSlice(&derKeys.MACKey, scryptKeys[aesKeySize:])

	return derKeys, nil
}
// NewSalt returns saltLength freshly generated random bytes to use with
// KDF(). Running out of randomness is unrecoverable, so instead of
// returning an error the function panics in that case.
func NewSalt() ([]byte, error) {
	salt := make([]byte, saltLength)
	if n, err := rand.Read(salt); n != saltLength || err != nil {
		panic("unable to read enough random bytes for new salt")
	}
	return salt, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/crypto/crypto.go | internal/crypto/crypto.go | package crypto
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/json"
"fmt"
"github.com/restic/restic/internal/errors"
"golang.org/x/crypto/poly1305"
)
const (
	aesKeySize = 32 // for AES-256

	macKeySizeK = 16                        // for AES-128
	macKeySizeR = 16                        // for Poly1305
	macKeySize  = macKeySizeK + macKeySizeR // for Poly1305-AES128

	ivSize = aes.BlockSize // 16 bytes, also used as nonce size

	macSize = poly1305.TagSize // 16 bytes

	// Extension is the number of bytes a plaintext is enlarged by encrypting it.
	Extension = ivSize + macSize
)

var (
	// ErrUnauthenticated is returned when ciphertext verification has failed.
	ErrUnauthenticated = fmt.Errorf("ciphertext verification failed")
)
// Key holds encryption and message authentication keys for a repository. It is stored
// encrypted and authenticated as a JSON data structure in the Data field of the Key
// structure.
type Key struct {
	MACKey        `json:"mac"`
	EncryptionKey `json:"encrypt"`
}

// EncryptionKey is the key used for encryption (AES-256).
type EncryptionKey [32]byte

// MACKey is used to sign (authenticate) data with Poly1305-AES.
type MACKey struct {
	K [16]byte // for AES-128 (encrypts the nonce)
	R [16]byte // for Poly1305
}
// poly1305MAC computes the Poly1305-AES tag for msg under key using the
// given per-message nonce and returns the 16-byte tag as a slice.
func poly1305MAC(msg []byte, nonce []byte, key *MACKey) []byte {
	preparedKey := poly1305PrepareKey(nonce, key)

	var tag [16]byte
	poly1305.Sum(&tag, msg, &preparedKey)
	return tag[:]
}
// macKeyFromSlice fills mk from the first 32 bytes of data, which hold the
// MAC key in the form k||r (16 bytes each).
func macKeyFromSlice(mk *MACKey, data []byte) {
	copy(mk.R[:], data[16:32])
	copy(mk.K[:], data[0:16])
}
// poly1305PrepareKey derives the 32-byte key r||n for the low-level
// poly1305.Sum(): the first half is the key's R part, the second half is
// the nonce encrypted with AES-128 under the key's K part.
func poly1305PrepareKey(nonce []byte, key *MACKey) [32]byte {
	block, err := aes.NewCipher(key.K[:])
	if err != nil {
		panic(err)
	}

	var k [32]byte
	copy(k[:16], key.R[:])
	block.Encrypt(k[16:], nonce[:])
	return k
}
// poly1305Verify reports whether mac is the valid Poly1305-AES tag for msg
// under key and nonce.
func poly1305Verify(msg []byte, nonce []byte, key *MACKey, mac []byte) bool {
	preparedKey := poly1305PrepareKey(nonce, key)

	var expected [16]byte
	copy(expected[:], mac)
	return poly1305.Verify(&expected, msg, &preparedKey)
}
// NewRandomKey returns freshly generated encryption and message
// authentication keys read from crypto/rand. It panics when not enough
// randomness is available, since continuing without keys would be unsafe.
func NewRandomKey() *Key {
	key := &Key{}

	if n, err := rand.Read(key.EncryptionKey[:]); n != aesKeySize || err != nil {
		panic("unable to read enough random bytes for encryption key")
	}
	if n, err := rand.Read(key.MACKey.K[:]); n != macKeySizeK || err != nil {
		panic("unable to read enough random bytes for MAC encryption key")
	}
	if n, err := rand.Read(key.MACKey.R[:]); n != macKeySizeR || err != nil {
		panic("unable to read enough random bytes for MAC key")
	}

	return key
}
// NewRandomNonce returns a new random nonce of ivSize bytes. It panics on
// error so that the program is safely terminated.
func NewRandomNonce() []byte {
	nonce := make([]byte, ivSize)
	if n, err := rand.Read(nonce); n != ivSize || err != nil {
		panic("unable to read enough random bytes for iv")
	}
	return nonce
}
// jsonMACKey is the JSON serialization of a MACKey; the arrays are encoded
// as byte slices (base64 in JSON).
type jsonMACKey struct {
	K []byte `json:"k"`
	R []byte `json:"r"`
}

// MarshalJSON converts the MACKey to JSON.
func (m *MACKey) MarshalJSON() ([]byte, error) {
	return json.Marshal(jsonMACKey{K: m.K[:], R: m.R[:]})
}

// UnmarshalJSON fills the key m with data from the JSON representation.
func (m *MACKey) UnmarshalJSON(data []byte) error {
	j := jsonMACKey{}
	err := json.Unmarshal(data, &j)
	if err != nil {
		return errors.Wrap(err, "Unmarshal")
	}
	copy(m.K[:], j.K)
	copy(m.R[:], j.R)

	return nil
}
// Valid tests whether the key m is valid, i.e. both the K and the R half
// contain at least one non-zero byte. An all-zero half means the key was
// never initialized.
func (m *MACKey) Valid() bool {
	// nonzero reports whether buf contains a non-zero byte; unlike the
	// previous implementation it returns as soon as one is found.
	nonzero := func(buf []byte) bool {
		for _, b := range buf {
			if b != 0 {
				return true
			}
		}
		return false
	}

	// both halves of the key must be set
	return nonzero(m.K[:]) && nonzero(m.R[:])
}
// MarshalJSON converts the EncryptionKey to JSON (a byte slice, base64 in
// JSON).
func (k *EncryptionKey) MarshalJSON() ([]byte, error) {
	return json.Marshal(k[:])
}

// UnmarshalJSON fills the key k with data from the JSON representation.
func (k *EncryptionKey) UnmarshalJSON(data []byte) error {
	d := make([]byte, aesKeySize)
	err := json.Unmarshal(data, &d)
	if err != nil {
		return errors.Wrap(err, "Unmarshal")
	}
	copy(k[:], d)

	return nil
}
// Valid reports whether the key k contains at least one non-zero byte; an
// all-zero key is considered uninitialized.
func (k *EncryptionKey) Valid() bool {
	for _, b := range k {
		if b != 0 {
			return true
		}
	}
	return false
}
// validNonce reports whether nonce contains at least one non-zero byte;
// the all-zero nonce is rejected.
func validNonce(nonce []byte) bool {
	for _, b := range nonce {
		if b != 0 {
			return true
		}
	}
	return false
}
// statically ensure that *Key implements crypto/cipher.AEAD
var _ cipher.AEAD = &Key{}

// NonceSize returns the size of the nonce that must be passed to Seal
// and Open (one AES block, 16 bytes).
func (k *Key) NonceSize() int {
	return ivSize
}

// Overhead returns the maximum difference between the lengths of a
// plaintext and its ciphertext (the size of the Poly1305 tag).
func (k *Key) Overhead() int {
	return macSize
}
// sliceForAppend returns a slice head consisting of in followed by n extra
// bytes, and a slice tail aliasing exactly those extra bytes. The backing
// array of in is reused when its capacity suffices; otherwise a new array
// is allocated and in's contents are copied over.
//
// taken from the stdlib, crypto/aes/aes_gcm.go
func sliceForAppend(in []byte, n int) (head, tail []byte) {
	total := len(in) + n
	if cap(in) < total {
		grown := make([]byte, total)
		copy(grown, in)
		head = grown
	} else {
		head = in[:total]
	}
	tail = head[len(in):]
	return head, tail
}
// Seal encrypts and authenticates plaintext, authenticates the
// additional data and appends the result to dst, returning the updated
// slice. The nonce must be NonceSize() bytes long and unique for all
// time, for a given key.
//
// The plaintext and dst may alias exactly or not at all. To reuse
// plaintext's storage for the encrypted output, use plaintext[:0] as dst.
//
// The scheme is encrypt-then-MAC: AES-256-CTR encryption followed by a
// Poly1305-AES tag over the ciphertext. Additional data is not supported
// and must be empty.
func (k *Key) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
	if !k.Valid() {
		panic("key is invalid")
	}

	if len(additionalData) > 0 {
		panic("additional data is not supported")
	}

	if len(nonce) != ivSize {
		panic("incorrect nonce length")
	}

	if !validNonce(nonce) {
		panic("nonce is invalid")
	}

	// reserve room for ciphertext plus MAC, reusing dst's storage if possible
	ret, out := sliceForAppend(dst, len(plaintext)+k.Overhead())

	c, err := aes.NewCipher(k.EncryptionKey[:])
	if err != nil {
		panic(fmt.Sprintf("unable to create cipher: %v", err))
	}
	e := cipher.NewCTR(c, nonce)
	e.XORKeyStream(out, plaintext)

	// append the MAC computed over the ciphertext
	mac := poly1305MAC(out[:len(plaintext)], nonce, &k.MACKey)
	copy(out[len(plaintext):], mac)

	return ret
}
// Open decrypts and authenticates ciphertext, authenticates the
// additional data and, if successful, appends the resulting plaintext
// to dst, returning the updated slice. The nonce must be NonceSize()
// bytes long and both it and the additional data must match the
// value passed to Seal.
//
// The ciphertext and dst may alias exactly or not at all. To reuse
// ciphertext's storage for the decrypted output, use ciphertext[:0] as dst.
//
// Even if the function fails, the contents of dst, up to its capacity,
// may be overwritten.
func (k *Key) Open(dst, nonce, ciphertext, _ []byte) ([]byte, error) {
	if !k.Valid() {
		return nil, errors.New("invalid key")
	}

	// check parameters
	if len(nonce) != ivSize {
		panic("incorrect nonce length")
	}
	if !validNonce(nonce) {
		return nil, errors.New("nonce is invalid")
	}

	// check for plausible length
	if len(ciphertext) < k.Overhead() {
		return nil, errors.Errorf("trying to decrypt invalid data: ciphertext too short")
	}

	// the trailing macSize bytes of ciphertext are the Poly1305 tag
	l := len(ciphertext) - macSize
	ct, mac := ciphertext[:l], ciphertext[l:]

	// verify mac before decrypting anything (encrypt-then-MAC)
	if !poly1305Verify(ct, nonce, &k.MACKey, mac) {
		return nil, ErrUnauthenticated
	}

	ret, out := sliceForAppend(dst, len(ct))

	c, err := aes.NewCipher(k.EncryptionKey[:])
	if err != nil {
		panic(fmt.Sprintf("unable to create cipher: %v", err))
	}
	e := cipher.NewCTR(c, nonce)
	e.XORKeyStream(out, ct)

	return ret, nil
}
// Valid reports whether both the encryption key and the MAC key are set
// (i.e. non-zero).
func (k *Key) Valid() bool {
	if !k.EncryptionKey.Valid() {
		return false
	}
	return k.MACKey.Valid()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/crypto/kdf_test.go | internal/crypto/kdf_test.go | package crypto
import (
"testing"
"time"
)
// TestCalibrate merely checks that Calibrate succeeds on the current
// hardware with a small time budget; the resulting parameters are logged.
func TestCalibrate(t *testing.T) {
	params, err := Calibrate(100*time.Millisecond, 50)
	if err != nil {
		t.Fatal(err)
	}

	t.Logf("testing calibrate, params after: %v", params)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/crypto/doc.go | internal/crypto/doc.go | // Package crypto provides all cryptographic operations needed in restic.
package crypto
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fuse/snapshots_dirstruct.go | internal/fuse/snapshots_dirstruct.go | //go:build darwin || freebsd || linux
package fuse
import (
"bytes"
"context"
"crypto/sha256"
"fmt"
"path"
"sort"
"strings"
"sync"
"time"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
)
// MetaDirData describes one node in the generated snapshots directory tree:
// either a pseudo directory (names set) or a snapshot mount point/symlink
// (snapshot and possibly linkTarget set).
type MetaDirData struct {
	// set if this is a symlink or a snapshot mount point
	linkTarget string
	snapshot   *data.Snapshot
	// names is set if this is a pseudo directory
	names map[string]*MetaDirData
}
// SnapshotsDirStructure contains the directory structure for snapshots.
// It uses a paths and time template to generate a map of pathnames
// pointing to the actual snapshots. For templates that end with a time,
// also "latest" links are generated.
type SnapshotsDirStructure struct {
	root          *Root
	pathTemplates []string
	timeTemplate  string

	// mutex guards entries, hash and lastCheck
	mutex sync.Mutex

	// "" is the root path, subdirectory paths are assembled as parent+"/"+childFn
	// thus all subdirectories are prefixed with a slash as the root is ""
	// that way we don't need path processing special cases when using the entries tree
	entries map[string]*MetaDirData

	hash      [sha256.Size]byte // Hash at last check.
	lastCheck time.Time
}
// NewSnapshotsDirStructure returns a new directory structure for snapshots.
// The entries map is built lazily on the first UpdatePrefix call.
func NewSnapshotsDirStructure(root *Root, pathTemplates []string, timeTemplate string) *SnapshotsDirStructure {
	return &SnapshotsDirStructure{
		root:          root,
		pathTemplates: pathTemplates,
		timeTemplate:  timeTemplate,
	}
}
// pathsFromSn generates the paths for snapshot sn from pathTemplate and
// timeTemplate, replacing the template verbs with the snapshot's data:
//
//	%T  snapshot time (written lazily; returned as timeSuffix when trailing)
//	%t  tags (one path per tag; snapshots without tags yield no paths)
//	%i  short snapshot ID, %I full snapshot ID
//	%u  username, %h hostname
//
// The time is given as suffix if the pathTemplate ends with "%T".
func pathsFromSn(pathTemplate string, timeTemplate string, sn *data.Snapshot) (paths []string, timeSuffix string) {
	timeformat := sn.Time.Format(timeTemplate)

	inVerb := false
	writeTime := false
	out := make([]strings.Builder, 1)
	for _, c := range pathTemplate {
		if writeTime {
			for i := range out {
				out[i].WriteString(timeformat)
			}
			writeTime = false
		}

		if !inVerb {
			if c == '%' {
				inVerb = true
			} else {
				for i := range out {
					out[i].WriteRune(c)
				}
			}
			continue
		}

		var repl string
		inVerb = false
		switch c {
		case 'T':
			// lazy write; time might be returned as suffix
			writeTime = true
			continue
		case 't':
			if len(sn.Tags) == 0 {
				// snapshots without tags are not mounted for %t templates
				return nil, ""
			}

			if len(sn.Tags) != 1 {
				// needs special treatment: Rebuild the string builders
				newout := make([]strings.Builder, len(out)*len(sn.Tags))
				for i, tag := range sn.Tags {
					tag = filenameFromTag(tag)
					for j := range out {
						newout[i*len(out)+j].WriteString(out[j].String() + tag)
					}
				}
				out = newout
				continue
			}
			// sanitize the single tag exactly like the multi-tag case above,
			// so a tag containing "/", "" , "." or ".." cannot corrupt the
			// generated path
			repl = filenameFromTag(sn.Tags[0])
		case 'i':
			repl = sn.ID().Str()
		case 'I':
			repl = sn.ID().String()
		case 'u':
			repl = sn.Username
		case 'h':
			repl = sn.Hostname
		default:
			repl = string(c)
		}

		// write replacement string to all string builders
		for i := range out {
			out[i].WriteString(repl)
		}
	}

	for i := range out {
		paths = append(paths, out[i].String())
	}
	if writeTime {
		timeSuffix = timeformat
	}
	return paths, timeSuffix
}
// filenameFromTag maps a tag to a string that is safe to use as a single
// filename component. The problematic tags "" and "." become "_", ".."
// becomes "__", and every "/" inside a tag is replaced by "_".
func filenameFromTag(tag string) string {
	if tag == "" || tag == "." {
		return "_"
	}
	if tag == ".." {
		return "__"
	}
	return strings.ReplaceAll(tag, "/", "_")
}
// staticPrefix returns the longest directory prefix of pathTemplate that
// contains no template verb; templates without any verb yield "".
func staticPrefix(pathTemplate string) (prefix string) {
	verbAt := -1
	percent := false

search:
	for i, c := range pathTemplate {
		if !percent {
			percent = c == '%'
			continue
		}
		percent = false
		switch c {
		case 'i', 'I', 'u', 'h', 't', 'T':
			verbAt = i
			break search
		}
	}

	if verbAt < 0 {
		// ignore patterns without template variable
		return ""
	}

	head := pathTemplate[:verbAt]
	slash := strings.LastIndex(head, "/")
	if slash < 0 {
		return ""
	}
	return head[:slash]
}
// uniqueName returns a name for prefix+name that is not yet present in
// entries, appending "-1", "-2", ... until a free name is found.
func uniqueName(entries map[string]*MetaDirData, prefix, name string) string {
	candidate := name
	for i := 1; ; i++ {
		if _, taken := entries[prefix+candidate]; !taken {
			return candidate
		}
		candidate = fmt.Sprintf("%s-%d", name, i)
	}
}
// makeDirs inserts all paths generated from pathTemplates and
// timeTemplate for all given snapshots into d.entries.
// Also adds "latest" links if "%T" is at end of a path template.
func (d *SnapshotsDirStructure) makeDirs(snapshots data.Snapshots) {
	entries := make(map[string]*MetaDirData)

	type mountData struct {
		sn         *data.Snapshot
		linkTarget string // if linkTarget!= "", this is a symlink
		childFn    string
		child      *MetaDirData
	}

	// recursively build tree structure: each call registers the entry at
	// path and then registers the entry as a child of its parent directory.
	// (note: the parameter "data" shadows the imported package of the same name)
	var mount func(path string, data mountData)
	mount = func(path string, data mountData) {
		e := entries[path]
		if e == nil {
			e = &MetaDirData{}
		}
		if data.sn != nil {
			e.snapshot = data.sn
			e.linkTarget = data.linkTarget
		} else {
			// intermediate directory, register as a child directory
			if e.names == nil {
				e.names = make(map[string]*MetaDirData)
			}
			if data.child != nil {
				e.names[data.childFn] = data.child
			}
		}
		entries[path] = e

		slashIdx := strings.LastIndex(path, "/")
		if slashIdx >= 0 {
			// add to parent dir, but without snapshot
			mount(path[:slashIdx], mountData{childFn: path[slashIdx+1:], child: e})
		}
	}

	// root directory
	mount("", mountData{})

	// insert pure directories; needed to get empty structure even if there
	// are no snapshots in these dirs
	for _, p := range d.pathTemplates {
		p = staticPrefix(p)
		if p != "" {
			mount(path.Clean("/"+p), mountData{})
		}
	}

	// latestTime tracks, per directory, the newest snapshot time seen so far
	// so the "latest" symlink can be kept pointing at the newest snapshot
	latestTime := make(map[string]time.Time)
	for _, sn := range snapshots {
		for _, templ := range d.pathTemplates {
			paths, timeSuffix := pathsFromSn(templ, d.timeTemplate, sn)
			for _, p := range paths {
				if p != "" {
					p = "/" + p
				}
				suffix := uniqueName(entries, p, timeSuffix)
				mount(path.Clean(p+suffix), mountData{sn: sn})
				if timeSuffix != "" {
					lt, ok := latestTime[p]
					if !ok || !sn.Time.Before(lt) {
						debug.Log("link (update) %v -> %v\n", p, suffix)
						// inject symlink
						mount(path.Clean(p+"/latest"), mountData{sn: sn, linkTarget: suffix})
						latestTime[p] = sn.Time
					}
				}
			}
		}
	}
	d.entries = entries
}
const minSnapshotsReloadTime = 60 * time.Second
// updateSnapshots reloads the snapshot list if the repository has changed.
// Checks are rate-limited to one per minSnapshotsReloadTime, and the
// directory structure is only rebuilt when the set of snapshot IDs actually
// differs from the last run. It takes d.mutex, so callers must not hold it.
func (d *SnapshotsDirStructure) updateSnapshots(ctx context.Context) error {
	d.mutex.Lock()
	defer d.mutex.Unlock()
	// throttle: skip the repository check if the last one was recent
	if time.Since(d.lastCheck) < minSnapshotsReloadTime {
		return nil
	}

	var snapshots data.Snapshots
	err := d.root.cfg.Filter.FindAll(ctx, d.root.repo, d.root.repo, nil, func(_ string, sn *data.Snapshot, _ error) error {
		if sn != nil {
			snapshots = append(snapshots, sn)
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Sort snapshots ascending by time, using the id to break ties.
	// This needs to be done before hashing.
	sort.Slice(snapshots, func(i, j int) bool {
		si, sj := snapshots[i], snapshots[j]
		if si.Time.Equal(sj.Time) {
			return bytes.Compare(si.ID()[:], sj.ID()[:]) < 0
		}
		return si.Time.Before(sj.Time)
	})

	// We update the snapshots when the hash of their id's changes.
	h := sha256.New()
	for _, sn := range snapshots {
		h.Write(sn.ID()[:])
	}
	var hash [sha256.Size]byte
	h.Sum(hash[:0])
	if d.hash == hash {
		// same snapshot set as last time, only refresh the check timestamp
		d.lastCheck = time.Now()
		return nil
	}

	err = d.root.repo.LoadIndex(ctx, nil)
	if err != nil {
		return err
	}

	d.lastCheck = time.Now()
	d.hash = hash
	d.makeDirs(snapshots)
	return nil
}
// UpdatePrefix refreshes the snapshot list if necessary and returns the
// cached metadata for the given path prefix, or nil if the prefix is
// unknown.
func (d *SnapshotsDirStructure) UpdatePrefix(ctx context.Context, prefix string) (*MetaDirData, error) {
	if err := d.updateSnapshots(ctx); err != nil {
		return nil, err
	}

	d.mutex.Lock()
	defer d.mutex.Unlock()
	return d.entries[prefix], nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fuse/xattr.go | internal/fuse/xattr.go | //go:build darwin || freebsd || linux
package fuse
import (
"github.com/anacrolix/fuse"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
)
// nodeToXattrList appends the names of all extended attributes of node to
// resp.
func nodeToXattrList(node *data.Node, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) {
	debug.Log("Listxattr(%v, %v)", node.Name, req.Size)
	for i := range node.ExtendedAttributes {
		resp.Append(node.ExtendedAttributes[i].Name)
	}
}
// nodeGetXattr copies the value of the extended attribute named in req into
// resp, or returns fuse.ErrNoXattr if the node has no such attribute.
func nodeGetXattr(node *data.Node, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
	debug.Log("Getxattr(%v, %v, %v)", node.Name, req.Name, req.Size)
	value := node.GetExtendedAttribute(req.Name)
	if value == nil {
		return fuse.ErrNoXattr
	}
	resp.Xattr = value
	return nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fuse/fuse_test.go | internal/fuse/fuse_test.go | //go:build darwin || freebsd || linux
package fuse
import (
"bytes"
"context"
"math/rand"
"os"
"strings"
"testing"
"time"
"github.com/restic/restic/internal/bloblru"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/anacrolix/fuse"
"github.com/anacrolix/fuse/fs"
rtest "github.com/restic/restic/internal/test"
)
// testRead reads length bytes at offset from handle f into data and fails
// the test on error.
func testRead(t testing.TB, f fs.Handle, offset, length int, data []byte) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	req := &fuse.ReadRequest{Offset: int64(offset), Size: length}
	resp := &fuse.ReadResponse{Data: data}
	reader := f.(fs.HandleReader)
	rtest.OK(t, reader.Read(ctx, req, resp))
}
// firstSnapshotID returns the ID of the first snapshot file listed by repo.
func firstSnapshotID(t testing.TB, repo restic.Lister) (first restic.ID) {
	record := func(id restic.ID, _ int64) error {
		if first.IsNull() {
			first = id
		}
		return nil
	}
	if err := repo.List(context.TODO(), restic.SnapshotFile, record); err != nil {
		t.Fatal(err)
	}
	return first
}
// loadFirstSnapshot loads the snapshot whose ID firstSnapshotID returns.
func loadFirstSnapshot(t testing.TB, repo restic.ListerLoaderUnpacked) *data.Snapshot {
	sn, err := data.LoadSnapshot(context.TODO(), repo, firstSnapshotID(t, repo))
	rtest.OK(t, err)
	return sn
}
// loadTree loads the tree blob with the given id from repo, failing the test
// on error.
func loadTree(t testing.TB, repo restic.Loader, id restic.ID) *data.Tree {
	ctx := context.TODO()
	tree, err := data.LoadTree(ctx, repo, id)
	rtest.OK(t, err)
	return tree
}
// TestFuseFile builds a file node from the data blobs of a test snapshot and
// checks that Attr reports the expected metadata and that random-range reads
// match the concatenated blob contents.
func TestFuseFile(t *testing.T) {
	repo := repository.TestRepository(t)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	timestamp, err := time.Parse(time.RFC3339, "2017-01-24T10:42:56+01:00")
	rtest.OK(t, err)
	data.TestCreateSnapshot(t, repo, timestamp, 2)

	sn := loadFirstSnapshot(t, repo)
	tree := loadTree(t, repo, *sn.Tree)

	// collect the content blob IDs of all files in the tree
	var content restic.IDs
	for _, node := range tree.Nodes {
		content = append(content, node.Content...)
	}
	t.Logf("tree loaded, content: %v", content)

	// build the expected file contents by concatenating all blobs in order
	var (
		filesize uint64
		memfile  []byte
	)
	for _, id := range content {
		size, found := repo.LookupBlobSize(restic.DataBlob, id)
		rtest.Assert(t, found, "Expected to find blob id %v", id)
		filesize += uint64(size)

		buf, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, nil)
		rtest.OK(t, err)

		if len(buf) != int(size) {
			t.Fatalf("not enough bytes read for id %v: want %v, got %v", id.Str(), size, len(buf))
		}

		if uint(len(buf)) != size {
			t.Fatalf("buffer has wrong length for id %v: want %v, got %v", id.Str(), size, len(buf))
		}

		memfile = append(memfile, buf...)
	}

	t.Logf("filesize is %v, memfile has size %v", filesize, len(memfile))

	node := &data.Node{
		Name:    "foo",
		Inode:   23,
		Mode:    0742,
		Size:    filesize,
		Content: content,
	}
	root := &Root{repo: repo, blobCache: bloblru.New(blobCacheSize)}

	inode := inodeFromNode(1, node)
	f, err := newFile(root, func() {}, inode, node)
	rtest.OK(t, err)
	of, err := f.Open(context.TODO(), nil, nil)
	rtest.OK(t, err)

	attr := fuse.Attr{}
	rtest.OK(t, f.Attr(ctx, &attr))

	rtest.Equals(t, inode, attr.Inode)
	rtest.Equals(t, node.Mode, attr.Mode)
	rtest.Equals(t, node.Size, attr.Size)
	rtest.Equals(t, (node.Size/uint64(attr.BlockSize))+1, attr.Blocks)

	// read 200 random ranges and compare with the in-memory copy
	for i := 0; i < 200; i++ {
		offset := rand.Intn(int(filesize))
		length := rand.Intn(int(filesize)-offset) + 100

		b := memfile[offset : offset+length]

		buf := make([]byte, length)

		testRead(t, of, offset, length, buf)
		if !bytes.Equal(b, buf) {
			t.Errorf("test %d failed, wrong data returned (offset %v, length %v)", i, offset, length)
		}
	}
}
// TestFuseDir checks that a directory node reports the metadata of its
// backing node in Attr. The directory is never opened, so no tree blob needs
// to exist.
func TestFuseDir(t *testing.T) {
	repo := repository.TestRepository(t)

	root := &Root{repo: repo, blobCache: bloblru.New(blobCacheSize)}

	node := &data.Node{
		Mode:       0755,
		UID:        42,
		GID:        43,
		AccessTime: time.Unix(1606773731, 0),
		ChangeTime: time.Unix(1606773732, 0),
		ModTime:    time.Unix(1606773733, 0),
	}
	parentInode := inodeFromName(0, "parent")
	inode := inodeFromName(1, "foo")
	d, err := newDir(root, func() {}, inode, parentInode, node)
	rtest.OK(t, err)

	// don't open the directory as that would require setting up a proper tree blob
	attr := fuse.Attr{}
	rtest.OK(t, d.Attr(context.TODO(), &attr))

	rtest.Equals(t, inode, attr.Inode)
	rtest.Equals(t, node.UID, attr.Uid)
	rtest.Equals(t, node.GID, attr.Gid)
	rtest.Equals(t, node.AccessTime, attr.Atime)
	rtest.Equals(t, node.ChangeTime, attr.Ctime)
	rtest.Equals(t, node.ModTime, attr.Mtime)
}
// Test top-level directories for their UID and GID.
func TestTopUIDGID(t *testing.T) {
	repo := repository.TestRepository(t)
	data.TestCreateSnapshot(t, repo, time.Unix(1460289341, 207401672), 0)

	// default: owner is the current user; with OwnerIsRoot: uid/gid 0
	testTopUIDGID(t, Config{}, repo, uint32(os.Getuid()), uint32(os.Getgid()))
	testTopUIDGID(t, Config{OwnerIsRoot: true}, repo, 0, 0)
}
// testTopUIDGID checks that the root node and the "ids" meta dir report the
// given uid/gid, while a snapshot dir reports the owner stored in the
// snapshot (zero for test snapshots).
func testTopUIDGID(t *testing.T, cfg Config, repo restic.Repository, uid, gid uint32) {
	t.Helper()

	ctx := context.Background()
	root := NewRoot(repo, cfg)

	var attr fuse.Attr
	err := root.Attr(ctx, &attr)
	rtest.OK(t, err)
	rtest.Equals(t, uid, attr.Uid)
	rtest.Equals(t, gid, attr.Gid)

	idsdir, err := root.Lookup(ctx, "ids")
	rtest.OK(t, err)

	err = idsdir.Attr(ctx, &attr)
	rtest.OK(t, err)
	rtest.Equals(t, uid, attr.Uid)
	rtest.Equals(t, gid, attr.Gid)

	snapID := loadFirstSnapshot(t, repo).ID().Str()
	snapshotdir, err := idsdir.(fs.NodeStringLookuper).Lookup(ctx, snapID)
	rtest.OK(t, err)

	// data.TestCreateSnapshot does not set the UID/GID thus it must be always zero
	err = snapshotdir.Attr(ctx, &attr)
	rtest.OK(t, err)
	rtest.Equals(t, uint32(0), attr.Uid)
	rtest.Equals(t, uint32(0), attr.Gid)
}
// The Lookup method must return the same Node object unless it was forgotten in the meantime
func testStableLookup(t *testing.T, node fs.Node, path string) fs.Node {
	t.Helper()

	// two lookups without a Forget in between must yield the same object
	result, err := node.(fs.NodeStringLookuper).Lookup(context.TODO(), path)
	rtest.OK(t, err)
	result2, err := node.(fs.NodeStringLookuper).Lookup(context.TODO(), path)
	rtest.OK(t, err)
	rtest.Assert(t, result == result2, "%v are not the same object", path)

	// after Forget the cached object must be replaced by a fresh one
	result2.(fs.NodeForgetter).Forget()
	result2, err = node.(fs.NodeStringLookuper).Lookup(context.TODO(), path)
	rtest.OK(t, err)
	rtest.Assert(t, result != result2, "object for %v should change after forget", path)
	return result
}
// TestStableNodeObjects verifies stable node identity at each level of the
// mount hierarchy: meta dir, snapshot dir, subdirectory and file.
func TestStableNodeObjects(t *testing.T) {
	repo := repository.TestRepository(t)
	data.TestCreateSnapshot(t, repo, time.Unix(1460289341, 207401672), 2)
	root := NewRoot(repo, Config{})

	idsdir := testStableLookup(t, root, "ids")
	snapID := loadFirstSnapshot(t, repo).ID().Str()
	snapshotdir := testStableLookup(t, idsdir, snapID)
	dir := testStableLookup(t, snapshotdir, "dir-0")
	testStableLookup(t, dir, "file-2")
}
// Test reporting of fuse.Attr.Blocks in multiples of 512.
func TestBlocks(t *testing.T) {
	root := &Root{}

	for _, c := range []struct {
		size, blocks uint64
	}{
		{0, 0},
		{1, 1},
		{511, 1},
		{512, 1},
		{513, 2},
		{1024, 2},
		{1025, 3},
		{41253, 81},
	} {
		target := strings.Repeat("x", int(c.size))

		// files, symlinks and snapshot links must all report the same
		// rounded-up block count for the same size
		for _, n := range []fs.Node{
			&file{root: root, node: &data.Node{Size: uint64(c.size)}},
			&link{root: root, node: &data.Node{LinkTarget: target}},
			&snapshotLink{root: root, snapshot: &data.Snapshot{}, target: target},
		} {
			var a fuse.Attr
			err := n.Attr(context.TODO(), &a)
			rtest.OK(t, err)
			rtest.Equals(t, c.blocks, a.Blocks)
		}
	}
}
// TestInodeFromNode checks the inode derivation rules: hard-linked non-dirs
// share one inode regardless of the parent, all other nodes get an inode
// that depends on the parent inode.
func TestInodeFromNode(t *testing.T) {
	node := &data.Node{Name: "foo.txt", Type: data.NodeTypeCharDev, Links: 2}
	ino1 := inodeFromNode(1, node)
	ino2 := inodeFromNode(2, node)
	rtest.Assert(t, ino1 == ino2, "inodes %d, %d of hard links differ", ino1, ino2)

	node.Links = 1
	ino1 = inodeFromNode(1, node)
	ino2 = inodeFromNode(2, node)
	rtest.Assert(t, ino1 != ino2, "same inode %d but different parent", ino1)

	// Regression test: in a path a/b/b, the grandchild should not get the
	// same inode as the grandparent.
	a := &data.Node{Name: "a", Type: data.NodeTypeDir, Links: 2}
	ab := &data.Node{Name: "b", Type: data.NodeTypeDir, Links: 2}
	abb := &data.Node{Name: "b", Type: data.NodeTypeDir, Links: 2}
	inoA := inodeFromNode(1, a)
	inoAb := inodeFromNode(inoA, ab)
	inoAbb := inodeFromNode(inoAb, abb)
	rtest.Assert(t, inoA != inoAb, "inode(a/b) = inode(a)")
	rtest.Assert(t, inoA != inoAbb, "inode(a/b/b) = inode(a)")
}
// TestLink exercises Readlink, Listxattr and Getxattr of a symlink node.
func TestLink(t *testing.T) {
	node := &data.Node{Name: "foo.txt", Type: data.NodeTypeSymlink, Links: 1, LinkTarget: "dst", ExtendedAttributes: []data.ExtendedAttribute{
		{Name: "foo", Value: []byte("bar")},
	}}

	lnk, err := newLink(&Root{}, func() {}, 42, node)
	rtest.OK(t, err)
	target, err := lnk.Readlink(context.TODO(), nil)
	rtest.OK(t, err)
	rtest.Equals(t, node.LinkTarget, target)

	// listing must return exactly the one attribute name
	exp := &fuse.ListxattrResponse{}
	exp.Append("foo")
	resp := &fuse.ListxattrResponse{}
	rtest.OK(t, lnk.Listxattr(context.TODO(), &fuse.ListxattrRequest{}, resp))
	rtest.Equals(t, exp.Xattr, resp.Xattr)

	getResp := &fuse.GetxattrResponse{}
	rtest.OK(t, lnk.Getxattr(context.TODO(), &fuse.GetxattrRequest{Name: "foo"}, getResp))
	rtest.Equals(t, node.ExtendedAttributes[0].Value, getResp.Xattr)

	// reading an unknown attribute must return an error
	err = lnk.Getxattr(context.TODO(), &fuse.GetxattrRequest{Name: "invalid"}, nil)
	rtest.Assert(t, err != nil, "missing error on reading invalid xattr")
}
// sink keeps the benchmark result observable so the compiler cannot
// eliminate the call under test.
var sink uint64

// BenchmarkInode measures inode derivation for a node without hard links
// (name-based path) and one with hard links (device/inode-based path).
func BenchmarkInode(b *testing.B) {
	for _, sub := range []struct {
		name string
		node data.Node
	}{
		{
			name: "no_hard_links",
			node: data.Node{Name: "a somewhat long-ish filename.svg.bz2", Type: data.NodeTypeFifo},
		},
		{
			name: "hard_link",
			node: data.Node{Name: "some other filename", Type: data.NodeTypeFile, Links: 2},
		},
	} {
		b.Run(sub.name, func(b *testing.B) {
			b.ReportAllocs()
			for i := 0; i < b.N; i++ {
				sink = inodeFromNode(1, &sub.node)
			}
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fuse/tree_cache.go | internal/fuse/tree_cache.go | //go:build darwin || freebsd || linux
package fuse
import (
"sync"
"github.com/anacrolix/fuse/fs"
)
// treeCache caches fs.Node objects by name so that repeated lookups of the
// same entry return the identical node object until it is forgotten.
type treeCache struct {
	nodes map[string]fs.Node
	m     sync.Mutex // guards nodes
}

// forgetFn removes the associated node from its cache; it is handed to node
// constructors so a node can drop itself when its Forget method is called.
type forgetFn func()

// newTreeCache returns an empty, ready-to-use treeCache.
func newTreeCache() *treeCache {
	return &treeCache{
		nodes: map[string]fs.Node{},
	}
}
// lookupOrCreate returns the cached node for name or, if absent, calls
// create to build one and caches it. The forget callback passed to create
// removes the entry from the cache again.
func (t *treeCache) lookupOrCreate(name string, create func(forget forgetFn) (fs.Node, error)) (fs.Node, error) {
	t.m.Lock()
	defer t.m.Unlock()

	if node, ok := t.nodes[name]; ok {
		return node, nil
	}

	node, err := create(func() {
		t.m.Lock()
		defer t.m.Unlock()

		delete(t.nodes, name)
	})
	if err != nil {
		return nil, err
	}
	t.nodes[name] = node
	return node, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fuse/root.go | internal/fuse/root.go | //go:build darwin || freebsd || linux
package fuse
import (
"os"
"github.com/restic/restic/internal/bloblru"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
"github.com/anacrolix/fuse/fs"
)
// Config holds settings for the fuse mount.
type Config struct {
	OwnerIsRoot   bool                // report uid/gid 0 for the top-level dirs instead of the current user
	Filter        data.SnapshotFilter // selects which snapshots are exposed
	TimeTemplate  string              // time layout used for %T in path templates
	PathTemplates []string            // templates describing the directory structure of the mount
}
// Root is the root node of the fuse mount of a repository.
type Root struct {
	repo      restic.Repository
	cfg       Config
	blobCache *bloblru.Cache // shared cache for data blobs

	*SnapshotsDir

	uid, gid uint32 // owner reported for the top-level directories
}

// ensure that *Root implements these interfaces
var _ = fs.HandleReadDirAller(&Root{})
var _ = fs.NodeStringLookuper(&Root{})

const rootInode = 1

// Size of the blob cache. TODO: make this configurable.
const blobCacheSize = 64 << 20
// NewRoot initializes a new root node from a repository.
func NewRoot(repo restic.Repository, cfg Config) *Root {
	debug.Log("NewRoot(), config %v", cfg)

	// Apply the default path templates before cfg is copied into the Root.
	// Previously the defaults were only set on the local copy after the
	// Root had been constructed, leaving root.cfg.PathTemplates empty even
	// though the defaults were in effect.
	if len(cfg.PathTemplates) == 0 {
		cfg.PathTemplates = []string{
			"ids/%i",
			"snapshots/%T",
			"hosts/%h/%T",
			"tags/%t/%T",
		}
	}

	root := &Root{
		repo:      repo,
		cfg:       cfg,
		blobCache: bloblru.New(blobCacheSize),
	}

	// unless the mount should present everything as owned by root, report
	// the current user as the owner of the top-level directories
	if !cfg.OwnerIsRoot {
		root.uid = uint32(os.Getuid())
		root.gid = uint32(os.Getgid())
	}

	root.SnapshotsDir = NewSnapshotsDir(root, func() {}, rootInode, rootInode, NewSnapshotsDirStructure(root, cfg.PathTemplates, cfg.TimeTemplate), "")

	return root
}
// Root satisfies the fs.FS interface; the *Root node itself is the root
// directory of the mounted filesystem, so it simply returns itself.
func (r *Root) Root() (fs.Node, error) {
	debug.Log("Root()")
	return r, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fuse/file.go | internal/fuse/file.go | //go:build darwin || freebsd || linux
package fuse
import (
"context"
"sort"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/anacrolix/fuse"
"github.com/anacrolix/fuse/fs"
)
// The default block size to report in stat
const blockSize = 512

// Statically ensure that *file and *openFile implement the given interfaces
var _ = fs.HandleReader(&openFile{})
var _ = fs.NodeForgetter(&file{})
var _ = fs.NodeGetxattrer(&file{})
var _ = fs.NodeListxattrer(&file{})
var _ = fs.NodeOpener(&file{})

// file is the fuse node for a regular file within a snapshot; opening it
// yields an *openFile handle.
type file struct {
	root   *Root
	forget forgetFn
	node   *data.Node
	inode  uint64
}

// openFile is an opened file handle; the cumulative blob sizes allow
// mapping a read offset to the blob that contains it.
type openFile struct {
	file
	// cumsize[i] holds the cumulative size of blobs[:i].
	cumsize []uint64
}
// newFile creates a fuse node for the regular file described by node.
func newFile(root *Root, forget forgetFn, inode uint64, node *data.Node) (fusefile *file, err error) {
	debug.Log("create new file for %v with %d blobs", node.Name, len(node.Content))
	f := &file{
		root:   root,
		forget: forget,
		node:   node,
		inode:  inode,
	}
	return f, nil
}
// Attr fills a with the file's metadata from the stored node.
func (f *file) Attr(_ context.Context, a *fuse.Attr) error {
	debug.Log("Attr(%v)", f.node.Name)

	node := f.node
	a.Inode = f.inode
	a.Mode = node.Mode
	a.Size = node.Size
	// report the size in 512-byte blocks, rounded up
	a.Blocks = (node.Size + blockSize - 1) / blockSize
	a.BlockSize = blockSize
	a.Nlink = uint32(node.Links)
	a.Atime = node.AccessTime
	a.Ctime = node.ChangeTime
	a.Mtime = node.ModTime

	// only expose the stored owner if the mount is not forced to root
	if !f.root.cfg.OwnerIsRoot {
		a.Uid = node.UID
		a.Gid = node.GID
	}

	return nil
}
// Open precomputes the cumulative sizes of the file's content blobs and
// returns an *openFile handle. If the node's stored size disagrees with the
// sum of the blob sizes, the handle carries a copy of the node with the
// corrected size.
func (f *file) Open(ctx context.Context, _ *fuse.OpenRequest, _ *fuse.OpenResponse) (fs.Handle, error) {
	debug.Log("open file %v with %d blobs", f.node.Name, len(f.node.Content))

	var bytes uint64
	cumsize := make([]uint64, 1+len(f.node.Content))
	for i, id := range f.node.Content {
		// abort early if the request was cancelled
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}
		size, found := f.root.repo.LookupBlobSize(restic.DataBlob, id)
		if !found {
			return nil, errors.Errorf("id %v not found in repository", id)
		}

		bytes += uint64(size)
		cumsize[i+1] = bytes
	}

	var of = openFile{file: *f}

	if bytes != f.node.Size {
		debug.Log("sizes do not match: node.Size %v != size %v, using real size", f.node.Size, bytes)
		// Make a copy of the node with correct size
		nodenew := *f.node
		nodenew.Size = bytes
		of.file.node = &nodenew
	}
	of.cumsize = cumsize

	return &of, nil
}
// getBlobAt returns the contents of the i-th content blob of the file,
// loading it through the shared blob cache so concurrent readers fetch each
// blob only once.
func (f *openFile) getBlobAt(ctx context.Context, i int) (blob []byte, err error) {
	blob, err = f.root.blobCache.GetOrCompute(f.node.Content[i], func() ([]byte, error) {
		return f.root.repo.LoadBlob(ctx, restic.DataBlob, f.node.Content[i], nil)
	})
	if err != nil {
		debug.Log("LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err)
		return nil, unwrapCtxCanceled(err)
	}

	return blob, nil
}
// Read satisfies fs.HandleReader. It binary-searches cumsize for the first
// blob covering req.Offset and then copies consecutive blobs into resp.Data
// until req.Size bytes are filled or the file ends.
func (f *openFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	debug.Log("Read(%v, %v, %v), file size %v", f.node.Name, req.Size, req.Offset, f.node.Size)
	offset := uint64(req.Offset)

	// as stated in https://godoc.org/bazil.org/fuse/fs#HandleReader there
	// is no need to check if offset > size

	// handle special case: file is empty
	if f.node.Size == 0 {
		resp.Data = resp.Data[:0]
		return nil
	}

	// Skip blobs before the offset
	startContent := -1 + sort.Search(len(f.cumsize), func(i int) bool {
		return f.cumsize[i] > offset
	})
	// offset becomes relative to the start of the first blob to read
	offset -= f.cumsize[startContent]

	dst := resp.Data[0:req.Size]
	readBytes := 0
	remainingBytes := req.Size

	// The documentation of bazil/fuse actually says that synchronization is
	// required (see https://godoc.org/bazil.org/fuse#hdr-Service_Methods):
	//
	//     Multiple goroutines may call service methods simultaneously;
	//     the methods being called are responsible for appropriate synchronization.
	//
	// However, no lock needed here as getBlobAt can be called concurrently
	// (blobCache has its own locking)
	for i := startContent; remainingBytes > 0 && i < len(f.cumsize)-1; i++ {
		blob, err := f.getBlobAt(ctx, i)
		if err != nil {
			return err
		}

		// only the first blob may need to be read starting at an offset
		if offset > 0 {
			blob = blob[offset:]
			offset = 0
		}

		copied := copy(dst, blob)
		remainingBytes -= copied
		readBytes += copied

		dst = dst[copied:]
	}
	resp.Data = resp.Data[:readBytes]

	return nil
}
// Listxattr lists the names of the file's extended attributes.
func (f *file) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
	nodeToXattrList(f.node, req, resp)
	return nil
}

// Getxattr returns the value of the extended attribute named in req.
func (f *file) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
	return nodeGetXattr(f.node, req, resp)
}

// Forget invokes the forget callback, which drops this node from the
// enclosing tree cache.
func (f *file) Forget() {
	f.forget()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fuse/inode.go | internal/fuse/inode.go | //go:build darwin || freebsd || linux
package fuse
import (
"encoding/binary"
"github.com/cespare/xxhash/v2"
"github.com/restic/restic/internal/data"
)
// prime1 from xxhash; mixes the parent inode into the hash.
const prime = 11400714785074694791

// inodeFromName generates an inode number for a file in a meta dir.
func inodeFromName(parent uint64, name string) uint64 {
	ino := prime*parent ^ xxhash.Sum64String(cleanupNodeName(name))

	// Inode 0 is invalid and 1 is the root. Remap those.
	if ino < 2 {
		ino += 2
	}
	return ino
}
// inodeFromNode generates an inode number for a file within a snapshot.
// Hard-linked non-directories hash only the stored device/inode pair, so all
// links to the same file share one inode; all other nodes mix the parent
// inode with the cleaned-up file name.
func inodeFromNode(parent uint64, node *data.Node) (inode uint64) {
	if node.Links > 1 && node.Type != data.NodeTypeDir {
		// If node has hard links, give them all the same inode,
		// irrespective of the parent.
		var buf [16]byte
		binary.LittleEndian.PutUint64(buf[:8], node.DeviceID)
		binary.LittleEndian.PutUint64(buf[8:], node.Inode)
		inode = xxhash.Sum64(buf[:])
	} else {
		// Else, use the name and the parent inode.
		// node.{DeviceID,Inode} may not even be reliable.
		inode = prime*parent ^ xxhash.Sum64String(cleanupNodeName(node.Name))
	}

	// Inode 0 is invalid and 1 is the root. Remap those.
	if inode < 2 {
		inode += 2
	}
	return inode
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fuse/other.go | internal/fuse/other.go | //go:build darwin || freebsd || linux
package fuse
import (
"context"
"github.com/anacrolix/fuse"
"github.com/anacrolix/fuse/fs"
"github.com/restic/restic/internal/data"
)
// Statically ensure that *other implements the given interface
var _ = fs.NodeForgetter(&other{})
var _ = fs.NodeReadlinker(&other{})

// other is the fuse node for node types without a dedicated implementation
// (presumably devices/fifos/sockets — confirm against the dir lookup code).
type other struct {
	root   *Root
	forget forgetFn
	node   *data.Node
	inode  uint64
}

// newOther wraps node in a fuse node.
func newOther(root *Root, forget forgetFn, inode uint64, node *data.Node) (*other, error) {
	return &other{root: root, forget: forget, inode: inode, node: node}, nil
}

// Readlink returns the node's stored link target.
func (l *other) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) {
	return l.node.LinkTarget, nil
}

// Attr fills a with the node's metadata.
func (l *other) Attr(_ context.Context, a *fuse.Attr) error {
	a.Inode = l.inode
	a.Mode = l.node.Mode

	// only expose the stored owner if the mount is not forced to root
	if !l.root.cfg.OwnerIsRoot {
		a.Uid = l.node.UID
		a.Gid = l.node.GID
	}
	a.Atime = l.node.AccessTime
	a.Ctime = l.node.ChangeTime
	a.Mtime = l.node.ModTime

	a.Nlink = uint32(l.node.Links)

	return nil
}

// Forget invokes the forget callback, dropping this node from the cache.
func (l *other) Forget() {
	l.forget()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fuse/link.go | internal/fuse/link.go | //go:build darwin || freebsd || linux
package fuse
import (
"context"
"github.com/anacrolix/fuse"
"github.com/anacrolix/fuse/fs"
"github.com/restic/restic/internal/data"
)
// Statically ensure that *link implements the given interface
var _ = fs.NodeForgetter(&link{})
var _ = fs.NodeGetxattrer(&link{})
var _ = fs.NodeListxattrer(&link{})
var _ = fs.NodeReadlinker(&link{})

// link is the fuse node for a symlink within a snapshot.
type link struct {
	root   *Root
	forget forgetFn
	node   *data.Node
	inode  uint64
}

// newLink wraps a symlink node in a fuse node.
func newLink(root *Root, forget forgetFn, inode uint64, node *data.Node) (*link, error) {
	return &link{root: root, forget: forget, inode: inode, node: node}, nil
}

// Readlink returns the symlink's target.
func (l *link) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) {
	return l.node.LinkTarget, nil
}

// Attr fills a with the symlink's metadata; the reported size is the length
// of the link target, in 512-byte blocks rounded up.
func (l *link) Attr(_ context.Context, a *fuse.Attr) error {
	a.Inode = l.inode
	a.Mode = l.node.Mode

	// only expose the stored owner if the mount is not forced to root
	if !l.root.cfg.OwnerIsRoot {
		a.Uid = l.node.UID
		a.Gid = l.node.GID
	}
	a.Atime = l.node.AccessTime
	a.Ctime = l.node.ChangeTime
	a.Mtime = l.node.ModTime

	a.Nlink = uint32(l.node.Links)
	a.Size = uint64(len(l.node.LinkTarget))
	a.Blocks = (a.Size + blockSize - 1) / blockSize

	return nil
}

// Listxattr lists the names of the symlink's extended attributes.
func (l *link) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
	nodeToXattrList(l.node, req, resp)
	return nil
}

// Getxattr returns the value of the extended attribute named in req.
func (l *link) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
	return nodeGetXattr(l.node, req, resp)
}

// Forget invokes the forget callback, dropping this node from the cache.
func (l *link) Forget() {
	l.forget()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fuse/snapshots_dirstruct_test.go | internal/fuse/snapshots_dirstruct_test.go | //go:build darwin || freebsd || linux
package fuse
import (
"strings"
"testing"
"time"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
)
// TestPathsFromSn checks the expansion of each path template placeholder
// (%i, %I, %T, %h, %t, %u) for a single snapshot, including the returned
// time suffix when %T terminates the template.
func TestPathsFromSn(t *testing.T) {
	id1, _ := restic.ParseID("1234567812345678123456781234567812345678123456781234567812345678")
	time1, _ := time.Parse("2006-01-02T15:04:05", "2021-01-01T00:00:01")
	sn1 := &data.Snapshot{Hostname: "host", Username: "user", Tags: []string{"tag1", "tag2"}, Time: time1}
	data.TestSetSnapshotID(t, sn1, id1)

	var p []string
	var s string

	p, s = pathsFromSn("ids/%i", "2006-01-02T15:04:05", sn1)
	test.Equals(t, []string{"ids/12345678"}, p)
	test.Equals(t, "", s)

	p, s = pathsFromSn("snapshots/%T", "2006-01-02T15:04:05", sn1)
	test.Equals(t, []string{"snapshots/"}, p)
	test.Equals(t, "2021-01-01T00:00:01", s)

	p, s = pathsFromSn("hosts/%h/%T", "2006-01-02T15:04:05", sn1)
	test.Equals(t, []string{"hosts/host/"}, p)
	test.Equals(t, "2021-01-01T00:00:01", s)

	// %t fans out to one path per tag
	p, s = pathsFromSn("tags/%t/%T", "2006-01-02T15:04:05", sn1)
	test.Equals(t, []string{"tags/tag1/", "tags/tag2/"}, p)
	test.Equals(t, "2021-01-01T00:00:01", s)

	p, s = pathsFromSn("users/%u/%T", "2006-01-02T15:04:05", sn1)
	test.Equals(t, []string{"users/user/"}, p)
	test.Equals(t, "2021-01-01T00:00:01", s)

	p, s = pathsFromSn("longids/%I", "2006-01-02T15:04:05", sn1)
	test.Equals(t, []string{"longids/1234567812345678123456781234567812345678123456781234567812345678"}, p)
	test.Equals(t, "", s)

	// %T not at the end is expanded in place; no time suffix is returned
	p, s = pathsFromSn("%T/%h", "2006/01/02", sn1)
	test.Equals(t, []string{"2021/01/01/host"}, p)
	test.Equals(t, "", s)

	p, s = pathsFromSn("%T/%i", "2006/01", sn1)
	test.Equals(t, []string{"2021/01/12345678"}, p)
	test.Equals(t, "", s)
}
// TestMakeDirs runs four snapshots with deliberately colliding time strings
// through makeDirs and checks the complete set of generated leaf entries,
// intermediate directories and "latest" symlinks.
func TestMakeDirs(t *testing.T) {
	pathTemplates := []string{"ids/%i", "snapshots/%T", "hosts/%h/%T",
		"tags/%t/%T", "users/%u/%T", "longids/%I", "%T/%h", "%T/%i",
	}
	timeTemplate := "2006/01/02"

	sds := &SnapshotsDirStructure{
		pathTemplates: pathTemplates,
		timeTemplate:  timeTemplate,
	}

	id0, _ := restic.ParseID("0000000012345678123456781234567812345678123456781234567812345678")
	time0, _ := time.Parse("2006-01-02T15:04:05", "2020-12-31T00:00:01")
	sn0 := &data.Snapshot{Hostname: "host", Username: "user", Tags: []string{"tag1", "tag2"}, Time: time0}
	data.TestSetSnapshotID(t, sn0, id0)

	id1, _ := restic.ParseID("1234567812345678123456781234567812345678123456781234567812345678")
	time1, _ := time.Parse("2006-01-02T15:04:05", "2021-01-01T00:00:01")
	sn1 := &data.Snapshot{Hostname: "host", Username: "user", Tags: []string{"tag1", "tag2"}, Time: time1}
	data.TestSetSnapshotID(t, sn1, id1)

	id2, _ := restic.ParseID("8765432112345678123456781234567812345678123456781234567812345678")
	time2, _ := time.Parse("2006-01-02T15:04:05", "2021-01-01T01:02:03")
	sn2 := &data.Snapshot{Hostname: "host2", Username: "user2", Tags: []string{"tag2", "tag3", "tag4"}, Time: time2}
	data.TestSetSnapshotID(t, sn2, id2)

	id3, _ := restic.ParseID("aaaaaaaa12345678123456781234567812345678123456781234567812345678")
	time3, _ := time.Parse("2006-01-02T15:04:05", "2021-01-01T01:02:03")
	sn3 := &data.Snapshot{Hostname: "host", Username: "user2", Tags: []string{}, Time: time3}
	data.TestSetSnapshotID(t, sn3, id3)

	sds.makeDirs(data.Snapshots{sn0, sn1, sn2, sn3})

	expNames := make(map[string]*data.Snapshot)
	expLatest := make(map[string]string)

	// entries for sn0
	expNames["/ids/00000000"] = sn0
	expNames["/snapshots/2020/12/31"] = sn0
	expNames["/hosts/host/2020/12/31"] = sn0
	expNames["/tags/tag1/2020/12/31"] = sn0
	expNames["/tags/tag2/2020/12/31"] = sn0
	expNames["/users/user/2020/12/31"] = sn0
	expNames["/longids/0000000012345678123456781234567812345678123456781234567812345678"] = sn0
	expNames["/2020/12/31/host"] = sn0
	expNames["/2020/12/31/00000000"] = sn0

	// entries for sn1
	expNames["/ids/12345678"] = sn1
	expNames["/snapshots/2021/01/01"] = sn1
	expNames["/hosts/host/2021/01/01"] = sn1
	expNames["/tags/tag1/2021/01/01"] = sn1
	expNames["/tags/tag2/2021/01/01"] = sn1
	expNames["/users/user/2021/01/01"] = sn1
	expNames["/longids/1234567812345678123456781234567812345678123456781234567812345678"] = sn1
	expNames["/2021/01/01/host"] = sn1
	expNames["/2021/01/01/12345678"] = sn1

	// entries for sn2
	expNames["/ids/87654321"] = sn2
	expNames["/snapshots/2021/01/01-1"] = sn2 // sn1 and sn2 have same time string
	expNames["/hosts/host2/2021/01/01"] = sn2
	expNames["/tags/tag2/2021/01/01-1"] = sn2 // sn1 and sn2 have same time string
	expNames["/tags/tag3/2021/01/01"] = sn2
	expNames["/tags/tag4/2021/01/01"] = sn2
	expNames["/users/user2/2021/01/01"] = sn2
	expNames["/longids/8765432112345678123456781234567812345678123456781234567812345678"] = sn2
	expNames["/2021/01/01/host2"] = sn2
	expNames["/2021/01/01/87654321"] = sn2

	// entries for sn3
	expNames["/ids/aaaaaaaa"] = sn3
	expNames["/snapshots/2021/01/01-2"] = sn3 // sn1 - sn3 have same time string
	expNames["/hosts/host/2021/01/01-1"] = sn3 // sn1 and sn3 have same time string
	expNames["/users/user2/2021/01/01-1"] = sn3 // sn2 and sn3 have same time string
	expNames["/longids/aaaaaaaa12345678123456781234567812345678123456781234567812345678"] = sn3
	expNames["/2021/01/01/host-1"] = sn3 // sn1 and sn3 have same time string and identical host
	expNames["/2021/01/01/aaaaaaaa"] = sn3

	// intermediate directories
	// sn0
	expNames["/ids"] = nil
	expNames[""] = nil
	expNames["/snapshots/2020/12"] = nil
	expNames["/snapshots/2020"] = nil
	expNames["/snapshots"] = nil
	expNames["/hosts/host/2020/12"] = nil
	expNames["/hosts/host/2020"] = nil
	expNames["/hosts/host"] = nil
	expNames["/hosts"] = nil
	expNames["/tags/tag1/2020/12"] = nil
	expNames["/tags/tag1/2020"] = nil
	expNames["/tags/tag1"] = nil
	expNames["/tags"] = nil
	expNames["/tags/tag2/2020/12"] = nil
	expNames["/tags/tag2/2020"] = nil
	expNames["/tags/tag2"] = nil
	expNames["/users/user/2020/12"] = nil
	expNames["/users/user/2020"] = nil
	expNames["/users/user"] = nil
	expNames["/users"] = nil
	expNames["/longids"] = nil
	expNames["/2020/12/31"] = nil
	expNames["/2020/12"] = nil
	expNames["/2020"] = nil

	// sn1
	expNames["/snapshots/2021/01"] = nil
	expNames["/snapshots/2021"] = nil
	expNames["/hosts/host/2021/01"] = nil
	expNames["/hosts/host/2021"] = nil
	expNames["/tags/tag1/2021/01"] = nil
	expNames["/tags/tag1/2021"] = nil
	expNames["/tags/tag2/2021/01"] = nil
	expNames["/tags/tag2/2021"] = nil
	expNames["/users/user/2021/01"] = nil
	expNames["/users/user/2021"] = nil
	expNames["/2021/01/01"] = nil
	expNames["/2021/01"] = nil
	expNames["/2021"] = nil

	// sn2
	expNames["/hosts/host2/2021/01"] = nil
	expNames["/hosts/host2/2021"] = nil
	expNames["/hosts/host2"] = nil
	expNames["/tags/tag3/2021/01"] = nil
	expNames["/tags/tag3/2021"] = nil
	expNames["/tags/tag3"] = nil
	expNames["/tags/tag4/2021/01"] = nil
	expNames["/tags/tag4/2021"] = nil
	expNames["/tags/tag4"] = nil
	expNames["/users/user2/2021/01"] = nil
	expNames["/users/user2/2021"] = nil
	expNames["/users/user2"] = nil

	// target snapshots for links
	expNames["/snapshots/latest"] = sn3 // sn1 - sn3 have same time string
	expNames["/hosts/host/latest"] = sn3
	expNames["/hosts/host2/latest"] = sn2
	expNames["/tags/tag1/latest"] = sn1
	expNames["/tags/tag2/latest"] = sn2 // sn1 and sn2 have same time string
	expNames["/tags/tag3/latest"] = sn2
	expNames["/tags/tag4/latest"] = sn2
	expNames["/users/user/latest"] = sn1
	expNames["/users/user2/latest"] = sn3 // sn2 and sn3 have same time string

	// latest links
	expLatest["/snapshots/latest"] = "2021/01/01-2" // sn1 - sn3 have same time string
	expLatest["/hosts/host/latest"] = "2021/01/01-1"
	expLatest["/hosts/host2/latest"] = "2021/01/01"
	expLatest["/tags/tag1/latest"] = "2021/01/01"
	expLatest["/tags/tag2/latest"] = "2021/01/01-1" // sn1 and sn2 have same time string
	expLatest["/tags/tag3/latest"] = "2021/01/01"
	expLatest["/tags/tag4/latest"] = "2021/01/01"
	expLatest["/users/user/latest"] = "2021/01/01"
	expLatest["/users/user2/latest"] = "2021/01/01-1" // sn2 and sn3 have same time string

	verifyEntries(t, expNames, expLatest, sds.entries)
}
// verifyEntries checks that the entries map built by SnapshotsDirStructure
// matches the expected snapshot per path (expNames) and the expected link
// target per "latest" symlink (expLatest). It also verifies the internal
// consistency of the generated directory tree.
func verifyEntries(t *testing.T, expNames map[string]*data.Snapshot, expLatest map[string]string, entries map[string]*MetaDirData) {
    actNames := make(map[string]*data.Snapshot)
    actLatest := make(map[string]string)
    for path, entry := range entries {
        actNames[path] = entry.snapshot
        if entry.linkTarget != "" {
            actLatest[path] = entry.linkTarget
        }
    }
    test.Equals(t, expNames, actNames)
    test.Equals(t, expLatest, actLatest)
    // verify tree integrity
    for path, entry := range entries {
        // check that all children are actually contained in entry.names
        for otherPath := range entries {
            if strings.HasPrefix(otherPath, path+"/") {
                sub := otherPath[len(path)+1:]
                // remaining path does not contain a directory
                test.Assert(t, strings.Contains(sub, "/") || (entry.names != nil && entry.names[sub] != nil), "missing entry %v in %v", sub, path)
            }
        }
        if entry.names == nil {
            continue
        }
        // child entries reference the correct MetaDirData
        for elem, subentry := range entry.names {
            test.Equals(t, entries[path+"/"+elem], subentry)
        }
    }
}
// TestMakeEmptyDirs checks that makeDirs creates the static directory
// skeleton derived from the path templates even when no snapshots exist.
func TestMakeEmptyDirs(t *testing.T) {
    pathTemplates := []string{"ids/%i", "snapshots/%T", "hosts/%h/%T",
        "tags/%t/%T", "users/%u/%T", "longids/id-%I", "%T/%h", "%T/%i", "id-%i",
    }
    timeTemplate := "2006/01/02"
    sds := &SnapshotsDirStructure{
        pathTemplates: pathTemplates,
        timeTemplate:  timeTemplate,
    }
    sds.makeDirs(data.Snapshots{})
    expNames := make(map[string]*data.Snapshot)
    expLatest := make(map[string]string)
    // empty entries for dir structure
    expNames["/ids"] = nil
    expNames["/snapshots"] = nil
    expNames["/hosts"] = nil
    expNames["/tags"] = nil
    expNames["/users"] = nil
    expNames["/longids"] = nil
    expNames[""] = nil
    verifyEntries(t, expNames, expLatest, sds.entries)
}

// TestFilenameFromTag checks that tag names are sanitized into safe
// directory names: empty and dot-only names are padded with underscores and
// path separators are replaced.
func TestFilenameFromTag(t *testing.T) {
    for _, c := range []struct {
        tag, filename string
    }{
        {"", "_"},
        {".", "_"},
        {"..", "__"},
        {"%.", "%."},
        {"foo", "foo"},
        {"foo ", "foo "},
        {"foo/bar_baz", "foo_bar_baz"},
    } {
        test.Equals(t, c.filename, filenameFromTag(c.tag))
    }
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fuse/snapshots_dir.go | internal/fuse/snapshots_dir.go | //go:build darwin || freebsd || linux
package fuse
import (
"context"
"os"
"syscall"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
"github.com/anacrolix/fuse"
"github.com/anacrolix/fuse/fs"
)
// SnapshotsDir is an actual fuse directory generated from SnapshotsDirStructure.
// It uses the saved prefix to select the corresponding MetaDirData.
type SnapshotsDir struct {
    root        *Root
    forget      forgetFn
    inode       uint64
    parentInode uint64
    dirStruct   *SnapshotsDirStructure
    prefix      string // path of this dir within the generated structure
    cache       treeCache
}

// ensure that *SnapshotsDir implements these interfaces
var _ = fs.HandleReadDirAller(&SnapshotsDir{})
var _ = fs.NodeForgetter(&SnapshotsDir{})
var _ = fs.NodeStringLookuper(&SnapshotsDir{})

// NewSnapshotsDir returns a new directory structure containing snapshots and "latest" links
func NewSnapshotsDir(root *Root, forget forgetFn, inode, parentInode uint64, dirStruct *SnapshotsDirStructure, prefix string) *SnapshotsDir {
    debug.Log("create snapshots dir, inode %d", inode)
    return &SnapshotsDir{
        root:        root,
        forget:      forget,
        inode:       inode,
        parentInode: parentInode,
        dirStruct:   dirStruct,
        prefix:      prefix,
        cache:       *newTreeCache(),
    }
}

// Attr returns the attributes for any dir in the snapshots directory structure
func (d *SnapshotsDir) Attr(_ context.Context, attr *fuse.Attr) error {
    attr.Inode = d.inode
    // the snapshots hierarchy is read-only
    attr.Mode = os.ModeDir | 0555
    attr.Uid = d.root.uid
    attr.Gid = d.root.gid
    debug.Log("attr: %v", attr)
    return nil
}

// ReadDirAll returns all entries of the SnapshotsDir.
func (d *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
    debug.Log("ReadDirAll()")
    // update snapshots
    meta, err := d.dirStruct.UpdatePrefix(ctx, d.prefix)
    if err != nil {
        return nil, unwrapCtxCanceled(err)
    } else if meta == nil {
        return nil, syscall.ENOENT
    }
    items := []fuse.Dirent{
        {
            Inode: d.inode,
            Name:  ".",
            Type:  fuse.DT_Dir,
        },
        {
            Inode: d.parentInode,
            Name:  "..",
            Type:  fuse.DT_Dir,
        },
    }
    for name, entry := range meta.names {
        if ctx.Err() != nil {
            return nil, ctx.Err()
        }
        d := fuse.Dirent{
            Inode: inodeFromName(d.inode, name),
            Name:  name,
            Type:  fuse.DT_Dir,
        }
        // "latest" entries are symlinks to the corresponding snapshot dir
        if entry.linkTarget != "" {
            d.Type = fuse.DT_Link
        }
        items = append(items, d)
    }
    return items, nil
}

// Lookup returns a specific entry from the SnapshotsDir.
func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) {
    debug.Log("Lookup(%s)", name)
    meta, err := d.dirStruct.UpdatePrefix(ctx, d.prefix)
    if err != nil {
        return nil, unwrapCtxCanceled(err)
    } else if meta == nil {
        return nil, syscall.ENOENT
    }
    return d.cache.lookupOrCreate(name, func(forget forgetFn) (fs.Node, error) {
        entry := meta.names[name]
        if entry == nil {
            return nil, syscall.ENOENT
        }
        inode := inodeFromName(d.inode, name)
        // an entry is either a "latest" symlink, a snapshot root, or a
        // further level of the generated directory structure
        if entry.linkTarget != "" {
            return newSnapshotLink(d.root, forget, inode, entry.linkTarget, entry.snapshot)
        } else if entry.snapshot != nil {
            return newDirFromSnapshot(d.root, forget, inode, entry.snapshot)
        }
        return NewSnapshotsDir(d.root, forget, inode, d.inode, d.dirStruct, d.prefix+"/"+name), nil
    })
}

// Forget evicts this directory from the node cache.
func (d *SnapshotsDir) Forget() {
    d.forget()
}

// snapshotLink is a symlink pointing at the directory of a snapshot.
type snapshotLink struct {
    root     *Root
    forget   forgetFn
    inode    uint64
    target   string
    snapshot *data.Snapshot
}

// ensure that *snapshotLink implements these interfaces
var _ = fs.NodeForgetter(&snapshotLink{})
var _ = fs.NodeReadlinker(&snapshotLink{})

// newSnapshotLink returns a symlink node that points at target.
func newSnapshotLink(root *Root, forget forgetFn, inode uint64, target string, snapshot *data.Snapshot) (*snapshotLink, error) {
    return &snapshotLink{root: root, forget: forget, inode: inode, target: target, snapshot: snapshot}, nil
}

// Readlink returns the link target.
func (l *snapshotLink) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) {
    return l.target, nil
}

// Attr returns the attributes of the symlink; the timestamps are taken from
// the snapshot the link points to.
func (l *snapshotLink) Attr(_ context.Context, a *fuse.Attr) error {
    a.Inode = l.inode
    a.Mode = os.ModeSymlink | 0777
    a.Size = uint64(len(l.target))
    a.Blocks = (a.Size + blockSize - 1) / blockSize
    a.Uid = l.root.uid
    a.Gid = l.root.gid
    a.Atime = l.snapshot.Time
    a.Ctime = l.snapshot.Time
    a.Mtime = l.snapshot.Time
    a.Nlink = 1
    return nil
}

// Forget evicts this symlink from the node cache.
func (l *snapshotLink) Forget() {
    l.forget()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fuse/dir.go | internal/fuse/dir.go | //go:build darwin || freebsd || linux
package fuse
import (
"context"
"errors"
"os"
"path/filepath"
"sync"
"syscall"
"github.com/anacrolix/fuse"
"github.com/anacrolix/fuse/fs"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
)
// Statically ensure that *dir implement those interface
var _ = fs.HandleReadDirAller(&dir{})
var _ = fs.NodeForgetter(&dir{})
var _ = fs.NodeGetxattrer(&dir{})
var _ = fs.NodeListxattrer(&dir{})
var _ = fs.NodeStringLookuper(&dir{})

// dir is a fuse directory backed by a tree node of a snapshot.
type dir struct {
    root        *Root
    forget      forgetFn
    items       map[string]*data.Node // lazily loaded entries; nil until open() succeeds, guarded by m
    inode       uint64
    parentInode uint64
    node        *data.Node
    m           sync.Mutex
    cache       treeCache
}
// cleanupNodeName reduces a stored node name to its final path element, so
// that an entry name can never point outside the directory it is listed in.
func cleanupNodeName(name string) string {
    base := filepath.Base(name)
    return base
}
// newDir returns a fuse directory for the given tree node of a snapshot.
// The directory contents are loaded lazily on first access.
func newDir(root *Root, forget forgetFn, inode, parentInode uint64, node *data.Node) (*dir, error) {
    debug.Log("new dir for %v (%v)", node.Name, node.Subtree)
    return &dir{
        root:        root,
        forget:      forget,
        node:        node,
        inode:       inode,
        parentInode: parentInode,
        cache:       *newTreeCache(),
    }, nil
}
// returning a wrapped context.Canceled error will instead result in returning
// an input / output error to the user. Thus unwrap the error to match the
// expectations of bazil/fuse
func unwrapCtxCanceled(err error) error {
if errors.Is(err, context.Canceled) {
return context.Canceled
}
return err
}
// replaceSpecialNodes replaces nodes with name "." and "/" by their contents.
// Otherwise, the node is returned.
func replaceSpecialNodes(ctx context.Context, repo restic.BlobLoader, node *data.Node) ([]*data.Node, error) {
    if node.Type != data.NodeTypeDir || node.Subtree == nil {
        return []*data.Node{node}, nil
    }
    if node.Name != "." && node.Name != "/" {
        return []*data.Node{node}, nil
    }
    tree, err := data.LoadTree(ctx, repo, *node.Subtree)
    if err != nil {
        return nil, unwrapCtxCanceled(err)
    }
    return tree.Nodes, nil
}

// newDirFromSnapshot returns a fuse directory for the root tree of a
// snapshot; a synthetic read-only node carries the snapshot's timestamps.
func newDirFromSnapshot(root *Root, forget forgetFn, inode uint64, snapshot *data.Snapshot) (*dir, error) {
    debug.Log("new dir for snapshot %v (%v)", snapshot.ID(), snapshot.Tree)
    return &dir{
        root:   root,
        forget: forget,
        node: &data.Node{
            AccessTime: snapshot.Time,
            ModTime:    snapshot.Time,
            ChangeTime: snapshot.Time,
            Mode:       os.ModeDir | 0555,
            Subtree:    snapshot.Tree,
        },
        inode: inode,
        cache: *newTreeCache(),
    }, nil
}

// open loads the directory entries from the repository on first use; it is a
// no-op when the entries have already been loaded.
func (d *dir) open(ctx context.Context) error {
    d.m.Lock()
    defer d.m.Unlock()
    if d.items != nil {
        return nil
    }
    debug.Log("open dir %v (%v)", d.node.Name, d.node.Subtree)
    tree, err := data.LoadTree(ctx, d.root.repo, *d.node.Subtree)
    if err != nil {
        debug.Log(" error loading tree %v: %v", d.node.Subtree, err)
        return unwrapCtxCanceled(err)
    }
    items := make(map[string]*data.Node)
    for _, n := range tree.Nodes {
        if ctx.Err() != nil {
            return ctx.Err()
        }
        nodes, err := replaceSpecialNodes(ctx, d.root.repo, n)
        if err != nil {
            debug.Log(" replaceSpecialNodes(%v) failed: %v", n, err)
            return err
        }
        for _, node := range nodes {
            items[cleanupNodeName(node.Name)] = node
        }
    }
    d.items = items
    return nil
}

// Attr returns the directory attributes from the stored node metadata.
func (d *dir) Attr(_ context.Context, a *fuse.Attr) error {
    debug.Log("Attr()")
    a.Inode = d.inode
    a.Mode = os.ModeDir | d.node.Mode
    if !d.root.cfg.OwnerIsRoot {
        a.Uid = d.node.UID
        a.Gid = d.node.GID
    }
    a.Atime = d.node.AccessTime
    a.Ctime = d.node.ChangeTime
    a.Mtime = d.node.ModTime
    a.Nlink = d.calcNumberOfLinks()
    return nil
}

// calcNumberOfLinks returns the hardlink count reported for the directory.
func (d *dir) calcNumberOfLinks() uint32 {
    // a directory d has 2 hardlinks + the number
    // of directories contained by d
    count := uint32(2)
    for _, node := range d.items {
        if node.Type == data.NodeTypeDir {
            count++
        }
    }
    return count
}

// ReadDirAll returns all entries of the directory, including "." and "..".
func (d *dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
    debug.Log("ReadDirAll()")
    err := d.open(ctx)
    if err != nil {
        return nil, err
    }
    ret := make([]fuse.Dirent, 0, len(d.items)+2)
    ret = append(ret, fuse.Dirent{
        Inode: d.inode,
        Name:  ".",
        Type:  fuse.DT_Dir,
    })
    ret = append(ret, fuse.Dirent{
        Inode: d.parentInode,
        Name:  "..",
        Type:  fuse.DT_Dir,
    })
    for _, node := range d.items {
        if ctx.Err() != nil {
            return nil, ctx.Err()
        }
        name := cleanupNodeName(node.Name)
        var typ fuse.DirentType
        switch node.Type {
        case data.NodeTypeDir:
            typ = fuse.DT_Dir
        case data.NodeTypeFile:
            typ = fuse.DT_File
        case data.NodeTypeSymlink:
            typ = fuse.DT_Link
        }
        ret = append(ret, fuse.Dirent{
            Inode: inodeFromNode(d.inode, node),
            Type:  typ,
            Name:  name,
        })
    }
    return ret, nil
}

// Lookup returns the named entry of the directory as a fuse node of the
// matching type.
func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
    debug.Log("Lookup(%v)", name)
    err := d.open(ctx)
    if err != nil {
        return nil, err
    }
    return d.cache.lookupOrCreate(name, func(forget forgetFn) (fs.Node, error) {
        node, ok := d.items[name]
        if !ok {
            debug.Log(" Lookup(%v) -> not found", name)
            return nil, syscall.ENOENT
        }
        inode := inodeFromNode(d.inode, node)
        switch node.Type {
        case data.NodeTypeDir:
            return newDir(d.root, forget, inode, d.inode, node)
        case data.NodeTypeFile:
            return newFile(d.root, forget, inode, node)
        case data.NodeTypeSymlink:
            return newLink(d.root, forget, inode, node)
        case data.NodeTypeDev, data.NodeTypeCharDev, data.NodeTypeFifo, data.NodeTypeSocket:
            return newOther(d.root, forget, inode, node)
        default:
            debug.Log(" node %v has unknown type %v", name, node.Type)
            return nil, syscall.ENOENT
        }
    })
}

// Listxattr lists the node's extended attribute names.
func (d *dir) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
    nodeToXattrList(d.node, req, resp)
    return nil
}

// Getxattr returns the value of a single extended attribute.
func (d *dir) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
    return nodeGetXattr(d.node, req, resp)
}

// Forget evicts this directory from the node cache.
func (d *dir) Forget() {
    d.forget()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/debug/stacktrace.go | internal/debug/stacktrace.go | package debug
import "runtime"
// DumpStacktrace returns the stack traces of all currently running
// goroutines as a string. The buffer is doubled until runtime.Stack reports
// that the complete dump fit.
func DumpStacktrace() string {
    size := 128 * 1024
    for {
        buf := make([]byte, size)
        n := runtime.Stack(buf, true)
        if n < size {
            return string(buf[:n])
        }
        size *= 2
    }
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/debug/log_test.go | internal/debug/log_test.go | package debug_test
import (
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
"testing"
)
// BenchmarkLogStatic measures the cost of a debug.Log call with a constant
// message (the common case; a near no-op when debug logging is disabled).
func BenchmarkLogStatic(b *testing.B) {
    for i := 0; i < b.N; i++ {
        debug.Log("Static string")
    }
}

// BenchmarkLogIDStr measures debug.Log with an ID argument formatted via %v.
func BenchmarkLogIDStr(b *testing.B) {
    id := restic.NewRandomID()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        debug.Log("id: %v", id)
    }
}

// BenchmarkLogIDString measures debug.Log with an ID argument formatted via %s.
func BenchmarkLogIDString(b *testing.B) {
    id := restic.NewRandomID()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        debug.Log("id: %s", id)
    }
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/debug/round_tripper.go | internal/debug/round_tripper.go | package debug
import (
"fmt"
"io"
"net/http"
"net/http/httputil"
"os"
"github.com/restic/restic/internal/errors"
)
// eofDetectRoundTripper wraps an http.RoundTripper and tracks whether each
// response body was read to EOF before being closed.
type eofDetectRoundTripper struct {
    http.RoundTripper
}

// eofDetectReader wraps a response body and records whether EOF was seen.
type eofDetectReader struct {
    eofSeen bool
    rd      io.ReadCloser
}

// Read forwards to the underlying reader and remembers a seen EOF.
func (rd *eofDetectReader) Read(p []byte) (n int, err error) {
    n, err = rd.rd.Read(p)
    if err == io.EOF {
        rd.eofSeen = true
    }
    return n, err
}

// Close reports (to stderr and the debug log) when the body is closed
// without having been fully drained — which prevents HTTP connection reuse —
// and then closes the underlying reader.
func (rd *eofDetectReader) Close() error {
    if !rd.eofSeen {
        buf, err := io.ReadAll(rd)
        msg := fmt.Sprintf("body not drained, %d bytes not read", len(buf))
        if err != nil {
            msg += fmt.Sprintf(", error: %v", err)
        }
        if len(buf) > 0 {
            // truncate the remaining body for the log message
            if len(buf) > 20 {
                buf = append(buf[:20], []byte("...")...)
            }
            msg += fmt.Sprintf(", body: %q", buf)
        }
        _, _ = fmt.Fprintln(os.Stderr, msg)
        Log("%s: %+v", msg, errors.New("Close()"))
    }
    return rd.rd.Close()
}

// RoundTrip executes the request and wraps the response body in an
// eofDetectReader.
func (tr eofDetectRoundTripper) RoundTrip(req *http.Request) (res *http.Response, err error) {
    res, err = tr.RoundTripper.RoundTrip(req)
    if res != nil && res.Body != nil {
        res.Body = &eofDetectReader{rd: res.Body}
    }
    return res, err
}

// loggingRoundTripper wraps an http.RoundTripper and writes all requests and
// responses to the debug log, with credential headers redacted.
type loggingRoundTripper struct {
    http.RoundTripper
}
func redactHeader(header http.Header) map[string][]string {
removedHeaders := make(map[string][]string)
for _, hdr := range []string{
"Authorization",
"X-Auth-Token", // Swift headers
"X-Auth-Key",
} {
origHeader, hasHeader := header[hdr]
if hasHeader {
removedHeaders[hdr] = origHeader
header[hdr] = []string{"**redacted**"}
}
}
return removedHeaders
}
func restoreHeader(header http.Header, origHeaders map[string][]string) {
for hdr, val := range origHeaders {
header[hdr] = val
}
}
// RoundTrip logs the outgoing request and the incoming response to the debug
// log. Credential headers are redacted before dumping and restored
// afterwards so that secrets never end up in the log.
func (tr loggingRoundTripper) RoundTrip(req *http.Request) (res *http.Response, err error) {
    // save original auth and redact it
    origHeaders := redactHeader(req.Header)
    trace, err := httputil.DumpRequestOut(req, false)
    if err != nil {
        Log("DumpRequestOut() error: %v\n", err)
    } else {
        Log("------------ HTTP REQUEST -----------\n%s", trace)
    }
    restoreHeader(req.Header, origHeaders)
    res, err = tr.RoundTripper.RoundTrip(req)
    if err != nil {
        Log("RoundTrip() returned error: %v", err)
    }
    if res != nil {
        // redact the response headers as well before dumping them
        origHeaders := redactHeader(res.Header)
        trace, err := httputil.DumpResponse(res, false)
        restoreHeader(res.Header, origHeaders)
        if err != nil {
            Log("DumpResponse() error: %v\n", err)
        } else {
            Log("------------ HTTP RESPONSE ----------\n%s", trace)
        }
    }
    return res, err
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/debug/round_tripper_release.go | internal/debug/round_tripper_release.go | //go:build !debug
package debug
import "net/http"
// RoundTripper returns a new http.RoundTripper which logs all requests (if
// debug is enabled). When debug is not enabled, upstream is returned
// unchanged; in release builds no EOF detection is performed either.
func RoundTripper(upstream http.RoundTripper) http.RoundTripper {
    if opts.isEnabled {
        // only use loggingRoundTripper if the debug log is configured
        return loggingRoundTripper{eofDetectRoundTripper{upstream}}
    }
    return upstream
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/debug/debug.go | internal/debug/debug.go | package debug
import (
"fmt"
"log"
"os"
"path"
"path/filepath"
"runtime"
"strings"
)
// opts holds the process-wide debug configuration, initialized once at
// startup from the DEBUG_LOG, DEBUG_FUNCS and DEBUG_FILES environment
// variables.
var opts struct {
    isEnabled bool
    logger    *log.Logger
    funcs     map[string]bool
    files     map[string]bool
}

// make sure that all the initialization happens before the init() functions
// are called, cf https://golang.org/ref/spec#Package_initialization
var _ = initDebug()

// initDebug reads the debug configuration from the environment and reports
// whether debug logging is enabled.
func initDebug() bool {
    initDebugLogger()
    initDebugTags()
    if opts.logger == nil && len(opts.funcs) == 0 && len(opts.files) == 0 {
        opts.isEnabled = false
        return false
    }
    opts.isEnabled = true
    fmt.Fprintf(os.Stderr, "debug enabled\n")
    return true
}

// initDebugLogger opens the file named by DEBUG_LOG (if set) in append mode
// and installs it as the debug log destination. The process exits if the
// file cannot be opened.
func initDebugLogger() {
    debugfile := os.Getenv("DEBUG_LOG")
    if debugfile == "" {
        return
    }
    fmt.Fprintf(os.Stderr, "debug log file %v\n", debugfile)
    f, err := os.OpenFile(debugfile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
    if err != nil {
        fmt.Fprintf(os.Stderr, "unable to open debug log file: %v\n", err)
        os.Exit(2)
    }
    opts.logger = log.New(f, "", log.LstdFlags)
}
func parseFilter(envname string, pad func(string) string) map[string]bool {
filter := make(map[string]bool)
env := os.Getenv(envname)
if env == "" {
return filter
}
for _, fn := range strings.Split(env, ",") {
t := pad(strings.TrimSpace(fn))
val := true
switch t[0] {
case '-':
val = false
t = t[1:]
case '+':
val = true
t = t[1:]
}
// test pattern
_, err := path.Match(t, "")
if err != nil {
fmt.Fprintf(os.Stderr, "error: invalid pattern %q: %v\n", t, err)
os.Exit(5)
}
filter[t] = val
}
return filter
}
// padFunc normalizes a function filter pattern from DEBUG_FUNCS. Function
// names (including the special value "all") are matched exactly as given, so
// no padding is required and the input is returned unchanged. The original
// implementation special-cased "all" but returned s in both branches.
func padFunc(s string) string {
    return s
}
// padFile normalizes a file filter pattern from DEBUG_FILES. The special
// value "all" is kept as is. A pattern without a directory part is prefixed
// with "*/", and one without a line number is suffixed with ":*", so that
// e.g. "foo.go" matches "somedir/foo.go:123".
func padFile(s string) string {
    if s == "all" {
        return s
    }
    hasDir := strings.Contains(s, "/")
    hasLine := strings.Contains(s, ":")
    if !hasDir {
        s = "*/" + s
    }
    if !hasLine {
        s += ":*"
    }
    return s
}
// initDebugTags parses the function and file filters for stderr debug output
// from the DEBUG_FUNCS and DEBUG_FILES environment variables.
func initDebugTags() {
    opts.funcs = parseFilter("DEBUG_FUNCS", padFunc)
    opts.files = parseFilter("DEBUG_FILES", padFile)
}
// goroutineNum returns the ID of the calling goroutine, parsed from the
// first line of runtime.Stack output ("goroutine N [...]").
// taken from https://github.com/VividCortex/trace
func goroutineNum() int {
    var buf [20]byte
    n := runtime.Stack(buf[:], false)
    var id int
    _, _ = fmt.Sscanf(string(buf[:n]), "goroutine %d ", &id)
    return id
}
// getPosition returns the function name, enclosing directory, file name and
// line number of the caller two stack frames up (i.e. the caller of Log).
// taken from https://github.com/VividCortex/trace
func getPosition() (fn, dir, file string, line int) {
    // skip 2: this function and Log itself
    pc, file, line, ok := runtime.Caller(2)
    if !ok {
        return "", "", "", 0
    }
    dirname, filename := filepath.Base(filepath.Dir(file)), filepath.Base(file)
    Func := runtime.FuncForPC(pc)
    return path.Base(Func.Name()), dirname, filename, line
}
// checkFilter reports whether key is enabled by filter. An exact entry takes
// precedence over glob patterns, and the special entry "all" enables every
// key that has no more specific entry.
func checkFilter(filter map[string]bool, key string) bool {
    // an exact match wins
    if enabled, ok := filter[key]; ok {
        return enabled
    }

    // then try glob patterns
    for pattern, enabled := range filter {
        if matched, _ := path.Match(pattern, key); matched {
            return enabled
        }
    }

    // finally the catch-all entry
    if enabled, ok := filter["all"]; ok && enabled {
        return true
    }

    return false
}
// Log prints a message to the debug log (if debug is enabled). The message
// is prefixed with the caller's position and goroutine ID; it is always
// written to the configured log file and additionally echoed to stderr when
// the caller matches the DEBUG_FILES or DEBUG_FUNCS filters.
func Log(f string, args ...interface{}) {
    if !opts.isEnabled {
        return
    }
    fn, dir, file, line := getPosition()
    goroutine := goroutineNum()
    // make sure the message ends with a newline
    if len(f) == 0 || f[len(f)-1] != '\n' {
        f += "\n"
    }
    type Shortener interface {
        Str() string
    }
    // replace arguments that offer a short string form (e.g. IDs)
    for i, item := range args {
        if shortener, ok := item.(Shortener); ok {
            args[i] = shortener.Str()
        }
    }
    pos := fmt.Sprintf("%s/%s:%d", dir, file, line)
    formatString := fmt.Sprintf("%s\t%s\t%d\t%s", pos, fn, goroutine, f)
    dbgprint := func() {
        fmt.Fprintf(os.Stderr, formatString, args...)
    }
    if opts.logger != nil {
        opts.logger.Printf(formatString, args...)
    }
    filename := fmt.Sprintf("%s/%s:%d", dir, file, line)
    if checkFilter(opts.files, filename) {
        dbgprint()
        return
    }
    if checkFilter(opts.funcs, fn) {
        dbgprint()
    }
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/debug/round_tripper_test.go | internal/debug/round_tripper_test.go | package debug
import (
"net/http"
"testing"
"github.com/restic/restic/internal/test"
)
// TestRedactHeader checks that redactHeader hides all credential headers and
// leaves other headers alone, and that restoreHeader restores exactly the
// headers that were present before redaction.
func TestRedactHeader(t *testing.T) {
    secretHeaders := []string{
        "Authorization",
        "X-Auth-Token",
        "X-Auth-Key",
    }
    header := make(http.Header)
    header["Authorization"] = []string{"123"}
    header["X-Auth-Token"] = []string{"1234"}
    header["X-Auth-Key"] = []string{"12345"}
    header["Host"] = []string{"my.host"}
    origHeaders := redactHeader(header)
    for _, hdr := range secretHeaders {
        test.Equals(t, "**redacted**", header[hdr][0])
    }
    test.Equals(t, "my.host", header["Host"][0])
    restoreHeader(header, origHeaders)
    test.Equals(t, "123", header["Authorization"][0])
    test.Equals(t, "1234", header["X-Auth-Token"][0])
    test.Equals(t, "12345", header["X-Auth-Key"][0])
    test.Equals(t, "my.host", header["Host"][0])
    // a header that was absent must stay absent after redact + restore
    delete(header, "X-Auth-Key")
    origHeaders = redactHeader(header)
    _, hasHeader := header["X-Auth-Key"]
    test.Assert(t, !hasHeader, "Unexpected header: %v", header["X-Auth-Key"])
    restoreHeader(header, origHeaders)
    _, hasHeader = header["X-Auth-Key"]
    test.Assert(t, !hasHeader, "Unexpected header: %v", header["X-Auth-Key"])
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/debug/testing.go | internal/debug/testing.go | package debug
import (
"log"
"os"
"testing"
)
// TestLogToStderr configures the debug log to write to stderr unless a debug
// log is already configured, and returns whether it changed the
// configuration (so callers know whether to call TestDisableLog afterwards).
func TestLogToStderr(_ testing.TB) bool {
    if opts.isEnabled {
        return false
    }
    opts.logger = log.New(os.Stderr, "", log.LstdFlags)
    opts.isEnabled = true
    return true
}

// TestDisableLog turns debug logging off again after TestLogToStderr.
func TestDisableLog(_ testing.TB) {
    opts.logger = nil
    opts.isEnabled = false
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/debug/round_tripper_debug.go | internal/debug/round_tripper_debug.go | //go:build debug
package debug
import "net/http"
// RoundTripper returns a new http.RoundTripper which logs all requests (if
// debug is enabled). In debug builds the EOF detector is always installed,
// even when the debug log itself is not configured.
func RoundTripper(upstream http.RoundTripper) http.RoundTripper {
    eofRoundTripper := eofDetectRoundTripper{upstream}
    if opts.isEnabled {
        // only use loggingRoundTripper if the debug log is configured
        return loggingRoundTripper{eofRoundTripper}
    }
    return eofRoundTripper
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/debug/doc.go | internal/debug/doc.go | // Package debug provides an infrastructure for logging debug information and breakpoints.
package debug
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/path_prefix.go | internal/fs/path_prefix.go | package fs
import (
"path/filepath"
)
// HasPathPrefix returns true if p is a subdir of (or a file within) base. It
// assumes a file system which is case sensitive. If the paths are not of the
// same type (one is relative, the other is absolute), false is returned.
func HasPathPrefix(base, p string) bool {
if filepath.VolumeName(base) != filepath.VolumeName(p) {
return false
}
// handle case when base and p are not of the same type
if filepath.IsAbs(base) != filepath.IsAbs(p) {
return false
}
base = filepath.Clean(base)
p = filepath.Clean(p)
if base == p {
return true
}
for {
dir := filepath.Dir(p)
if base == dir {
return true
}
if p == dir {
break
}
p = dir
}
return false
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/node_linux_test.go | internal/fs/node_linux_test.go | package fs
import (
"io/fs"
"strings"
"testing"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/errors"
rtest "github.com/restic/restic/internal/test"
)
// TestRestoreSymlinkTimestampsError checks that restoring timestamps on a
// nonexistent symlink fails with ErrNotExist and that the error message
// contains the offending path.
func TestRestoreSymlinkTimestampsError(t *testing.T) {
    d := t.TempDir()
    node := data.Node{Type: data.NodeTypeSymlink}
    err := nodeRestoreTimestamps(&node, d+"/nosuchfile")
    rtest.Assert(t, errors.Is(err, fs.ErrNotExist), "want ErrNotExist, got %q", err)
    rtest.Assert(t, strings.Contains(err.Error(), d), "filename not in %q", err)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/fs_local_vss_test.go | internal/fs/fs_local_vss_test.go | //go:build windows
package fs
import (
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"time"
"github.com/go-ole/go-ole"
"github.com/restic/restic/internal/options"
rtest "github.com/restic/restic/internal/test"
)
// matchStrings reports whether every pattern in ptrs matches the string at
// the same index in strs. Empty patterns are rejected, a length mismatch
// counts as no match, and an invalid regular expression causes a panic.
func matchStrings(ptrs []string, strs []string) bool {
    if len(ptrs) != len(strs) {
        return false
    }

    for i, pattern := range ptrs {
        if pattern == "" {
            return false
        }
        ok, err := regexp.MatchString(pattern, strs[i])
        if err != nil {
            panic(err)
        }
        if !ok {
            return false
        }
    }

    return true
}
// matchMap reports whether m contains exactly the keys listed in strs.
func matchMap(strs []string, m map[string]struct{}) bool {
    if len(strs) != len(m) {
        return false
    }

    for _, key := range strs {
        if _, found := m[key]; !found {
            return false
        }
    }

    return true
}
func TestVSSConfig(t *testing.T) {
type config struct {
excludeAllMountPoints bool
timeout time.Duration
provider string
}
setTests := []struct {
input options.Options
output config
}{
{
options.Options{
"vss.timeout": "6h38m42s",
"vss.provider": "Ms",
},
config{
timeout: 23922000000000,
provider: "Ms",
},
},
{
options.Options{
"vss.exclude-all-mount-points": "t",
"vss.provider": "{b5946137-7b9f-4925-af80-51abd60b20d5}",
},
config{
excludeAllMountPoints: true,
timeout: 120000000000,
provider: "{b5946137-7b9f-4925-af80-51abd60b20d5}",
},
},
{
options.Options{
"vss.exclude-all-mount-points": "0",
"vss.exclude-volumes": "",
"vss.timeout": "120s",
"vss.provider": "Microsoft Software Shadow Copy provider 1.0",
},
config{
timeout: 120000000000,
provider: "Microsoft Software Shadow Copy provider 1.0",
},
},
}
for i, test := range setTests {
t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
cfg, err := ParseVSSConfig(test.input)
if err != nil {
t.Fatal(err)
}
errorHandler := func(item string, err error) {
t.Fatalf("unexpected error (%v)", err)
}
messageHandler := func(msg string, args ...interface{}) {
t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args))
}
dst := NewLocalVss(errorHandler, messageHandler, cfg)
if dst.excludeAllMountPoints != test.output.excludeAllMountPoints ||
dst.excludeVolumes != nil || dst.timeout != test.output.timeout ||
dst.provider != test.output.provider {
t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, dst)
}
})
}
}
func TestParseMountPoints(t *testing.T) {
volumeMatch := regexp.MustCompile(`^\\\\\?\\Volume\{[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}\}\\$`)
// It's not a good idea to test functions based on getVolumeNameForVolumeMountPoint by calling
// getVolumeNameForVolumeMountPoint itself, but we have restricted test environment:
// cannot manage volumes and can only be sure that the mount point C:\ exists
sysVolume, err := getVolumeNameForVolumeMountPoint("C:")
if err != nil {
t.Fatal(err)
}
// We don't know a valid volume GUID path for c:\, but we'll at least check its format
if !volumeMatch.MatchString(sysVolume) {
t.Fatalf("invalid volume GUID path: %s", sysVolume)
}
// Changing the case and removing trailing backslash allows tests
// the equality of different ways of writing a volume name
sysVolumeMutated := strings.ToUpper(sysVolume[:len(sysVolume)-1])
sysVolumeMatch := strings.ToLower(sysVolume)
type check struct {
volume string
result bool
}
setTests := []struct {
input options.Options
output []string
checks []check
errors []string
}{
{
options.Options{
"vss.exclude-volumes": `c:;c:\;` + sysVolume + `;` + sysVolumeMutated,
},
[]string{
sysVolumeMatch,
},
[]check{
{`c:\`, false},
{`c:`, false},
{sysVolume, false},
{sysVolumeMutated, false},
},
[]string{},
},
{
options.Options{
"vss.exclude-volumes": `z:\nonexistent;c:;c:\windows\;\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`,
},
[]string{
sysVolumeMatch,
},
[]check{
{`c:\windows\`, true},
{`\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, true},
{`c:`, false},
{``, true},
},
[]string{
`failed to parse vss\.exclude-volumes \[z:\\nonexistent\]:.*`,
`failed to parse vss\.exclude-volumes \[c:\\windows\\\]:.*`,
`failed to parse vss\.exclude-volumes \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`,
`failed to get volume from mount point \[c:\\windows\\\]:.*`,
`failed to get volume from mount point \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`,
`failed to get volume from mount point \[\]:.*`,
},
},
}
for i, test := range setTests {
t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
cfg, err := ParseVSSConfig(test.input)
if err != nil {
t.Fatal(err)
}
var log []string
errorHandler := func(item string, err error) {
log = append(log, strings.TrimSpace(err.Error()))
}
messageHandler := func(msg string, args ...interface{}) {
t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args))
}
dst := NewLocalVss(errorHandler, messageHandler, cfg)
if !matchMap(test.output, dst.excludeVolumes) {
t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v",
test.output, dst.excludeVolumes)
}
for _, c := range test.checks {
if dst.isMountPointIncluded(c.volume) != c.result {
t.Fatalf(`wrong check: isMountPointIncluded("%s") != %v`, c.volume, c.result)
}
}
if !matchStrings(test.errors, log) {
t.Fatalf("wrong log, want:\n %#v\ngot:\n %#v", test.errors, log)
}
})
}
}
func TestParseProvider(t *testing.T) {
msProvider := ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}")
setTests := []struct {
provider string
id *ole.GUID
result string
}{
{
"",
ole.IID_NULL,
"",
},
{
"mS",
msProvider,
"",
},
{
"{B5946137-7b9f-4925-Af80-51abD60b20d5}",
msProvider,
"",
},
{
"Microsoft Software Shadow Copy provider 1.0",
msProvider,
"",
},
{
"{04560982-3d7d-4bbc-84f7-0712f833a28f}",
nil,
`invalid VSS provider "{04560982-3d7d-4bbc-84f7-0712f833a28f}"`,
},
{
"non-existent provider",
nil,
`invalid VSS provider "non-existent provider"`,
},
}
_ = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
for i, test := range setTests {
t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
id, err := getProviderID(test.provider)
if err != nil && id != nil {
t.Fatalf("err!=nil but id=%v", id)
}
if test.result != "" || err != nil {
var result string
if err != nil {
result = err.Error()
}
if test.result != result || test.result == "" {
t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.result, result)
}
} else if !ole.IsEqualGUID(id, test.id) {
t.Fatalf("wrong id, want:\n %s\ngot:\n %s", test.id.String(), id.String())
}
})
}
}
// TestVSSFS exercises the VSS-backed filesystem: it creates a snapshot,
// removes the original file and verifies that Lstat/OpenFile still serve the
// file's content and metadata from the snapshot. Skipped unless running on
// Windows with sufficient privileges.
func TestVSSFS(t *testing.T) {
	if runtime.GOOS != "windows" || HasSufficientPrivilegesForVSS() != nil {
		t.Skip("vss fs test can only be run on windows with admin privileges")
	}

	cfg, err := ParseVSSConfig(options.Options{})
	rtest.OK(t, err)

	errorHandler := func(item string, err error) {
		t.Fatalf("unexpected error (%v)", err)
	}
	messageHandler := func(msg string, args ...interface{}) {
		if strings.HasPrefix(msg, "creating VSS snapshot for") || strings.HasPrefix(msg, "successfully created snapshot") {
			return
		}
		// expand args with "..." so the format string receives the individual
		// arguments instead of the slice as a single value
		t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args...))
	}

	localVss := NewLocalVss(errorHandler, messageHandler, cfg)
	defer localVss.DeleteSnapshots()

	tempdir := t.TempDir()
	tempfile := filepath.Join(tempdir, "file")
	rtest.OK(t, os.WriteFile(tempfile, []byte("example"), 0o600))

	// trigger snapshot creation and
	// capture FI while file still exists (should already be within the snapshot)
	origFi, err := localVss.Lstat(tempfile)
	rtest.OK(t, err)

	// remove original file; further accesses must hit the snapshot
	rtest.OK(t, os.Remove(tempfile))

	lstatFi, err := localVss.Lstat(tempfile)
	rtest.OK(t, err)
	rtest.Equals(t, origFi.Mode, lstatFi.Mode)

	f, err := localVss.OpenFile(tempfile, os.O_RDONLY, false)
	rtest.OK(t, err)
	data, err := io.ReadAll(f)
	rtest.OK(t, err)
	rtest.Equals(t, "example", string(data), "unexpected file content")
	node, err := f.ToNode(false, t.Logf)
	rtest.OK(t, err)
	rtest.Equals(t, node.Mode, lstatFi.Mode)
	rtest.OK(t, f.Close())
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/file_unix_test.go | internal/fs/file_unix_test.go | //go:build unix
package fs
import (
"path/filepath"
"syscall"
"testing"
"github.com/restic/restic/internal/errors"
rtest "github.com/restic/restic/internal/test"
)
// TestReaddirnamesFifo checks that Readdirnames fails with ENOTDIR instead of
// blocking when pointed at a fifo rather than a directory.
func TestReaddirnamesFifo(t *testing.T) {
	// should not block when reading from a fifo instead of a directory
	dir := t.TempDir()
	fifoPath := filepath.Join(dir, "fifo")
	rtest.OK(t, mkfifo(fifoPath, 0o600))

	_, err := Readdirnames(&Local{}, fifoPath, 0)
	rtest.Assert(t, errors.Is(err, syscall.ENOTDIR), "unexpected error %v", err)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/priv_windows.go | internal/fs/priv_windows.go | //go:build windows
package fs
import (
"github.com/Microsoft/go-winio"
"github.com/restic/restic/internal/errors"
)
// processPrivileges lists the Windows process privileges that restic requests
// so that backup and restore can bypass ACLs and access security information.
var processPrivileges = []string{
	// seBackupPrivilege allows the application to bypass file and directory ACLs to back up files and directories.
	"SeBackupPrivilege",
	// seRestorePrivilege allows the application to bypass file and directory ACLs to restore files and directories.
	"SeRestorePrivilege",
	// seSecurityPrivilege allows read and write access to all SACLs.
	"SeSecurityPrivilege",
	// seTakeOwnershipPrivilege allows the application to take ownership of files and directories, regardless of the permissions set on them.
	"SeTakeOwnershipPrivilege",
}
// enableProcessPrivileges enables additional file system privileges for the
// current process. The returned error combines the per-privilege results.
func enableProcessPrivileges() error {
	// EnableProcessPrivileges may enable some but not all requested privileges,
	// yet its error lists all requested. Request one at a time so the combined
	// error only names what actually failed.
	errs := make([]error, 0, len(processPrivileges))
	for _, priv := range processPrivileges {
		errs = append(errs, winio.EnableProcessPrivileges([]string{priv}))
	}
	return errors.Join(errs...)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/const_windows.go | internal/fs/const_windows.go | //go:build windows
package fs
// TODO honor flags when opening files

// O_NOFOLLOW is currently only interpreted by FS.OpenFile in metadataOnly mode and ignored by OpenFile.
// The value of the constant is invented and only for use within this fs package. It must not be used in other contexts.
// It must not conflict with the other O_* values from go/src/syscall/types_windows.go
const O_NOFOLLOW int = 0x40000000

// O_DIRECTORY is a noop on Windows (accepted but ignored by this package).
const O_DIRECTORY int = 0
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/preallocate_test.go | internal/fs/preallocate_test.go | package fs
import (
"os"
"path"
"strconv"
"syscall"
"testing"
"github.com/restic/restic/internal/test"
)
// TestPreallocate runs PreallocateFile for several sizes and verifies that the
// file afterwards either has the requested size or has blocks allocated.
// Skips when the filesystem reports ENOTSUP.
func TestPreallocate(t *testing.T) {
	for _, size := range []int64{0, 1, 4096, 1024 * 1024} {
		t.Run(strconv.FormatInt(size, 10), func(t *testing.T) {
			dirpath := test.TempDir(t)

			wr, err := os.OpenFile(path.Join(dirpath, "test"), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
			test.OK(t, err)
			defer func() {
				test.OK(t, wr.Close())
			}()

			if err = PreallocateFile(wr, size); err == syscall.ENOTSUP {
				t.SkipNow()
			}
			test.OK(t, err)

			fi, err := wr.Stat()
			test.OK(t, err)
			efi := ExtendedStat(fi)
			test.Assert(t, efi.Size == size || efi.Blocks > 0, "Preallocated size of %v, got size %v block %v", size, efi.Size, efi.Blocks)
		})
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/priv_windows_test.go | internal/fs/priv_windows_test.go | //go:build windows
package fs
import (
"os"
"path/filepath"
"testing"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/test"
"golang.org/x/sys/windows"
)
// TestBackupPrivilegeBypassACL verifies that a file whose DACL denies read
// access can still be opened read-only via the standard library.
func TestBackupPrivilegeBypassACL(t *testing.T) {
	testPath := testGetRestrictedFilePath(t)

	// Read-only Open/OpenFile should automatically use FILE_FLAG_BACKUP_SEMANTICS since Go v1.20.
	f, err := os.Open(testPath)
	test.OK(t, errors.Wrapf(err, "failed to open file for reading: %s", testPath))
	test.OK(t, f.Close())
}
// TestRestorePrivilegeBypassACL verifies that a file whose DACL denies write
// access can still be opened for writing when FILE_FLAG_BACKUP_SEMANTICS is
// passed explicitly (presumably relying on the backup/restore privileges this
// package enables at init — confirm when changing privilege handling).
func TestRestorePrivilegeBypassACL(t *testing.T) {
	testPath := testGetRestrictedFilePath(t)

	// Writable OpenFile needs explicit FILE_FLAG_BACKUP_SEMANTICS.
	// Go with issue #73676 merged would allow: os.OpenFile(testPath, os.O_WRONLY|windows.O_FILE_FLAG_BACKUP_SEMANTICS, 0)
	utf16Path := windows.StringToUTF16Ptr(testPath)
	handle, err := windows.CreateFile(utf16Path, windows.GENERIC_WRITE, 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
	test.OK(t, errors.Wrapf(err, "failed to open file for writing: %s", testPath))
	test.OK(t, windows.Close(handle))
}
// testGetRestrictedFilePath creates a temporary file whose DACL denies
// read/write/execute to Everyone (delete stays allowed so cleanup works) and
// returns its path. Skips the test when not running as administrator.
func testGetRestrictedFilePath(t *testing.T) string {
	// Non-admin is unlikely to have needed privileges.
	isAdmin, err := isAdmin()
	test.OK(t, errors.Wrap(err, "failed to check if user is admin"))
	if !isAdmin {
		t.Skip("not running with administrator access, skipping")
	}

	// Create temporary file.
	tempDir := t.TempDir()
	testPath := filepath.Join(tempDir, "testfile.txt")
	testfile, err := os.Create(testPath)
	test.OK(t, errors.Wrapf(err, "failed to create temporary file: %s", testPath))
	test.OK(t, testfile.Close())

	// Set restricted permissions.
	// Deny file read/write/execute to "Everyone" (all accounts); allow delete to "Everyone".
	const sddl = "D:PAI(D;;FRFWFX;;;WD)(A;;SD;;;WD)"
	sd, err := windows.SecurityDescriptorFromString(sddl)
	// errors.Wrap does not format; use Wrapf so the %s verb is actually filled in
	test.OK(t, errors.Wrapf(err, "failed to parse SDDL: %s", sddl))
	dacl, _, err := sd.DACL()
	test.OK(t, errors.Wrap(err, "failed to extract SD DACL"))
	err = windows.SetNamedSecurityInfo(testPath, windows.SE_FILE_OBJECT, windows.DACL_SECURITY_INFORMATION|windows.PROTECTED_DACL_SECURITY_INFORMATION, nil, nil, dacl, nil)
	test.OK(t, errors.Wrapf(err, "failed to set SD: %s", testPath))

	return testPath
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/file.go | internal/fs/file.go | package fs
import (
"fmt"
"os"
"runtime"
)
// MkdirAll creates a directory named path, along with any necessary parents,
// and returns nil, or else returns an error. The permission bits perm are used
// for all directories that MkdirAll creates. If path is already a directory,
// MkdirAll does nothing and returns nil.
// The path is normalized with fixpath before the call.
func MkdirAll(path string, perm os.FileMode) error {
	return os.MkdirAll(fixpath(path), perm)
}
// Remove removes the named file or directory.
// If there is an error, it will be of type *PathError.
// The name is normalized with fixpath before the call.
func Remove(name string) error {
	return os.Remove(fixpath(name))
}
// RemoveAll removes path and any children it contains.
// It removes everything it can but returns the first error
// it encounters. If the path does not exist, RemoveAll
// returns nil (no error).
// The path is normalized with fixpath before the call.
func RemoveAll(path string) error {
	return os.RemoveAll(fixpath(path))
}
// Link creates newname as a hard link to oldname.
// If there is an error, it will be of type *LinkError.
// Both names are normalized with fixpath before the call.
func Link(oldname, newname string) error {
	return os.Link(fixpath(oldname), fixpath(newname))
}
// Lstat returns the FileInfo structure describing the named file.
// If the file is a symbolic link, the returned FileInfo
// describes the symbolic link. Lstat makes no attempt to follow the link.
// If there is an error, it will be of type *PathError.
// The name is normalized with fixpath before the call.
func Lstat(name string) (os.FileInfo, error) {
	return os.Lstat(fixpath(name))
}
// OpenFile is the generalized open call; most users will use Open
// or Create instead. It opens the named file with specified flag
// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
// methods on the returned File can be used for I/O.
// If there is an error, it will be of type *PathError.
func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) {
	if runtime.GOOS == "windows" {
		// O_NOFOLLOW is a value invented by this package (see const_windows.go)
		// that the Windows syscall layer does not understand; strip it here.
		flag &^= O_NOFOLLOW
	}
	return os.OpenFile(fixpath(name), flag, perm)
}
// IsAccessDenied checks if the error is due to permission error.
// It simply delegates to os.IsPermission.
func IsAccessDenied(err error) bool {
	return os.IsPermission(err)
}
// ResetPermissions resets the permissions of the file at the specified path to
// the default 0600 (owner read/write). The path is normalized with fixpath
// before the call.
func ResetPermissions(path string) error {
	// Set the default file permissions; the redundant if/return-nil wrapper
	// around os.Chmod was removed.
	return os.Chmod(fixpath(path), 0600)
}
// Readdirnames returns a list of file in a directory. Flags are passed to fs.OpenFile.
// O_RDONLY and O_DIRECTORY are implied.
func Readdirnames(filesystem FS, dir string, flags int) ([]string, error) {
	dirHandle, err := filesystem.OpenFile(dir, O_RDONLY|O_DIRECTORY|flags, false)
	if err != nil {
		return nil, fmt.Errorf("openfile for readdirnames failed: %w", err)
	}

	names, err := dirHandle.Readdirnames(-1)
	if err != nil {
		// best effort close; the read error is the one worth reporting
		_ = dirHandle.Close()
		return nil, fmt.Errorf("readdirnames %v failed: %w", dir, err)
	}

	if err := dirHandle.Close(); err != nil {
		return nil, err
	}
	return names, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/stat_windows_test.go | internal/fs/stat_windows_test.go | package fs_test
import (
iofs "io/fs"
"os"
"path/filepath"
"syscall"
"testing"
"time"
"github.com/restic/restic/internal/fs"
rtest "github.com/restic/restic/internal/test"
"golang.org/x/sys/windows"
)
// TestRecallOnDataAccessRealFile checks that a plain on-disk file reports
// RecallOnDataAccess == false without error.
func TestRecallOnDataAccessRealFile(t *testing.T) {
	// create a temp file for testing
	dir := rtest.TempDir(t)
	name := filepath.Join(dir, "regular-file")
	rtest.OK(t, os.WriteFile(name, []byte("foobar"), 0640))

	fi, err := os.Stat(name)
	rtest.OK(t, err)

	xs := fs.ExtendedStat(fi)

	// ensure we can check attrs without error
	recall, err := xs.RecallOnDataAccess()
	rtest.Assert(t, err == nil, "err should be nil", err)
	rtest.Assert(t, recall == false, "RecallOnDataAccess should be false")
}
// mockFileInfo implements os.FileInfo for mocking file attributes
type mockFileInfo struct {
FileAttributes uint32
}
func (m mockFileInfo) IsDir() bool {
return false
}
func (m mockFileInfo) ModTime() time.Time {
return time.Now()
}
func (m mockFileInfo) Mode() iofs.FileMode {
return 0
}
func (m mockFileInfo) Name() string {
return "test"
}
func (m mockFileInfo) Size() int64 {
return 0
}
func (m mockFileInfo) Sys() any {
return &syscall.Win32FileAttributeData{
FileAttributes: m.FileAttributes,
}
}
// TestRecallOnDataAccessMockCloudFile checks that the RECALL_ON_DATA_ACCESS
// attribute is detected on a mocked cloud-tiered file.
func TestRecallOnDataAccessMockCloudFile(t *testing.T) {
	xs := fs.ExtendedStat(mockFileInfo{FileAttributes: windows.FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS})

	recall, err := xs.RecallOnDataAccess()
	rtest.Assert(t, err == nil, "err should be nil", err)
	rtest.Assert(t, recall, "RecallOnDataAccess should be true")
}
// TestRecallOnDataAccessMockRegularFile checks that an ordinary attribute set
// (ARCHIVE) does not trigger RecallOnDataAccess.
func TestRecallOnDataAccessMockRegularFile(t *testing.T) {
	xs := fs.ExtendedStat(mockFileInfo{FileAttributes: windows.FILE_ATTRIBUTE_ARCHIVE})

	recall, err := xs.RecallOnDataAccess()
	rtest.Assert(t, err == nil, "err should be nil", err)
	rtest.Assert(t, recall == false, "RecallOnDataAccess should be false")
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/sd_windows.go | internal/fs/sd_windows.go | package fs
import (
"fmt"
"sync/atomic"
"syscall"
"unsafe"
"github.com/restic/restic/internal/errors"
"golang.org/x/sys/windows"
)
// lowerPrivileges is switched to true permanently once an operation fails with
// ERROR_PRIVILEGE_NOT_HELD; from then on only the non-admin code paths are used.
var lowerPrivileges atomic.Bool

// Flags for backup with admin permissions. Includes protection flags for GET operations.
var highBackupSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.SACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.BACKUP_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION | windows.PROTECTED_SACL_SECURITY_INFORMATION | windows.UNPROTECTED_DACL_SECURITY_INFORMATION | windows.UNPROTECTED_SACL_SECURITY_INFORMATION

// Flags for restore with admin permissions. Base flags without protection flags for SET operations
// (the protection flags are added per-file based on the descriptor's control bits).
var highRestoreSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.SACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.BACKUP_SECURITY_INFORMATION

// Flags for backup without admin permissions. If there are no admin permissions, only the current user's owner, group and DACL will be backed up.
var lowBackupSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION | windows.UNPROTECTED_DACL_SECURITY_INFORMATION

// Flags for restore without admin permissions. If there are no admin permissions, only the DACL from the SD can be restored and owner and group will be set based on the current user.
var lowRestoreSecurityFlags windows.SECURITY_INFORMATION = windows.DACL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION
// getSecurityDescriptor takes the path of the file and returns the SecurityDescriptor for the file.
// This needs admin permissions or SeBackupPrivilege for getting the full SD.
// If there are no admin permissions, only the current user's owner, group and DACL will be got.
// A nil descriptor together with a nil error means the filesystem does not
// support security descriptors (ERROR_NOT_SUPPORTED).
func getSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err error) {
	var sd *windows.SECURITY_DESCRIPTOR

	// store original value to avoid unrelated changes in the error check
	// (another goroutine may flip lowerPrivileges concurrently)
	useLowerPrivileges := lowerPrivileges.Load()
	if useLowerPrivileges {
		sd, err = getNamedSecurityInfoLow(filePath)
	} else {
		sd, err = getNamedSecurityInfoHigh(filePath)
		// Fallback to the low privilege version when receiving an access denied error.
		// For some reason the ERROR_PRIVILEGE_NOT_HELD error is not returned for removable media
		// but instead an access denied error is returned. Workaround that by just retrying with
		// the low privilege version, but don't switch privileges as we cannot distinguish this
		// case from actual access denied errors.
		// see https://github.com/restic/restic/issues/5003#issuecomment-2452314191 for details
		if err != nil && isAccessDeniedError(err) {
			sd, err = getNamedSecurityInfoLow(filePath)
		}
	}
	if err != nil {
		if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) {
			// If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges.
			lowerPrivileges.Store(true)
			// retry; the recursion terminates because lowerPrivileges is now true
			return getSecurityDescriptor(filePath)
		} else if errors.Is(err, windows.ERROR_NOT_SUPPORTED) {
			return nil, nil
		} else {
			return nil, fmt.Errorf("get named security info failed with: %w", err)
		}
	}

	sdBytes, err := securityDescriptorStructToBytes(sd)
	if err != nil {
		return nil, fmt.Errorf("convert security descriptor to bytes failed: %w", err)
	}
	return &sdBytes, nil
}
// setSecurityDescriptor sets the SecurityDescriptor for the file at the specified path.
// This needs admin permissions or SeRestorePrivilege, SeSecurityPrivilege and SeTakeOwnershipPrivilege
// for setting the full SD.
// If there are no admin permissions/required privileges, only the DACL from the SD can be set and
// owner and group will be set based on the current user.
func setSecurityDescriptor(filePath string, securityDescriptor *[]byte) error {
	// Set the security descriptor on the file
	sd, err := securityDescriptorBytesToStruct(*securityDescriptor)
	if err != nil {
		return fmt.Errorf("error converting bytes to security descriptor: %w", err)
	}
	// Extract each component individually; a component that cannot be read is
	// passed as nil so that SetNamedSecurityInfo leaves it untouched.
	owner, _, err := sd.Owner()
	if err != nil {
		//Do not set partial values.
		owner = nil
	}
	group, _, err := sd.Group()
	if err != nil {
		//Do not set partial values.
		group = nil
	}
	dacl, _, err := sd.DACL()
	if err != nil {
		//Do not set partial values.
		dacl = nil
	}
	sacl, _, err := sd.SACL()
	if err != nil {
		//Do not set partial values.
		sacl = nil
	}

	// Get the control flags from the original security descriptor
	// (needed to restore the DACL/SACL protection-from-inheritance state).
	control, _, err := sd.Control()
	if err != nil {
		// This is unlikely to fail if the sd is valid, but handle it.
		return fmt.Errorf("could not get security descriptor control flags: %w", err)
	}

	// store original value to avoid unrelated changes in the error check
	useLowerPrivileges := lowerPrivileges.Load()
	if useLowerPrivileges {
		err = setNamedSecurityInfoLow(filePath, dacl, control)
	} else {
		err = setNamedSecurityInfoHigh(filePath, owner, group, dacl, sacl, control)
		// See corresponding fallback in getSecurityDescriptor for an explanation
		if err != nil && isAccessDeniedError(err) {
			err = setNamedSecurityInfoLow(filePath, dacl, control)
		}
	}

	if err != nil {
		if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) {
			// If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges.
			lowerPrivileges.Store(true)
			// retry; the recursion terminates because lowerPrivileges is now true
			return setSecurityDescriptor(filePath, securityDescriptor)
		} else {
			return fmt.Errorf("set named security info failed with: %w", err)
		}
	}
	return nil
}
// getNamedSecurityInfoHigh gets the higher level SecurityDescriptor which requires admin permissions.
// The path is normalized with fixpath before the call.
func getNamedSecurityInfoHigh(filePath string) (*windows.SECURITY_DESCRIPTOR, error) {
	return windows.GetNamedSecurityInfo(fixpath(filePath), windows.SE_FILE_OBJECT, highBackupSecurityFlags)
}

// getNamedSecurityInfoLow gets the lower level SecurityDescriptor which requires no admin permissions.
// The path is normalized with fixpath before the call.
func getNamedSecurityInfoLow(filePath string) (*windows.SECURITY_DESCRIPTOR, error) {
	return windows.GetNamedSecurityInfo(fixpath(filePath), windows.SE_FILE_OBJECT, lowBackupSecurityFlags)
}
// setNamedSecurityInfoHigh sets the higher level SecurityDescriptor which requires admin permissions.
// The DACL/SACL protection bits of the original descriptor's control flags are
// translated into the matching (UN)PROTECTED_* request flags.
func setNamedSecurityInfoHigh(filePath string, owner *windows.SID, group *windows.SID, dacl *windows.ACL, sacl *windows.ACL, control windows.SECURITY_DESCRIPTOR_CONTROL) error {
	flags := highRestoreSecurityFlags

	// Preserve the DACL protection state: protected blocks inheritance,
	// explicitly unprotected re-enables it.
	if control&windows.SE_DACL_PROTECTED != 0 {
		flags |= windows.PROTECTED_DACL_SECURITY_INFORMATION
	} else {
		flags |= windows.UNPROTECTED_DACL_SECURITY_INFORMATION
	}

	// Do the same for the SACL for completeness.
	if control&windows.SE_SACL_PROTECTED != 0 {
		flags |= windows.PROTECTED_SACL_SECURITY_INFORMATION
	} else {
		flags |= windows.UNPROTECTED_SACL_SECURITY_INFORMATION
	}

	return windows.SetNamedSecurityInfo(fixpath(filePath), windows.SE_FILE_OBJECT, flags, owner, group, dacl, sacl)
}
// setNamedSecurityInfoLow sets the lower level SecurityDescriptor which requires no admin permissions.
// Only the DACL is written; owner, group and SACL are left untouched.
func setNamedSecurityInfoLow(filePath string, dacl *windows.ACL, control windows.SECURITY_DESCRIPTOR_CONTROL) error {
	flags := lowRestoreSecurityFlags

	// Preserve the DACL protection state: protected blocks inheritance,
	// explicitly unprotected re-enables it.
	if control&windows.SE_DACL_PROTECTED != 0 {
		flags |= windows.PROTECTED_DACL_SECURITY_INFORMATION
	} else {
		flags |= windows.UNPROTECTED_DACL_SECURITY_INFORMATION
	}

	return windows.SetNamedSecurityInfo(fixpath(filePath), windows.SE_FILE_OBJECT, flags, nil, nil, dacl, nil)
}
// isHandlePrivilegeNotHeldError checks if the error is ERROR_PRIVILEGE_NOT_HELD
func isHandlePrivilegeNotHeldError(err error) bool {
// Use a type assertion to check if the error is of type syscall.Errno
if errno, ok := err.(syscall.Errno); ok {
// Compare the error code to the expected value
return errno == windows.ERROR_PRIVILEGE_NOT_HELD
}
return false
}
// isAccessDeniedError checks if the error is ERROR_ACCESS_DENIED
func isAccessDeniedError(err error) bool {
if errno, ok := err.(syscall.Errno); ok {
// Compare the error code to the expected value
return errno == windows.ERROR_ACCESS_DENIED
}
return false
}
// securityDescriptorBytesToStruct converts the security descriptor bytes representation
// into a pointer to windows SECURITY_DESCRIPTOR.
// The returned struct aliases the input slice (no copy); sd must stay alive
// and unmodified while the result is in use.
func securityDescriptorBytesToStruct(sd []byte) (*windows.SECURITY_DESCRIPTOR, error) {
	// reject inputs too small to even hold the fixed-size header
	if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l {
		return nil, fmt.Errorf("securityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE)
	}
	s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0]))
	return s, nil
}
// securityDescriptorStructToBytes converts the pointer to windows SECURITY_DESCRIPTOR
// into a security descriptor bytes representation.
// The returned slice aliases the descriptor's memory (no copy is made).
func securityDescriptorStructToBytes(sd *windows.SECURITY_DESCRIPTOR) ([]byte, error) {
	b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length())
	return b, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/setflags_other.go | internal/fs/setflags_other.go | //go:build !linux
package fs
import "os"
// OS-specific replacements of setFlags can set file status flags
// that improve I/O performance.
// On all platforms except Linux this is a no-op.
func setFlags(_ *os.File) error {
	return nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/vss.go | internal/fs/vss.go | //go:build !windows
package fs
import (
"time"
"github.com/restic/restic/internal/errors"
)
// MountPoint is a dummy for non-windows platforms to let client code compile.
type MountPoint struct {
}

// IsSnapshotted is true if this mount point was snapshotted successfully.
// On non-windows platforms this is always false.
func (p *MountPoint) IsSnapshotted() bool {
	return false
}

// GetSnapshotDeviceObject returns root path to access the snapshot files and folders.
// On non-windows platforms this is always the empty string.
func (p *MountPoint) GetSnapshotDeviceObject() string {
	return ""
}
// VssSnapshot is a dummy for non-windows platforms to let client code compile.
type VssSnapshot struct {
	// mountPointInfo mirrors the field of the Windows implementation; it is
	// never populated here.
	mountPointInfo map[string]MountPoint
}

// HasSufficientPrivilegesForVSS returns true if the user is allowed to use VSS.
// On non-windows platforms it always returns an error.
func HasSufficientPrivilegesForVSS() error {
	return errors.New("VSS snapshots are only supported on windows")
}

// getVolumeNameForVolumeMountPoint add trailing backslash to input parameter
// and calls the equivalent windows api.
// On non-windows platforms it returns the mount point unchanged.
func getVolumeNameForVolumeMountPoint(mountPoint string) (string, error) {
	return mountPoint, nil
}

// NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't
// finish within the timeout an error is returned.
// On non-windows platforms it always fails.
func NewVssSnapshot(_ string,
	_ string, _ time.Duration, _ VolumeFilter, _ ErrorHandler) (VssSnapshot, error) {
	return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows")
}

// Delete deletes the created snapshot. A no-op on non-windows platforms.
func (p *VssSnapshot) Delete() error {
	return nil
}

// GetSnapshotDeviceObject returns root path to access the snapshot files
// and folders. Always empty on non-windows platforms.
func (p *VssSnapshot) GetSnapshotDeviceObject() string {
	return ""
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/file_windows_test.go | internal/fs/file_windows_test.go | package fs_test
import (
"errors"
"os"
"testing"
"github.com/restic/restic/internal/fs"
rtest "github.com/restic/restic/internal/test"
)
// TestTempFile checks that fs.TempFile creates distinct files with
// non-colliding names and that the files disappear from disk once closed
// (delete-on-close semantics).
func TestTempFile(t *testing.T) {
	// create two temp files at the same time to check that the
	// collision avoidance works
	f, err := fs.TempFile("", "test")
	// check the error before touching f: on failure f is nil and f.Name()
	// would panic instead of failing the test cleanly
	rtest.OK(t, err)
	fn := f.Name()
	f2, err := fs.TempFile("", "test")
	rtest.OK(t, err)
	fn2 := f2.Name()
	rtest.Assert(t, fn != fn2, "filenames don't differ %s", fn)

	// both files must exist while open
	_, err = os.Stat(fn)
	rtest.OK(t, err)
	_, err = os.Stat(fn2)
	rtest.OK(t, err)

	rtest.OK(t, f.Close())
	rtest.OK(t, f2.Close())

	// and must be gone after closing
	_, err = os.Stat(fn)
	rtest.Assert(t, errors.Is(err, os.ErrNotExist), "err %s", err)
	_, err = os.Stat(fn2)
	rtest.Assert(t, errors.Is(err, os.ErrNotExist), "err %s", err)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/node_freebsd.go | internal/fs/node_freebsd.go | //go:build freebsd
package fs
import (
"os"
"syscall"
)
// nodeRestoreSymlinkTimestamps is a no-op on FreeBSD: symlink timestamps are
// not restored. NOTE(review): presumably the required syscall support is not
// used on this platform — confirm before relying on restored symlink times.
func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error {
	return nil
}
func mknod(path string, mode uint32, dev uint64) error {
err := syscall.Mknod(path, mode, dev)
if err != nil {
err = &os.PathError{Op: "mknod", Path: path, Err: err}
}
return err
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/fs_local.go | internal/fs/fs_local.go | package fs
import (
"os"
"path/filepath"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
)
// init tries to enable additional process privileges at package load time.
// Failures are only logged: restic can still operate with reduced privileges.
func init() {
	if err := enableProcessPrivileges(); err != nil {
		debug.Log("error enabling privileges: %v", err)
	}
}
// Local is the local file system. Most methods are just passed on to the stdlib.
type Local struct{}

// statically ensure that Local implements FS.
var _ FS = &Local{}
// VolumeName returns leading volume name. Given "C:\foo\bar" it returns "C:"
// on Windows. Given "\\host\share\foo" it returns "\\host\share". On other
// platforms it returns "".
func (fs Local) VolumeName(path string) string {
	return filepath.VolumeName(path)
}

// OpenFile opens a file or directory for reading.
//
// If metadataOnly is set, an implementation MUST return a File object for
// arbitrary file types including symlinks. The implementation may internally use
// the given file path or a file handle. In particular, an implementation may
// delay actually accessing the underlying filesystem.
//
// Only the O_NOFOLLOW and O_DIRECTORY flags are supported.
func (fs Local) OpenFile(name string, flag int, metadataOnly bool) (File, error) {
	return newLocalFile(name, flag, metadataOnly)
}

// Lstat returns the FileInfo structure describing the named file.
// If the file is a symbolic link, the returned FileInfo
// describes the symbolic link. Lstat makes no attempt to follow the link.
// If there is an error, it will be of type *PathError.
// The name is normalized with fixpath before the call.
func (fs Local) Lstat(name string) (*ExtendedFileInfo, error) {
	fi, err := os.Lstat(fixpath(name))
	if err != nil {
		return nil, err
	}
	return extendedStat(fi), nil
}

// Join joins any number of path elements into a single path, adding a
// Separator if necessary. Join calls Clean on the result; in particular, all
// empty strings are ignored. On Windows, the result is a UNC path if and only
// if the first path element is a UNC path.
func (fs Local) Join(elem ...string) string {
	return filepath.Join(elem...)
}

// Separator returns the OS and FS dependent separator for dirs/subdirs/files.
func (fs Local) Separator() string {
	return string(filepath.Separator)
}

// IsAbs reports whether the path is absolute.
func (fs Local) IsAbs(path string) bool {
	return filepath.IsAbs(path)
}

// Abs returns an absolute representation of path. If the path is not absolute
// it will be joined with the current working directory to turn it into an
// absolute path. The absolute path name for a given file is not guaranteed to
// be unique. Abs calls Clean on the result.
func (fs Local) Abs(path string) (string, error) {
	return filepath.Abs(path)
}

// Clean returns the cleaned path. For details, see filepath.Clean.
func (fs Local) Clean(p string) string {
	return filepath.Clean(p)
}

// Base returns the last element of path.
func (fs Local) Base(path string) string {
	return filepath.Base(path)
}

// Dir returns path without the last element.
func (fs Local) Dir(path string) string {
	return filepath.Dir(path)
}
// localFile implements the File interface on top of the local filesystem.
// In metadata-only mode no os.File is opened and f stays nil.
type localFile struct {
	name string            // file name as passed to OpenFile (not fixpath'ed)
	flag int               // open flags, kept for MakeReadable and cacheFI
	f    *os.File          // nil while in metadata-only mode
	fi   *ExtendedFileInfo // cached metadata, filled lazily by cacheFI
}

// See the File interface for a description of each method
var _ File = &localFile{}
// newLocalFile returns a localFile for name. Unless metadataOnly is set, the
// underlying file is opened immediately; otherwise only name and flags are
// recorded and the filesystem is not touched yet.
func newLocalFile(name string, flag int, metadataOnly bool) (*localFile, error) {
	lf := &localFile{name: name, flag: flag}
	if !metadataOnly {
		f, err := os.OpenFile(fixpath(name), flag, 0)
		if err != nil {
			return nil, err
		}
		// best-effort performance tweak, errors are deliberately ignored
		_ = setFlags(f)
		lf.f = f
	}
	return lf, nil
}
// MakeReadable reopens the file for reading. It must only be called while in
// metadata-only mode (f.f == nil); calling it on an already-open file panics.
func (f *localFile) MakeReadable() error {
	if f.f != nil {
		panic("file is already readable")
	}

	newF, err := newLocalFile(f.name, f.flag, false)
	if err != nil {
		return err
	}
	// replace state and also reset cached FileInfo
	*f = *newF
	return nil
}
// cacheFI fills the cached ExtendedFileInfo on first use; later calls are
// no-ops. With an open handle it stats the handle; in metadata-only mode it
// uses Lstat or Stat depending on O_NOFOLLOW.
func (f *localFile) cacheFI() error {
	if f.fi != nil {
		return nil
	}

	var fi os.FileInfo
	var err error
	switch {
	case f.f != nil:
		fi, err = f.f.Stat()
	case f.flag&O_NOFOLLOW != 0:
		// must not follow symlinks
		fi, err = os.Lstat(f.name)
	default:
		fi, err = os.Stat(f.name)
	}
	if err != nil {
		return err
	}
	f.fi = extendedStat(fi)
	return nil
}
// Stat returns the (lazily cached) extended metadata for the file.
func (f *localFile) Stat() (*ExtendedFileInfo, error) {
	err := f.cacheFI()
	// the call to cacheFI MUST happen before reading from f.fi
	return f.fi, err
}

// ToNode converts the file's metadata into a data.Node.
func (f *localFile) ToNode(ignoreXattrListError bool, warnf func(format string, args ...any)) (*data.Node, error) {
	if err := f.cacheFI(); err != nil {
		return nil, err
	}
	return nodeFromFileInfo(f.name, f.fi, ignoreXattrListError, warnf)
}
// Read reads from the open file. Must not be called in metadata-only mode.
func (f *localFile) Read(p []byte) (n int, err error) {
	return f.f.Read(p)
}

// Readdirnames returns up to n entry names from the opened directory.
func (f *localFile) Readdirnames(n int) ([]string, error) {
	return f.f.Readdirnames(n)
}

// Close closes the underlying file if one was opened; in metadata-only mode
// it is a no-op.
func (f *localFile) Close() error {
	if f.f != nil {
		return f.f.Close()
	}
	return nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/stat.go | internal/fs/stat.go | package fs
import (
"os"
"time"
)
// ExtendedFileInfo is an extended stat_t, filled with attributes that are
// supported by most operating systems. Platform-specific code may retain the
// value of os.FileInfo.Sys() in the unexported sys field.
type ExtendedFileInfo struct {
	Name string      // base name of the file
	Mode os.FileMode // file mode bits
	DeviceID uint64  // ID of device containing the file
	Inode uint64     // Inode number
	Links uint64     // Number of hard links
	UID uint32       // owner user ID
	GID uint32       // owner group ID
	Device uint64    // Device ID (if this is a device file)
	BlockSize int64  // block size for filesystem IO
	Blocks int64     // number of allocated filesystem blocks
	Size int64       // file size in byte
	AccessTime time.Time // last access time stamp
	ModTime time.Time    // last (content) modification time stamp
	ChangeTime time.Time // last status change time stamp
	//nolint:unused // only used on Windows/Darwin
	sys any // Value returned by os.FileInfo.Sys()
}
// ExtendedStat returns an ExtendedFileInfo constructed from the os.FileInfo.
// It panics when fi is nil.
func ExtendedStat(fi os.FileInfo) *ExtendedFileInfo {
	if fi != nil {
		return extendedStat(fi)
	}
	panic("os.FileInfo is nil")
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/preallocate_linux.go | internal/fs/preallocate_linux.go | package fs
import (
"os"
"syscall"
"golang.org/x/sys/unix"
)
// PreallocateFile reserves size bytes of storage for wr so subsequent writes
// do not need to grow the file. A size of zero or less is a no-op.
func PreallocateFile(wr *os.File, size int64) error {
	if size <= 0 {
		return nil
	}
	// int fallocate(int fd, int mode, off_t offset, off_t len)
	// use mode = 0 to also change the file size
	return ignoringEINTR(func() error { return unix.Fallocate(int(wr.Fd()), 0, 0, size) })
}
// ignoringEINTR makes a function call and repeats it if it returns
// an EINTR error.
// copied from /usr/lib/go/src/internal/poll/fd_posix.go of go 1.23.1
func ignoringEINTR(fn func() error) error {
	for {
		if err := fn(); err != syscall.EINTR {
			return err
		}
	}
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/node.go | internal/fs/node.go | package fs
import (
"fmt"
"os"
"os/user"
"strconv"
"sync"
"syscall"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
)
// nodeFromFileInfo returns a new node from the given path and FileInfo. It
// returns the first error that is encountered, together with a node.
func nodeFromFileInfo(path string, fi *ExtendedFileInfo, ignoreXattrListError bool, warnf func(format string, args ...any)) (*data.Node, error) {
	node := buildBasicNode(path, fi)
	if err := nodeFillExtendedStat(node, path, fi); err != nil {
		return node, err
	}
	genericErr := nodeFillGenericAttributes(node, path, fi)
	xattrErr := nodeFillExtendedAttributes(node, path, ignoreXattrListError, warnf)
	return node, errors.Join(genericErr, xattrErr)
}

// buildBasicNode fills the fields shared by every node type.
func buildBasicNode(path string, fi *ExtendedFileInfo) *data.Node {
	// keep the permission, type, setuid/setgid and sticky bits
	const mask = os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky
	node := &data.Node{
		Path:    path,
		Name:    fi.Name,
		Mode:    fi.Mode & mask,
		ModTime: fi.ModTime,
		Type:    nodeTypeFromFileInfo(fi.Mode),
	}
	if node.Type == data.NodeTypeFile {
		node.Size = uint64(fi.Size)
	}
	return node
}

// nodeTypeFromFileInfo maps an os.FileMode to the corresponding node type.
func nodeTypeFromFileInfo(mode os.FileMode) data.NodeType {
	switch mode & os.ModeType {
	case 0:
		return data.NodeTypeFile
	case os.ModeDir:
		return data.NodeTypeDir
	case os.ModeSymlink:
		return data.NodeTypeSymlink
	case os.ModeDevice | os.ModeCharDevice:
		return data.NodeTypeCharDev
	case os.ModeDevice:
		return data.NodeTypeDev
	case os.ModeNamedPipe:
		return data.NodeTypeFifo
	case os.ModeSocket:
		return data.NodeTypeSocket
	case os.ModeIrregular:
		return data.NodeTypeIrregular
	default:
		return data.NodeTypeInvalid
	}
}
// nodeFillExtendedStat copies owner, timestamps, inode, link count and
// device numbers from stat into node. For symlinks it additionally reads
// the link target from disk.
func nodeFillExtendedStat(node *data.Node, path string, stat *ExtendedFileInfo) error {
	node.Inode = stat.Inode
	node.DeviceID = stat.DeviceID
	node.ChangeTime = stat.ChangeTime
	node.AccessTime = stat.AccessTime
	node.UID = stat.UID
	node.GID = stat.GID
	node.User = lookupUsername(stat.UID)
	node.Group = lookupGroup(stat.GID)
	switch node.Type {
	case data.NodeTypeFile:
		node.Size = uint64(stat.Size)
		node.Links = stat.Links
	case data.NodeTypeDir:
		// nothing extra to record for directories
	case data.NodeTypeSymlink:
		var err error
		node.LinkTarget, err = os.Readlink(fixpath(path))
		node.Links = stat.Links
		if err != nil {
			return errors.WithStack(err)
		}
	case data.NodeTypeDev:
		node.Device = stat.Device
		node.Links = stat.Links
	case data.NodeTypeCharDev:
		node.Device = stat.Device
		node.Links = stat.Links
	case data.NodeTypeFifo:
	case data.NodeTypeSocket:
		// fifos and sockets need no additional fields
	default:
		return errors.Errorf("unsupported file type %q", node.Type)
	}
	return nil
}
var (
	uidLookupCache      = make(map[uint32]string)
	uidLookupCacheMutex = sync.RWMutex{}
)

// Cached user name lookup by uid. Returns "" when no name can be found.
func lookupUsername(uid uint32) string {
	uidLookupCacheMutex.RLock()
	cached, found := uidLookupCache[uid]
	uidLookupCacheMutex.RUnlock()
	if found {
		return cached
	}
	// resolve outside the lock; concurrent duplicate lookups are harmless
	var name string
	if u, err := user.LookupId(strconv.Itoa(int(uid))); err == nil {
		name = u.Username
	}
	uidLookupCacheMutex.Lock()
	uidLookupCache[uid] = name
	uidLookupCacheMutex.Unlock()
	return name
}
var (
	userNameLookupCache      = make(map[string]uint32)
	userNameLookupCacheMutex = sync.RWMutex{}
)

// Cached uid lookup by user name. Returns 0 when no id can be found.
//
//nolint:revive // captialization is correct as is
func lookupUid(userName string) uint32 {
	userNameLookupCacheMutex.RLock()
	cached, found := userNameLookupCache[userName]
	userNameLookupCacheMutex.RUnlock()
	if found {
		return cached
	}
	// unknown users and unparsable ids both map to 0
	var id uint32
	if u, err := user.Lookup(userName); err == nil {
		if parsed, perr := strconv.Atoi(u.Uid); perr == nil {
			id = uint32(parsed)
		}
	}
	userNameLookupCacheMutex.Lock()
	userNameLookupCache[userName] = id
	userNameLookupCacheMutex.Unlock()
	return id
}
var (
	gidLookupCache      = make(map[uint32]string)
	gidLookupCacheMutex = sync.RWMutex{}
)

// Cached group name lookup by gid. Returns "" when no name can be found.
func lookupGroup(gid uint32) string {
	gidLookupCacheMutex.RLock()
	cached, found := gidLookupCache[gid]
	gidLookupCacheMutex.RUnlock()
	if found {
		return cached
	}
	// resolve outside the lock; concurrent duplicate lookups are harmless
	var name string
	if g, err := user.LookupGroupId(strconv.Itoa(int(gid))); err == nil {
		name = g.Name
	}
	gidLookupCacheMutex.Lock()
	gidLookupCache[gid] = name
	gidLookupCacheMutex.Unlock()
	return name
}
var (
	groupNameLookupCache      = make(map[string]uint32)
	groupNameLookupCacheMutex = sync.RWMutex{}
)

// Cached uid lookup by group name. Returns 0 when no id can be found.
func lookupGid(groupName string) uint32 {
	groupNameLookupCacheMutex.RLock()
	cached, found := groupNameLookupCache[groupName]
	groupNameLookupCacheMutex.RUnlock()
	if found {
		return cached
	}
	// unknown groups and unparsable ids both map to 0
	var id uint32
	if g, err := user.LookupGroup(groupName); err == nil {
		if parsed, perr := strconv.Atoi(g.Gid); perr == nil {
			id = uint32(parsed)
		}
	}
	groupNameLookupCacheMutex.Lock()
	groupNameLookupCache[groupName] = id
	groupNameLookupCacheMutex.Unlock()
	return id
}
// NodeCreateAt creates the node at the given path but does NOT restore node meta data.
func NodeCreateAt(node *data.Node, path string) error {
	debug.Log("create node %v at %v", node.Name, path)
	switch node.Type {
	case data.NodeTypeDir:
		return nodeCreateDirAt(node, path)
	case data.NodeTypeFile:
		return nodeCreateFileAt(path)
	case data.NodeTypeSymlink:
		return nodeCreateSymlinkAt(node, path)
	case data.NodeTypeDev:
		return nodeCreateDevAt(node, path)
	case data.NodeTypeCharDev:
		return nodeCreateCharDevAt(node, path)
	case data.NodeTypeFifo:
		return nodeCreateFifoAt(path)
	case data.NodeTypeSocket:
		// sockets are not recreated
		return nil
	default:
		return errors.Errorf("filetype %q not implemented", node.Type)
	}
}

// nodeCreateDirAt creates a directory; an already existing one is not an error.
func nodeCreateDirAt(node *data.Node, path string) error {
	if err := os.Mkdir(fixpath(path), node.Mode); err != nil && !os.IsExist(err) {
		return errors.WithStack(err)
	}
	return nil
}

// nodeCreateFileAt creates (or truncates) an empty regular file.
func nodeCreateFileAt(path string) error {
	f, err := OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
	if err != nil {
		return errors.WithStack(err)
	}
	if err := f.Close(); err != nil {
		return errors.WithStack(err)
	}
	return nil
}

// nodeCreateSymlinkAt creates a symlink pointing at the node's link target.
func nodeCreateSymlinkAt(node *data.Node, path string) error {
	if err := os.Symlink(node.LinkTarget, fixpath(path)); err != nil {
		return errors.WithStack(err)
	}
	return nil
}
// nodeCreateDevAt creates a block device node. Access is restricted to 0600
// here; the stored mode is applied later by nodeRestoreMetadata via chmod.
func nodeCreateDevAt(node *data.Node, path string) error {
	return mknod(path, syscall.S_IFBLK|0600, node.Device)
}

// nodeCreateCharDevAt creates a character device node (mode applied later).
func nodeCreateCharDevAt(node *data.Node, path string) error {
	return mknod(path, syscall.S_IFCHR|0600, node.Device)
}

// nodeCreateFifoAt creates a named pipe (mode applied later).
func nodeCreateFifoAt(path string) error {
	return mkfifo(path, 0600)
}

// mkfifo implements mkfifo in terms of mknod by or-ing in the FIFO type bit.
func mkfifo(path string, mode uint32) (err error) {
	return mknod(path, mode|syscall.S_IFIFO, 0)
}
// NodeRestoreMetadata restores node metadata
func NodeRestoreMetadata(node *data.Node, path string, warn func(msg string), xattrSelectFilter func(xattrName string) bool, ownershipByName bool) error {
	err := nodeRestoreMetadata(node, path, warn, xattrSelectFilter, ownershipByName)
	if err == nil {
		return nil
	}
	// It is common to have permission errors for folders like /home
	// unless you're running as root, so ignore those.
	if os.Geteuid() > 0 && errors.Is(err, os.ErrPermission) {
		debug.Log("not running as root, ignoring permission error for %v: %v", path, err)
		return nil
	}
	debug.Log("restoreMetadata(%s) error %v", path, err)
	return err
}

// nodeRestoreMetadata applies ownership, extended attributes, generic
// attributes, timestamps and finally the mode. Every step is attempted even
// when an earlier one fails; the first error encountered is returned.
func nodeRestoreMetadata(node *data.Node, path string, warn func(msg string), xattrSelectFilter func(xattrName string) bool, ownershipByName bool) error {
	var firsterr error
	record := func(err error) {
		if err != nil && firsterr == nil {
			firsterr = err
		}
	}
	if err := lchown(path, node, ownershipByName); err != nil {
		record(errors.WithStack(err))
	}
	if err := nodeRestoreExtendedAttributes(node, path, xattrSelectFilter); err != nil {
		debug.Log("error restoring extended attributes for %v: %v", path, err)
		record(err)
	}
	if err := nodeRestoreGenericAttributes(node, path, warn); err != nil {
		debug.Log("error restoring generic attributes for %v: %v", path, err)
		record(err)
	}
	if err := nodeRestoreTimestamps(node, path); err != nil {
		debug.Log("error restoring timestamps for %v: %v", path, err)
		record(err)
	}
	// Moving RestoreTimestamps and restoreExtendedAttributes calls above as for readonly files in windows
	// calling Chmod below will no longer allow any modifications to be made on the file and the
	// calls above would fail.
	if node.Type != data.NodeTypeSymlink {
		if err := chmod(path, node.Mode); err != nil {
			record(errors.WithStack(err))
		}
	}
	return firsterr
}

// nodeRestoreTimestamps restores the access and modification time from node.
func nodeRestoreTimestamps(node *data.Node, path string) error {
	atime := node.AccessTime.UnixNano()
	mtime := node.ModTime.UnixNano()
	if err := utimesNano(fixpath(path), atime, mtime, node.Type); err != nil {
		return fmt.Errorf("failed to restore timestamp of %q: %w", path, err)
	}
	return nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/stat_test.go | internal/fs/stat_test.go | package fs
import (
"os"
"path/filepath"
"testing"
rtest "github.com/restic/restic/internal/test"
)
// TestExtendedStat checks that ExtendedStat carries over the modification
// time from the original os.FileInfo.
func TestExtendedStat(t *testing.T) {
	tempdir := rtest.TempDir(t)
	filename := filepath.Join(tempdir, "file")
	err := os.WriteFile(filename, []byte("foobar"), 0640)
	if err != nil {
		t.Fatal(err)
	}
	fi, err := Lstat(filename)
	if err != nil {
		t.Fatal(err)
	}
	extFI := ExtendedStat(fi)
	if !extFI.ModTime.Equal(fi.ModTime()) {
		t.Errorf("extFI.ModTime does not match, want %v, got %v", fi.ModTime(), extFI.ModTime)
	}
}

// TestNilExtendPanic checks that ExtendedStat panics with the documented
// message when called with a nil os.FileInfo.
func TestNilExtendPanic(t *testing.T) {
	defer func() {
		if r := recover(); r != nil {
			rtest.Assert(t, r == "os.FileInfo is nil", "Panic message does not match, want %v, got %v", "os.FileInfo is nil", r)
		} else {
			rtest.Assert(t, false, "Expected panic, but no panic occurred")
		}
	}()
	_ = ExtendedStat(nil)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/const_unix.go | internal/fs/const_unix.go | //go:build !windows
package fs
import "syscall"
// O_NOFOLLOW instructs the kernel to not follow symlinks when opening a file.
const O_NOFOLLOW int = syscall.O_NOFOLLOW
// O_DIRECTORY instructs the kernel to only open directories.
const O_DIRECTORY int = syscall.O_DIRECTORY
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/priv.go | internal/fs/priv.go | //go:build !windows
package fs
// enableProcessPrivileges enables additional file system privileges for the current process.
// On non-Windows platforms no extra privileges are required, so this is a no-op.
func enableProcessPrivileges() error {
	return nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/ea_windows.go | internal/fs/ea_windows.go | //go:build windows
package fs
import (
"fmt"
"syscall"
"unsafe"
"github.com/Microsoft/go-winio"
"golang.org/x/sys/windows"
)
// extendedAttribute is a type alias for winio.ExtendedAttribute
type extendedAttribute = winio.ExtendedAttribute
// encodeExtendedAttributes encodes the extended attributes to a byte slice.
func encodeExtendedAttributes(attrs []extendedAttribute) ([]byte, error) {
return winio.EncodeExtendedAttributes(attrs)
}
// decodeExtendedAttributes decodes the extended attributes from a byte slice.
func decodeExtendedAttributes(data []byte) ([]extendedAttribute, error) {
return winio.DecodeExtendedAttributes(data)
}
// The code below was copied over from https://github.com/microsoft/go-winio/blob/main/pipe.go under MIT license.
// The MIT License (MIT)
// Copyright (c) 2015 Microsoft
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// ntStatus is a native NTSTATUS code as returned by ntdll functions.
type ntStatus int32

// Err maps the status to a Go error: non-negative values indicate success
// and yield nil, negative values are converted to the matching Win32 error.
func (status ntStatus) Err() error {
	if status >= 0 {
		return nil
	}
	return rtlNtStatusToDosError(status)
}
// The code below was copied over from https://github.com/microsoft/go-winio/blob/main/zsyscall_windows.go under MIT license.
// ioStatusBlock represents the IO_STATUS_BLOCK struct defined here:
// https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/ns-wdm-_io_status_block
type ioStatusBlock struct {
Status, Information uintptr
}
var (
modntdll = windows.NewLazySystemDLL("ntdll.dll")
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
)
// rtlNtStatusToDosError converts an NTSTATUS code to a Win32 error number
// via RtlNtStatusToDosErrorNoTeb; a zero result means no error.
func rtlNtStatusToDosError(status ntStatus) (winerr error) {
	r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status))
	if r0 != 0 {
		winerr = syscall.Errno(r0)
	}
	return
}
// The code below was adapted from https://github.com/ambarve/go-winio/blob/a7564fd482feb903f9562a135f1317fd3b480739/ea.go
// under MIT license.
var (
procNtQueryEaFile = modntdll.NewProc("NtQueryEaFile")
procNtSetEaFile = modntdll.NewProc("NtSetEaFile")
)
const (
// STATUS_NO_EAS_ON_FILE is a constant value which indicates EAs were requested for the file but it has no EAs.
// Windows NTSTATUS value: STATUS_NO_EAS_ON_FILE=0xC0000052
STATUS_NO_EAS_ON_FILE = -1073741742
)
// fgetEA retrieves the extended attributes for the file represented by `handle`. The
// `handle` must have been opened with file access flag FILE_READ_EA (0x8).
// The extended file attribute names in windows are case-insensitive and when fetching
// the attributes the names are generally returned in UPPER case.
func fgetEA(handle windows.Handle) ([]extendedAttribute, error) {
	// default buffer size to start with
	bufLen := 1024
	buf := make([]byte, bufLen)
	var iosb ioStatusBlock
	// keep increasing the buffer size until it is large enough
	for {
		status := getFileEA(handle, &iosb, &buf[0], uint32(bufLen), false, 0, 0, nil, true)
		if status == STATUS_NO_EAS_ON_FILE {
			// If status is -1073741742, no extended attributes were found
			return nil, nil
		}
		err := status.Err()
		if err != nil {
			// convert ntstatus code to windows error
			if err == windows.ERROR_INSUFFICIENT_BUFFER || err == windows.ERROR_MORE_DATA {
				// double the buffer and restart the query from scratch
				bufLen *= 2
				buf = make([]byte, bufLen)
				continue
			}
			return nil, fmt.Errorf("get file EA failed with: %w", err)
		}
		break
	}
	return decodeExtendedAttributes(buf)
}
// fsetEA sets the extended attributes for the file represented by `handle`. The
// handle must have been opened with the file access flag FILE_WRITE_EA(0x10).
func fsetEA(handle windows.Handle, attrs []extendedAttribute) error {
	encodedEA, err := encodeExtendedAttributes(attrs)
	if err != nil {
		return fmt.Errorf("failed to encode extended attributes: %w", err)
	}
	var iosb ioStatusBlock
	// NtSetEaFile consumes the full encoded buffer in a single call
	return setFileEA(handle, &iosb, &encodedEA[0], uint32(len(encodedEA))).Err()
}
// The code below was adapted from https://github.com/ambarve/go-winio/blob/a7564fd482feb903f9562a135f1317fd3b480739/zsyscall_windows.go
// under MIT license.
// getFileEA wraps the NtQueryEaFile syscall; the boolean parameters are
// converted to the 0/1 values the native call expects.
func getFileEA(handle windows.Handle, iosb *ioStatusBlock, buf *uint8, bufLen uint32, returnSingleEntry bool, eaList uintptr, eaListLen uint32, eaIndex *uint32, restartScan bool) (status ntStatus) {
	var _p0 uint32
	if returnSingleEntry {
		_p0 = 1
	}
	var _p1 uint32
	if restartScan {
		_p1 = 1
	}
	r0, _, _ := syscall.SyscallN(procNtQueryEaFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(buf)), uintptr(bufLen), uintptr(_p0), eaList, uintptr(eaListLen), uintptr(unsafe.Pointer(eaIndex)), uintptr(_p1))
	status = ntStatus(r0)
	return
}

// setFileEA wraps the NtSetEaFile syscall, writing the encoded EA buffer.
func setFileEA(handle windows.Handle, iosb *ioStatusBlock, buf *uint8, bufLen uint32) (status ntStatus) {
	r0, _, _ := syscall.SyscallN(procNtSetEaFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(buf)), uintptr(bufLen))
	status = ntStatus(r0)
	return
}
// pathSupportsExtendedAttributes returns true if the path supports extended attributes.
// NOTE(review): path is presumably a volume root, as GetVolumeInformation
// expects one — confirm against callers.
func pathSupportsExtendedAttributes(path string) (supported bool, err error) {
	var fileSystemFlags uint32
	utf16Path, err := windows.UTF16PtrFromString(path)
	if err != nil {
		return false, err
	}
	// only the file system flags output parameter is requested
	err = windows.GetVolumeInformation(utf16Path, nil, 0, nil, nil, &fileSystemFlags, nil, 0)
	if err != nil {
		return false, err
	}
	supported = (fileSystemFlags & windows.FILE_SUPPORTS_EXTENDED_ATTRIBUTES) != 0
	return supported, nil
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/fs_track.go | internal/fs/fs_track.go | package fs
import (
"fmt"
"os"
"runtime"
"runtime/debug"
)
// Track is a wrapper around another file system which installs finalizers
// for open files which call panic() when they are not closed when the garbage
// collector releases them. This can be used to find resource leaks via open
// files.
type Track struct {
	FS // wrapped file system; all methods except OpenFile are promoted unchanged
}
// OpenFile wraps the OpenFile method of the underlying file system.
func (fs Track) OpenFile(name string, flag int, metadataOnly bool) (File, error) {
	inner, err := fs.FS.OpenFile(name, flag, metadataOnly)
	if err != nil {
		return nil, err
	}
	// capture the current stack so a leak can be traced back to its origin
	return newTrackFile(debug.Stack(), name, inner), nil
}
// trackFile wraps a File with a leak-detecting finalizer.
type trackFile struct {
	File
}

// newTrackFile wraps file with a finalizer that reports the file and its
// creation stack to stderr and panics when the file is garbage collected
// without having been closed. stack is the call stack at creation time.
func newTrackFile(stack []byte, filename string, file File) *trackFile {
	f := &trackFile{file}
	runtime.SetFinalizer(f, func(_ any) {
		// fixed typo: "Stacktrack" -> "Stacktrace"
		fmt.Fprintf(os.Stderr, "file %s not closed\n\nStacktrace:\n%s\n", filename, stack)
		panic("file " + filename + " not closed")
	})
	return f
}
// Close clears the leak-detection finalizer and closes the wrapped file.
func (f *trackFile) Close() error {
	runtime.SetFinalizer(f, nil)
	return f.File.Close()
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/node_linux.go | internal/fs/node_linux.go | package fs
import (
"github.com/restic/restic/internal/data"
"golang.org/x/sys/unix"
)
// utimesNano is like syscall.UtimesNano, except that it does not follow symlinks.
// atime and mtime are nanoseconds since the Unix epoch. The node type is
// ignored on Linux, where AT_SYMLINK_NOFOLLOW handles all file types.
func utimesNano(path string, atime, mtime int64, _ data.NodeType) error {
	times := []unix.Timespec{
		unix.NsecToTimespec(atime),
		unix.NsecToTimespec(mtime),
	}
	return unix.UtimesNanoAt(unix.AT_FDCWD, path, times, unix.AT_SYMLINK_NOFOLLOW)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/vss_windows.go | internal/fs/vss_windows.go | //go:build windows
package fs
import (
"fmt"
"math"
"path/filepath"
"runtime"
"strings"
"syscall"
"time"
"unsafe"
"github.com/go-ole/go-ole"
"github.com/restic/restic/internal/errors"
"golang.org/x/sys/windows"
)
// HRESULT is a custom type for the windows api HRESULT type.
type HRESULT uint
// HRESULT constant values necessary for using VSS api.
//
//nolint:golint
const (
S_OK HRESULT = 0x00000000
S_FALSE HRESULT = 0x00000001
E_ACCESSDENIED HRESULT = 0x80070005
E_OUTOFMEMORY HRESULT = 0x8007000E
E_INVALIDARG HRESULT = 0x80070057
VSS_E_BAD_STATE HRESULT = 0x80042301
VSS_E_UNEXPECTED HRESULT = 0x80042302
VSS_E_PROVIDER_ALREADY_REGISTERED HRESULT = 0x80042303
VSS_E_PROVIDER_NOT_REGISTERED HRESULT = 0x80042304
VSS_E_PROVIDER_VETO HRESULT = 0x80042306
VSS_E_PROVIDER_IN_USE HRESULT = 0x80042307
VSS_E_OBJECT_NOT_FOUND HRESULT = 0x80042308
VSS_E_VOLUME_NOT_SUPPORTED HRESULT = 0x8004230C
VSS_E_VOLUME_NOT_SUPPORTED_BY_PROVIDER HRESULT = 0x8004230E
VSS_E_OBJECT_ALREADY_EXISTS HRESULT = 0x8004230D
VSS_E_UNEXPECTED_PROVIDER_ERROR HRESULT = 0x8004230F
VSS_E_CORRUPT_XML_DOCUMENT HRESULT = 0x80042310
VSS_E_INVALID_XML_DOCUMENT HRESULT = 0x80042311
VSS_E_MAXIMUM_NUMBER_OF_VOLUMES_REACHED HRESULT = 0x80042312
VSS_E_FLUSH_WRITES_TIMEOUT HRESULT = 0x80042313
VSS_E_HOLD_WRITES_TIMEOUT HRESULT = 0x80042314
VSS_E_UNEXPECTED_WRITER_ERROR HRESULT = 0x80042315
VSS_E_SNAPSHOT_SET_IN_PROGRESS HRESULT = 0x80042316
VSS_E_MAXIMUM_NUMBER_OF_SNAPSHOTS_REACHED HRESULT = 0x80042317
VSS_E_WRITER_INFRASTRUCTURE HRESULT = 0x80042318
VSS_E_WRITER_NOT_RESPONDING HRESULT = 0x80042319
VSS_E_WRITER_ALREADY_SUBSCRIBED HRESULT = 0x8004231A
VSS_E_UNSUPPORTED_CONTEXT HRESULT = 0x8004231B
VSS_E_VOLUME_IN_USE HRESULT = 0x8004231D
VSS_E_MAXIMUM_DIFFAREA_ASSOCIATIONS_REACHED HRESULT = 0x8004231E
VSS_E_INSUFFICIENT_STORAGE HRESULT = 0x8004231F
VSS_E_NO_SNAPSHOTS_IMPORTED HRESULT = 0x80042320
VSS_E_SOME_SNAPSHOTS_NOT_IMPORTED HRESULT = 0x80042321
VSS_E_MAXIMUM_NUMBER_OF_REMOTE_MACHINES_REACHED HRESULT = 0x80042322
VSS_E_REMOTE_SERVER_UNAVAILABLE HRESULT = 0x80042323
VSS_E_REMOTE_SERVER_UNSUPPORTED HRESULT = 0x80042324
VSS_E_REVERT_IN_PROGRESS HRESULT = 0x80042325
VSS_E_REVERT_VOLUME_LOST HRESULT = 0x80042326
VSS_E_REBOOT_REQUIRED HRESULT = 0x80042327
VSS_E_TRANSACTION_FREEZE_TIMEOUT HRESULT = 0x80042328
VSS_E_TRANSACTION_THAW_TIMEOUT HRESULT = 0x80042329
VSS_E_VOLUME_NOT_LOCAL HRESULT = 0x8004232D
VSS_E_CLUSTER_TIMEOUT HRESULT = 0x8004232E
VSS_E_WRITERERROR_INCONSISTENTSNAPSHOT HRESULT = 0x800423F0
VSS_E_WRITERERROR_OUTOFRESOURCES HRESULT = 0x800423F1
VSS_E_WRITERERROR_TIMEOUT HRESULT = 0x800423F2
VSS_E_WRITERERROR_RETRYABLE HRESULT = 0x800423F3
VSS_E_WRITERERROR_NONRETRYABLE HRESULT = 0x800423F4
VSS_E_WRITERERROR_RECOVERY_FAILED HRESULT = 0x800423F5
VSS_E_BREAK_REVERT_ID_FAILED HRESULT = 0x800423F6
VSS_E_LEGACY_PROVIDER HRESULT = 0x800423F7
VSS_E_MISSING_DISK HRESULT = 0x800423F8
VSS_E_MISSING_HIDDEN_VOLUME HRESULT = 0x800423F9
VSS_E_MISSING_VOLUME HRESULT = 0x800423FA
VSS_E_AUTORECOVERY_FAILED HRESULT = 0x800423FB
VSS_E_DYNAMIC_DISK_ERROR HRESULT = 0x800423FC
VSS_E_NONTRANSPORTABLE_BCD HRESULT = 0x800423FD
VSS_E_CANNOT_REVERT_DISKID HRESULT = 0x800423FE
VSS_E_RESYNC_IN_PROGRESS HRESULT = 0x800423FF
VSS_E_CLUSTER_ERROR HRESULT = 0x80042400
VSS_E_UNSELECTED_VOLUME HRESULT = 0x8004232A
VSS_E_SNAPSHOT_NOT_IN_SET HRESULT = 0x8004232B
VSS_E_NESTED_VOLUME_LIMIT HRESULT = 0x8004232C
VSS_E_NOT_SUPPORTED HRESULT = 0x8004232F
VSS_E_WRITERERROR_PARTIAL_FAILURE HRESULT = 0x80042336
VSS_E_WRITER_STATUS_NOT_AVAILABLE HRESULT = 0x80042409
)
// hresultToString maps a HRESULT value to a human readable string.
var hresultToString = map[HRESULT]string{
S_OK: "S_OK",
E_ACCESSDENIED: "E_ACCESSDENIED",
E_OUTOFMEMORY: "E_OUTOFMEMORY",
E_INVALIDARG: "E_INVALIDARG",
VSS_E_BAD_STATE: "VSS_E_BAD_STATE",
VSS_E_UNEXPECTED: "VSS_E_UNEXPECTED",
VSS_E_PROVIDER_ALREADY_REGISTERED: "VSS_E_PROVIDER_ALREADY_REGISTERED",
VSS_E_PROVIDER_NOT_REGISTERED: "VSS_E_PROVIDER_NOT_REGISTERED",
VSS_E_PROVIDER_VETO: "VSS_E_PROVIDER_VETO",
VSS_E_PROVIDER_IN_USE: "VSS_E_PROVIDER_IN_USE",
VSS_E_OBJECT_NOT_FOUND: "VSS_E_OBJECT_NOT_FOUND",
VSS_E_VOLUME_NOT_SUPPORTED: "VSS_E_VOLUME_NOT_SUPPORTED",
VSS_E_VOLUME_NOT_SUPPORTED_BY_PROVIDER: "VSS_E_VOLUME_NOT_SUPPORTED_BY_PROVIDER",
VSS_E_OBJECT_ALREADY_EXISTS: "VSS_E_OBJECT_ALREADY_EXISTS",
VSS_E_UNEXPECTED_PROVIDER_ERROR: "VSS_E_UNEXPECTED_PROVIDER_ERROR",
VSS_E_CORRUPT_XML_DOCUMENT: "VSS_E_CORRUPT_XML_DOCUMENT",
VSS_E_INVALID_XML_DOCUMENT: "VSS_E_INVALID_XML_DOCUMENT",
VSS_E_MAXIMUM_NUMBER_OF_VOLUMES_REACHED: "VSS_E_MAXIMUM_NUMBER_OF_VOLUMES_REACHED",
VSS_E_FLUSH_WRITES_TIMEOUT: "VSS_E_FLUSH_WRITES_TIMEOUT",
VSS_E_HOLD_WRITES_TIMEOUT: "VSS_E_HOLD_WRITES_TIMEOUT",
VSS_E_UNEXPECTED_WRITER_ERROR: "VSS_E_UNEXPECTED_WRITER_ERROR",
VSS_E_SNAPSHOT_SET_IN_PROGRESS: "VSS_E_SNAPSHOT_SET_IN_PROGRESS",
VSS_E_MAXIMUM_NUMBER_OF_SNAPSHOTS_REACHED: "VSS_E_MAXIMUM_NUMBER_OF_SNAPSHOTS_REACHED",
VSS_E_WRITER_INFRASTRUCTURE: "VSS_E_WRITER_INFRASTRUCTURE",
VSS_E_WRITER_NOT_RESPONDING: "VSS_E_WRITER_NOT_RESPONDING",
VSS_E_WRITER_ALREADY_SUBSCRIBED: "VSS_E_WRITER_ALREADY_SUBSCRIBED",
VSS_E_UNSUPPORTED_CONTEXT: "VSS_E_UNSUPPORTED_CONTEXT",
VSS_E_VOLUME_IN_USE: "VSS_E_VOLUME_IN_USE",
VSS_E_MAXIMUM_DIFFAREA_ASSOCIATIONS_REACHED: "VSS_E_MAXIMUM_DIFFAREA_ASSOCIATIONS_REACHED",
VSS_E_INSUFFICIENT_STORAGE: "VSS_E_INSUFFICIENT_STORAGE",
VSS_E_NO_SNAPSHOTS_IMPORTED: "VSS_E_NO_SNAPSHOTS_IMPORTED",
VSS_E_SOME_SNAPSHOTS_NOT_IMPORTED: "VSS_E_SOME_SNAPSHOTS_NOT_IMPORTED",
VSS_E_MAXIMUM_NUMBER_OF_REMOTE_MACHINES_REACHED: "VSS_E_MAXIMUM_NUMBER_OF_REMOTE_MACHINES_REACHED",
VSS_E_REMOTE_SERVER_UNAVAILABLE: "VSS_E_REMOTE_SERVER_UNAVAILABLE",
VSS_E_REMOTE_SERVER_UNSUPPORTED: "VSS_E_REMOTE_SERVER_UNSUPPORTED",
VSS_E_REVERT_IN_PROGRESS: "VSS_E_REVERT_IN_PROGRESS",
VSS_E_REVERT_VOLUME_LOST: "VSS_E_REVERT_VOLUME_LOST",
VSS_E_REBOOT_REQUIRED: "VSS_E_REBOOT_REQUIRED",
VSS_E_TRANSACTION_FREEZE_TIMEOUT: "VSS_E_TRANSACTION_FREEZE_TIMEOUT",
VSS_E_TRANSACTION_THAW_TIMEOUT: "VSS_E_TRANSACTION_THAW_TIMEOUT",
VSS_E_VOLUME_NOT_LOCAL: "VSS_E_VOLUME_NOT_LOCAL",
VSS_E_CLUSTER_TIMEOUT: "VSS_E_CLUSTER_TIMEOUT",
VSS_E_WRITERERROR_INCONSISTENTSNAPSHOT: "VSS_E_WRITERERROR_INCONSISTENTSNAPSHOT",
VSS_E_WRITERERROR_OUTOFRESOURCES: "VSS_E_WRITERERROR_OUTOFRESOURCES",
VSS_E_WRITERERROR_TIMEOUT: "VSS_E_WRITERERROR_TIMEOUT",
VSS_E_WRITERERROR_RETRYABLE: "VSS_E_WRITERERROR_RETRYABLE",
VSS_E_WRITERERROR_NONRETRYABLE: "VSS_E_WRITERERROR_NONRETRYABLE",
VSS_E_WRITERERROR_RECOVERY_FAILED: "VSS_E_WRITERERROR_RECOVERY_FAILED",
VSS_E_BREAK_REVERT_ID_FAILED: "VSS_E_BREAK_REVERT_ID_FAILED",
VSS_E_LEGACY_PROVIDER: "VSS_E_LEGACY_PROVIDER",
VSS_E_MISSING_DISK: "VSS_E_MISSING_DISK",
VSS_E_MISSING_HIDDEN_VOLUME: "VSS_E_MISSING_HIDDEN_VOLUME",
VSS_E_MISSING_VOLUME: "VSS_E_MISSING_VOLUME",
VSS_E_AUTORECOVERY_FAILED: "VSS_E_AUTORECOVERY_FAILED",
VSS_E_DYNAMIC_DISK_ERROR: "VSS_E_DYNAMIC_DISK_ERROR",
VSS_E_NONTRANSPORTABLE_BCD: "VSS_E_NONTRANSPORTABLE_BCD",
VSS_E_CANNOT_REVERT_DISKID: "VSS_E_CANNOT_REVERT_DISKID",
VSS_E_RESYNC_IN_PROGRESS: "VSS_E_RESYNC_IN_PROGRESS",
VSS_E_CLUSTER_ERROR: "VSS_E_CLUSTER_ERROR",
VSS_E_UNSELECTED_VOLUME: "VSS_E_UNSELECTED_VOLUME",
VSS_E_SNAPSHOT_NOT_IN_SET: "VSS_E_SNAPSHOT_NOT_IN_SET",
VSS_E_NESTED_VOLUME_LIMIT: "VSS_E_NESTED_VOLUME_LIMIT",
VSS_E_NOT_SUPPORTED: "VSS_E_NOT_SUPPORTED",
VSS_E_WRITERERROR_PARTIAL_FAILURE: "VSS_E_WRITERERROR_PARTIAL_FAILURE",
VSS_E_WRITER_STATUS_NOT_AVAILABLE: "VSS_E_WRITER_STATUS_NOT_AVAILABLE",
}
// Str converts a HRESULT to a human readable string.
func (h HRESULT) Str() string {
	name, known := hresultToString[h]
	if !known {
		return "UNKNOWN"
	}
	return name
}

// Error implements the error interface
func (h HRESULT) Error() string {
	return h.Str()
}

// vssError encapsulates errors returned from calling VSS api.
type vssError struct {
	text    string
	hresult HRESULT
}

// NewVssError creates a new VSS api error.
func newVssError(text string, hresult HRESULT) error {
	return &vssError{text: text, hresult: hresult}
}

// NewVssError creates a new VSS api error.
func newVssErrorIfResultNotOK(text string, hresult HRESULT) error {
	if hresult == S_OK {
		return nil
	}
	return newVssError(text, hresult)
}

// Error implements the error interface.
func (e *vssError) Error() string {
	return fmt.Sprintf("VSS error: %s: %s (%#x)", e.text, e.hresult.Str(), e.hresult)
}

// Unwrap returns the underlying HRESULT error
func (e *vssError) Unwrap() error {
	return e.hresult
}
// vssTextError encapsulates errors returned from calling VSS api.
type vssTextError struct {
	text string
}

// NewVssTextError creates a new VSS api error.
func newVssTextError(text string) error {
	err := &vssTextError{text: text}
	return err
}

// Error implements the error interface.
func (e *vssTextError) Error() string {
	return fmt.Sprintf("VSS error: %s", e.text)
}
// VssContext is a custom type for the windows api VssContext type.
type VssContext uint
// VssContext constant values necessary for using VSS api.
const (
VSS_CTX_BACKUP VssContext = iota
VSS_CTX_FILE_SHARE_BACKUP
VSS_CTX_NAS_ROLLBACK
VSS_CTX_APP_ROLLBACK
VSS_CTX_CLIENT_ACCESSIBLE
VSS_CTX_CLIENT_ACCESSIBLE_WRITERS
VSS_CTX_ALL
)
// VssBackup is a custom type for the windows api VssBackup type.
type VssBackup uint
// VssBackup constant values necessary for using VSS api.
const (
VSS_BT_UNDEFINED VssBackup = iota
VSS_BT_FULL
VSS_BT_INCREMENTAL
VSS_BT_DIFFERENTIAL
VSS_BT_LOG
VSS_BT_COPY
VSS_BT_OTHER
)
// VssObjectType is a custom type for the windows api VssObjectType type.
type VssObjectType uint
// VssObjectType constant values necessary for using VSS api.
const (
VSS_OBJECT_UNKNOWN VssObjectType = iota
VSS_OBJECT_NONE
VSS_OBJECT_SNAPSHOT_SET
VSS_OBJECT_SNAPSHOT
VSS_OBJECT_PROVIDER
VSS_OBJECT_TYPE_COUNT
)
// UUID_IVSS defines the GUID of IVssBackupComponents.
var UUID_IVSS = ole.NewGUID("{665c1d5f-c218-414d-a05d-7fef5f9d5c86}")

// IVssBackupComponents VSS api interface.
type IVssBackupComponents struct {
	ole.IUnknown
}

// IVssBackupComponentsVTable is the vtable for IVssBackupComponents.
// The fields must match the COM vtable layout of the interface exactly,
// in declaration order; they are only used as function-pointer offsets
// for syscalls and are therefore never referenced by name individually.
// nolint:structcheck
type IVssBackupComponentsVTable struct {
	ole.IUnknownVtbl
	getWriterComponentsCount      uintptr
	getWriterComponents           uintptr
	initializeForBackup           uintptr
	setBackupState                uintptr
	initializeForRestore          uintptr
	setRestoreState               uintptr
	gatherWriterMetadata          uintptr
	getWriterMetadataCount        uintptr
	getWriterMetadata             uintptr
	freeWriterMetadata            uintptr
	addComponent                  uintptr
	prepareForBackup              uintptr
	abortBackup                   uintptr
	gatherWriterStatus            uintptr
	getWriterStatusCount          uintptr
	freeWriterStatus              uintptr
	getWriterStatus               uintptr
	setBackupSucceeded            uintptr
	setBackupOptions              uintptr
	setSelectedForRestore         uintptr
	setRestoreOptions             uintptr
	setAdditionalRestores         uintptr
	setPreviousBackupStamp        uintptr
	saveAsXML                     uintptr
	backupComplete                uintptr
	addAlternativeLocationMapping uintptr
	addRestoreSubcomponent        uintptr
	setFileRestoreStatus          uintptr
	addNewTarget                  uintptr
	setRangesFilePath             uintptr
	preRestore                    uintptr
	postRestore                   uintptr
	setContext                    uintptr
	startSnapshotSet              uintptr
	addToSnapshotSet              uintptr
	doSnapshotSet                 uintptr
	deleteSnapshots               uintptr
	importSnapshots               uintptr
	breakSnapshotSet              uintptr
	getSnapshotProperties         uintptr
	query                         uintptr
	isVolumeSupported             uintptr
	disableWriterClasses          uintptr
	enableWriterClasses           uintptr
	disableWriterInstances        uintptr
	exposeSnapshot                uintptr
	revertToSnapshot              uintptr
	queryRevertStatus             uintptr
}

// getVTable returns the vtable for IVssBackupComponents.
func (vss *IVssBackupComponents) getVTable() *IVssBackupComponentsVTable {
	return (*IVssBackupComponentsVTable)(unsafe.Pointer(vss.RawVTable))
}
// AbortBackup calls the equivalent VSS api.
// The COM method takes no arguments besides the receiver.
func (vss *IVssBackupComponents) AbortBackup() error {
	result, _, _ := syscall.Syscall(vss.getVTable().abortBackup, 1,
		uintptr(unsafe.Pointer(vss)), 0, 0)
	return newVssErrorIfResultNotOK("AbortBackup() failed", HRESULT(result))
}

// InitializeForBackup calls the equivalent VSS api.
// The optional XML argument of the COM method is passed as NULL (0).
func (vss *IVssBackupComponents) InitializeForBackup() error {
	result, _, _ := syscall.Syscall(vss.getVTable().initializeForBackup, 2,
		uintptr(unsafe.Pointer(vss)), 0, 0)
	return newVssErrorIfResultNotOK("InitializeForBackup() failed", HRESULT(result))
}

// SetContext calls the equivalent VSS api.
func (vss *IVssBackupComponents) SetContext(context VssContext) error {
	result, _, _ := syscall.Syscall(vss.getVTable().setContext, 2,
		uintptr(unsafe.Pointer(vss)), uintptr(context), 0)
	return newVssErrorIfResultNotOK("SetContext() failed", HRESULT(result))
}
// GatherWriterMetadata calls the equivalent VSS api.
// The returned IVSSAsync must be waited on to complete the operation.
func (vss *IVssBackupComponents) GatherWriterMetadata() (*IVSSAsync, error) {
	var oleIUnknown *ole.IUnknown
	result, _, _ := syscall.Syscall(vss.getVTable().gatherWriterMetadata, 2,
		uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(&oleIUnknown)), 0)
	err := newVssErrorIfResultNotOK("GatherWriterMetadata() failed", HRESULT(result))
	return vss.convertToVSSAsync(oleIUnknown, err)
}

// convertToVSSAsync looks up IVSSAsync interface if given result
// is a success. A non-nil err is passed through unchanged.
func (vss *IVssBackupComponents) convertToVSSAsync(
	oleIUnknown *ole.IUnknown, err error) (*IVSSAsync, error) {
	if err != nil {
		return nil, err
	}
	comInterface, err := queryInterface(oleIUnknown, UIID_IVSS_ASYNC)
	if err != nil {
		return nil, err
	}
	// reinterpret the raw COM interface pointer as our IVSSAsync wrapper
	iVssAsync := (*IVSSAsync)(unsafe.Pointer(comInterface))
	return iVssAsync, nil
}
// IsVolumeSupported calls the equivalent VSS api.
// It reports whether the given provider can create snapshots of the
// volume identified by volumeName.
func (vss *IVssBackupComponents) IsVolumeSupported(providerID *ole.GUID, volumeName string) (bool, error) {
	volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName)
	if err != nil {
		// volume names come from the Windows API and must always be
		// convertible to UTF-16; anything else is a programmer error
		panic(err)
	}
	var isSupportedRaw uint32
	var result uintptr
	if runtime.GOARCH == "386" {
		// on 32-bit the GUID argument is passed by value as four stack words
		id := (*[4]uintptr)(unsafe.Pointer(providerID))
		result, _, _ = syscall.Syscall9(vss.getVTable().isVolumeSupported, 7,
			uintptr(unsafe.Pointer(vss)), id[0], id[1], id[2], id[3],
			uintptr(unsafe.Pointer(volumeNamePointer)), uintptr(unsafe.Pointer(&isSupportedRaw)), 0,
			0)
	} else {
		result, _, _ = syscall.Syscall6(vss.getVTable().isVolumeSupported, 4,
			uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(providerID)),
			uintptr(unsafe.Pointer(volumeNamePointer)), uintptr(unsafe.Pointer(&isSupportedRaw)), 0,
			0)
	}
	// the api writes a BOOL; any nonzero value means "supported"
	isSupported := isSupportedRaw != 0
	return isSupported, newVssErrorIfResultNotOK("IsVolumeSupported() failed", HRESULT(result))
}
// StartSnapshotSet calls the equivalent VSS api.
// On success it returns the ID of the newly created snapshot set.
func (vss *IVssBackupComponents) StartSnapshotSet() (ole.GUID, error) {
	var snapshotSetID ole.GUID
	result, _, _ := syscall.Syscall(vss.getVTable().startSnapshotSet, 2,
		uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(&snapshotSetID)), 0,
	)
	return snapshotSetID, newVssErrorIfResultNotOK("StartSnapshotSet() failed", HRESULT(result))
}

// AddToSnapshotSet calls the equivalent VSS api.
// idSnapshot receives the ID assigned to the snapshot of volumeName.
func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, providerID *ole.GUID, idSnapshot *ole.GUID) error {
	volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName)
	if err != nil {
		// volume names come from the Windows API and must always be
		// convertible to UTF-16; anything else is a programmer error
		panic(err)
	}
	var result uintptr
	if runtime.GOARCH == "386" {
		// on 32-bit the GUID argument is passed by value as four stack words
		id := (*[4]uintptr)(unsafe.Pointer(providerID))
		result, _, _ = syscall.Syscall9(vss.getVTable().addToSnapshotSet, 7,
			uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)),
			id[0], id[1], id[2], id[3], uintptr(unsafe.Pointer(idSnapshot)), 0, 0)
	} else {
		result, _, _ = syscall.Syscall6(vss.getVTable().addToSnapshotSet, 4,
			uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)),
			uintptr(unsafe.Pointer(providerID)), uintptr(unsafe.Pointer(idSnapshot)), 0, 0)
	}
	return newVssErrorIfResultNotOK("AddToSnapshotSet() failed", HRESULT(result))
}
// PrepareForBackup calls the equivalent VSS api.
// The returned IVSSAsync must be waited on to complete the operation.
func (vss *IVssBackupComponents) PrepareForBackup() (*IVSSAsync, error) {
	var oleIUnknown *ole.IUnknown
	result, _, _ := syscall.Syscall(vss.getVTable().prepareForBackup, 2,
		uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(&oleIUnknown)), 0)
	err := newVssErrorIfResultNotOK("PrepareForBackup() failed", HRESULT(result))
	return vss.convertToVSSAsync(oleIUnknown, err)
}
// apiBoolToInt converts a Go bool into the integer representation
// (1 for true, 0 for false) expected when calling the VSS api.
func apiBoolToInt(input bool) uint {
	var asInt uint
	if input {
		asInt = 1
	}
	return asInt
}
// SetBackupState calls the equivalent VSS api.
// The boolean arguments are converted to the integer form the api
// expects before the call.
func (vss *IVssBackupComponents) SetBackupState(selectComponents bool,
	backupBootableSystemState bool, backupType VssBackup, partialFileSupport bool,
) error {
	selectComponentsVal := apiBoolToInt(selectComponents)
	backupBootableSystemStateVal := apiBoolToInt(backupBootableSystemState)
	partialFileSupportVal := apiBoolToInt(partialFileSupport)
	result, _, _ := syscall.Syscall6(vss.getVTable().setBackupState, 5,
		uintptr(unsafe.Pointer(vss)), uintptr(selectComponentsVal),
		uintptr(backupBootableSystemStateVal), uintptr(backupType), uintptr(partialFileSupportVal),
		0)
	return newVssErrorIfResultNotOK("SetBackupState() failed", HRESULT(result))
}
// DoSnapshotSet calls the equivalent VSS api.
// The returned IVSSAsync must be waited on to complete the operation.
func (vss *IVssBackupComponents) DoSnapshotSet() (*IVSSAsync, error) {
	var oleIUnknown *ole.IUnknown
	result, _, _ := syscall.Syscall(vss.getVTable().doSnapshotSet, 2, uintptr(unsafe.Pointer(vss)),
		uintptr(unsafe.Pointer(&oleIUnknown)), 0)
	err := newVssErrorIfResultNotOK("DoSnapshotSet() failed", HRESULT(result))
	return vss.convertToVSSAsync(oleIUnknown, err)
}

// DeleteSnapshots calls the equivalent VSS api.
// It returns the number of deleted snapshots and, on partial failure,
// the ID of the first snapshot that could not be deleted.
func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ole.GUID, error) {
	var deletedSnapshots int32
	var nondeletedSnapshotID ole.GUID
	var result uintptr
	if runtime.GOARCH == "386" {
		// on 32-bit the GUID argument is passed by value as four stack words
		id := (*[4]uintptr)(unsafe.Pointer(&snapshotID))
		result, _, _ = syscall.Syscall9(vss.getVTable().deleteSnapshots, 9,
			uintptr(unsafe.Pointer(vss)), id[0], id[1], id[2], id[3],
			uintptr(VSS_OBJECT_SNAPSHOT), uintptr(1), uintptr(unsafe.Pointer(&deletedSnapshots)),
			uintptr(unsafe.Pointer(&nondeletedSnapshotID)),
		)
	} else {
		result, _, _ = syscall.Syscall6(vss.getVTable().deleteSnapshots, 6,
			uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(&snapshotID)),
			uintptr(VSS_OBJECT_SNAPSHOT), uintptr(1), uintptr(unsafe.Pointer(&deletedSnapshots)),
			uintptr(unsafe.Pointer(&nondeletedSnapshotID)))
	}
	err := newVssErrorIfResultNotOK("DeleteSnapshots() failed", HRESULT(result))
	return deletedSnapshots, nondeletedSnapshotID, err
}
// GetSnapshotProperties calls the equivalent VSS api.
// The result is written into properties; the caller must release it
// again via vssFreeSnapshotProperties.
func (vss *IVssBackupComponents) GetSnapshotProperties(snapshotID ole.GUID,
	properties *VssSnapshotProperties) error {
	var result uintptr
	if runtime.GOARCH == "386" {
		// on 32-bit the GUID argument is passed by value as four stack words
		id := (*[4]uintptr)(unsafe.Pointer(&snapshotID))
		result, _, _ = syscall.Syscall6(vss.getVTable().getSnapshotProperties, 6,
			uintptr(unsafe.Pointer(vss)), id[0], id[1], id[2], id[3],
			uintptr(unsafe.Pointer(properties)))
	} else {
		result, _, _ = syscall.Syscall(vss.getVTable().getSnapshotProperties, 3,
			uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(&snapshotID)),
			uintptr(unsafe.Pointer(properties)))
	}
	return newVssErrorIfResultNotOK("GetSnapshotProperties() failed", HRESULT(result))
}

// vssFreeSnapshotProperties calls the equivalent VSS api to release the
// memory held by a VssSnapshotProperties structure. It only returns an
// error when the function could not be located in the VSS DLL.
func vssFreeSnapshotProperties(properties *VssSnapshotProperties) error {
	proc, err := findVssProc("VssFreeSnapshotProperties")
	if err != nil {
		return err
	}
	// this function always succeeds and returns no value
	_, _, _ = proc.Call(uintptr(unsafe.Pointer(properties)))
	return nil
}
// BackupComplete calls the equivalent VSS api.
// The returned IVSSAsync must be waited on to complete the operation.
func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) {
	var oleIUnknown *ole.IUnknown
	result, _, _ := syscall.Syscall(vss.getVTable().backupComplete, 2, uintptr(unsafe.Pointer(vss)),
		uintptr(unsafe.Pointer(&oleIUnknown)), 0)
	err := newVssErrorIfResultNotOK("BackupComplete() failed", HRESULT(result))
	return vss.convertToVSSAsync(oleIUnknown, err)
}
// VssSnapshotProperties defines the properties of a VSS snapshot as part of the VSS api.
// The field order and types must match the layout of the corresponding
// C structure, since instances are filled in directly by the api.
// nolint:structcheck
type VssSnapshotProperties struct {
	snapshotID           ole.GUID
	snapshotSetID        ole.GUID
	snapshotsCount       uint32
	snapshotDeviceObject *uint16
	originalVolumeName   *uint16
	originatingMachine   *uint16
	serviceMachine       *uint16
	exposedName          *uint16
	exposedPath          *uint16
	providerID           ole.GUID
	snapshotAttributes   uint32
	creationTimestamp    uint64
	status               uint
}

// VssProviderProperties defines the properties of a VSS provider as part of the VSS api.
// The field order and types must match the layout of the corresponding
// C structure, since instances are filled in directly by the api.
// nolint:structcheck
type VssProviderProperties struct {
	providerID        ole.GUID
	providerName      *uint16
	providerType      uint32
	providerVersion   *uint16
	providerVersionID ole.GUID
	classID           ole.GUID
}

// vssFreeProviderProperties releases the api-allocated strings inside p
// and clears the pointers so they cannot be freed twice.
func vssFreeProviderProperties(p *VssProviderProperties) {
	ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerName)))
	p.providerName = nil
	ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerVersion)))
	p.providerVersion = nil
}

// GetSnapshotDeviceObject returns root path to access the snapshot files
// and folders.
func (p *VssSnapshotProperties) GetSnapshotDeviceObject() string {
	return ole.UTF16PtrToString(p.snapshotDeviceObject)
}
// UIID_IVSS_ASYNC defines the GUID of IVSSAsync.
var UIID_IVSS_ASYNC = ole.NewGUID("{507C37B4-CF5B-4e95-B0AF-14EB9767467E}")

// IVSSAsync VSS api interface.
type IVSSAsync struct {
	ole.IUnknown
}

// IVSSAsyncVTable is the vtable for IVSSAsync.
// The field order must match the COM vtable layout of the interface.
type IVSSAsyncVTable struct {
	ole.IUnknownVtbl
	cancel      uintptr
	wait        uintptr
	queryStatus uintptr
}

// Constants for IVSSAsync api: possible states reported by QueryStatus.
const (
	VSS_S_ASYNC_PENDING   = 0x00042309
	VSS_S_ASYNC_FINISHED  = 0x0004230A
	VSS_S_ASYNC_CANCELLED = 0x0004230B
)

// getVTable returns the vtable for IVSSAsync.
func (vssAsync *IVSSAsync) getVTable() *IVSSAsyncVTable {
	return (*IVSSAsyncVTable)(unsafe.Pointer(vssAsync.RawVTable))
}
// Cancel calls the equivalent VSS api to abort the async operation.
func (vssAsync *IVSSAsync) Cancel() HRESULT {
	result, _, _ := syscall.Syscall(vssAsync.getVTable().cancel, 1,
		uintptr(unsafe.Pointer(vssAsync)), 0, 0)
	return HRESULT(result)
}

// Wait calls the equivalent VSS api, blocking for at most the given
// number of milliseconds.
func (vssAsync *IVSSAsync) Wait(millis uint32) HRESULT {
	result, _, _ := syscall.Syscall(vssAsync.getVTable().wait, 2, uintptr(unsafe.Pointer(vssAsync)),
		uintptr(millis), 0)
	return HRESULT(result)
}
// QueryStatus calls the equivalent VSS api.
// It returns the HRESULT of the call itself and the current state of
// the async operation (one of the VSS_S_ASYNC_* constants).
func (vssAsync *IVSSAsync) QueryStatus() (HRESULT, uint32) {
	// zero value is the correct initial state; no explicit initializer needed
	var state uint32
	result, _, _ := syscall.Syscall(vssAsync.getVTable().queryStatus, 3,
		uintptr(unsafe.Pointer(vssAsync)), uintptr(unsafe.Pointer(&state)), 0)
	return HRESULT(result), state
}
// WaitUntilAsyncFinished waits until either the async call is finished or
// the given timeout is reached. On any failure or non-finished state the
// pending operation is cancelled before returning an error.
func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(timeout time.Duration) error {
	// Wait() takes the timeout as uint32 milliseconds; clamp so the
	// conversion below cannot overflow
	const maxTimeout = math.MaxInt32 * time.Millisecond
	if timeout > maxTimeout {
		timeout = maxTimeout
	}
	hresult := vssAsync.Wait(uint32(timeout.Milliseconds()))
	err := newVssErrorIfResultNotOK("Wait() failed", hresult)
	if err != nil {
		vssAsync.Cancel()
		return err
	}
	// Wait() succeeded; inspect the final state of the operation
	hresult, state := vssAsync.QueryStatus()
	err = newVssErrorIfResultNotOK("QueryStatus() failed", hresult)
	if err != nil {
		vssAsync.Cancel()
		return err
	}
	if state == VSS_S_ASYNC_CANCELLED {
		return newVssTextError("async operation cancelled")
	}
	// still pending after a successful Wait() means the timeout expired
	if state == VSS_S_ASYNC_PENDING {
		vssAsync.Cancel()
		return newVssTextError("async operation pending")
	}
	if state != VSS_S_ASYNC_FINISHED {
		// any other state is treated as an HRESULT-style failure code
		err = newVssErrorIfResultNotOK("async operation failed", HRESULT(state))
		if err != nil {
			return err
		}
	}
	return nil
}
// UIID_IVSS_ADMIN defines the GUID of IVSSAdmin;
// CLSID_VSS_COORDINATOR identifies the VSS coordinator COM class.
var (
	UIID_IVSS_ADMIN       = ole.NewGUID("{77ED5996-2F63-11d3-8A39-00C04F72D8E3}")
	CLSID_VSS_COORDINATOR = ole.NewGUID("{E579AB5F-1CC4-44b4-BED9-DE0991FF0623}")
)

// IVSSAdmin VSS api interface.
type IVSSAdmin struct {
	ole.IUnknown
}

// IVSSAdminVTable is the vtable for IVSSAdmin.
// The field order must match the COM vtable layout of the interface.
// nolint:structcheck
type IVSSAdminVTable struct {
	ole.IUnknownVtbl
	registerProvider            uintptr
	unregisterProvider          uintptr
	queryProviders              uintptr
	abortAllSnapshotsInProgress uintptr
}

// getVTable returns the vtable for IVSSAdmin.
func (vssAdmin *IVSSAdmin) getVTable() *IVSSAdminVTable {
	return (*IVSSAdminVTable)(unsafe.Pointer(vssAdmin.RawVTable))
}

// QueryProviders calls the equivalent VSS api.
// The returned enumerator yields VssProviderProperties entries.
func (vssAdmin *IVSSAdmin) QueryProviders() (*IVssEnumObject, error) {
	var enum *IVssEnumObject
	result, _, _ := syscall.Syscall(vssAdmin.getVTable().queryProviders, 2,
		uintptr(unsafe.Pointer(vssAdmin)), uintptr(unsafe.Pointer(&enum)), 0)
	return enum, newVssErrorIfResultNotOK("QueryProviders() failed", HRESULT(result))
}
// IVssEnumObject VSS api interface.
type IVssEnumObject struct {
	ole.IUnknown
}

// IVssEnumObjectVTable is the vtable for IVssEnumObject.
// The field order must match the COM vtable layout of the interface.
// nolint:structcheck
type IVssEnumObjectVTable struct {
	ole.IUnknownVtbl
	next  uintptr
	skip  uintptr
	reset uintptr
	clone uintptr
}

// getVTable returns the vtable for IVssEnumObject.
func (vssEnum *IVssEnumObject) getVTable() *IVssEnumObjectVTable {
	return (*IVssEnumObjectVTable)(unsafe.Pointer(vssEnum.RawVTable))
}

// Next calls the equivalent VSS api.
// It fetches up to count items into props and returns the number
// actually fetched. S_FALSE signals end of enumeration and is not
// treated as an error.
func (vssEnum *IVssEnumObject) Next(count uint, props unsafe.Pointer) (uint, error) {
	var fetched uint32
	result, _, _ := syscall.Syscall6(vssEnum.getVTable().next, 4,
		uintptr(unsafe.Pointer(vssEnum)), uintptr(count), uintptr(props),
		uintptr(unsafe.Pointer(&fetched)), 0, 0)
	if HRESULT(result) == S_FALSE {
		return uint(fetched), nil
	}
	return uint(fetched), newVssErrorIfResultNotOK("Next() failed", HRESULT(result))
}
// MountPoint wraps all information of a snapshot of a mountpoint on a volume.
type MountPoint struct {
	isSnapshotted        bool                  // whether a snapshot was created successfully
	snapshotSetID        ole.GUID              // ID of the snapshot set this snapshot belongs to
	snapshotProperties   VssSnapshotProperties // raw properties returned by the api
	snapshotDeviceObject string                // root path for accessing the snapshot contents
}

// IsSnapshotted is true if this mount point was snapshotted successfully.
func (p *MountPoint) IsSnapshotted() bool {
	return p.isSnapshotted
}

// GetSnapshotDeviceObject returns root path to access the snapshot files and folders.
func (p *MountPoint) GetSnapshotDeviceObject() string {
	return p.snapshotDeviceObject
}

// VssSnapshot wraps windows volume shadow copy api (vss) via a simple
// interface to create and delete a vss snapshot.
type VssSnapshot struct {
	iVssBackupComponents *IVssBackupComponents // COM object used to manage the snapshot
	snapshotID           ole.GUID              // ID of the created snapshot
	snapshotProperties   VssSnapshotProperties // raw properties returned by the api
	snapshotDeviceObject string                // root path for accessing the snapshot contents
	mountPointInfo       map[string]MountPoint // snapshots of mountpoints below the volume
	timeout              time.Duration         // timeout for async VSS operations
}

// GetSnapshotDeviceObject returns root path to access the snapshot files
// and folders.
func (p *VssSnapshot) GetSnapshotDeviceObject() string {
	return p.snapshotDeviceObject
}
// initializeVssCOMInterface initializes an instance of the VSS COM api.
// It loads the IVssBackupComponents constructor, initializes COM and COM
// security, and returns the raw IUnknown of the created VSS instance.
// The caller is responsible for releasing the returned interface.
func initializeVssCOMInterface() (*ole.IUnknown, error) {
	vssInstance, err := loadIVssBackupComponentsConstructor()
	if err != nil {
		return nil, err
	}
	// ensure COM is initialized before use
	if err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
		// CoInitializeEx returns S_FALSE if COM is already initialized
		if oleErr, ok := err.(*ole.OleError); !ok || HRESULT(oleErr.Code()) != S_FALSE {
			return nil, err
		}
	}
	// initialize COM security for VSS, this can't be called more than once
	// Allowing all processes to perform incoming COM calls is not necessarily a security weakness.
	// A requester acting as a COM server, like all other COM servers, always retains the option to authorize its clients on every COM method implemented in its process.
	//
	// Note that internal COM callbacks implemented by VSS are secured by default.
	// Reference: https://learn.microsoft.com/en-us/windows/win32/vss/security-considerations-for-requestors#:~:text=Allowing%20all%20processes,secured%20by%20default.
	if err = ole.CoInitializeSecurity(
		-1,   // Default COM authentication service
		6,    // RPC_C_AUTHN_LEVEL_PKT_PRIVACY
		3,    // RPC_C_IMP_LEVEL_IMPERSONATE
		0x20, // EOAC_STATIC_CLOAKING
	); err != nil {
		// TODO warn for expected event logs for VSS IVssWriterCallback failure
		return nil, newVssError(
			"Failed to initialize security for VSS request",
			HRESULT(err.(*ole.OleError).Code()))
	}
	// create the actual VSS backup components instance
	var oleIUnknown *ole.IUnknown
	result, _, _ := vssInstance.Call(uintptr(unsafe.Pointer(&oleIUnknown)))
	hresult := HRESULT(result)
	switch hresult {
	case S_OK:
	case E_ACCESSDENIED:
		return oleIUnknown, newVssError(
			"The caller does not have sufficient backup privileges or is not an administrator",
			hresult)
	default:
		return oleIUnknown, newVssError("Failed to create VSS instance", hresult)
	}
	if oleIUnknown == nil {
		return nil, newVssError("Failed to initialize COM interface", hresult)
	}
	return oleIUnknown, nil
}
// HasSufficientPrivilegesForVSS returns nil if the user is allowed to use VSS.
// It probes by creating and immediately releasing a VSS COM instance.
func HasSufficientPrivilegesForVSS() error {
	oleIUnknown, err := initializeVssCOMInterface()
	if oleIUnknown != nil {
		oleIUnknown.Release()
	}
	return err
}
// getVolumeNameForVolumeMountPoint add trailing backslash to input parameter
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | true |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/interface.go | internal/fs/interface.go | package fs
import (
"io"
"github.com/restic/restic/internal/data"
)
// FS bundles all methods needed for a file system.
type FS interface {
	// OpenFile opens a file or directory for reading.
	//
	// If metadataOnly is set, an implementation MUST return a File object for
	// arbitrary file types including symlinks. The implementation may internally use
	// the given file path or a file handle. In particular, an implementation may
	// delay actually accessing the underlying filesystem.
	//
	// Only the O_NOFOLLOW and O_DIRECTORY flags are supported.
	OpenFile(name string, flag int, metadataOnly bool) (File, error)
	// Lstat returns the extended file info for the named file without
	// following symlinks.
	Lstat(name string) (*ExtendedFileInfo, error)
	// The following methods mirror the corresponding functions of the
	// path/filepath package for the path convention used by this
	// filesystem implementation.
	Join(elem ...string) string
	Separator() string
	Abs(path string) (string, error)
	Clean(path string) string
	VolumeName(path string) string
	IsAbs(path string) bool
	Dir(path string) string
	Base(path string) string
}
// File is an open file on a file system. When opened as metadataOnly, an
// implementation may opt to perform filesystem operations using the filepath
// instead of actually opening the file.
type File interface {
	// MakeReadable reopens a File that was opened metadataOnly for reading.
	// The method must not be called for files that are opened for reading.
	// If possible, the underlying file should be reopened atomically.
	// MakeReadable must work for files and directories.
	MakeReadable() error

	io.Reader
	io.Closer

	// Readdirnames returns up to n names of entries in the directory,
	// mirroring the semantics of os.File.Readdirnames.
	Readdirnames(n int) ([]string, error)
	// Stat returns the extended file info of the open file.
	Stat() (*ExtendedFileInfo, error)
	// ToNode returns a data.Node for the File. The internally used os.FileInfo
	// must be consistent with that returned by Stat(). In particular, the metadata
	// returned by consecutive calls to Stat() and ToNode() must match.
	ToNode(ignoreXattrListError bool, warnf func(format string, args ...any)) (*data.Node, error)
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/fs_reader_command_test.go | internal/fs/fs_reader_command_test.go | package fs_test
import (
"bytes"
"context"
"io"
"strings"
"testing"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/test"
)
// TestCommandReaderSuccess verifies that draining the output of a
// command that exits with status 0 completes without error.
func TestCommandReaderSuccess(t *testing.T) {
	noopWarn := func(msg string, args ...interface{}) {}
	rd, err := fs.NewCommandReader(context.TODO(), []string{"true"}, noopWarn)
	test.OK(t, err)

	_, err = io.Copy(io.Discard, rd)
	test.OK(t, err)
	test.OK(t, rd.Close())
}
// TestCommandReaderFail verifies that draining the output of a command
// that exits with a nonzero status reports an error.
func TestCommandReaderFail(t *testing.T) {
	reader, err := fs.NewCommandReader(context.TODO(), []string{"false"}, func(msg string, args ...interface{}) {})
	test.OK(t, err)
	_, err = io.Copy(io.Discard, reader)
	test.Assert(t, err != nil, "missing error")
}

// TestCommandReaderInvalid verifies that starting a nonexistent command
// fails immediately.
func TestCommandReaderInvalid(t *testing.T) {
	_, err := fs.NewCommandReader(context.TODO(), []string{"w54fy098hj7fy5twijouytfrj098y645wr"}, func(msg string, args ...interface{}) {})
	test.Assert(t, err != nil, "missing error")
}

// TestCommandReaderEmptyArgs verifies that an empty command line is rejected.
func TestCommandReaderEmptyArgs(t *testing.T) {
	_, err := fs.NewCommandReader(context.TODO(), []string{}, func(msg string, args ...interface{}) {})
	test.Assert(t, err != nil, "missing error")
}
// TestCommandReaderOutput verifies that the reader passes the command's
// stdout through unmodified.
func TestCommandReaderOutput(t *testing.T) {
	rd, err := fs.NewCommandReader(context.TODO(), []string{"echo", "hello world"}, func(msg string, args ...interface{}) {})
	test.OK(t, err)

	var out bytes.Buffer
	_, err = io.Copy(&out, rd)
	test.OK(t, err)
	test.OK(t, rd.Close())

	test.Equals(t, "hello world", strings.TrimSpace(out.String()))
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
restic/restic | https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/fs/node_unix_test.go | internal/fs/node_unix_test.go | //go:build !windows
package fs
import (
"io/fs"
"os"
"os/user"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
"github.com/restic/restic/internal/data"
"github.com/restic/restic/internal/errors"
rtest "github.com/restic/restic/internal/test"
)
// stat wraps os.Lstat: it returns ok == false when the file does not
// exist, fails the test on any other error, and otherwise returns the
// FileInfo together with ok == true.
func stat(t testing.TB, filename string) (fi os.FileInfo, ok bool) {
	info, err := os.Lstat(filename)
	switch {
	case err == nil:
		return info, true
	case os.IsNotExist(err):
		return info, false
	default:
		t.Fatal(err)
		return info, false // unreachable, Fatal stops the test
	}
}
// checkFile compares the metadata stored in node against the raw
// syscall.Stat_t of the file it was created from. Size is not compared
// for symlinks, since the node stores the target length differently.
func checkFile(t testing.TB, fi fs.FileInfo, node *data.Node) {
	t.Helper()
	stat := fi.Sys().(*syscall.Stat_t)
	if uint32(node.Mode.Perm()) != uint32(stat.Mode&0777) {
		t.Errorf("Mode does not match, want %v, got %v", stat.Mode&0777, node.Mode)
	}
	if node.Inode != uint64(stat.Ino) {
		t.Errorf("Inode does not match, want %v, got %v", stat.Ino, node.Inode)
	}
	if node.DeviceID != uint64(stat.Dev) {
		t.Errorf("Dev does not match, want %v, got %v", stat.Dev, node.DeviceID)
	}
	if node.Size != uint64(stat.Size) && node.Type != data.NodeTypeSymlink {
		t.Errorf("Size does not match, want %v, got %v", stat.Size, node.Size)
	}
	if node.Links != uint64(stat.Nlink) {
		t.Errorf("Links does not match, want %v, got %v", stat.Nlink, node.Links)
	}
	if node.UID != stat.Uid {
		t.Errorf("UID does not match, want %v, got %v", stat.Uid, node.UID)
	}
	if node.GID != stat.Gid {
		// NOTE(review): message says "UID" but this compares the GID
		t.Errorf("UID does not match, want %v, got %v", stat.Gid, node.GID)
	}
	// use the os dependent function to compare the timestamps
	s := ExtendedStat(fi)
	if node.ModTime != s.ModTime {
		t.Errorf("ModTime does not match, want %v, got %v", s.ModTime, node.ModTime)
	}
	if node.ChangeTime != s.ChangeTime {
		t.Errorf("ChangeTime does not match, want %v, got %v", s.ChangeTime, node.ChangeTime)
	}
	if node.AccessTime != s.AccessTime {
		t.Errorf("AccessTime does not match, want %v, got %v", s.AccessTime, node.AccessTime)
	}
}

// checkDevice compares the device number stored in node against the raw
// syscall.Stat_t; only meaningful for device nodes.
func checkDevice(t testing.TB, fi fs.FileInfo, node *data.Node) {
	stat := fi.Sys().(*syscall.Stat_t)
	if node.Device != uint64(stat.Rdev) {
		t.Errorf("Rdev does not match, want %v, got %v", stat.Rdev, node.Device)
	}
}
// TestNodeFromFileInfo checks that nodes built via Local.OpenFile/ToNode
// agree with the metadata returned by os.Lstat for regular files,
// symlinks and (when present) device nodes.
func TestNodeFromFileInfo(t *testing.T) {
	tmp := t.TempDir()
	symlink := filepath.Join(tmp, "symlink")
	rtest.OK(t, os.Symlink("target", symlink))
	type Test struct {
		filename string
		canSkip  bool // true for files that may not exist on the test machine
	}
	var tests = []Test{
		{"node_test.go", false},
		{"/dev/sda", true},
		{symlink, false},
	}
	// on darwin, users are not permitted to list the extended attributes of
	// /dev/null, therefore skip it.
	// on solaris, /dev/null is a symlink to a device node in /devices
	// which does not support extended attributes, therefore skip it.
	if runtime.GOOS != "darwin" && runtime.GOOS != "solaris" {
		tests = append(tests, Test{"/dev/null", true})
	}
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			fi, found := stat(t, test.filename)
			if !found && test.canSkip {
				t.Skipf("%v not found in filesystem", test.filename)
				return
			}
			// build a node for the same file via the Local filesystem
			fs := &Local{}
			meta, err := fs.OpenFile(test.filename, O_NOFOLLOW, true)
			rtest.OK(t, err)
			node, err := meta.ToNode(false, t.Logf)
			rtest.OK(t, err)
			rtest.OK(t, meta.Close())
			rtest.OK(t, err)
			switch node.Type {
			case data.NodeTypeFile, data.NodeTypeSymlink:
				checkFile(t, fi, node)
			case data.NodeTypeDev, data.NodeTypeCharDev:
				checkFile(t, fi, node)
				checkDevice(t, fi, node)
			default:
				t.Fatalf("invalid node type %q", node.Type)
			}
		})
	}
}
// TestMknodError checks that creating a fifo over an existing directory
// produces an ErrExist error that mentions the path.
func TestMknodError(t *testing.T) {
	d := t.TempDir()
	// Call mkfifo, which calls mknod, as mknod may give
	// "operation not permitted" on Mac.
	err := mkfifo(d, 0)
	rtest.Assert(t, errors.Is(err, os.ErrExist), "want ErrExist, got %q", err)
	rtest.Assert(t, strings.Contains(err.Error(), d), "filename not in %q", err)
}

// TestLchown checks that lchown works both with numeric IDs and with
// user/group names resolved for the current user.
func TestLchown(t *testing.T) {
	usr, err := user.Current()
	rtest.OK(t, err)
	uid, err := strconv.Atoi(usr.Uid)
	rtest.OK(t, err)
	gid, err := strconv.Atoi(usr.Gid)
	rtest.OK(t, err)

	// create a file owned by the current user to chown
	d := t.TempDir()
	f := d + "/test.txt"
	err = os.WriteFile(f, []byte(""), 0o700)
	rtest.OK(t, err)
	t.Run("by UID/GID", func(t *testing.T) {
		n := &data.Node{
			UID: uint32(uid),
			GID: uint32(gid),
		}
		err = lchown(f, n, false)
		rtest.OK(t, err)
	})
	t.Run("by user name and group name", func(t *testing.T) {
		group, err := user.LookupGroupId(strconv.Itoa(gid))
		rtest.OK(t, err)
		n := &data.Node{
			User:  usr.Username,
			Group: group.Name,
		}
		err = lchown(f, n, true)
		rtest.OK(t, err)
	})
}
| go | BSD-2-Clause | 9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59 | 2026-01-07T08:36:32.238827Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.