repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/limiter/limiter.go
internal/backend/limiter/limiter.go
package limiter import ( "io" "net/http" ) // Limiter defines an interface that implementers can use to rate limit I/O // according to some policy defined and configured by the implementer. type Limiter interface { // Upstream returns a rate limited reader that is intended to be used in // uploads. Upstream(r io.Reader) io.Reader // UpstreamWriter returns a rate limited writer that is intended to be used // in uploads. UpstreamWriter(w io.Writer) io.Writer // Downstream returns a rate limited reader that is intended to be used // for downloads. Downstream(r io.Reader) io.Reader // DownstreamWriter returns a rate limited writer that is intended to be used // for downloads. DownstreamWriter(r io.Writer) io.Writer // Transport returns an http.RoundTripper limited with the limiter. Transport(http.RoundTripper) http.RoundTripper }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/limiter/limiter_backend_test.go
internal/backend/limiter/limiter_backend_test.go
package limiter import ( "bytes" "context" "crypto/rand" "fmt" "io" "testing" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/mock" rtest "github.com/restic/restic/internal/test" ) func randomBytes(t *testing.T, size int) []byte { data := make([]byte, size) _, err := io.ReadFull(rand.Reader, data) rtest.OK(t, err) return data } func TestLimitBackendSave(t *testing.T) { testHandle := backend.Handle{Type: backend.PackFile, Name: "test"} data := randomBytes(t, 1234) be := mock.NewBackend() be.SaveFn = func(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { buf := new(bytes.Buffer) _, err := io.Copy(buf, rd) if err != nil { return nil } if !bytes.Equal(data, buf.Bytes()) { return fmt.Errorf("data mismatch") } return nil } limiter := NewStaticLimiter(Limits{42 * 1024, 42 * 1024}) limbe := LimitBackend(be, limiter) rd := backend.NewByteReader(data, nil) err := limbe.Save(context.TODO(), testHandle, rd) rtest.OK(t, err) } type tracedReadWriteToCloser struct { io.Reader io.WriterTo Traced bool } func newTracedReadWriteToCloser(rd *bytes.Reader) *tracedReadWriteToCloser { return &tracedReadWriteToCloser{Reader: rd, WriterTo: rd} } func (r *tracedReadWriteToCloser) WriteTo(w io.Writer) (n int64, err error) { r.Traced = true return r.WriterTo.WriteTo(w) } func (r *tracedReadWriteToCloser) Close() error { return nil } func TestLimitBackendLoad(t *testing.T) { testHandle := backend.Handle{Type: backend.PackFile, Name: "test"} data := randomBytes(t, 1234) for _, test := range []struct { innerWriteTo, outerWriteTo bool }{{false, false}, {false, true}, {true, false}, {true, true}} { be := mock.NewBackend() src := newTracedReadWriteToCloser(bytes.NewReader(data)) be.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { if length != 0 || offset != 0 { return nil, fmt.Errorf("Not supported") } // test both code paths in WriteTo of limitedReadCloser if test.innerWriteTo 
{ return src, nil } return newTracedReadCloser(src), nil } limiter := NewStaticLimiter(Limits{42 * 1024, 42 * 1024}) limbe := LimitBackend(be, limiter) err := limbe.Load(context.TODO(), testHandle, 0, 0, func(rd io.Reader) error { dataRead := new(bytes.Buffer) // test both Read and WriteTo if !test.outerWriteTo { rd = newTracedReadCloser(rd) } _, err := io.Copy(dataRead, rd) if err != nil { return err } if !bytes.Equal(data, dataRead.Bytes()) { return fmt.Errorf("read broken data") } return nil }) rtest.OK(t, err) rtest.Assert(t, src.Traced == (test.innerWriteTo && test.outerWriteTo), "unexpected/missing writeTo call innerWriteTo %v outerWriteTo %v", test.innerWriteTo, test.outerWriteTo) } }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/limiter/static_limiter_test.go
internal/backend/limiter/static_limiter_test.go
package limiter import ( "bytes" "crypto/rand" "fmt" "io" "net/http" "testing" "github.com/restic/restic/internal/test" "golang.org/x/time/rate" ) func TestLimiterWrapping(t *testing.T) { reader := bytes.NewReader([]byte{}) writer := new(bytes.Buffer) for _, limits := range []Limits{ {0, 0}, {42, 0}, {0, 42}, {42, 42}, } { limiter := NewStaticLimiter(limits) mustWrapUpstream := limits.UploadKb > 0 test.Equals(t, limiter.Upstream(reader) != reader, mustWrapUpstream) test.Equals(t, limiter.UpstreamWriter(writer) != writer, mustWrapUpstream) mustWrapDownstream := limits.DownloadKb > 0 test.Equals(t, limiter.Downstream(reader) != reader, mustWrapDownstream) test.Equals(t, limiter.DownstreamWriter(writer) != writer, mustWrapDownstream) } } func TestReadLimiter(t *testing.T) { reader := bytes.NewReader(make([]byte, 300)) limiter := rate.NewLimiter(rate.Limit(10000), 100) limReader := rateLimitedReader{reader, limiter} n, err := limReader.Read([]byte{}) test.OK(t, err) test.Equals(t, n, 0) n, err = limReader.Read(make([]byte, 300)) test.OK(t, err) test.Equals(t, n, 300) n, err = limReader.Read([]byte{}) test.Equals(t, err, io.EOF) test.Equals(t, n, 0) } func TestWriteLimiter(t *testing.T) { writer := &bytes.Buffer{} limiter := rate.NewLimiter(rate.Limit(10000), 100) limReader := rateLimitedWriter{writer, limiter} n, err := limReader.Write([]byte{}) test.OK(t, err) test.Equals(t, n, 0) n, err = limReader.Write(make([]byte, 300)) test.OK(t, err) test.Equals(t, n, 300) } type tracedReadCloser struct { io.Reader Closed bool } func newTracedReadCloser(rd io.Reader) *tracedReadCloser { return &tracedReadCloser{Reader: rd} } func (r *tracedReadCloser) Close() error { r.Closed = true return nil } func TestRoundTripperReader(t *testing.T) { limiter := NewStaticLimiter(Limits{42 * 1024, 42 * 1024}) data := make([]byte, 1234) _, err := io.ReadFull(rand.Reader, data) test.OK(t, err) send := newTracedReadCloser(bytes.NewReader(data)) var recv *tracedReadCloser rt := 
limiter.Transport(roundTripper(func(req *http.Request) (*http.Response, error) { buf := new(bytes.Buffer) _, err := io.Copy(buf, req.Body) if err != nil { return nil, err } err = req.Body.Close() if err != nil { return nil, err } recv = newTracedReadCloser(bytes.NewReader(buf.Bytes())) return &http.Response{Body: recv}, nil })) res, err := rt.RoundTrip(&http.Request{Body: send}) test.OK(t, err) out := new(bytes.Buffer) n, err := io.Copy(out, res.Body) test.OK(t, err) test.Equals(t, int64(len(data)), n) test.OK(t, res.Body.Close()) test.Assert(t, send.Closed, "request body not closed") test.Assert(t, recv.Closed, "result body not closed") test.Assert(t, bytes.Equal(data, out.Bytes()), "data ping-pong failed") } //nolint:bodyclose // the http response is just a mock func TestRoundTripperCornerCases(t *testing.T) { limiter := NewStaticLimiter(Limits{42 * 1024, 42 * 1024}) rt := limiter.Transport(roundTripper(func(req *http.Request) (*http.Response, error) { return &http.Response{}, nil })) res, err := rt.RoundTrip(&http.Request{}) test.OK(t, err) test.Assert(t, res != nil, "round tripper returned no response") rt = limiter.Transport(roundTripper(func(req *http.Request) (*http.Response, error) { return nil, fmt.Errorf("error") })) _, err = rt.RoundTrip(&http.Request{}) test.Assert(t, err != nil, "round tripper lost an error") }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/limiter/limiter_backend.go
internal/backend/limiter/limiter_backend.go
package limiter import ( "context" "io" "github.com/restic/restic/internal/backend" ) func WrapBackendConstructor[B backend.Backend, C any](constructor func(ctx context.Context, cfg C, errorLog func(string, ...interface{})) (B, error)) func(ctx context.Context, cfg C, lim Limiter, errorLog func(string, ...interface{})) (backend.Backend, error) { return func(ctx context.Context, cfg C, lim Limiter, errorLog func(string, ...interface{})) (backend.Backend, error) { var be backend.Backend be, err := constructor(ctx, cfg, errorLog) if err != nil { return nil, err } if lim != nil { be = LimitBackend(be, lim) } return be, nil } } // LimitBackend wraps a Backend and applies rate limiting to Load() and Save() // calls on the backend. func LimitBackend(be backend.Backend, l Limiter) backend.Backend { return rateLimitedBackend{ Backend: be, limiter: l, } } type rateLimitedBackend struct { backend.Backend limiter Limiter } func (r rateLimitedBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { limited := limitedRewindReader{ RewindReader: rd, limited: r.limiter.Upstream(rd), } return r.Backend.Save(ctx, h, limited) } type limitedRewindReader struct { backend.RewindReader limited io.Reader } func (l limitedRewindReader) Read(b []byte) (int, error) { return l.limited.Read(b) } func (r rateLimitedBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) error { return r.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error { return consumer(newDownstreamLimitedReader(rd, r.limiter)) }) } func (r rateLimitedBackend) Unwrap() backend.Backend { return r.Backend } type limitedReader struct { io.Reader writerTo io.WriterTo limiter Limiter } func newDownstreamLimitedReader(rd io.Reader, limiter Limiter) io.Reader { lrd := limiter.Downstream(rd) if wt, ok := rd.(io.WriterTo); ok { lrd = &limitedReader{ Reader: lrd, writerTo: wt, limiter: limiter, } } return lrd } func (l *limitedReader) 
WriteTo(w io.Writer) (int64, error) { return l.writerTo.WriteTo(l.limiter.DownstreamWriter(w)) } var _ backend.Backend = (*rateLimitedBackend)(nil)
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/dryrun/dry_backend_test.go
internal/backend/dryrun/dry_backend_test.go
package dryrun_test import ( "context" "fmt" "io" "sort" "strings" "testing" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/dryrun" "github.com/restic/restic/internal/backend/mem" ) // make sure that Backend implements backend.Backend var _ backend.Backend = &dryrun.Backend{} func newBackends() (*dryrun.Backend, backend.Backend) { m := mem.New() return dryrun.New(m), m } func TestDry(t *testing.T) { ctx := context.TODO() d, m := newBackends() // Since the dry backend is a mostly write-only overlay, the standard backend test suite // won't pass. Instead, perform a series of operations over the backend, testing the state // at each step. steps := []struct { be backend.Backend op string fname string content string wantErr string }{ {d, "delete", "", "", ""}, {d, "stat", "a", "", "not found"}, {d, "list", "", "", ""}, {m, "save", "a", "baz", ""}, // save a directly to the mem backend {d, "save", "b", "foob", ""}, // b is not saved {d, "save", "b", "xxx", ""}, // no error as b is not saved {d, "stat", "a", "a 3", ""}, {d, "load", "a", "baz", ""}, {d, "load", "b", "", "not found"}, {d, "list", "", "a", ""}, {d, "remove", "c", "", ""}, {d, "stat", "b", "", "not found"}, {d, "list", "", "a", ""}, {d, "remove", "a", "", ""}, // a is in fact not removed {d, "list", "", "a", ""}, {m, "remove", "a", "", ""}, // remove a from the mem backend {d, "list", "", "", ""}, {d, "close", "", "", ""}, {d, "close", "", "", ""}, } for i, step := range steps { var err error handle := backend.Handle{Type: backend.PackFile, Name: step.fname} switch step.op { case "save": err = step.be.Save(ctx, handle, backend.NewByteReader([]byte(step.content), step.be.Hasher())) case "list": fileList := []string{} err = step.be.List(ctx, backend.PackFile, func(fi backend.FileInfo) error { fileList = append(fileList, fi.Name) return nil }) sort.Strings(fileList) files := strings.Join(fileList, " ") if files != step.content { t.Errorf("%d. 
List = %q, want %q", i, files, step.content) } case "delete": err = step.be.Delete(ctx) case "remove": err = step.be.Remove(ctx, handle) case "stat": var fi backend.FileInfo fi, err = step.be.Stat(ctx, handle) if err == nil { fis := fmt.Sprintf("%s %d", fi.Name, fi.Size) if fis != step.content { t.Errorf("%d. Stat = %q, want %q", i, fis, step.content) } } case "load": data := "" err = step.be.Load(ctx, handle, 0, 0, func(rd io.Reader) error { buf, err := io.ReadAll(rd) data = string(buf) return err }) if data != step.content { t.Errorf("%d. Load = %q, want %q", i, data, step.content) } case "close": err = step.be.Close() default: t.Fatalf("%d. unknown step operation %q", i, step.op) } if step.wantErr != "" { if err == nil { t.Errorf("%d. %s error = nil, want %q", i, step.op, step.wantErr) } else if !strings.Contains(err.Error(), step.wantErr) { t.Errorf("%d. %s error = %q, doesn't contain %q", i, step.op, err, step.wantErr) } else if step.wantErr == "not found" && !step.be.IsNotExist(err) { t.Errorf("%d. IsNotExist(%s error) = false, want true", i, step.op) } } else if err != nil { t.Errorf("%d. %s error = %q, want nil", i, step.op, err) } } }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/dryrun/dry_backend.go
internal/backend/dryrun/dry_backend.go
package dryrun import ( "context" "hash" "io" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" ) // Backend passes reads through to an underlying layer and accepts writes, but // doesn't do anything. Also removes are ignored. // So in fact, this backend silently ignores all operations that would modify // the repo and does normal operations else. // This is used for `backup --dry-run`. type Backend struct { b backend.Backend } // statically ensure that Backend implements backend.Backend. var _ backend.Backend = &Backend{} func New(be backend.Backend) *Backend { b := &Backend{b: be} debug.Log("created new dry backend") return b } // Save adds new Data to the backend. func (be *Backend) Save(_ context.Context, h backend.Handle, _ backend.RewindReader) error { if err := h.Valid(); err != nil { return err } // don't save anything, just return ok return nil } // Remove deletes a file from the backend. func (be *Backend) Remove(_ context.Context, _ backend.Handle) error { return nil } func (be *Backend) Properties() backend.Properties { return be.b.Properties() } // Delete removes all data in the backend. func (be *Backend) Delete(_ context.Context) error { return nil } func (be *Backend) Close() error { return be.b.Close() } func (be *Backend) Hasher() hash.Hash { return be.b.Hasher() } func (be *Backend) IsNotExist(err error) bool { return be.b.IsNotExist(err) } func (be *Backend) IsPermanentError(err error) bool { return be.b.IsPermanentError(err) } func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error { return be.b.List(ctx, t, fn) } func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(io.Reader) error) error { return be.b.Load(ctx, h, length, offset, fn) } func (be *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) { return be.b.Stat(ctx, h) } // Warmup should not occur during dry-runs. 
func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) { return []backend.Handle{}, nil } func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/layout/layout_default.go
internal/backend/layout/layout_default.go
package layout import ( "encoding/hex" "github.com/restic/restic/internal/backend" ) // DefaultLayout implements the default layout for local and sftp backends, as // described in the Design document. The `data` directory has one level of // subdirs, two characters each (taken from the first two characters of the // file name). type DefaultLayout struct { path string join func(...string) string } var defaultLayoutPaths = map[backend.FileType]string{ backend.PackFile: "data", backend.SnapshotFile: "snapshots", backend.IndexFile: "index", backend.LockFile: "locks", backend.KeyFile: "keys", } func NewDefaultLayout(path string, join func(...string) string) *DefaultLayout { return &DefaultLayout{ path: path, join: join, } } func (l *DefaultLayout) String() string { return "<DefaultLayout>" } // Name returns the name for this layout. func (l *DefaultLayout) Name() string { return "default" } // Dirname returns the directory path for a given file type and name. func (l *DefaultLayout) Dirname(h backend.Handle) string { p := defaultLayoutPaths[h.Type] if h.Type == backend.PackFile && len(h.Name) > 2 { p = l.join(p, h.Name[:2]) + "/" } return l.join(l.path, p) + "/" } // Filename returns a path to a file, including its name. func (l *DefaultLayout) Filename(h backend.Handle) string { name := h.Name if h.Type == backend.ConfigFile { return l.join(l.path, "config") } return l.join(l.Dirname(h), name) } // Paths returns all directory names needed for a repo. func (l *DefaultLayout) Paths() (dirs []string) { for _, p := range defaultLayoutPaths { dirs = append(dirs, l.join(l.path, p)) } // also add subdirs for i := 0; i < 256; i++ { subdir := hex.EncodeToString([]byte{byte(i)}) dirs = append(dirs, l.join(l.path, defaultLayoutPaths[backend.PackFile], subdir)) } return dirs } // Basedir returns the base dir name for type t. 
func (l *DefaultLayout) Basedir(t backend.FileType) (dirname string, subdirs bool) { if t == backend.PackFile { subdirs = true } dirname = l.join(l.path, defaultLayoutPaths[t]) return }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/layout/layout_rest.go
internal/backend/layout/layout_rest.go
package layout import ( "path" "github.com/restic/restic/internal/backend" ) // RESTLayout implements the default layout for the REST protocol. type RESTLayout struct { url string } var restLayoutPaths = defaultLayoutPaths func NewRESTLayout(url string) *RESTLayout { return &RESTLayout{ url: url, } } func (l *RESTLayout) String() string { return "<RESTLayout>" } // Name returns the name for this layout. func (l *RESTLayout) Name() string { return "rest" } // Dirname returns the directory path for a given file type and name. func (l *RESTLayout) Dirname(h backend.Handle) string { if h.Type == backend.ConfigFile { return l.url + "/" } return l.url + path.Join("/", restLayoutPaths[h.Type]) + "/" } // Filename returns a path to a file, including its name. func (l *RESTLayout) Filename(h backend.Handle) string { name := h.Name if h.Type == backend.ConfigFile { name = "config" } return l.url + path.Join("/", restLayoutPaths[h.Type], name) } // Paths returns all directory names func (l *RESTLayout) Paths() (dirs []string) { for _, p := range restLayoutPaths { dirs = append(dirs, l.url+path.Join("/", p)) } return dirs } // Basedir returns the base dir name for files of type t. func (l *RESTLayout) Basedir(t backend.FileType) (dirname string, subdirs bool) { return l.url + path.Join("/", restLayoutPaths[t]), false }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/layout/layout.go
internal/backend/layout/layout.go
package layout import ( "github.com/restic/restic/internal/backend" ) // Layout computes paths for file name storage. type Layout interface { Filename(backend.Handle) string Dirname(backend.Handle) string Basedir(backend.FileType) (dir string, subdirs bool) Paths() []string Name() string }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/layout/layout_test.go
internal/backend/layout/layout_test.go
package layout import ( "fmt" "path" "path/filepath" "reflect" "sort" "strings" "testing" "github.com/restic/restic/internal/backend" rtest "github.com/restic/restic/internal/test" ) func TestDefaultLayout(t *testing.T) { tempdir := rtest.TempDir(t) var tests = []struct { path string join func(...string) string backend.Handle filename string }{ { tempdir, filepath.Join, backend.Handle{Type: backend.PackFile, Name: "0123456"}, filepath.Join(tempdir, "data", "01", "0123456"), }, { tempdir, filepath.Join, backend.Handle{Type: backend.ConfigFile, Name: "CFG"}, filepath.Join(tempdir, "config"), }, { tempdir, filepath.Join, backend.Handle{Type: backend.SnapshotFile, Name: "123456"}, filepath.Join(tempdir, "snapshots", "123456"), }, { tempdir, filepath.Join, backend.Handle{Type: backend.IndexFile, Name: "123456"}, filepath.Join(tempdir, "index", "123456"), }, { tempdir, filepath.Join, backend.Handle{Type: backend.LockFile, Name: "123456"}, filepath.Join(tempdir, "locks", "123456"), }, { tempdir, filepath.Join, backend.Handle{Type: backend.KeyFile, Name: "123456"}, filepath.Join(tempdir, "keys", "123456"), }, { "", path.Join, backend.Handle{Type: backend.PackFile, Name: "0123456"}, "data/01/0123456", }, { "", path.Join, backend.Handle{Type: backend.ConfigFile, Name: "CFG"}, "config", }, { "", path.Join, backend.Handle{Type: backend.SnapshotFile, Name: "123456"}, "snapshots/123456", }, { "", path.Join, backend.Handle{Type: backend.IndexFile, Name: "123456"}, "index/123456", }, { "", path.Join, backend.Handle{Type: backend.LockFile, Name: "123456"}, "locks/123456", }, { "", path.Join, backend.Handle{Type: backend.KeyFile, Name: "123456"}, "keys/123456", }, } t.Run("Paths", func(t *testing.T) { l := &DefaultLayout{ path: tempdir, join: filepath.Join, } dirs := l.Paths() want := []string{ filepath.Join(tempdir, "data"), filepath.Join(tempdir, "snapshots"), filepath.Join(tempdir, "index"), filepath.Join(tempdir, "locks"), filepath.Join(tempdir, "keys"), } for i := 0; i < 256; 
i++ { want = append(want, filepath.Join(tempdir, "data", fmt.Sprintf("%02x", i))) } sort.Strings(want) sort.Strings(dirs) if !reflect.DeepEqual(dirs, want) { t.Fatalf("wrong paths returned, want:\n %v\ngot:\n %v", want, dirs) } }) for _, test := range tests { t.Run(fmt.Sprintf("%v/%v", test.Type, test.Handle.Name), func(t *testing.T) { l := &DefaultLayout{ path: test.path, join: test.join, } filename := l.Filename(test.Handle) if filename != test.filename { t.Fatalf("wrong filename, want %v, got %v", test.filename, filename) } }) } } func TestRESTLayout(t *testing.T) { url := `https://hostname.foo` var tests = []struct { backend.Handle filename string }{ { backend.Handle{Type: backend.PackFile, Name: "0123456"}, strings.Join([]string{url, "data", "0123456"}, "/"), }, { backend.Handle{Type: backend.ConfigFile, Name: "CFG"}, strings.Join([]string{url, "config"}, "/"), }, { backend.Handle{Type: backend.SnapshotFile, Name: "123456"}, strings.Join([]string{url, "snapshots", "123456"}, "/"), }, { backend.Handle{Type: backend.IndexFile, Name: "123456"}, strings.Join([]string{url, "index", "123456"}, "/"), }, { backend.Handle{Type: backend.LockFile, Name: "123456"}, strings.Join([]string{url, "locks", "123456"}, "/"), }, { backend.Handle{Type: backend.KeyFile, Name: "123456"}, strings.Join([]string{url, "keys", "123456"}, "/"), }, } l := &RESTLayout{ url: url, } t.Run("Paths", func(t *testing.T) { dirs := l.Paths() want := []string{ strings.Join([]string{url, "data"}, "/"), strings.Join([]string{url, "snapshots"}, "/"), strings.Join([]string{url, "index"}, "/"), strings.Join([]string{url, "locks"}, "/"), strings.Join([]string{url, "keys"}, "/"), } sort.Strings(want) sort.Strings(dirs) if !reflect.DeepEqual(dirs, want) { t.Fatalf("wrong paths returned, want:\n %v\ngot:\n %v", want, dirs) } }) for _, test := range tests { t.Run(fmt.Sprintf("%v/%v", test.Type, test.Handle.Name), func(t *testing.T) { filename := l.Filename(test.Handle) if filename != test.filename { 
t.Fatalf("wrong filename, want %v, got %v", test.filename, filename) } }) } } func TestRESTLayoutURLs(t *testing.T) { var tests = []struct { l Layout h backend.Handle fn string dir string }{ { &RESTLayout{url: "https://hostname.foo"}, backend.Handle{Type: backend.PackFile, Name: "foobar"}, "https://hostname.foo/data/foobar", "https://hostname.foo/data/", }, { &RESTLayout{url: "https://hostname.foo:1234/prefix/repo"}, backend.Handle{Type: backend.LockFile, Name: "foobar"}, "https://hostname.foo:1234/prefix/repo/locks/foobar", "https://hostname.foo:1234/prefix/repo/locks/", }, { &RESTLayout{url: "https://hostname.foo:1234/prefix/repo"}, backend.Handle{Type: backend.ConfigFile, Name: "foobar"}, "https://hostname.foo:1234/prefix/repo/config", "https://hostname.foo:1234/prefix/repo/", }, } for _, test := range tests { t.Run(fmt.Sprintf("%T", test.l), func(t *testing.T) { fn := test.l.Filename(test.h) if fn != test.fn { t.Fatalf("wrong filename, want %v, got %v", test.fn, fn) } dir := test.l.Dirname(test.h) if dir != test.dir { t.Fatalf("wrong dirname, want %v, got %v", test.dir, dir) } }) } }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/b2/b2.go
internal/backend/b2/b2.go
package b2 import ( "context" "fmt" "hash" "io" "net/http" "path" "sync" "time" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/layout" "github.com/restic/restic/internal/backend/location" "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "github.com/Backblaze/blazer/b2" "github.com/Backblaze/blazer/base" ) // b2Backend is a backend which stores its data on Backblaze B2. type b2Backend struct { client *b2.Client bucket *b2.Bucket cfg Config listMaxItems int layout.Layout canDelete bool } var errTooShort = fmt.Errorf("file is too short") // Billing happens in 1000 item granularity, but we are more interested in reducing the number of network round trips const defaultListMaxItems = 10 * 1000 // ensure statically that *b2Backend implements backend.Backend. var _ backend.Backend = &b2Backend{} func NewFactory() location.Factory { return location.NewHTTPBackendFactory("b2", ParseConfig, location.NoPassword, Create, Open) } type sniffingRoundTripper struct { sync.Mutex lastErr error http.RoundTripper } func (s *sniffingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { res, err := s.RoundTripper.RoundTrip(req) if err != nil { s.Lock() s.lastErr = err s.Unlock() } return res, err } func newClient(ctx context.Context, cfg Config, rt http.RoundTripper) (*b2.Client, error) { if cfg.AccountID == "" { return nil, errors.Fatalf("unable to open B2 backend: Account ID ($B2_ACCOUNT_ID) is empty") } if cfg.Key.String() == "" { return nil, errors.Fatalf("unable to open B2 backend: Key ($B2_ACCOUNT_KEY) is empty") } sniffer := &sniffingRoundTripper{RoundTripper: rt} opts := []b2.ClientOption{b2.Transport(sniffer)} // if the connection B2 fails, this can cause the client to hang // cancel the connection after a minute to at least provide some feedback to the user ctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() c, err := 
b2.NewClient(ctx, cfg.AccountID, cfg.Key.Unwrap(), opts...) if err == context.DeadlineExceeded { if sniffer.lastErr != nil { return nil, sniffer.lastErr } return nil, errors.New("connection to B2 failed") } else if err != nil { return nil, errors.Wrap(err, "b2.NewClient") } return c, nil } // Open opens a connection to the B2 service. func Open(ctx context.Context, cfg Config, rt http.RoundTripper, _ func(string, ...interface{})) (backend.Backend, error) { debug.Log("cfg %#v", cfg) ctx, cancel := context.WithCancel(ctx) defer cancel() client, err := newClient(ctx, cfg, rt) if err != nil { return nil, err } bucket, err := client.Bucket(ctx, cfg.Bucket) if b2.IsNotExist(err) { return nil, backend.ErrNoRepository } else if err != nil { return nil, errors.Wrap(err, "Bucket") } be := &b2Backend{ client: client, bucket: bucket, cfg: cfg, Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), listMaxItems: defaultListMaxItems, canDelete: true, } return be, nil } // Create opens a connection to the B2 service. If the bucket does not exist yet, // it is created. func Create(ctx context.Context, cfg Config, rt http.RoundTripper, _ func(string, ...interface{})) (backend.Backend, error) { debug.Log("cfg %#v", cfg) ctx, cancel := context.WithCancel(ctx) defer cancel() client, err := newClient(ctx, cfg, rt) if err != nil { return nil, err } attr := b2.BucketAttrs{ Type: b2.Private, } bucket, err := client.NewBucket(ctx, cfg.Bucket, &attr) if err != nil { return nil, errors.Wrap(err, "NewBucket") } be := &b2Backend{ client: client, bucket: bucket, cfg: cfg, Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), listMaxItems: defaultListMaxItems, } return be, nil } // SetListMaxItems sets the number of list items to load per request. 
func (be *b2Backend) SetListMaxItems(i int) { be.listMaxItems = i } func (be *b2Backend) Properties() backend.Properties { return backend.Properties{ Connections: be.cfg.Connections, HasAtomicReplace: true, } } // Hasher may return a hash function for calculating a content hash for the backend func (be *b2Backend) Hasher() hash.Hash { return nil } // IsNotExist returns true if the error is caused by a non-existing file. func (be *b2Backend) IsNotExist(err error) bool { // blazer/b2 does not export its error types and values, // so we can't use errors.{As,Is}. for ; err != nil; err = errors.Unwrap(err) { if b2.IsNotExist(err) { return true } } return false } func (be *b2Backend) IsPermanentError(err error) bool { // the library unfortunately endlessly retries authentication errors return be.IsNotExist(err) || errors.Is(err, errTooShort) } // Load runs fn with a reader that yields the contents of the file at h at the // given offset. func (be *b2Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { ctx, cancel := context.WithCancel(ctx) defer cancel() return util.DefaultLoad(ctx, h, length, offset, be.openReader, func(rd io.Reader) error { if length == 0 { return fn(rd) } // there is no direct way to efficiently check whether the file is too short // use a LimitedReader to track the number of bytes read limrd := &io.LimitedReader{R: rd, N: int64(length)} err := fn(limrd) // check the underlying reader to be agnostic to however fn() handles the returned error _, rderr := rd.Read([]byte{0}) if rderr == io.EOF && limrd.N != 0 { // file is too short return fmt.Errorf("%w: %v", errTooShort, err) } return err }) } func (be *b2Backend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { name := be.Layout.Filename(h) obj := be.bucket.Object(name) if offset == 0 && length == 0 { return obj.NewReader(ctx), nil } // pass a negative length to NewRangeReader so that the 
remainder of the // file is read. if length == 0 { length = -1 } return obj.NewRangeReader(ctx, offset, int64(length)), nil } // Save stores data in the backend at the handle. func (be *b2Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { ctx, cancel := context.WithCancel(ctx) defer cancel() name := be.Filename(h) obj := be.bucket.Object(name) // b2 always requires sha1 checksums for uploaded file parts w := obj.NewWriter(ctx) n, err := io.Copy(w, rd) if err != nil { _ = w.Close() return errors.Wrap(err, "Copy") } // sanity check if n != rd.Length() { return errors.Errorf("wrote %d bytes instead of the expected %d bytes", n, rd.Length()) } return errors.Wrap(w.Close(), "Close") } // Stat returns information about a blob. func (be *b2Backend) Stat(ctx context.Context, h backend.Handle) (bi backend.FileInfo, err error) { name := be.Filename(h) obj := be.bucket.Object(name) info, err := obj.Attrs(ctx) if err != nil { return backend.FileInfo{}, errors.Wrap(err, "Stat") } return backend.FileInfo{Size: info.Size, Name: h.Name}, nil } // Remove removes the blob with the given name and type. 
func (be *b2Backend) Remove(ctx context.Context, h backend.Handle) error { // the retry backend will also repeat the remove method up to 10 times for i := 0; i < 3; i++ { obj := be.bucket.Object(be.Filename(h)) var err error if be.canDelete { err = obj.Delete(ctx) if err == nil { // keep deleting until we are sure that no leftover file versions exist continue } code, _ := base.Code(err) if code == 401 { // unauthorized // fallback to hide if not allowed to delete files be.canDelete = false debug.Log("Removing %v failed, falling back to b2_hide_file.", h) continue } } else { // hide adds a new file version hiding all older ones, thus retries are not necessary err = obj.Hide(ctx) } // consider a file as removed if b2 informs us that it does not exist if b2.IsNotExist(err) { return nil } return errors.Wrap(err, "Delete") } return errors.New("failed to delete all file versions") } // List returns a channel that yields all names of blobs of type t. func (be *b2Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error { ctx, cancel := context.WithCancel(ctx) defer cancel() prefix, _ := be.Basedir(t) iter := be.bucket.List(ctx, b2.ListPrefix(prefix), b2.ListPageSize(be.listMaxItems)) for iter.Next() { obj := iter.Object() attrs, err := obj.Attrs(ctx) if err != nil { return err } fi := backend.FileInfo{ Name: path.Base(obj.Name()), Size: attrs.Size, } if err := fn(fi); err != nil { return err } } return iter.Err() } // Delete removes all restic keys in the bucket. It will not remove the bucket itself. func (be *b2Backend) Delete(ctx context.Context) error { return util.DefaultDelete(ctx, be) } // Close does nothing func (be *b2Backend) Close() error { return nil } // Warmup not implemented func (be *b2Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) { return []backend.Handle{}, nil } func (be *b2Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/b2/b2_test.go
internal/backend/b2/b2_test.go
package b2_test import ( "fmt" "os" "testing" "time" "github.com/restic/restic/internal/backend/b2" "github.com/restic/restic/internal/backend/test" rtest "github.com/restic/restic/internal/test" ) func newB2TestSuite() *test.Suite[b2.Config] { return &test.Suite[b2.Config]{ // do not use excessive data MinimalData: true, // wait for at most 10 seconds for removed files to disappear WaitForDelayedRemoval: 10 * time.Second, // NewConfig returns a config for a new temporary backend that will be used in tests. NewConfig: func() (*b2.Config, error) { cfg, err := b2.ParseConfig(os.Getenv("RESTIC_TEST_B2_REPOSITORY")) if err != nil { return nil, err } cfg.ApplyEnvironment("RESTIC_TEST_") cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano()) return cfg, nil }, Factory: b2.NewFactory(), } } func testVars(t testing.TB) { vars := []string{ "RESTIC_TEST_B2_ACCOUNT_ID", "RESTIC_TEST_B2_ACCOUNT_KEY", "RESTIC_TEST_B2_REPOSITORY", } for _, v := range vars { if os.Getenv(v) == "" { t.Skipf("environment variable %v not set", v) return } } } func TestBackendB2(t *testing.T) { defer func() { if t.Skipped() { rtest.SkipDisallowed(t, "restic/backend/b2.TestBackendB2") } }() testVars(t) newB2TestSuite().RunTests(t) } func BenchmarkBackendb2(t *testing.B) { testVars(t) newB2TestSuite().RunBenchmarks(t) }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/b2/config.go
internal/backend/b2/config.go
package b2 import ( "os" "path" "regexp" "strings" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/options" ) // Config contains all configuration necessary to connect to an b2 compatible // server. type Config struct { AccountID string Key options.SecretString Bucket string Prefix string Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` } // NewConfig returns a new config with default options applied. func NewConfig() Config { return Config{ Connections: 5, } } func init() { options.Register("b2", Config{}) } var bucketName = regexp.MustCompile("^[a-zA-Z0-9-]+$") // checkBucketName tests the bucket name against the rules at // https://help.backblaze.com/hc/en-us/articles/217666908-What-you-need-to-know-about-B2-Bucket-names func checkBucketName(name string) error { if name == "" { return errors.New("bucket name not found") } if len(name) < 6 { return errors.New("bucket name is too short") } if len(name) > 50 { return errors.New("bucket name is too long") } if !bucketName.MatchString(name) { return errors.New("bucket name contains invalid characters, allowed are: a-z, 0-9, dash (-)") } return nil } // ParseConfig parses the string s and extracts the b2 config. The supported // configuration format is b2:bucketname/prefix. If no prefix is given the // prefix "restic" will be used. func ParseConfig(s string) (*Config, error) { if !strings.HasPrefix(s, "b2:") { return nil, errors.New("invalid format, want: b2:bucket-name[:path]") } s = s[3:] bucket, prefix, _ := strings.Cut(s, ":") if err := checkBucketName(bucket); err != nil { return nil, err } if len(prefix) > 0 { prefix = strings.TrimPrefix(path.Clean(prefix), "/") } cfg := NewConfig() cfg.Bucket = bucket cfg.Prefix = prefix return &cfg, nil } var _ backend.ApplyEnvironmenter = &Config{} // ApplyEnvironment saves values from the environment to the config. 
func (cfg *Config) ApplyEnvironment(prefix string) { if cfg.AccountID == "" { cfg.AccountID = os.Getenv(prefix + "B2_ACCOUNT_ID") } if cfg.Key.String() == "" { cfg.Key = options.NewSecretString(os.Getenv(prefix + "B2_ACCOUNT_KEY")) } }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/b2/config_test.go
internal/backend/b2/config_test.go
package b2 import ( "testing" "github.com/restic/restic/internal/backend/test" ) var configTests = []test.ConfigTestData[Config]{ {S: "b2:bucketname", Cfg: Config{ Bucket: "bucketname", Prefix: "", Connections: 5, }}, {S: "b2:bucketname:", Cfg: Config{ Bucket: "bucketname", Prefix: "", Connections: 5, }}, {S: "b2:bucketname:/prefix/directory", Cfg: Config{ Bucket: "bucketname", Prefix: "prefix/directory", Connections: 5, }}, {S: "b2:foobar", Cfg: Config{ Bucket: "foobar", Prefix: "", Connections: 5, }}, {S: "b2:foobar:", Cfg: Config{ Bucket: "foobar", Prefix: "", Connections: 5, }}, {S: "b2:foobar:/", Cfg: Config{ Bucket: "foobar", Prefix: "", Connections: 5, }}, } func TestParseConfig(t *testing.T) { test.ParseConfigTester(t, ParseConfig, configTests) } var invalidConfigTests = []struct { s string err string }{ { "b2", "invalid format, want: b2:bucket-name[:path]", }, { "b2:", "bucket name not found", }, { "b2:bucket_name", "bucket name contains invalid characters, allowed are: a-z, 0-9, dash (-)", }, { "b2:bucketname/prefix/directory/", "bucket name contains invalid characters, allowed are: a-z, 0-9, dash (-)", }, } func TestInvalidConfig(t *testing.T) { for _, test := range invalidConfigTests { t.Run("", func(t *testing.T) { cfg, err := ParseConfig(test.s) if err == nil { t.Fatalf("expected error not found for invalid config: %v, cfg is:\n%#v", test.s, cfg) } if err.Error() != test.err { t.Fatalf("unexpected error found, want:\n %v\ngot:\n %v", test.err, err.Error()) } }) } }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/swift/config.go
internal/backend/swift/config.go
package swift import ( "os" "strings" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/options" ) // Config contains basic configuration needed to specify swift location for a swift server type Config struct { UserName string UserID string Domain string DomainID string APIKey string AuthURL string Region string Tenant string TenantID string TenantDomain string TenantDomainID string TrustID string StorageURL string AuthToken options.SecretString // auth v3 only ApplicationCredentialID string ApplicationCredentialName string ApplicationCredentialSecret options.SecretString Container string Prefix string DefaultContainerPolicy string Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` } func init() { options.Register("swift", Config{}) } // NewConfig returns a new config with the default values filled in. func NewConfig() Config { return Config{ Connections: 5, } } // ParseConfig parses the string s and extract swift's container name and prefix. func ParseConfig(s string) (*Config, error) { if !strings.HasPrefix(s, "swift:") { return nil, errors.New("invalid URL, expected: swift:container-name:/[prefix]") } s = strings.TrimPrefix(s, "swift:") container, prefix, _ := strings.Cut(s, ":") if prefix == "" { return nil, errors.Errorf("prefix is empty") } if prefix[0] != '/' { return nil, errors.Errorf("prefix does not start with slash (/)") } prefix = prefix[1:] cfg := NewConfig() cfg.Container = container cfg.Prefix = prefix return &cfg, nil } var _ backend.ApplyEnvironmenter = &Config{} // ApplyEnvironment saves values from the environment to the config. 
func (cfg *Config) ApplyEnvironment(prefix string) { for _, val := range []struct { s *string env string }{ // v2/v3 specific {&cfg.UserName, prefix + "OS_USERNAME"}, {&cfg.APIKey, prefix + "OS_PASSWORD"}, {&cfg.Region, prefix + "OS_REGION_NAME"}, {&cfg.AuthURL, prefix + "OS_AUTH_URL"}, // v3 specific {&cfg.UserID, prefix + "OS_USER_ID"}, {&cfg.Domain, prefix + "OS_USER_DOMAIN_NAME"}, {&cfg.DomainID, prefix + "OS_USER_DOMAIN_ID"}, {&cfg.Tenant, prefix + "OS_PROJECT_NAME"}, {&cfg.TenantDomain, prefix + "OS_PROJECT_DOMAIN_NAME"}, {&cfg.TenantDomainID, prefix + "OS_PROJECT_DOMAIN_ID"}, {&cfg.TrustID, prefix + "OS_TRUST_ID"}, // v2 specific {&cfg.TenantID, prefix + "OS_TENANT_ID"}, {&cfg.Tenant, prefix + "OS_TENANT_NAME"}, // v1 specific {&cfg.AuthURL, prefix + "ST_AUTH"}, {&cfg.UserName, prefix + "ST_USER"}, {&cfg.APIKey, prefix + "ST_KEY"}, // Application Credential auth {&cfg.ApplicationCredentialID, prefix + "OS_APPLICATION_CREDENTIAL_ID"}, {&cfg.ApplicationCredentialName, prefix + "OS_APPLICATION_CREDENTIAL_NAME"}, // Manual authentication {&cfg.StorageURL, prefix + "OS_STORAGE_URL"}, {&cfg.DefaultContainerPolicy, prefix + "SWIFT_DEFAULT_CONTAINER_POLICY"}, } { if *val.s == "" { *val.s = os.Getenv(val.env) } } for _, val := range []struct { s *options.SecretString env string }{ {&cfg.ApplicationCredentialSecret, prefix + "OS_APPLICATION_CREDENTIAL_SECRET"}, {&cfg.AuthToken, prefix + "OS_AUTH_TOKEN"}, } { if val.s.String() == "" { *val.s = options.NewSecretString(os.Getenv(val.env)) } } }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/swift/swift_test.go
internal/backend/swift/swift_test.go
package swift_test import ( "fmt" "os" "testing" "time" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/swift" "github.com/restic/restic/internal/backend/test" rtest "github.com/restic/restic/internal/test" ) func newSwiftTestSuite(t testing.TB) *test.Suite[swift.Config] { return &test.Suite[swift.Config]{ // do not use excessive data MinimalData: true, // wait for removals for at least 5m WaitForDelayedRemoval: 5 * time.Minute, ErrorHandler: func(t testing.TB, be backend.Backend, err error) error { if err == nil { return nil } if be.IsNotExist(err) { t.Logf("swift: ignoring error %v", err) return nil } return err }, // NewConfig returns a config for a new temporary backend that will be used in tests. NewConfig: func() (*swift.Config, error) { cfg, err := swift.ParseConfig(os.Getenv("RESTIC_TEST_SWIFT")) if err != nil { return nil, err } cfg.ApplyEnvironment("RESTIC_TEST_") cfg.Prefix += fmt.Sprintf("/test-%d", time.Now().UnixNano()) t.Logf("using prefix %v", cfg.Prefix) return cfg, nil }, Factory: swift.NewFactory(), } } func TestBackendSwift(t *testing.T) { defer func() { if t.Skipped() { rtest.SkipDisallowed(t, "restic/backend/swift.TestBackendSwift") } }() if os.Getenv("RESTIC_TEST_SWIFT") == "" { t.Skip("RESTIC_TEST_SWIFT unset, skipping test") return } t.Logf("run tests") newSwiftTestSuite(t).RunTests(t) } func BenchmarkBackendSwift(t *testing.B) { if os.Getenv("RESTIC_TEST_SWIFT") == "" { t.Skip("RESTIC_TEST_SWIFT unset, skipping test") return } t.Logf("run tests") newSwiftTestSuite(t).RunBenchmarks(t) }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/swift/config_test.go
internal/backend/swift/config_test.go
package swift import ( "testing" "github.com/restic/restic/internal/backend/test" ) var configTests = []test.ConfigTestData[Config]{ { S: "swift:cnt1:/", Cfg: Config{ Container: "cnt1", Prefix: "", Connections: 5, }, }, { S: "swift:cnt2:/prefix", Cfg: Config{Container: "cnt2", Prefix: "prefix", Connections: 5, }, }, { S: "swift:cnt3:/prefix/longer", Cfg: Config{Container: "cnt3", Prefix: "prefix/longer", Connections: 5, }, }, } func TestParseConfig(t *testing.T) { test.ParseConfigTester(t, ParseConfig, configTests) } var configTestsInvalid = []string{ "swift://hostname/container", "swift:////", "swift://", "swift:////prefix", "swift:container", "swift:container:", "swift:container/prefix", } func TestParseConfigInvalid(t *testing.T) { for i, test := range configTestsInvalid { _, err := ParseConfig(test) if err == nil { t.Errorf("test %d: invalid config %s did not return an error", i, test) continue } } }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/swift/swift.go
internal/backend/swift/swift.go
package swift import ( "context" "crypto/md5" "encoding/hex" "fmt" "hash" "io" "net/http" "path" "strconv" "strings" "time" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/layout" "github.com/restic/restic/internal/backend/location" "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/feature" "github.com/ncw/swift/v2" ) // beSwift is a backend which stores the data on a swift endpoint. type beSwift struct { conn *swift.Connection connections uint container string // Container name prefix string // Prefix of object names in the container layout.Layout } // ensure statically that *beSwift implements backend.Backend. var _ backend.Backend = &beSwift{} func NewFactory() location.Factory { return location.NewHTTPBackendFactory("swift", ParseConfig, location.NoPassword, Open, Open) } // Open opens the swift backend at a container in region. The container is // created if it does not exist yet. 
func Open(ctx context.Context, cfg Config, rt http.RoundTripper, _ func(string, ...interface{})) (backend.Backend, error) { debug.Log("config %#v", cfg) be := &beSwift{ conn: &swift.Connection{ UserName: cfg.UserName, UserId: cfg.UserID, Domain: cfg.Domain, DomainId: cfg.DomainID, ApiKey: cfg.APIKey, AuthUrl: cfg.AuthURL, Region: cfg.Region, Tenant: cfg.Tenant, TenantId: cfg.TenantID, TenantDomain: cfg.TenantDomain, TenantDomainId: cfg.TenantDomainID, TrustId: cfg.TrustID, StorageUrl: cfg.StorageURL, AuthToken: cfg.AuthToken.Unwrap(), ApplicationCredentialId: cfg.ApplicationCredentialID, ApplicationCredentialName: cfg.ApplicationCredentialName, ApplicationCredentialSecret: cfg.ApplicationCredentialSecret.Unwrap(), ConnectTimeout: time.Minute, Timeout: time.Minute, Transport: rt, }, connections: cfg.Connections, container: cfg.Container, prefix: cfg.Prefix, Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), } // Authenticate if needed if !be.conn.Authenticated() { if err := be.conn.Authenticate(ctx); err != nil { return nil, errors.Wrap(err, "conn.Authenticate") } } // Ensure container exists switch _, _, err := be.conn.Container(ctx, be.container); err { case nil: // Container exists case swift.ContainerNotFound: err = be.createContainer(ctx, cfg.DefaultContainerPolicy) if err != nil { return nil, errors.Wrap(err, "beSwift.createContainer") } default: return nil, errors.Wrap(err, "conn.Container") } return be, nil } func (be *beSwift) createContainer(ctx context.Context, policy string) error { var h swift.Headers if policy != "" { h = swift.Headers{ "X-Storage-Policy": policy, } } return be.conn.ContainerCreate(ctx, be.container, h) } func (be *beSwift) Properties() backend.Properties { return backend.Properties{ Connections: be.connections, HasAtomicReplace: true, } } // Hasher may return a hash function for calculating a content hash for the backend func (be *beSwift) Hasher() hash.Hash { return md5.New() } // Load runs fn with a reader that yields the 
contents of the file at h at the // given offset. func (be *beSwift) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { return util.DefaultLoad(ctx, h, length, offset, be.openReader, fn) } func (be *beSwift) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { objName := be.Filename(h) headers := swift.Headers{} if offset > 0 { headers["Range"] = fmt.Sprintf("bytes=%d-", offset) } if length > 0 { headers["Range"] = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1) } obj, _, err := be.conn.ObjectOpen(ctx, be.container, objName, false, headers) if err != nil { return nil, fmt.Errorf("conn.ObjectOpen: %w", err) } if feature.Flag.Enabled(feature.BackendErrorRedesign) && length > 0 { // get response length, but don't cause backend calls cctx, cancel := context.WithCancel(context.Background()) cancel() objLength, e := obj.Length(cctx) if e == nil && objLength != int64(length) { _ = obj.Close() return nil, &swift.Error{StatusCode: http.StatusRequestedRangeNotSatisfiable, Text: "restic-file-too-short"} } } return obj, nil } // Save stores data in the backend at the handle. func (be *beSwift) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { objName := be.Filename(h) encoding := "binary/octet-stream" hdr := swift.Headers{"Content-Length": strconv.FormatInt(rd.Length(), 10)} _, err := be.conn.ObjectPut(ctx, be.container, objName, rd, true, hex.EncodeToString(rd.Hash()), encoding, hdr) // swift does not return the upload length return errors.Wrap(err, "client.PutObject") } // Stat returns information about a blob. 
func (be *beSwift) Stat(ctx context.Context, h backend.Handle) (bi backend.FileInfo, err error) { objName := be.Filename(h) obj, _, err := be.conn.Object(ctx, be.container, objName) if err != nil { return backend.FileInfo{}, errors.Wrap(err, "conn.Object") } return backend.FileInfo{Size: obj.Bytes, Name: h.Name}, nil } // Remove removes the blob with the given name and type. func (be *beSwift) Remove(ctx context.Context, h backend.Handle) error { objName := be.Filename(h) err := be.conn.ObjectDelete(ctx, be.container, objName) return errors.Wrap(err, "conn.ObjectDelete") } // List runs fn for each file in the backend which has the type t. When an // error occurs (or fn returns an error), List stops and returns it. func (be *beSwift) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error { prefix, _ := be.Basedir(t) prefix += "/" err := be.conn.ObjectsWalk(ctx, be.container, &swift.ObjectsOpts{Prefix: prefix}, func(ctx context.Context, opts *swift.ObjectsOpts) (interface{}, error) { newObjects, err := be.conn.Objects(ctx, be.container, opts) if err != nil { return nil, errors.Wrap(err, "conn.ObjectNames") } for _, obj := range newObjects { m := path.Base(strings.TrimPrefix(obj.Name, prefix)) if m == "" { continue } fi := backend.FileInfo{ Name: m, Size: obj.Bytes, } err := fn(fi) if err != nil { return nil, err } if ctx.Err() != nil { return nil, ctx.Err() } } return newObjects, nil }) if err != nil { return err } return ctx.Err() } // IsNotExist returns true if the error is caused by a not existing file. 
func (be *beSwift) IsNotExist(err error) bool { var e *swift.Error return errors.As(err, &e) && e.StatusCode == http.StatusNotFound } func (be *beSwift) IsPermanentError(err error) bool { if be.IsNotExist(err) { return true } var serr *swift.Error if errors.As(err, &serr) { if serr.StatusCode == http.StatusRequestedRangeNotSatisfiable || serr.StatusCode == http.StatusUnauthorized || serr.StatusCode == http.StatusForbidden { return true } } return false } // Delete removes all restic objects in the container. // It will not remove the container itself. func (be *beSwift) Delete(ctx context.Context) error { return util.DefaultDelete(ctx, be) } // Close does nothing func (be *beSwift) Close() error { return nil } // Warmup not implemented func (be *beSwift) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) { return []backend.Handle{}, nil } func (be *beSwift) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/location/location.go
internal/backend/location/location.go
// Package location implements parsing the restic repository location from a string. package location import ( "strings" "github.com/restic/restic/internal/errors" ) // Location specifies the location of a repository, including the method of // access and (possibly) credentials needed for access. type Location struct { Scheme string Config interface{} } // NoPassword returns the repository location unchanged (there's no sensitive information there) func NoPassword(s string) string { return s } func isPath(s string) bool { if strings.HasPrefix(s, "../") || strings.HasPrefix(s, `..\`) { return true } if strings.HasPrefix(s, "/") || strings.HasPrefix(s, `\`) { return true } if len(s) < 3 { return false } // check for drive paths drive := s[0] //nolint:staticcheck // de morgan's law makes this harder to read if !(drive >= 'a' && drive <= 'z') && !(drive >= 'A' && drive <= 'Z') { return false } if s[1] != ':' { return false } if s[2] != '\\' && s[2] != '/' { return false } return true } // Parse extracts repository location information from the string s. If s // starts with a backend name followed by a colon, that backend's Parse() // function is called. Otherwise, the local backend is used which interprets s // as the name of a directory. 
func Parse(registry *Registry, s string) (u Location, err error) { scheme := extractScheme(s) u.Scheme = scheme factory := registry.Lookup(scheme) if factory != nil { u.Config, err = factory.ParseConfig(s) if err != nil { return Location{}, err } return u, nil } // if s is not a path or contains ":", it's ambiguous if !isPath(s) && strings.ContainsRune(s, ':') { return Location{}, errors.New("invalid backend\nIf the repository is in a local directory, you need to add a `local:` prefix") } u.Scheme = "local" factory = registry.Lookup(u.Scheme) if factory == nil { return Location{}, errors.New("local backend not available") } u.Config, err = factory.ParseConfig("local:" + s) if err != nil { return Location{}, err } return u, nil } // StripPassword returns a displayable version of a repository location (with any sensitive information removed) func StripPassword(registry *Registry, s string) string { scheme := extractScheme(s) factory := registry.Lookup(scheme) if factory != nil { return factory.StripPassword(s) } return s } func extractScheme(s string) string { scheme, _, _ := strings.Cut(s, ":") return scheme }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/location/registry.go
internal/backend/location/registry.go
package location import ( "context" "net/http" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/limiter" ) type Registry struct { factories map[string]Factory } func NewRegistry() *Registry { return &Registry{ factories: make(map[string]Factory), } } func (r *Registry) Register(factory Factory) { if r.factories[factory.Scheme()] != nil { panic("duplicate backend") } r.factories[factory.Scheme()] = factory } func (r *Registry) Lookup(scheme string) Factory { return r.factories[scheme] } type Factory interface { Scheme() string ParseConfig(s string) (interface{}, error) StripPassword(s string) string Create(ctx context.Context, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter, errorLog func(string, ...interface{})) (backend.Backend, error) Open(ctx context.Context, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter, errorLog func(string, ...interface{})) (backend.Backend, error) } type genericBackendFactory[C any, T backend.Backend] struct { scheme string parseConfigFn func(s string) (*C, error) stripPasswordFn func(s string) string createFn func(ctx context.Context, cfg C, rt http.RoundTripper, lim limiter.Limiter, errorLog func(string, ...interface{})) (T, error) openFn func(ctx context.Context, cfg C, rt http.RoundTripper, lim limiter.Limiter, errorLog func(string, ...interface{})) (T, error) } func (f *genericBackendFactory[C, T]) Scheme() string { return f.scheme } func (f *genericBackendFactory[C, T]) ParseConfig(s string) (interface{}, error) { return f.parseConfigFn(s) } func (f *genericBackendFactory[C, T]) StripPassword(s string) string { if f.stripPasswordFn != nil { return f.stripPasswordFn(s) } return s } func (f *genericBackendFactory[C, T]) Create(ctx context.Context, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter, errorLog func(string, ...interface{})) (backend.Backend, error) { return f.createFn(ctx, *cfg.(*C), rt, lim, errorLog) } func (f *genericBackendFactory[C, T]) Open(ctx 
context.Context, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter, errorLog func(string, ...interface{})) (backend.Backend, error) { return f.openFn(ctx, *cfg.(*C), rt, lim, errorLog) } func NewHTTPBackendFactory[C any, T backend.Backend]( scheme string, parseConfigFn func(s string) (*C, error), stripPasswordFn func(s string) string, createFn func(ctx context.Context, cfg C, rt http.RoundTripper, errorLog func(string, ...interface{})) (T, error), openFn func(ctx context.Context, cfg C, rt http.RoundTripper, errorLog func(string, ...interface{})) (T, error)) Factory { return &genericBackendFactory[C, T]{ scheme: scheme, parseConfigFn: parseConfigFn, stripPasswordFn: stripPasswordFn, createFn: func(ctx context.Context, cfg C, rt http.RoundTripper, _ limiter.Limiter, errorLog func(string, ...interface{})) (T, error) { return createFn(ctx, cfg, rt, errorLog) }, openFn: func(ctx context.Context, cfg C, rt http.RoundTripper, _ limiter.Limiter, errorLog func(string, ...interface{})) (T, error) { return openFn(ctx, cfg, rt, errorLog) }, } } func NewLimitedBackendFactory[C any, T backend.Backend]( scheme string, parseConfigFn func(s string) (*C, error), stripPasswordFn func(s string) string, createFn func(ctx context.Context, cfg C, lim limiter.Limiter, errorLog func(string, ...interface{})) (T, error), openFn func(ctx context.Context, cfg C, lim limiter.Limiter, errorLog func(string, ...interface{})) (T, error)) Factory { return &genericBackendFactory[C, T]{ scheme: scheme, parseConfigFn: parseConfigFn, stripPasswordFn: stripPasswordFn, createFn: func(ctx context.Context, cfg C, _ http.RoundTripper, lim limiter.Limiter, errorLog func(string, ...interface{})) (T, error) { return createFn(ctx, cfg, lim, errorLog) }, openFn: func(ctx context.Context, cfg C, _ http.RoundTripper, lim limiter.Limiter, errorLog func(string, ...interface{})) (T, error) { return openFn(ctx, cfg, lim, errorLog) }, } }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/location/location_test.go
internal/backend/location/location_test.go
package location_test import ( "testing" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/location" "github.com/restic/restic/internal/test" ) type testConfig struct { loc string } func testFactory() location.Factory { return location.NewHTTPBackendFactory[testConfig, backend.Backend]( "local", func(s string) (*testConfig, error) { return &testConfig{loc: s}, nil }, nil, nil, nil, ) } func TestParse(t *testing.T) { registry := location.NewRegistry() registry.Register(testFactory()) path := "local:example" u, err := location.Parse(registry, path) test.OK(t, err) test.Equals(t, "local", u.Scheme) test.Equals(t, &testConfig{loc: path}, u.Config) } func TestParseFallback(t *testing.T) { fallbackTests := []string{ "dir1/dir2", "/dir1/dir2", "/dir1:foobar/dir2", `\dir1\foobar\dir2`, `c:\dir1\foobar\dir2`, `C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`, `c:/dir1/foobar/dir2`, } registry := location.NewRegistry() registry.Register(testFactory()) for _, path := range fallbackTests { t.Run(path, func(t *testing.T) { u, err := location.Parse(registry, path) if err != nil { t.Fatalf("unexpected error: %v", err) } test.Equals(t, "local", u.Scheme) test.Equals(t, "local:"+path, u.Config.(*testConfig).loc) }) } } func TestInvalidScheme(t *testing.T) { registry := location.NewRegistry() var invalidSchemes = []string{ "foobar:xxx", "foobar:/dir/dir2", } for _, s := range invalidSchemes { t.Run(s, func(t *testing.T) { _, err := location.Parse(registry, s) if err == nil { t.Fatalf("error for invalid location %q not found", s) } }) } }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/location/display_location_test.go
internal/backend/location/display_location_test.go
package location_test import ( "testing" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/location" "github.com/restic/restic/internal/test" ) func TestStripPassword(t *testing.T) { registry := location.NewRegistry() registry.Register( location.NewHTTPBackendFactory[any, backend.Backend]("test", nil, func(s string) string { return "cleaned" }, nil, nil, ), ) t.Run("valid", func(t *testing.T) { clean := location.StripPassword(registry, "test:secret") test.Equals(t, "cleaned", clean) }) t.Run("unknown", func(t *testing.T) { clean := location.StripPassword(registry, "invalid:secret") test.Equals(t, "invalid:secret", clean) }) }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/all/all.go
internal/backend/all/all.go
package all import ( "github.com/restic/restic/internal/backend/azure" "github.com/restic/restic/internal/backend/b2" "github.com/restic/restic/internal/backend/gs" "github.com/restic/restic/internal/backend/local" "github.com/restic/restic/internal/backend/location" "github.com/restic/restic/internal/backend/rclone" "github.com/restic/restic/internal/backend/rest" "github.com/restic/restic/internal/backend/s3" "github.com/restic/restic/internal/backend/sftp" "github.com/restic/restic/internal/backend/swift" ) func Backends() *location.Registry { backends := location.NewRegistry() backends.Register(azure.NewFactory()) backends.Register(b2.NewFactory()) backends.Register(gs.NewFactory()) backends.Register(local.NewFactory()) backends.Register(rclone.NewFactory()) backends.Register(rest.NewFactory()) backends.Register(s3.NewFactory()) backends.Register(sftp.NewFactory()) backends.Register(swift.NewFactory()) return backends }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/rest/rest_unix_test.go
internal/backend/rest/rest_unix_test.go
//go:build !windows

package rest_test

import (
	"context"
	"fmt"
	"path"
	"testing"

	rtest "github.com/restic/restic/internal/test"
)

// TestBackendRESTWithUnixSocket runs the full backend test suite against a
// local rest-server listening on a unix domain socket instead of TCP.
// Not built on Windows (see the build constraint above) because unix sockets
// are not supported there by rest-server.
func TestBackendRESTWithUnixSocket(t *testing.T) {
	defer func() {
		// report skipped runs when the environment forbids skipping tests
		if t.Skipped() {
			rtest.SkipDisallowed(t, "restic/backend/rest.TestBackendREST")
		}
	}()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dir := rtest.TempDir(t)
	// data directory and socket live in the same temp dir
	serverURL, cleanup := runRESTServer(ctx, t, path.Join(dir, "data"), fmt.Sprintf("unix:%s", path.Join(dir, "sock")))
	defer cleanup()

	newTestSuite(serverURL, false).RunTests(t)
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/rest/rest_test.go
internal/backend/rest/rest_test.go
package rest_test

import (
	"bufio"
	"context"
	"fmt"
	"net/url"
	"os"
	"os/exec"
	"regexp"
	"strings"
	"syscall"
	"testing"
	"time"

	"github.com/restic/restic/internal/backend/rest"
	"github.com/restic/restic/internal/backend/test"
	rtest "github.com/restic/restic/internal/test"
)

var (
	// serverStartedRE matches the rest-server startup line on stderr and
	// captures the actual listen address (useful when listening on ":0").
	serverStartedRE = regexp.MustCompile("^start server on (.*)$")
)

// runRESTServer starts a rest-server binary (skipping the test if it is not
// installed) serving dir on reqListenAddr, waits until the server reports its
// actual listen address, and returns a URL to connect to plus a cleanup
// function that terminates the server.
func runRESTServer(ctx context.Context, t testing.TB, dir, reqListenAddr string) (*url.URL, func()) {
	srv, err := exec.LookPath("rest-server")
	if err != nil {
		t.Skip(err)
	}

	// create our own context, so that our cleanup can cancel and wait for completion
	// this will ensure any open ports, open unix sockets etc are properly closed
	processCtx, cancel := context.WithCancel(ctx)
	cmd := exec.CommandContext(processCtx, srv, "--no-auth", "--path", dir, "--listen", reqListenAddr)

	// this cancel func is called by when the process context is done
	cmd.Cancel = func() error {
		// we execute in a Go-routine as we know the caller will
		// be waiting on a .Wait() regardless
		go func() {
			// try to send a graceful termination signal
			if cmd.Process.Signal(syscall.SIGTERM) == nil {
				// if we succeed, then wait a few seconds
				time.Sleep(2 * time.Second)
			}
			// and then make sure it's killed either way, ignoring any error code
			_ = cmd.Process.Kill()
		}()
		return nil
	}

	// this is the cleanup function that we return the caller,
	// which will cancel our process context, and then wait for it to finish
	cleanup := func() {
		cancel()
		_ = cmd.Wait()
	}

	// but in-case we don't finish this method, e.g. by calling t.Fatal()
	// we also defer a call to clean it up ourselves, guarded by a flag to
	// indicate that we returned the function to the caller to deal with.
	callerWillCleanUp := false
	defer func() {
		if !callerWillCleanUp {
			cleanup()
		}
	}()

	// send stdout to our std out
	cmd.Stdout = os.Stdout

	// capture stderr with a pipe, as we want to examine this output
	// to determine when the server is started and listening.
	cmdErr, err := cmd.StderrPipe()
	if err != nil {
		t.Fatal(err)
	}

	// start the rest-server
	if err := cmd.Start(); err != nil {
		t.Fatal(err)
	}

	// create a channel to receive the actual listen address on
	listenAddrCh := make(chan string)
	go func() {
		defer close(listenAddrCh)
		matched := false
		br := bufio.NewReader(cmdErr)
		for {
			line, err := br.ReadString('\n')
			if err != nil {
				// we ignore errors, as code that relies on this
				// will happily fail via timeout and empty closed
				// channel.
				return
			}

			line = strings.Trim(line, "\r\n")
			if !matched {
				// look for the server started message, and return the address
				// that it's listening on
				matchedServerListen := serverStartedRE.FindSubmatch([]byte(line))
				if len(matchedServerListen) == 2 {
					listenAddrCh <- string(matchedServerListen[1])
					matched = true
				}
			}
			t.Log(line)
		}
	}()

	// wait for us to get an address,
	// or the parent context to cancel,
	// or for us to timeout
	var actualListenAddr string
	select {
	case <-processCtx.Done():
		t.Fatal(context.Canceled)
	case <-time.NewTimer(2 * time.Second).C:
		t.Fatal(context.DeadlineExceeded)
	case a, ok := <-listenAddrCh:
		if !ok {
			t.Fatal(context.Canceled)
		}
		actualListenAddr = a
	}

	// this translate the address that the server is listening on
	// to a URL suitable for us to connect to
	var addrToConnectTo string
	if strings.HasPrefix(reqListenAddr, "unix:") {
		addrToConnectTo = fmt.Sprintf("http+unix://%s:/restic-test/", actualListenAddr)
	} else {
		// while we may listen on 0.0.0.0, we connect to localhost
		addrToConnectTo = fmt.Sprintf("http://%s/restic-test/", strings.Replace(actualListenAddr, "0.0.0.0", "localhost", 1))
	}

	// parse to a URL
	url, err := url.Parse(addrToConnectTo)
	if err != nil {
		t.Fatal(err)
	}

	// indicate that we've completed successfully, and that the caller
	// is responsible for calling cleanup
	callerWillCleanUp = true
	return url, cleanup
}

// newTestSuite builds the generic backend test suite against the given REST
// server URL; minimalData reduces the amount of data written by the suite.
func newTestSuite(url *url.URL, minimalData bool) *test.Suite[rest.Config] {
	return &test.Suite[rest.Config]{
		MinimalData: minimalData,

		// NewConfig returns a config for a new temporary backend that will be used in tests.
		NewConfig: func() (*rest.Config, error) {
			cfg := rest.NewConfig()
			cfg.URL = url
			return &cfg, nil
		},

		Factory: rest.NewFactory(),
	}
}

// TestBackendREST runs the backend test suite against a freshly started local
// rest-server listening on a random TCP port.
func TestBackendREST(t *testing.T) {
	defer func() {
		if t.Skipped() {
			rtest.SkipDisallowed(t, "restic/backend/rest.TestBackendREST")
		}
	}()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dir := rtest.TempDir(t)
	serverURL, cleanup := runRESTServer(ctx, t, dir, ":0")
	defer cleanup()

	newTestSuite(serverURL, false).RunTests(t)
}

// TestBackendRESTExternalServer runs the suite against an externally managed
// REST server given via RESTIC_TEST_REST_REPOSITORY; skipped otherwise.
func TestBackendRESTExternalServer(t *testing.T) {
	repostr := os.Getenv("RESTIC_TEST_REST_REPOSITORY")
	if repostr == "" {
		t.Skipf("environment variable %v not set", "RESTIC_TEST_REST_REPOSITORY")
	}

	cfg, err := rest.ParseConfig(repostr)
	if err != nil {
		t.Fatal(err)
	}

	newTestSuite(cfg.URL, true).RunTests(t)
}

// BenchmarkBackendREST runs the backend benchmarks against a local rest-server.
func BenchmarkBackendREST(t *testing.B) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dir := rtest.TempDir(t)
	serverURL, cleanup := runRESTServer(ctx, t, dir, ":0")
	defer cleanup()

	newTestSuite(serverURL, false).RunBenchmarks(t)
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/rest/rest.go
internal/backend/rest/rest.go
package rest

import (
	"context"
	"encoding/json"
	"fmt"
	"hash"
	"io"
	"net/http"
	"net/url"
	"strings"

	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/backend/layout"
	"github.com/restic/restic/internal/backend/location"
	"github.com/restic/restic/internal/backend/util"
	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/feature"
)

// make sure the rest backend implements backend.Backend
var _ backend.Backend = &Backend{}

// Backend uses the REST protocol to access data stored on a server.
type Backend struct {
	url         *url.URL // base URL of the REST server
	connections uint     // configured connection limit, reported via Properties()
	client      http.Client
	layout.Layout
}

// restError is returned whenever the server returns a non-successful HTTP status.
type restError struct {
	backend.Handle
	StatusCode int
	Status     string
}

// Error renders 404 responses as "does not exist" (for valid handles) and all
// other statuses verbatim.
func (e *restError) Error() string {
	if e.StatusCode == http.StatusNotFound && e.Handle.Type.String() != "invalid" {
		return fmt.Sprintf("%v does not exist", e.Handle)
	}
	return fmt.Sprintf("unexpected HTTP response (%v): %v", e.StatusCode, e.Status)
}

// NewFactory registers the "rest" location scheme with its parse, password
// stripping, create and open functions.
func NewFactory() location.Factory {
	return location.NewHTTPBackendFactory("rest", ParseConfig, StripPassword, Create, Open)
}

// the REST API protocol version is decided by HTTP request headers, these are the constants.
const (
	ContentTypeV1 = "application/vnd.x.restic.rest.v1"
	ContentTypeV2 = "application/vnd.x.restic.rest.v2"
)

// Open opens the REST backend with the given config.
func Open(_ context.Context, cfg Config, rt http.RoundTripper, _ func(string, ...interface{})) (*Backend, error) {
	// use url without trailing slash for layout
	url := cfg.URL.String()
	if url[len(url)-1] == '/' {
		url = url[:len(url)-1]
	}

	be := &Backend{
		url:         cfg.URL,
		client:      http.Client{Transport: rt},
		Layout:      layout.NewRESTLayout(url),
		connections: cfg.Connections,
	}

	return be, nil
}

// drainAndClose reads any remaining response body and closes it, so the
// underlying HTTP connection can be reused. It returns the drain error first,
// otherwise the close error.
func drainAndClose(resp *http.Response) error {
	_, err := io.Copy(io.Discard, resp.Body)
	cerr := resp.Body.Close()

	// return first error
	if err != nil {
		return errors.Errorf("drain: %w", err)
	}
	return cerr
}

// Create creates a new REST on server configured in config. It fails if a
// config file already exists on the server.
func Create(ctx context.Context, cfg Config, rt http.RoundTripper, errorLog func(string, ...interface{})) (*Backend, error) {
	be, err := Open(ctx, cfg, rt, errorLog)
	if err != nil {
		return nil, err
	}

	// an existing config file means the repository was already initialized
	_, err = be.Stat(ctx, backend.Handle{Type: backend.ConfigFile})
	if err == nil {
		return nil, errors.New("config file already exists")
	}

	// ask the server to create the repository layout via ?create=true
	url := *cfg.URL
	values := url.Query()
	values.Set("create", "true")
	url.RawQuery = values.Encode()

	resp, err := be.client.Post(url.String(), "binary/octet-stream", strings.NewReader(""))
	if err != nil {
		return nil, err
	}

	if err := drainAndClose(resp); err != nil {
		return nil, err
	}

	if resp.StatusCode != http.StatusOK {
		return nil, &restError{backend.Handle{}, resp.StatusCode, resp.Status}
	}

	return be, nil
}

// Properties reports the configured connection limit and that the server does
// not support atomic replacement.
func (b *Backend) Properties() backend.Properties {
	return backend.Properties{
		Connections: b.connections,
		// rest-server prevents overwriting
		HasAtomicReplace: false,
	}
}

// Hasher may return a hash function for calculating a content hash for the backend
func (b *Backend) Hasher() hash.Hash {
	return nil
}

// Save stores data in the backend at the handle.
func (b *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// make sure that client.Post() cannot close the reader by wrapping it
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, b.Filename(h), io.NopCloser(rd))
	if err != nil {
		return errors.WithStack(err)
	}
	// GetBody allows the transport to retry the request; rewind the reader
	// so a retry uploads the data from the start again.
	req.GetBody = func() (io.ReadCloser, error) {
		if err := rd.Rewind(); err != nil {
			return nil, err
		}
		return io.NopCloser(rd), nil
	}

	req.Header.Set("Content-Type", "application/octet-stream")
	req.Header.Set("Accept", ContentTypeV2)

	// explicitly set the content length, this prevents chunked encoding and
	// lets the server know what's coming.
	req.ContentLength = rd.Length()

	resp, err := b.client.Do(req)
	if err != nil {
		return errors.WithStack(err)
	}

	if err := drainAndClose(resp); err != nil {
		return err
	}

	if resp.StatusCode != http.StatusOK {
		return &restError{h, resp.StatusCode, resp.Status}
	}

	return nil
}

// IsNotExist returns true if the error was caused by a non-existing file.
func (b *Backend) IsNotExist(err error) bool {
	var e *restError
	return errors.As(err, &e) && e.StatusCode == http.StatusNotFound
}

// IsPermanentError reports whether retrying the operation cannot succeed:
// missing files, unsatisfiable ranges, auth failures and full storage.
func (b *Backend) IsPermanentError(err error) bool {
	if b.IsNotExist(err) {
		return true
	}

	var rerr *restError
	if errors.As(err, &rerr) {
		if rerr.StatusCode == http.StatusRequestedRangeNotSatisfiable || rerr.StatusCode == http.StatusUnauthorized || rerr.StatusCode == http.StatusForbidden || rerr.StatusCode == http.StatusInsufficientStorage {
			return true
		}
	}

	return false
}

// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (b *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { r, err := b.openReader(ctx, h, length, offset) if err != nil { return err } err = fn(r) if err != nil { _ = r.Close() // ignore error here return err } // Note: readerat.ReadAt() (the fn) uses io.ReadFull() that doesn't // wait for EOF after reading body. Due to HTTP/2 stream multiplexing // and goroutine timings the EOF frame arrives from server (eg. rclone) // with a delay after reading body. Immediate close might trigger // HTTP/2 stream reset resulting in the *stream closed* error on server, // so we wait for EOF before closing body. var buf [1]byte _, err = r.Read(buf[:]) if err == io.EOF { err = nil } if e := r.Close(); err == nil { err = e } return err } func (b *Backend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { req, err := http.NewRequestWithContext(ctx, "GET", b.Filename(h), nil) if err != nil { return nil, errors.WithStack(err) } byteRange := fmt.Sprintf("bytes=%d-", offset) if length > 0 { byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1) } req.Header.Set("Range", byteRange) req.Header.Set("Accept", ContentTypeV2) resp, err := b.client.Do(req) if err != nil { return nil, errors.Wrap(err, "client.Do") } if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { _ = drainAndClose(resp) return nil, &restError{h, resp.StatusCode, resp.Status} } if feature.Flag.Enabled(feature.BackendErrorRedesign) && length > 0 && resp.ContentLength != int64(length) { return nil, &restError{h, http.StatusRequestedRangeNotSatisfiable, "partial out of bounds read"} } return resp.Body, nil } // Stat returns information about a blob. 
// Stat issues a HEAD request for h and reports the file size taken from the
// Content-Length header.
func (b *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, b.Filename(h), nil)
	if err != nil {
		return backend.FileInfo{}, errors.WithStack(err)
	}
	req.Header.Set("Accept", ContentTypeV2)

	resp, err := b.client.Do(req)
	if err != nil {
		return backend.FileInfo{}, errors.WithStack(err)
	}

	if err = drainAndClose(resp); err != nil {
		return backend.FileInfo{}, err
	}

	if resp.StatusCode != http.StatusOK {
		return backend.FileInfo{}, &restError{h, resp.StatusCode, resp.Status}
	}

	// a missing Content-Length header yields -1; reject it explicitly
	if resp.ContentLength < 0 {
		return backend.FileInfo{}, errors.New("negative content length")
	}

	bi := backend.FileInfo{
		Size: resp.ContentLength,
		Name: h.Name,
	}

	return bi, nil
}

// Remove removes the blob with the given name and type.
func (b *Backend) Remove(ctx context.Context, h backend.Handle) error {
	req, err := http.NewRequestWithContext(ctx, "DELETE", b.Filename(h), nil)
	if err != nil {
		return errors.WithStack(err)
	}
	req.Header.Set("Accept", ContentTypeV2)

	resp, err := b.client.Do(req)
	if err != nil {
		return errors.Wrap(err, "client.Do")
	}

	if err = drainAndClose(resp); err != nil {
		return err
	}

	if resp.StatusCode != http.StatusOK {
		return &restError{h, resp.StatusCode, resp.Status}
	}

	return nil
}

// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
	url := b.Dirname(backend.Handle{Type: t})
	if !strings.HasSuffix(url, "/") {
		url += "/"
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return errors.WithStack(err)
	}
	req.Header.Set("Accept", ContentTypeV2)

	resp, err := b.client.Do(req)
	if err != nil {
		return errors.Wrap(err, "List")
	}

	if resp.StatusCode == http.StatusNotFound {
		if !strings.HasPrefix(resp.Header.Get("Server"), "rclone/") {
			// ignore missing directories, unless the server is rclone. rclone
			// already ignores missing directories, but misuses "not found" to
			// report certain internal errors, see
			// https://github.com/rclone/rclone/pull/7550 for details.
			return drainAndClose(resp)
		}
	}

	if resp.StatusCode != http.StatusOK {
		_ = drainAndClose(resp)
		return &restError{backend.Handle{Type: t}, resp.StatusCode, resp.Status}
	}

	// the Content-Type header tells us which list protocol the server speaks
	if resp.Header.Get("Content-Type") == ContentTypeV2 {
		err = b.listv2(ctx, resp, fn)
	} else {
		err = b.listv1(ctx, t, resp, fn)
	}

	if cerr := drainAndClose(resp); cerr != nil && err == nil {
		err = cerr
	}

	return err
}

// listv1 uses the REST protocol v1, where a list HTTP request (e.g. `GET
// /data/`) only returns the names of the files, so we need to issue an HTTP
// HEAD request for each file.
func (b *Backend) listv1(ctx context.Context, t backend.FileType, resp *http.Response, fn func(backend.FileInfo) error) error {
	debug.Log("parsing API v1 response")
	dec := json.NewDecoder(resp.Body)

	var list []string
	if err := dec.Decode(&list); err != nil {
		return errors.Wrap(err, "Decode")
	}

	for _, m := range list {
		// v1 only lists names; fetch the size with a separate Stat (HEAD)
		fi, err := b.Stat(ctx, backend.Handle{Name: m, Type: t})
		if err != nil {
			return err
		}

		if ctx.Err() != nil {
			return ctx.Err()
		}

		fi.Name = m
		err = fn(fi)
		if err != nil {
			return err
		}

		if ctx.Err() != nil {
			return ctx.Err()
		}
	}

	return ctx.Err()
}

// listv2 uses the REST protocol v2, where a list HTTP request (e.g. `GET
// /data/`) returns the names and sizes of all files.
func (b *Backend) listv2(ctx context.Context, resp *http.Response, fn func(backend.FileInfo) error) error {
	debug.Log("parsing API v2 response")
	dec := json.NewDecoder(resp.Body)

	var list []struct {
		Name string `json:"name"`
		Size int64  `json:"size"`
	}
	if err := dec.Decode(&list); err != nil {
		return errors.Wrap(err, "Decode")
	}

	for _, item := range list {
		if ctx.Err() != nil {
			return ctx.Err()
		}

		fi := backend.FileInfo{
			Name: item.Name,
			Size: item.Size,
		}

		err := fn(fi)
		if err != nil {
			return err
		}

		if ctx.Err() != nil {
			return ctx.Err()
		}
	}

	return ctx.Err()
}

// Close closes all open files.
func (b *Backend) Close() error {
	// this does not need to do anything, all open files are closed within the
	// same function.
	return nil
}

// Delete removes all data in the backend.
func (b *Backend) Delete(ctx context.Context) error {
	return util.DefaultDelete(ctx, b)
}

// Warmup not implemented
func (b *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
	return []backend.Handle{}, nil
}

// WarmupWait not implemented; returns immediately.
func (b *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error {
	return nil
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/rest/config.go
internal/backend/rest/config.go
package rest

import (
	"net/url"
	"os"
	"strings"

	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/options"
)

// Config contains all configuration necessary to connect to a REST server.
type Config struct {
	URL         *url.URL
	Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
}

func init() {
	options.Register("rest", Config{})
}

// NewConfig returns a new Config with the default values filled in.
func NewConfig() Config {
	return Config{
		Connections: 5,
	}
}

// ParseConfig parses the string s and extracts the REST server URL.
func ParseConfig(s string) (*Config, error) {
	if !strings.HasPrefix(s, "rest:") {
		return nil, errors.New("invalid REST backend specification")
	}

	s = prepareURL(s)

	u, err := url.Parse(s)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	cfg := NewConfig()
	cfg.URL = u
	return &cfg, nil
}

// StripPassword removes the password from the URL
// If the repository location cannot be parsed as a valid URL, it will be returned as is
// (it's because this function is used for logging errors)
func StripPassword(s string) string {
	// fix: guard against inputs without the "rest:" prefix — the unchecked
	// s[:5]/s[5:] slicing below would panic on shorter strings. For all
	// well-formed "rest:..." locations the behavior is unchanged.
	if !strings.HasPrefix(s, "rest:") {
		return s
	}

	scheme := s[:5]
	s = prepareURL(s)

	u, err := url.Parse(s)
	if err != nil {
		return scheme + s
	}

	if _, set := u.User.Password(); !set {
		return scheme + s
	}

	// a password was set: we replace it with ***
	return scheme + strings.Replace(u.String(), u.User.String()+"@", u.User.Username()+":***@", 1)
}

// prepareURL strips the "rest:" scheme prefix and ensures a trailing slash.
func prepareURL(s string) string {
	s = s[5:]
	if !strings.HasSuffix(s, "/") {
		s += "/"
	}
	return s
}

var _ backend.ApplyEnvironmenter = &Config{}

// ApplyEnvironment saves values from the environment to the config.
func (cfg *Config) ApplyEnvironment(prefix string) {
	username := cfg.URL.User.Username()
	_, pwdSet := cfg.URL.User.Password()

	// Only apply env variable values if neither username nor password are provided.
	if username == "" && !pwdSet {
		envName := os.Getenv(prefix + "RESTIC_REST_USERNAME")
		envPwd := os.Getenv(prefix + "RESTIC_REST_PASSWORD")
		cfg.URL.User = url.UserPassword(envName, envPwd)
	}
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/rest/rest_int_test.go
internal/backend/rest/rest_int_test.go
package rest_test

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"reflect"
	"strconv"
	"testing"

	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/backend/rest"
)

// TestListAPI verifies that Backend.List speaks both list protocol versions:
// v1 (names only, one extra HEAD per file) and v2 (names and sizes in one
// response), selected by the server's Content-Type header.
func TestListAPI(t *testing.T) {
	var tests = []struct {
		Name string

		ContentType string // response header
		Data        string // response data
		Requests    int
		Result      []backend.FileInfo
	}{
		{
			Name:        "content-type-unknown",
			ContentType: "application/octet-stream",
			Data: `[
				"1122e6749358b057fa1ac6b580a0fbe7a9a5fbc92e82743ee21aaf829624a985",
				"3b6ec1af8d4f7099d0445b12fdb75b166ba19f789e5c48350c423dc3b3e68352",
				"8271d221a60e0058e6c624f248d0080fc04f4fac07a28584a9b89d0eb69e189b"
			]`,
			Result: []backend.FileInfo{
				{Name: "1122e6749358b057fa1ac6b580a0fbe7a9a5fbc92e82743ee21aaf829624a985", Size: 4386},
				{Name: "3b6ec1af8d4f7099d0445b12fdb75b166ba19f789e5c48350c423dc3b3e68352", Size: 15214},
				{Name: "8271d221a60e0058e6c624f248d0080fc04f4fac07a28584a9b89d0eb69e189b", Size: 33393},
			},
			Requests: 4,
		},
		{
			Name:        "content-type-v1",
			ContentType: "application/vnd.x.restic.rest.v1",
			Data: `[
				"1122e6749358b057fa1ac6b580a0fbe7a9a5fbc92e82743ee21aaf829624a985",
				"3b6ec1af8d4f7099d0445b12fdb75b166ba19f789e5c48350c423dc3b3e68352",
				"8271d221a60e0058e6c624f248d0080fc04f4fac07a28584a9b89d0eb69e189b"
			]`,
			Result: []backend.FileInfo{
				{Name: "1122e6749358b057fa1ac6b580a0fbe7a9a5fbc92e82743ee21aaf829624a985", Size: 4386},
				{Name: "3b6ec1af8d4f7099d0445b12fdb75b166ba19f789e5c48350c423dc3b3e68352", Size: 15214},
				{Name: "8271d221a60e0058e6c624f248d0080fc04f4fac07a28584a9b89d0eb69e189b", Size: 33393},
			},
			Requests: 4,
		},
		{
			Name:        "content-type-v2",
			ContentType: "application/vnd.x.restic.rest.v2",
			Data: `[
				{"name": "1122e6749358b057fa1ac6b580a0fbe7a9a5fbc92e82743ee21aaf829624a985", "size": 1001},
				{"name": "3b6ec1af8d4f7099d0445b12fdb75b166ba19f789e5c48350c423dc3b3e68352", "size": 1002},
				{"name": "8271d221a60e0058e6c624f248d0080fc04f4fac07a28584a9b89d0eb69e189b", "size": 1003}
			]`,
			Result: []backend.FileInfo{
				{Name: "1122e6749358b057fa1ac6b580a0fbe7a9a5fbc92e82743ee21aaf829624a985", Size: 1001},
				{Name: "3b6ec1af8d4f7099d0445b12fdb75b166ba19f789e5c48350c423dc3b3e68352", Size: 1002},
				{Name: "8271d221a60e0058e6c624f248d0080fc04f4fac07a28584a9b89d0eb69e189b", Size: 1003},
			},
			Requests: 1,
		},
	}

	for _, test := range tests {
		t.Run(test.Name, func(t *testing.T) {
			numRequests := 0
			srv := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
				numRequests++
				t.Logf("req %v %v, accept: %v", req.Method, req.URL.Path, req.Header["Accept"])

				// fix: this handler runs on a server goroutine, where calling
				// t.Fatal is not allowed (testing.T.FailNow must run on the
				// test goroutine); use t.Error and return instead.
				switch req.Method {
				case "GET":
					// list files in data/
					res.Header().Set("Content-Type", test.ContentType)
					if _, err := res.Write([]byte(test.Data)); err != nil {
						t.Error(err)
					}
					return
				case "HEAD":
					// stat file in data/, use the first two bytes in the name
					// of the file as the size :)
					filename := req.URL.Path[6:]
					length, err := strconv.ParseInt(filename[:4], 16, 64)
					if err != nil {
						t.Error(err)
						return
					}

					res.Header().Set("Content-Length", fmt.Sprintf("%d", length))
					res.WriteHeader(http.StatusOK)
					return
				}

				t.Errorf("unhandled request %v %v", req.Method, req.URL.Path)
			}))
			defer srv.Close()

			srvURL, err := url.Parse(srv.URL)
			if err != nil {
				t.Fatal(err)
			}

			cfg := rest.Config{
				Connections: 5,
				URL:         srvURL,
			}

			be, err := rest.Open(context.TODO(), cfg, http.DefaultTransport, t.Logf)
			if err != nil {
				t.Fatal(err)
			}
			// fix: register the cleanup immediately after a successful Open
			// instead of at the very end of the subtest, so the backend is
			// closed even when an assertion below fails.
			defer func() {
				if err := be.Close(); err != nil {
					t.Fatal(err)
				}
			}()

			var list []backend.FileInfo
			err = be.List(context.TODO(), backend.PackFile, func(fi backend.FileInfo) error {
				list = append(list, fi)
				return nil
			})
			if err != nil {
				t.Fatal(err)
			}

			if !reflect.DeepEqual(list, test.Result) {
				t.Fatalf("wrong response returned, want:\n  %v\ngot:  %v", test.Result, list)
			}

			if numRequests != test.Requests {
				t.Fatalf("wrong number of HTTP requests executed, want %d, got %d", test.Requests, numRequests)
			}
		})
	}
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/rest/config_test.go
internal/backend/rest/config_test.go
package rest

import (
	"net/url"
	"testing"

	"github.com/restic/restic/internal/backend/test"
)

// parseURL parses s and panics on error; test fixtures only.
func parseURL(s string) *url.URL {
	u, err := url.Parse(s)
	if err != nil {
		panic(err)
	}

	return u
}

// configTests checks that ParseConfig normalizes locations (trailing slash)
// and fills in the default connection limit.
var configTests = []test.ConfigTestData[Config]{
	{
		S: "rest:http://localhost:1234",
		Cfg: Config{
			URL:         parseURL("http://localhost:1234/"),
			Connections: 5,
		},
	},
	{
		S: "rest:http://localhost:1234/",
		Cfg: Config{
			URL:         parseURL("http://localhost:1234/"),
			Connections: 5,
		},
	},
	{
		S: "rest:http+unix:///tmp/rest.socket:/my_backup_repo/",
		Cfg: Config{
			URL:         parseURL("http+unix:///tmp/rest.socket:/my_backup_repo/"),
			Connections: 5,
		},
	},
}

func TestParseConfig(t *testing.T) {
	test.ParseConfigTester(t, ParseConfig, configTests)
}

// passwordTests checks that StripPassword masks any password component with
// "***" while leaving password-less locations unchanged.
var passwordTests = []struct {
	input    string
	expected string
}{
	{
		"rest:",
		"rest:/",
	},
	{
		"rest:localhost/",
		"rest:localhost/",
	},
	{
		"rest::123/",
		"rest::123/",
	},
	{
		"rest:http://",
		"rest:http://",
	},
	{
		"rest:http://hostname.foo:1234/",
		"rest:http://hostname.foo:1234/",
	},
	{
		"rest:http://user@hostname.foo:1234/",
		"rest:http://user@hostname.foo:1234/",
	},
	{
		"rest:http://user:@hostname.foo:1234/",
		"rest:http://user:***@hostname.foo:1234/",
	},
	{
		"rest:http://user:p@hostname.foo:1234/",
		"rest:http://user:***@hostname.foo:1234/",
	},
	{
		"rest:http://user:pppppaaafhhfuuwiiehhthhghhdkjaoowpprooghjjjdhhwuuhgjsjhhfdjhruuhsjsdhhfhshhsppwufhhsjjsjs@hostname.foo:1234/",
		"rest:http://user:***@hostname.foo:1234/",
	},
	{
		"rest:http://user:password@hostname",
		"rest:http://user:***@hostname/",
	},
	{
		"rest:http://user:password@:123",
		"rest:http://user:***@:123/",
	},
	{
		"rest:http://user:password@",
		"rest:http://user:***@/",
	},
}

func TestStripPassword(t *testing.T) {
	// Make sure that the factory uses the correct method
	StripPassword := NewFactory().StripPassword

	for i, test := range passwordTests {
		t.Run(test.input, func(t *testing.T) {
			result := StripPassword(test.input)
			if result != test.expected {
				t.Errorf("test %d: expected '%s' but got '%s'", i, test.expected, result)
			}
		})
	}
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/gs/gs.go
internal/backend/gs/gs.go
// Package gs provides a restic backend for Google Cloud Storage.
package gs

import (
	"context"
	"crypto/md5"
	"hash"
	"io"
	"net/http"
	"os"
	"path"
	"strings"

	"cloud.google.com/go/storage"
	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/backend/layout"
	"github.com/restic/restic/internal/backend/location"
	"github.com/restic/restic/internal/backend/util"
	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
)

// Backend stores data in a GCS bucket.
//
// The service account used to access the bucket must have these permissions:
//   - storage.objects.create
//   - storage.objects.delete
//   - storage.objects.get
//   - storage.objects.list
type Backend struct {
	gcsClient    *storage.Client
	projectID    string
	connections  uint // configured connection limit, reported via Properties()
	bucketName   string
	region       string
	bucket       *storage.BucketHandle
	prefix       string // path prefix inside the bucket
	listMaxItems int    // page size for List; overridable via SetListMaxItems
	layout.Layout
}

// Ensure that *Backend implements backend.Backend.
var _ backend.Backend = &Backend{}

// NewFactory registers the "gs" location scheme; GCS locations never contain
// a password, hence location.NoPassword.
func NewFactory() location.Factory {
	return location.NewHTTPBackendFactory("gs", ParseConfig, location.NoPassword, Create, Open)
}

// getStorageClient builds a GCS client on top of rt, authenticating either
// with the GOOGLE_ACCESS_TOKEN environment variable or with the Google
// default token source (service account / ADC).
func getStorageClient(rt http.RoundTripper) (*storage.Client, error) {
	// create a new HTTP client
	httpClient := &http.Client{
		Transport: rt,
	}

	// create a new context with the HTTP client stored at the oauth2.HTTPClient key
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, httpClient)

	var ts oauth2.TokenSource
	if token := os.Getenv("GOOGLE_ACCESS_TOKEN"); token != "" {
		ts = oauth2.StaticTokenSource(&oauth2.Token{
			AccessToken: token,
			TokenType:   "Bearer",
		})
	} else {
		var err error
		ts, err = google.DefaultTokenSource(ctx, storage.ScopeReadWrite)
		if err != nil {
			return nil, err
		}
	}

	oauthClient := oauth2.NewClient(ctx, ts)

	gcsClient, err := storage.NewClient(ctx, option.WithHTTPClient(oauthClient))
	if err != nil {
		return nil, err
	}

	return gcsClient, nil
}

// bucketExists reports whether the bucket exists; a "bucket not exist" error
// from the API is translated to (false, nil).
func (be *Backend) bucketExists(ctx context.Context, bucket *storage.BucketHandle) (bool, error) {
	_, err := bucket.Attrs(ctx)
	if err == storage.ErrBucketNotExist {
		return false, nil
	}
	return err == nil, err
}

// defaultListMaxItems is the default page size used by List.
const defaultListMaxItems = 1000

// open builds a Backend from cfg without touching the bucket.
func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
	debug.Log("open, config %#v", cfg)

	gcsClient, err := getStorageClient(rt)
	if err != nil {
		return nil, errors.Wrap(err, "getStorageClient")
	}

	be := &Backend{
		gcsClient:    gcsClient,
		projectID:    cfg.ProjectID,
		connections:  cfg.Connections,
		bucketName:   cfg.Bucket,
		region:       cfg.Region,
		bucket:       gcsClient.Bucket(cfg.Bucket),
		prefix:       cfg.Prefix,
		Layout:       layout.NewDefaultLayout(cfg.Prefix, path.Join),
		listMaxItems: defaultListMaxItems,
	}

	return be, nil
}

// Open opens the gs backend at the specified bucket.
func Open(_ context.Context, cfg Config, rt http.RoundTripper, _ func(string, ...interface{})) (backend.Backend, error) {
	return open(cfg, rt)
}

// Create opens the gs backend at the specified bucket and attempts to creates
// the bucket if it does not exist yet.
//
// The service account must have the "storage.buckets.create" permission to
// create a bucket the does not yet exist.
func Create(ctx context.Context, cfg Config, rt http.RoundTripper, _ func(string, ...interface{})) (backend.Backend, error) {
	be, err := open(cfg, rt)
	if err != nil {
		return nil, err
	}

	// Try to determine if the bucket exists. If it does not, try to create it.
	exists, err := be.bucketExists(ctx, be.bucket)
	if err != nil {
		if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusForbidden {
			// the bucket might exist!
			// however, the client doesn't have storage.bucket.get permission
			return be, nil
		}
		return nil, errors.WithStack(err)
	}

	if !exists {
		bucketAttrs := &storage.BucketAttrs{
			Location: cfg.Region,
		}
		// Bucket doesn't exist, try to create it.
		if err := be.bucket.Create(ctx, be.projectID, bucketAttrs); err != nil {
			// Always an error, as the bucket definitely doesn't exist.
			return nil, errors.WithStack(err)
		}
	}

	return be, nil
}

// SetListMaxItems sets the number of list items to load per request.
func (be *Backend) SetListMaxItems(i int) {
	be.listMaxItems = i
}

// IsNotExist returns true if the error is caused by a not existing file.
func (be *Backend) IsNotExist(err error) bool {
	return errors.Is(err, storage.ErrObjectNotExist)
}

// IsPermanentError reports whether retrying the operation cannot succeed:
// missing objects, unsatisfiable ranges and auth failures.
func (be *Backend) IsPermanentError(err error) bool {
	if be.IsNotExist(err) {
		return true
	}
	var gerr *googleapi.Error
	if errors.As(err, &gerr) {
		if gerr.Code == http.StatusRequestedRangeNotSatisfiable || gerr.Code == http.StatusUnauthorized || gerr.Code == http.StatusForbidden {
			return true
		}
	}
	return false
}

// Properties reports the configured connection limit; GCS object writes
// replace objects atomically.
func (be *Backend) Properties() backend.Properties {
	return backend.Properties{
		Connections:      be.connections,
		HasAtomicReplace: true,
	}
}

// Hasher may return a hash function for calculating a content hash for the backend
func (be *Backend) Hasher() hash.Hash {
	return md5.New()
}

// Path returns the path in the bucket that is used for this backend.
func (be *Backend) Path() string {
	return be.prefix
}

// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
	objName := be.Filename(h)

	// Set chunk size to zero to disable resumable uploads.
	//
	// With a non-zero chunk size (the default is
	// googleapi.DefaultUploadChunkSize, 8MB), Insert will buffer data from
	// rd in chunks of this size so it can upload these chunks in
	// individual requests.
	//
	// This chunking allows the library to automatically handle network
	// interruptions and re-upload only the last chunk rather than the full
	// file.
	//
	// Unfortunately, this buffering doesn't play nicely with
	// --limit-upload, which applies a rate limit to rd. This rate limit
	// ends up only limiting the read from rd into the buffer rather than
	// the network traffic itself. This results in poor network rate limit
	// behavior, where individual chunks are written to the network at full
	// bandwidth for several seconds, followed by several seconds of no
	// network traffic as the next chunk is read through the rate limiter.
	//
	// By disabling chunking, rd is passed further down the request stack,
	// where there is less (but some) buffering, which ultimately results
	// in better rate limiting behavior.
	//
	// restic typically writes small blobs (4MB-30MB), so the resumable
	// uploads are not providing significant benefit anyways.
	w := be.bucket.Object(objName).NewWriter(ctx)
	w.ChunkSize = 0
	// let the server verify the upload against the precomputed MD5
	w.MD5 = rd.Hash()
	wbytes, err := io.Copy(w, rd)
	cerr := w.Close()
	if err == nil {
		err = cerr
	}

	if err != nil {
		return errors.WithStack(err)
	}

	// sanity check
	if wbytes != rd.Length() {
		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", wbytes, rd.Length())
	}
	return nil
}

// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	return util.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}

// openReader opens a ranged reader for h; a length of 0 reads to the end.
func (be *Backend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
	if length == 0 {
		// negative length indicates read till end to GCS lib
		length = -1
	}

	objName := be.Filename(h)

	r, err := be.bucket.Object(objName).NewRangeReader(ctx, offset, int64(length))
	if err != nil {
		return nil, err
	}

	if length > 0 && r.Attrs.Size < offset+int64(length) {
		// the caller requested a range past the end of the object
		_ = r.Close()
		return nil, &googleapi.Error{Code: http.StatusRequestedRangeNotSatisfiable, Message: "restic-file-too-short"}
	}

	return r, err
}

// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h backend.Handle) (bi backend.FileInfo, err error) {
	objName := be.Filename(h)

	attr, err := be.bucket.Object(objName).Attrs(ctx)
	if err != nil {
		return backend.FileInfo{}, errors.WithStack(err)
	}

	return backend.FileInfo{Size: attr.Size, Name: h.Name}, nil
}

// Remove removes the blob with the given name and type.
func (be *Backend) Remove(ctx context.Context, h backend.Handle) error {
	objName := be.Filename(h)

	err := be.bucket.Object(objName).Delete(ctx)

	// deleting a non-existent object is not an error
	if be.IsNotExist(err) {
		err = nil
	}

	return errors.WithStack(err)
}

// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
	prefix, _ := be.Basedir(t)

	// make sure prefix ends with a slash
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	itr := be.bucket.Objects(ctx, &storage.Query{Prefix: prefix})

	for {
		attrs, err := itr.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}

		m := strings.TrimPrefix(attrs.Name, prefix)
		if m == "" {
			continue
		}

		fi := backend.FileInfo{
			Name: path.Base(m),
			Size: attrs.Size,
		}

		err = fn(fi)
		if err != nil {
			return err
		}

		if ctx.Err() != nil {
			return ctx.Err()
		}
	}

	return ctx.Err()
}

// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *Backend) Delete(ctx context.Context) error {
	return util.DefaultDelete(ctx, be)
}

// Close does nothing.
func (be *Backend) Close() error { return nil }

// Warmup not implemented
func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
	return []backend.Handle{}, nil
}

// WarmupWait not implemented
func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/gs/config.go
internal/backend/gs/config.go
package gs

import (
	"os"
	"path"
	"strings"

	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/options"
)

// Config contains all configuration necessary to connect to a Google Cloud Storage
// bucket. We use Google's default application credentials to acquire an access token, so
// we don't require that calling code supply any authentication material here.
type Config struct {
	ProjectID string
	Bucket    string
	Prefix    string

	Connections uint   `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
	Region      string `option:"region" help:"region to create the bucket in (default: us)"`
}

// NewConfig returns a new Config with the default values filled in.
func NewConfig() Config {
	return Config{
		Connections: 5,
		Region:      "us",
	}
}

// register the gs options with the global options registry at load time
func init() {
	options.Register("gs", Config{})
}

// ParseConfig parses the string s and extracts the gcs config. The
// supported configuration format is gs:bucketName:/[prefix].
func ParseConfig(s string) (*Config, error) {
	if !strings.HasPrefix(s, "gs:") {
		return nil, errors.New("gs: invalid format")
	}

	// strip prefix "gs:"
	s = s[3:]

	// use the first entry of the path as the bucket name and the
	// remainder as prefix
	bucket, prefix, colon := strings.Cut(s, ":")
	if !colon {
		return nil, errors.New("gs: invalid format: bucket name or path not found")
	}
	// normalize the prefix to a clean, relative path
	prefix = strings.TrimPrefix(path.Clean(prefix), "/")
	cfg := NewConfig()
	cfg.Bucket = bucket
	cfg.Prefix = prefix
	return &cfg, nil
}

var _ backend.ApplyEnvironmenter = &Config{}

// ApplyEnvironment saves values from the environment to the config.
func (cfg *Config) ApplyEnvironment(prefix string) {
	if cfg.ProjectID == "" {
		cfg.ProjectID = os.Getenv(prefix + "GOOGLE_PROJECT_ID")
	}
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/gs/config_test.go
internal/backend/gs/config_test.go
package gs

import (
	"testing"

	"github.com/restic/restic/internal/backend/test"
)

// configTests checks that ParseConfig fills in defaults and strips trailing
// slashes from the prefix.
var configTests = []test.ConfigTestData[Config]{
	{S: "gs:bucketname:/", Cfg: Config{
		Bucket:      "bucketname",
		Prefix:      "",
		Connections: 5,
		Region:      "us",
	}},
	{S: "gs:bucketname:/prefix/directory", Cfg: Config{
		Bucket:      "bucketname",
		Prefix:      "prefix/directory",
		Connections: 5,
		Region:      "us",
	}},
	{S: "gs:bucketname:/prefix/directory/", Cfg: Config{
		Bucket:      "bucketname",
		Prefix:      "prefix/directory",
		Connections: 5,
		Region:      "us",
	}},
}

func TestParseConfig(t *testing.T) {
	test.ParseConfigTester(t, ParseConfig, configTests)
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/gs/gs_test.go
internal/backend/gs/gs_test.go
package gs_test

import (
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/restic/restic/internal/backend/gs"
	"github.com/restic/restic/internal/backend/test"
	rtest "github.com/restic/restic/internal/test"
)

// newGSTestSuite builds the generic backend test suite against a real GCS
// bucket configured via RESTIC_TEST_GS_* environment variables.
func newGSTestSuite() *test.Suite[gs.Config] {
	return &test.Suite[gs.Config]{
		// do not use excessive data
		MinimalData: true,

		// NewConfig returns a config for a new temporary backend that will be used in tests.
		NewConfig: func() (*gs.Config, error) {
			cfg, err := gs.ParseConfig(os.Getenv("RESTIC_TEST_GS_REPOSITORY"))
			if err != nil {
				return nil, err
			}

			cfg.ProjectID = os.Getenv("RESTIC_TEST_GS_PROJECT_ID")
			// use a fresh prefix per run so concurrent runs don't collide
			cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
			return cfg, nil
		},
		Factory: gs.NewFactory(),
	}
}

func TestBackendGS(t *testing.T) {
	defer func() {
		if t.Skipped() {
			rtest.SkipDisallowed(t, "restic/backend/gs.TestBackendGS")
		}
	}()

	vars := []string{
		"RESTIC_TEST_GS_PROJECT_ID",
		"RESTIC_TEST_GS_REPOSITORY",
	}

	for _, v := range vars {
		if os.Getenv(v) == "" {
			t.Skipf("environment variable %v not set", v)
			return
		}
	}

	if os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")+os.Getenv("GOOGLE_ACCESS_TOKEN") == "" {
		t.Skipf("environment variable GOOGLE_APPLICATION_CREDENTIALS not set, nor GOOGLE_ACCESS_TOKEN")
		return
	}

	t.Logf("run tests")
	newGSTestSuite().RunTests(t)
}

func BenchmarkBackendGS(t *testing.B) {
	vars := []string{
		"RESTIC_TEST_GS_PROJECT_ID",
		"RESTIC_TEST_GS_REPOSITORY",
	}

	for _, v := range vars {
		if os.Getenv(v) == "" {
			t.Skipf("environment variable %v not set", v)
			return
		}
	}

	if os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")+os.Getenv("GOOGLE_ACCESS_TOKEN") == "" {
		t.Skipf("environment variable GOOGLE_APPLICATION_CREDENTIALS not set, nor GOOGLE_ACCESS_TOKEN")
		return
	}

	t.Logf("run tests")
	newGSTestSuite().RunBenchmarks(t)
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/local/local.go
internal/backend/local/local.go
package local

import (
	"context"
	"fmt"
	"hash"
	"io"
	"os"
	"path/filepath"
	"syscall"

	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/backend/layout"
	"github.com/restic/restic/internal/backend/limiter"
	"github.com/restic/restic/internal/backend/location"
	"github.com/restic/restic/internal/backend/util"
	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/fs"

	"github.com/cenkalti/backoff/v4"
)

// Local is a backend in a local directory.
type Local struct {
	Config
	layout.Layout
	util.Modes
}

// ensure statically that *Local implements backend.Backend.
var _ backend.Backend = &Local{}

// errTooShort is returned by openReader when a file is smaller than the
// requested range; IsPermanentError treats it as non-retriable.
var errTooShort = fmt.Errorf("file is too short")

// NewFactory returns a location factory for the "local" scheme with
// rate-limiting wrapped around Create and Open.
func NewFactory() location.Factory {
	return location.NewLimitedBackendFactory("local", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open))
}

// open builds a *Local for cfg, deriving file/dir permissions from the
// existing config file if there is one.
func open(cfg Config) (*Local, error) {
	l := layout.NewDefaultLayout(cfg.Path, filepath.Join)

	fi, err := os.Stat(l.Filename(backend.Handle{Type: backend.ConfigFile}))
	m := util.DeriveModesFromFileInfo(fi, err)
	debug.Log("using (%03O file, %03O dir) permissions", m.File, m.Dir)

	return &Local{
		Config: cfg,
		Layout: l,
		Modes:  m,
	}, nil
}

// Open opens the local backend as specified by config.
func Open(_ context.Context, cfg Config, _ func(string, ...interface{})) (*Local, error) {
	debug.Log("open local backend at %v", cfg.Path)
	return open(cfg)
}

// Create creates all the necessary files and directories for a new local
// backend at dir. Afterwards a new config blob should be created.
func Create(_ context.Context, cfg Config, _ func(string, ...interface{})) (*Local, error) {
	debug.Log("create local backend at %v", cfg.Path)

	be, err := open(cfg)
	if err != nil {
		return nil, err
	}

	// test if config file already exists
	_, err = os.Lstat(be.Filename(backend.Handle{Type: backend.ConfigFile}))
	if err == nil {
		return nil, errors.New("config file already exists")
	}

	// create paths for data and refs
	for _, d := range be.Paths() {
		err := os.MkdirAll(d, be.Modes.Dir)
		if err != nil {
			return nil, errors.WithStack(err)
		}
	}

	return be, nil
}

// Properties reports the connection limit and that rename-based replacement
// is atomic.
func (b *Local) Properties() backend.Properties {
	return backend.Properties{
		Connections:      b.Config.Connections,
		HasAtomicReplace: true,
	}
}

// Hasher may return a hash function for calculating a content hash for the backend
func (b *Local) Hasher() hash.Hash {
	return nil
}

// IsNotExist returns true if the error is caused by a non existing file.
func (b *Local) IsNotExist(err error) bool {
	return errors.Is(err, os.ErrNotExist)
}

// IsPermanentError reports whether retrying cannot fix err: missing files,
// too-short files, and permission errors.
func (b *Local) IsPermanentError(err error) bool {
	return b.IsNotExist(err) || errors.Is(err, errTooShort) || errors.Is(err, os.ErrPermission)
}

// Save stores data in the backend at the handle.
//
// The data is written to a temporary file in the target directory, synced,
// renamed to its final name, and the directory is synced to commit the
// rename. This ordering makes the write atomic with respect to crashes.
func (b *Local) Save(_ context.Context, h backend.Handle, rd backend.RewindReader) (err error) {
	finalname := b.Filename(h)
	dir := filepath.Dir(finalname)

	defer func() {
		// Mark non-retriable errors as such
		if errors.Is(err, syscall.ENOSPC) || os.IsPermission(err) {
			err = backoff.Permanent(err)
		}
	}()

	// Create new file with a temporary name.
	tmpname := filepath.Base(finalname) + "-tmp-"
	f, err := tempFile(dir, tmpname)
	if b.IsNotExist(err) {
		debug.Log("error %v: creating dir", err)

		// error is caused by a missing directory, try to create it
		mkdirErr := os.MkdirAll(dir, b.Modes.Dir)
		if mkdirErr != nil {
			debug.Log("error creating dir %v: %v", dir, mkdirErr)
		} else {
			// try again
			f, err = tempFile(dir, tmpname)
		}
	}

	if err != nil {
		return errors.WithStack(err)
	}

	defer func(f *os.File) {
		if err != nil {
			_ = f.Close() // Double Close is harmless.
			// Remove after Rename is harmless: we embed the final name in the
			// temporary's name and no other goroutine will get the same data to
			// Save, so the temporary name should never be reused by another
			// goroutine.
			_ = os.Remove(f.Name())
		}
	}(f)

	// preallocate disk space
	if size := rd.Length(); size > 0 {
		if err := fs.PreallocateFile(f, size); err != nil {
			debug.Log("Failed to preallocate %v with size %v: %v", finalname, size, err)
		}
	}

	// save data, then sync
	wbytes, err := io.Copy(f, rd)
	if err != nil {
		return errors.WithStack(err)
	}
	// sanity check
	if wbytes != rd.Length() {
		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", wbytes, rd.Length())
	}

	// Ignore error if filesystem does not support fsync.
	err = f.Sync()
	syncNotSup := err != nil && (errors.Is(err, syscall.ENOTSUP) || isMacENOTTY(err))
	if err != nil && !syncNotSup {
		return errors.WithStack(err)
	}

	// Close, then rename. Windows doesn't like the reverse order.
	if err = f.Close(); err != nil {
		return errors.WithStack(err)
	}

	if err = os.Rename(f.Name(), finalname); err != nil {
		return errors.WithStack(err)
	}

	// Now sync the directory to commit the Rename.
	if !syncNotSup {
		err = fsyncDir(dir)
		if err != nil {
			return errors.WithStack(err)
		}
	}

	// try to mark file as read-only to avoid accidental modifications
	// ignore if the operation fails as some filesystems don't allow the chmod call
	// e.g. exfat and network file systems with certain mount options
	err = setFileReadonly(finalname, b.Modes.File)
	if err != nil && !os.IsPermission(err) {
		return errors.WithStack(err)
	}

	return nil
}

var tempFile = os.CreateTemp // Overridden by test.

// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (b *Local) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	return util.DefaultLoad(ctx, h, length, offset, b.openReader, fn)
}

// openReader opens the file for h and positions it at offset; length > 0
// limits the returned reader to that many bytes.
func (b *Local) openReader(_ context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
	f, err := os.Open(b.Filename(h))
	if err != nil {
		return nil, err
	}

	fi, err := f.Stat()
	if err != nil {
		_ = f.Close()
		return nil, err
	}

	size := fi.Size()
	if size < offset+int64(length) {
		_ = f.Close()
		return nil, errTooShort
	}

	if offset > 0 {
		_, err = f.Seek(offset, 0)
		if err != nil {
			_ = f.Close()
			return nil, err
		}
	}

	if length > 0 {
		return util.LimitReadCloser(f, int64(length)), nil
	}

	return f, nil
}

// Stat returns information about a blob.
func (b *Local) Stat(_ context.Context, h backend.Handle) (backend.FileInfo, error) {
	fi, err := os.Stat(b.Filename(h))
	if err != nil {
		return backend.FileInfo{}, errors.WithStack(err)
	}

	return backend.FileInfo{Size: fi.Size(), Name: h.Name}, nil
}

// Remove removes the blob with the given name and type.
func (b *Local) Remove(_ context.Context, h backend.Handle) error {
	fn := b.Filename(h)
	return removeFile(fn)
}

// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (b *Local) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) (err error) {
	basedir, subdirs := b.Basedir(t)
	if subdirs {
		err = visitDirs(ctx, basedir, fn)
	} else {
		err = visitFiles(ctx, basedir, fn, false)
	}

	if b.IsNotExist(err) {
		debug.Log("ignoring non-existing directory")
		return nil
	}

	return err
}

// The following two functions are like filepath.Walk, but visit only one or
// two levels of directory structure (including dir itself as the first level).
// Also, visitDirs assumes it sees a directory full of directories, while
// visitFiles wants a directory full of regular files.
func visitDirs(ctx context.Context, dir string, fn func(backend.FileInfo) error) error {
	d, err := os.Open(dir)
	if err != nil {
		return err
	}

	sub, err := d.Readdirnames(-1)
	if err != nil {
		// ignore subsequent errors
		_ = d.Close()
		return err
	}

	err = d.Close()
	if err != nil {
		return err
	}

	for _, f := range sub {
		err = visitFiles(ctx, filepath.Join(dir, f), fn, true)
		if err != nil {
			return err
		}
	}
	return ctx.Err()
}

// visitFiles calls fn for every regular file in dir. With ignoreNotADirectory
// set, a non-directory dir is silently skipped instead of reported.
func visitFiles(ctx context.Context, dir string, fn func(backend.FileInfo) error, ignoreNotADirectory bool) error {
	d, err := os.Open(dir)
	if err != nil {
		return err
	}

	if ignoreNotADirectory {
		fi, err := d.Stat()
		if err != nil || !fi.IsDir() {
			// ignore subsequent errors
			_ = d.Close()
			return err
		}
	}

	sub, err := d.Readdir(-1)
	if err != nil {
		// ignore subsequent errors
		_ = d.Close()
		return err
	}

	err = d.Close()
	if err != nil {
		return err
	}

	for _, fi := range sub {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		err := fn(backend.FileInfo{
			Name: fi.Name(),
			Size: fi.Size(),
		})
		if err != nil {
			return err
		}
	}
	return nil
}

// Delete removes the repository and all files.
func (b *Local) Delete(_ context.Context) error {
	return os.RemoveAll(b.Path)
}

// Close closes all open files.
func (b *Local) Close() error {
	// this does not need to do anything, all open files are closed within the
	// same function.
	return nil
}

// Warmup not implemented
func (b *Local) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
	return []backend.Handle{}, nil
}

// WarmupWait not implemented
func (b *Local) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/local/local_windows.go
internal/backend/local/local_windows.go
package local import ( "os" "github.com/restic/restic/internal/errors" ) // Can't explicitly flush directory changes on Windows. func fsyncDir(_ string) error { return nil } // Windows is not macOS. func isMacENOTTY(_ error) bool { return false } // We don't modify read-only on windows, // since it will make us unable to delete the file, // and this isn't common practice on this platform. func setFileReadonly(_ string, _ os.FileMode) error { return nil } func removeFile(f string) error { // Reset read-only flag, // as Windows won't let you delete a read-only file err := os.Chmod(f, 0666) if err != nil && !os.IsPermission(err) { return errors.WithStack(err) } return os.Remove(f) }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/local/config.go
internal/backend/local/config.go
package local import ( "strings" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/options" ) // Config holds all information needed to open a local repository. type Config struct { Path string Connections uint `option:"connections" help:"set a limit for the number of concurrent operations (default: 2)"` } // NewConfig returns a new config with default options applied. func NewConfig() Config { return Config{ Connections: 2, } } func init() { options.Register("local", Config{}) } // ParseConfig parses a local backend config. func ParseConfig(s string) (*Config, error) { if !strings.HasPrefix(s, "local:") { return nil, errors.New(`invalid format, prefix "local" not found`) } cfg := NewConfig() cfg.Path = s[6:] return &cfg, nil }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/local/local_internal_test.go
internal/backend/local/local_internal_test.go
package local

import (
	"context"
	"errors"
	"fmt"
	"os"
	"syscall"
	"testing"

	"github.com/restic/restic/internal/backend"
	rtest "github.com/restic/restic/internal/test"

	"github.com/cenkalti/backoff/v4"
)

// TestNoSpacePermanent checks that an ENOSPC during Save is wrapped as a
// backoff.PermanentError while still exposing the original error via
// errors.Is.
func TestNoSpacePermanent(t *testing.T) {
	oldTempFile := tempFile
	defer func() {
		tempFile = oldTempFile
	}()

	// stub tempFile so Save fails immediately with ENOSPC
	tempFile = func(_, _ string) (*os.File, error) {
		return nil, fmt.Errorf("not creating tempfile, %w", syscall.ENOSPC)
	}

	dir := rtest.TempDir(t)

	be, err := Open(context.Background(), Config{Path: dir, Connections: 2}, t.Logf)
	rtest.OK(t, err)
	defer func() {
		rtest.OK(t, be.Close())
	}()

	h := backend.Handle{Type: backend.ConfigFile}
	err = be.Save(context.Background(), h, nil)
	_, ok := err.(*backoff.PermanentError)
	rtest.Assert(t, ok, "error type should be backoff.PermanentError, got %T", err)
	rtest.Assert(t, errors.Is(err, syscall.ENOSPC), "could not recover original ENOSPC error")
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/local/config_test.go
internal/backend/local/config_test.go
package local

import (
	"testing"

	"github.com/restic/restic/internal/backend/test"
)

// configTests covers Unix paths, relative paths, Windows drive letters, and
// paths containing colons.
var configTests = []test.ConfigTestData[Config]{
	{S: "local:/some/path", Cfg: Config{
		Path:        "/some/path",
		Connections: 2,
	}},
	{S: "local:dir1/dir2", Cfg: Config{
		Path:        "dir1/dir2",
		Connections: 2,
	}},
	{S: "local:../dir1/dir2", Cfg: Config{
		Path:        "../dir1/dir2",
		Connections: 2,
	}},
	{S: "local:/dir1:foobar/dir2", Cfg: Config{
		Path:        "/dir1:foobar/dir2",
		Connections: 2,
	}},
	{S: `local:\dir1\foobar\dir2`, Cfg: Config{
		Path:        `\dir1\foobar\dir2`,
		Connections: 2,
	}},
	{S: `local:c:\dir1\foobar\dir2`, Cfg: Config{
		Path:        `c:\dir1\foobar\dir2`,
		Connections: 2,
	}},
	{S: `local:C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`, Cfg: Config{
		Path:        `C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`,
		Connections: 2,
	}},
	{S: `local:c:/dir1/foobar/dir2`, Cfg: Config{
		Path:        `c:/dir1/foobar/dir2`,
		Connections: 2,
	}},
}

func TestParseConfig(t *testing.T) {
	test.ParseConfigTester(t, ParseConfig, configTests)
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/local/layout_test.go
internal/backend/local/layout_test.go
package local

import (
	"context"
	"path/filepath"
	"testing"

	"github.com/restic/restic/internal/backend"
	rtest "github.com/restic/restic/internal/test"
)

// TestLayout opens repositories unpacked from tar fixtures and checks that
// List finds exactly the expected pack files.
func TestLayout(t *testing.T) {
	path := rtest.TempDir(t)

	var tests = []struct {
		filename        string
		failureExpected bool
		packfiles       map[string]bool
	}{
		{"repo-layout-default.tar.gz", false, map[string]bool{
			"aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false,
			"fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false,
			"c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false,
		}},
	}

	for _, test := range tests {
		t.Run(test.filename, func(t *testing.T) {
			rtest.SetupTarTestFixture(t, path, filepath.Join("..", "testdata", test.filename))

			repo := filepath.Join(path, "repo")
			be, err := Open(context.TODO(), Config{
				Path:        repo,
				Connections: 2,
			}, t.Logf)
			if err != nil {
				t.Fatal(err)
			}

			if be == nil {
				t.Fatalf("Open() returned nil but no error")
			}

			packs := make(map[string]bool)
			err = be.List(context.TODO(), backend.PackFile, func(fi backend.FileInfo) error {
				packs[fi.Name] = false
				return nil
			})
			if err != nil {
				t.Fatalf("List() returned error %v", err)
			}

			if len(packs) == 0 {
				t.Errorf("List() returned zero pack files")
			}

			// mark every expected id that was listed ...
			for id := range test.packfiles {
				if _, ok := packs[id]; !ok {
					t.Errorf("packfile with id %v not found", id)
				}

				packs[id] = true
			}

			// ... then any unmarked entry is an unexpected extra file
			for id, v := range packs {
				if !v {
					t.Errorf("unexpected id %v found", id)
				}
			}

			if err = be.Close(); err != nil {
				t.Errorf("Close() returned error %v", err)
			}

			rtest.RemoveAll(t, filepath.Join(path, "repo"))
		})
	}
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/local/local_test.go
internal/backend/local/local_test.go
package local_test

import (
	"context"
	"os"
	"path/filepath"
	"testing"

	"github.com/restic/restic/internal/backend/local"
	"github.com/restic/restic/internal/backend/test"
	rtest "github.com/restic/restic/internal/test"
)

// newTestSuite builds the generic backend test suite against a fresh
// temporary directory.
func newTestSuite(t testing.TB) *test.Suite[local.Config] {
	return &test.Suite[local.Config]{
		// NewConfig returns a config for a new temporary backend that will be used in tests.
		NewConfig: func() (*local.Config, error) {
			dir := rtest.TempDir(t)
			t.Logf("create new backend at %v", dir)

			cfg := &local.Config{
				Path:        dir,
				Connections: 2,
			}
			return cfg, nil
		},
		Factory: local.NewFactory(),
	}
}

func TestBackend(t *testing.T) {
	newTestSuite(t).RunTests(t)
}

func BenchmarkBackend(t *testing.B) {
	newTestSuite(t).RunBenchmarks(t)
}

// readdirnames returns the entries of dir, failing the test on any error.
func readdirnames(t testing.TB, dir string) []string {
	f, err := os.Open(dir)
	if err != nil {
		t.Fatal(err)
	}

	entries, err := f.Readdirnames(-1)
	if err != nil {
		t.Fatal(err)
	}

	err = f.Close()
	if err != nil {
		t.Fatal(err)
	}

	return entries
}

// empty asserts that dir contains no entries.
func empty(t testing.TB, dir string) {
	entries := readdirnames(t, dir)
	if len(entries) != 0 {
		t.Fatalf("directory %v is not empty, contains: %v", dir, entries)
	}
}

// openclose opens the backend at dir and closes it again, logging (not
// failing on) errors.
func openclose(t testing.TB, dir string) {
	cfg := local.Config{Path: dir}

	be, err := local.Open(context.TODO(), cfg, t.Logf)
	if err != nil {
		t.Logf("Open returned error %v", err)
	}

	if be != nil {
		err = be.Close()
		if err != nil {
			t.Logf("Close returned error %v", err)
		}
	}
}

func mkdir(t testing.TB, dir string) {
	err := os.Mkdir(dir, 0700)
	if err != nil {
		t.Fatal(err)
	}
}

func removeAll(t testing.TB, dir string) {
	err := os.RemoveAll(dir)
	if err != nil {
		t.Fatal(err)
	}
}

func TestOpenNotExistingDirectory(t *testing.T) {
	dir := rtest.TempDir(t)

	// local.Open must not create any files dirs in the repo
	openclose(t, filepath.Join(dir, "repo"))
	empty(t, dir)

	openclose(t, dir)
	empty(t, dir)

	mkdir(t, filepath.Join(dir, "data"))
	openclose(t, dir)
	removeAll(t, filepath.Join(dir, "data"))
	empty(t, dir)
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/local/doc.go
internal/backend/local/doc.go
// Package local implements repository storage in a local directory. package local
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/local/local_unix.go
internal/backend/local/local_unix.go
//go:build !windows

package local

import (
	"errors"
	"os"
	"runtime"
	"syscall"
)

// fsyncDir flushes changes to the directory dir.
// ENOTSUP/ENOENT/EINVAL (and macOS ENOTTY) are treated as "filesystem does
// not support directory sync" and ignored.
func fsyncDir(dir string) error {
	d, err := os.Open(dir)
	if err != nil {
		return err
	}

	err = d.Sync()
	if err != nil &&
		(errors.Is(err, syscall.ENOTSUP) || errors.Is(err, syscall.ENOENT) ||
			errors.Is(err, syscall.EINVAL) || isMacENOTTY(err)) {
		err = nil
	}

	cerr := d.Close()
	if err == nil {
		err = cerr
	}

	return err
}

// The ExFAT driver on some versions of macOS can return ENOTTY,
// "inappropriate ioctl for device", for fsync.
//
// https://github.com/restic/restic/issues/4016
// https://github.com/realm/realm-core/issues/5789
func isMacENOTTY(err error) bool {
	return runtime.GOOS == "darwin" && errors.Is(err, syscall.ENOTTY)
}

// set file to readonly
func setFileReadonly(f string, mode os.FileMode) error {
	err := os.Chmod(f, mode&^0222)
	// ignore the error if the FS does not support setting this mode (e.g. CIFS with gvfs on Linux)
	if err != nil && errors.Is(err, errors.ErrUnsupported) {
		return nil
	}
	return err
}

// removeFile deletes f; on Unix a plain Remove suffices.
func removeFile(f string) error {
	return os.Remove(f)
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/mem/mem_backend.go
internal/backend/mem/mem_backend.go
package mem import ( "bytes" "context" "encoding/base64" "fmt" "hash" "io" "net/http" "sync" "github.com/cespare/xxhash/v2" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/location" "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" ) type memMap map[backend.Handle][]byte // make sure that MemoryBackend implements backend.Backend var _ backend.Backend = &MemoryBackend{} // NewFactory creates a persistent mem backend func NewFactory() location.Factory { be := New() return location.NewHTTPBackendFactory[struct{}, *MemoryBackend]( "mem", func(_ string) (*struct{}, error) { return &struct{}{}, nil }, location.NoPassword, func(_ context.Context, _ struct{}, _ http.RoundTripper, _ func(string, ...interface{})) (*MemoryBackend, error) { return be, nil }, func(_ context.Context, _ struct{}, _ http.RoundTripper, _ func(string, ...interface{})) (*MemoryBackend, error) { return be, nil }, ) } var errNotFound = fmt.Errorf("not found") var errTooSmall = errors.New("access beyond end of file") const connectionCount = 2 // MemoryBackend is a mock backend that uses a map for storing all data in // memory. This should only be used for tests. type MemoryBackend struct { data memMap m sync.Mutex } // New returns a new backend that saves all data in a map in memory. func New() *MemoryBackend { be := &MemoryBackend{ data: make(memMap), } debug.Log("created new memory backend") return be } // IsNotExist returns true if the file does not exist. func (be *MemoryBackend) IsNotExist(err error) bool { return errors.Is(err, errNotFound) } func (be *MemoryBackend) IsPermanentError(err error) bool { return be.IsNotExist(err) || errors.Is(err, errTooSmall) } // Save adds new Data to the backend. 
func (be *MemoryBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { be.m.Lock() defer be.m.Unlock() h.IsMetadata = false if h.Type == backend.ConfigFile { h.Name = "" } if _, ok := be.data[h]; ok { return errors.New("file already exists") } buf, err := io.ReadAll(rd) if err != nil { return err } // sanity check if int64(len(buf)) != rd.Length() { return errors.Errorf("wrote %d bytes instead of the expected %d bytes", len(buf), rd.Length()) } beHash := be.Hasher() // must never fail according to interface _, err = beHash.Write(buf) if err != nil { panic(err) } if !bytes.Equal(beHash.Sum(nil), rd.Hash()) { return errors.Errorf("invalid file hash or content, got %s expected %s", base64.RawStdEncoding.EncodeToString(beHash.Sum(nil)), base64.RawStdEncoding.EncodeToString(rd.Hash()), ) } be.data[h] = buf return ctx.Err() } // Load runs fn with a reader that yields the contents of the file at h at the // given offset. func (be *MemoryBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { return util.DefaultLoad(ctx, h, length, offset, be.openReader, fn) } func (be *MemoryBackend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { be.m.Lock() defer be.m.Unlock() h.IsMetadata = false if h.Type == backend.ConfigFile { h.Name = "" } if _, ok := be.data[h]; !ok { return nil, errNotFound } buf := be.data[h] if offset+int64(length) > int64(len(buf)) { return nil, errTooSmall } buf = buf[offset:] if length > 0 { buf = buf[:length] } return io.NopCloser(bytes.NewReader(buf)), ctx.Err() } // Stat returns information about a file in the backend. 
func (be *MemoryBackend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) { be.m.Lock() defer be.m.Unlock() h.IsMetadata = false if h.Type == backend.ConfigFile { h.Name = "" } e, ok := be.data[h] if !ok { return backend.FileInfo{}, errNotFound } return backend.FileInfo{Size: int64(len(e)), Name: h.Name}, ctx.Err() } // Remove deletes a file from the backend. func (be *MemoryBackend) Remove(ctx context.Context, h backend.Handle) error { be.m.Lock() defer be.m.Unlock() h.IsMetadata = false if _, ok := be.data[h]; !ok { return errNotFound } delete(be.data, h) return ctx.Err() } // List returns a channel which yields entries from the backend. func (be *MemoryBackend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error { entries := make(map[string]int64) be.m.Lock() for entry, buf := range be.data { if entry.Type != t { continue } entries[entry.Name] = int64(len(buf)) } be.m.Unlock() for name, size := range entries { fi := backend.FileInfo{ Name: name, Size: size, } if ctx.Err() != nil { return ctx.Err() } err := fn(fi) if err != nil { return err } if ctx.Err() != nil { return ctx.Err() } } return ctx.Err() } func (be *MemoryBackend) Properties() backend.Properties { return backend.Properties{ Connections: connectionCount, HasAtomicReplace: false, } } // Hasher may return a hash function for calculating a content hash for the backend func (be *MemoryBackend) Hasher() hash.Hash { return xxhash.New() } // Delete removes all data in the backend. func (be *MemoryBackend) Delete(ctx context.Context) error { be.m.Lock() defer be.m.Unlock() if ctx.Err() != nil { return ctx.Err() } be.data = make(memMap) return nil } // Close closes the backend. 
func (be *MemoryBackend) Close() error {
	return nil
}

// Warmup not implemented
func (be *MemoryBackend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
	return []backend.Handle{}, nil
}

// WarmupWait not implemented
func (be *MemoryBackend) WarmupWait(_ context.Context, _ []backend.Handle) error {
	return nil
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/backend/mem/mem_backend_test.go
internal/backend/mem/mem_backend_test.go
package mem_test

import (
	"testing"

	"github.com/restic/restic/internal/backend/mem"
	"github.com/restic/restic/internal/backend/test"
)

// newTestSuite builds the generic backend test suite configured for the
// in-memory backend, which requires no configuration at all.
func newTestSuite() *test.Suite[struct{}] {
	suite := &test.Suite[struct{}]{
		// NewConfig returns a config for a new temporary backend that will be used in tests.
		NewConfig: func() (*struct{}, error) {
			return &struct{}{}, nil
		},
		Factory: mem.NewFactory(),
	}
	return suite
}

// TestSuiteBackendMem runs the generic backend tests against the in-memory backend.
func TestSuiteBackendMem(t *testing.T) {
	newTestSuite().RunTests(t)
}

// BenchmarkSuiteBackendMem runs the generic backend benchmarks against the in-memory backend.
func BenchmarkSuiteBackendMem(b *testing.B) {
	newTestSuite().RunBenchmarks(b)
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/feature/registry.go
internal/feature/registry.go
package feature // Flag is named such that checking for a feature uses `feature.Flag.Enabled(feature.ExampleFeature)`. var Flag = New() // flag names are written in kebab-case const ( BackendErrorRedesign FlagName = "backend-error-redesign" DeprecateLegacyIndex FlagName = "deprecate-legacy-index" DeprecateS3LegacyLayout FlagName = "deprecate-s3-legacy-layout" DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" ExplicitS3AnonymousAuth FlagName = "explicit-s3-anonymous-auth" SafeForgetKeepTags FlagName = "safe-forget-keep-tags" S3Restore FlagName = "s3-restore" ) func init() { Flag.SetFlags(map[FlagName]FlagDesc{ BackendErrorRedesign: {Type: Beta, Description: "enforce timeouts for stuck HTTP requests and use new backend error handling design."}, DeprecateLegacyIndex: {Type: Stable, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, DeprecateS3LegacyLayout: {Type: Stable, Description: "disable support for S3 legacy layout used up to restic 0.7.0. Use restic 0.17.3 to migrate if necessary."}, DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"}, ExplicitS3AnonymousAuth: {Type: Stable, Description: "forbid anonymous S3 authentication unless `-o s3.unsafe-anonymous-auth=true` is set"}, SafeForgetKeepTags: {Type: Stable, Description: "prevent deleting all snapshots if the tag passed to `forget --keep-tags tagname` does not exist"}, S3Restore: {Type: Alpha, Description: "restore S3 objects from cold storage classes when `-o s3.enable-restore=true` is set"}, }) }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/feature/features.go
internal/feature/features.go
package feature

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// state describes the lifecycle phase of a feature flag.
type state string

// FlagName is the user-visible name of a feature flag.
type FlagName string

const (
	// Alpha features are disabled by default. They do not guarantee any backwards compatibility and may change in arbitrary ways between restic versions.
	Alpha state = "alpha"
	// Beta features are enabled by default. They may still change, but incompatible changes should be avoided.
	Beta state = "beta"
	// Stable features are always enabled
	Stable state = "stable"
	// Deprecated features are always disabled
	Deprecated state = "deprecated"
)

// FlagDesc describes a feature flag: its lifecycle phase and help text.
type FlagDesc struct {
	Type        state
	Description string
}

// FlagSet tracks the known feature flags and their current values.
type FlagSet struct {
	flags   map[FlagName]*FlagDesc
	enabled map[FlagName]bool
}

// New returns an empty FlagSet; call SetFlags to register flags.
func New() *FlagSet {
	return &FlagSet{}
}

// getDefault returns the default value of a flag in the given phase.
func getDefault(phase state) bool {
	switch phase {
	case Beta, Stable:
		return true
	case Alpha, Deprecated:
		return false
	default:
		panic("unknown feature phase")
	}
}

// SetFlags replaces the set of known flags and resets every flag to its
// phase-specific default value.
func (f *FlagSet) SetFlags(flags map[FlagName]FlagDesc) {
	f.flags = map[FlagName]*FlagDesc{}
	f.enabled = map[FlagName]bool{}

	for name, desc := range flags {
		desc := desc
		f.flags[name] = &desc
		f.enabled[name] = getDefault(desc.Type)
	}
}

// Apply parses a comma-separated list of "name[=value]" assignments and
// applies it to the flag set. Stable and deprecated flags cannot be changed;
// attempts to do so only emit a warning via logWarning.
func (f *FlagSet) Apply(flags string, logWarning func(string)) error {
	if flags == "" {
		return nil
	}

	selection := make(map[string]bool)

	for _, assignment := range strings.Split(flags, ",") {
		name, valueStr, hasValue := strings.Cut(assignment, "=")
		if !hasValue {
			// a bare name means "enable"
			valueStr = "true"
		}

		value, err := strconv.ParseBool(valueStr)
		if err != nil {
			return fmt.Errorf("failed to parse value %q for feature flag %v: %w", valueStr, name, err)
		}

		selection[name] = value
	}

	for name, value := range selection {
		fname := FlagName(name)
		flag := f.flags[fname]
		if flag == nil {
			return fmt.Errorf("unknown feature flag %q", name)
		}

		switch flag.Type {
		case Alpha, Beta:
			f.enabled[fname] = value
		case Stable:
			logWarning(fmt.Sprintf("feature flag %q is always enabled and will be removed in a future release", fname))
		case Deprecated:
			logWarning(fmt.Sprintf("feature flag %q is always disabled and will be removed in a future release", fname))
		default:
			panic("unknown feature phase")
		}
	}

	return nil
}

// Enabled returns the current value of the given flag. It panics when the
// flag is unknown.
func (f *FlagSet) Enabled(name FlagName) bool {
	isEnabled, ok := f.enabled[name]
	if !ok {
		panic(fmt.Sprintf("unknown feature flag %v", name))
	}
	return isEnabled
}

// Help contains information about a feature.
type Help struct {
	Name        string
	Type        string
	Default     bool
	Description string
}

// List returns a description of all known flags, sorted by name.
func (f *FlagSet) List() []Help {
	var help []Help

	for name, flag := range f.flags {
		help = append(help, Help{
			Name:        string(name),
			Type:        string(flag.Type),
			Default:     getDefault(flag.Type),
			Description: flag.Description,
		})
	}

	sort.Slice(help, func(i, j int) bool {
		return help[i].Name < help[j].Name
	})

	return help
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/feature/testing_test.go
internal/feature/testing_test.go
package feature_test

import (
	"testing"

	"github.com/restic/restic/internal/feature"
	rtest "github.com/restic/restic/internal/test"
)

// TestSetFeatureFlag checks that feature.TestSetFlag overrides a flag and
// that the returned function restores the previous value.
func TestSetFeatureFlag(t *testing.T) {
	flags := buildTestFlagSet()
	rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled")

	reset := feature.TestSetFlag(t, flags, alpha, true)
	rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled")

	reset()
	rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled again")
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/feature/features_test.go
internal/feature/features_test.go
package feature_test

import (
	"fmt"
	"strings"
	"testing"

	"github.com/restic/restic/internal/feature"
	rtest "github.com/restic/restic/internal/test"
)

// One test flag per lifecycle phase.
var (
	alpha      = feature.FlagName("alpha-feature")
	beta       = feature.FlagName("beta-feature")
	stable     = feature.FlagName("stable-feature")
	deprecated = feature.FlagName("deprecated-feature")
)

var testFlags = map[feature.FlagName]feature.FlagDesc{
	alpha: {
		Type:        feature.Alpha,
		Description: "alpha",
	},
	beta: {
		Type:        feature.Beta,
		Description: "beta",
	},
	stable: {
		Type:        feature.Stable,
		Description: "stable",
	},
	deprecated: {
		Type:        feature.Deprecated,
		Description: "deprecated",
	},
}

// buildTestFlagSet returns a fresh FlagSet populated with the test flags.
func buildTestFlagSet() *feature.FlagSet {
	flags := feature.New()
	flags.SetFlags(testFlags)
	return flags
}

func TestFeatureDefaults(t *testing.T) {
	flags := buildTestFlagSet()
	for _, tc := range []struct {
		flag  feature.FlagName
		value bool
	}{
		{alpha, false},
		{beta, true},
		{stable, true},
		{deprecated, false},
	} {
		rtest.Assert(t, flags.Enabled(tc.flag) == tc.value, "expected flag %v to have value %v got %v", tc.flag, tc.value, flags.Enabled(tc.flag))
	}
}

// panicIfCalled serves as log callback for calls that must not log anything.
func panicIfCalled(msg string) {
	panic(msg)
}

func TestEmptyApply(t *testing.T) {
	flags := buildTestFlagSet()
	rtest.OK(t, flags.Apply("", panicIfCalled))

	rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled")
	rtest.Assert(t, flags.Enabled(beta), "expected beta feature to be enabled")
}

func TestFeatureApply(t *testing.T) {
	flags := buildTestFlagSet()

	rtest.OK(t, flags.Apply(string(alpha), panicIfCalled))
	rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled")

	rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", alpha), panicIfCalled))
	rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled")

	rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", alpha), panicIfCalled))
	rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled again")

	rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", beta), panicIfCalled))
	rtest.Assert(t, !flags.Enabled(beta), "expected beta feature to be disabled")

	logMsg := ""
	log := func(msg string) {
		logMsg = msg
	}

	rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", stable), log))
	rtest.Assert(t, flags.Enabled(stable), "expected stable feature to remain enabled")
	rtest.Assert(t, strings.Contains(logMsg, string(stable)), "unexpected log message for stable flag: %v", logMsg)

	logMsg = ""
	rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", deprecated), log))
	rtest.Assert(t, !flags.Enabled(deprecated), "expected deprecated feature to remain disabled")
	rtest.Assert(t, strings.Contains(logMsg, string(deprecated)), "unexpected log message for deprecated flag: %v", logMsg)
}

func TestFeatureMultipleApply(t *testing.T) {
	flags := buildTestFlagSet()

	rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true,%s=false", alpha, beta), panicIfCalled))
	rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled")
	rtest.Assert(t, !flags.Enabled(beta), "expected beta feature to be disabled")
}

func TestFeatureApplyInvalid(t *testing.T) {
	flags := buildTestFlagSet()

	err := flags.Apply("invalid-flag", panicIfCalled)
	rtest.Assert(t, err != nil && strings.Contains(err.Error(), "unknown feature flag"), "expected unknown feature flag error, got: %v", err)

	err = flags.Apply(fmt.Sprintf("%v=invalid", alpha), panicIfCalled)
	rtest.Assert(t, err != nil && strings.Contains(err.Error(), "failed to parse value"), "expected parsing error, got: %v", err)
}

// assertPanic fails the test unless the deferred recover catches a panic.
func assertPanic(t *testing.T) {
	if r := recover(); r == nil {
		t.Fatal("should have panicked")
	}
}

func TestFeatureQueryInvalid(t *testing.T) {
	defer assertPanic(t)

	flags := buildTestFlagSet()
	flags.Enabled("invalid-flag")
}

func TestFeatureSetInvalidPhase(t *testing.T) {
	defer assertPanic(t)

	flags := feature.New()
	flags.SetFlags(map[feature.FlagName]feature.FlagDesc{
		"invalid": {
			Type: "invalid",
		},
	})
}

func TestFeatureList(t *testing.T) {
	flags := buildTestFlagSet()

	rtest.Equals(t, []feature.Help{
		{string(alpha), string(feature.Alpha), false, "alpha"},
		{string(beta), string(feature.Beta), true, "beta"},
		{string(deprecated), string(feature.Deprecated), false, "deprecated"},
		{string(stable), string(feature.Stable), true, "stable"},
	}, flags.List())
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/feature/testing.go
internal/feature/testing.go
package feature

import (
	"fmt"
	"testing"
)

// TestSetFlag temporarily sets a feature flag to the given value until the
// returned function is called.
//
// Usage
// ```
// defer TestSetFlag(t, features.Flags, features.ExampleFlag, true)()
// ```
func TestSetFlag(_ *testing.T, f *FlagSet, flag FlagName, value bool) func() {
	previous := f.Enabled(flag)

	// Apply must not log anything for alpha/beta flags; treat a warning as a bug.
	mustNotLog := func(msg string) {
		panic(msg)
	}
	set := func(v bool) {
		if err := f.Apply(fmt.Sprintf("%s=%v", flag, v), mustNotLog); err != nil {
			// not reachable
			panic(err)
		}
	}

	set(value)
	return func() {
		set(previous)
	}
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/migrations/interface.go
internal/migrations/interface.go
package migrations

import (
	"context"

	"github.com/restic/restic/internal/restic"
)

// Migration implements a data migration.
type Migration interface {
	// Check returns true if the migration can be applied to a repo. If the option is not applicable it can return a specific reason.
	Check(context.Context, restic.Repository) (bool, string, error)

	// RepoCheck — NOTE(review): presumably reports whether a repository
	// check is advisable after applying the migration; confirm against callers.
	RepoCheck() bool

	// Apply runs the migration.
	Apply(context.Context, restic.Repository) error

	// Name returns a short name.
	Name() string

	// Desc returns a description what the migration does.
	Desc() string
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/migrations/upgrade_repo_v2_test.go
internal/migrations/upgrade_repo_v2_test.go
package migrations

import (
	"context"
	"testing"

	"github.com/restic/restic/internal/repository"
)

// TestUpgradeRepoV2 checks that the v1->v2 migration applies cleanly to a
// freshly created version 1 test repository.
func TestUpgradeRepoV2(t *testing.T) {
	repo, _, _ := repository.TestRepositoryWithVersion(t, 1)
	if repo.Config().Version != 1 {
		t.Fatal("test repo has wrong version")
	}

	ctx := context.Background()
	migration := &UpgradeRepoV2{}

	ok, _, err := migration.Check(ctx, repo)
	if err != nil {
		t.Fatal(err)
	}
	if !ok {
		t.Fatal("migration check returned false")
	}

	if err := migration.Apply(ctx, repo); err != nil {
		t.Fatal(err)
	}
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/migrations/upgrade_repo_v2.go
internal/migrations/upgrade_repo_v2.go
package migrations

import (
	"context"
	"fmt"

	"github.com/restic/restic/internal/repository"
	"github.com/restic/restic/internal/restic"
)

func init() {
	register(&UpgradeRepoV2{})
}

// UpgradeRepoV2 migrates a repository from format version 1 to version 2.
type UpgradeRepoV2 struct{}

// Name returns a short name.
func (*UpgradeRepoV2) Name() string {
	return "upgrade_repo_v2"
}

// Desc returns a description what the migration does.
func (*UpgradeRepoV2) Desc() string {
	return "upgrade a repository to version 2"
}

// Check reports whether the repository still uses version 1 and can be
// upgraded; otherwise it returns a reason explaining why not.
func (*UpgradeRepoV2) Check(_ context.Context, repo restic.Repository) (bool, string, error) {
	if repo.Config().Version != 1 {
		return false, fmt.Sprintf("repository is already upgraded to version %v", repo.Config().Version), nil
	}
	return true, "", nil
}

func (*UpgradeRepoV2) RepoCheck() bool {
	return true
}

// Apply runs the migration.
func (m *UpgradeRepoV2) Apply(ctx context.Context, repo restic.Repository) error {
	return repository.UpgradeRepo(ctx, repo.(*repository.Repository))
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/migrations/list.go
internal/migrations/list.go
package migrations // All contains all migrations. var All []Migration func register(m Migration) { All = append(All, m) }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/migrations/doc.go
internal/migrations/doc.go
// Package migrations contains migrations that can be applied to a repository and/or backend. package migrations
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/foreground.go
internal/terminal/foreground.go
package terminal

import (
	"os"
	"os/exec"
	"strings"
)

// StartForeground runs cmd in the foreground, by temporarily switching to the
// new process group created for cmd. The returned function `bg` switches back
// to the previous process group.
//
// The command's environment has all RESTIC_* variables removed.
//
// Return exec.ErrDot if it would implicitly run an executable from the current
// directory.
func StartForeground(cmd *exec.Cmd) (bg func() error, err error) {
	env := os.Environ() // Returns a copy that we can modify.

	// In-place filter: drop every RESTIC_* variable so the child process
	// does not see e.g. the repository password.
	kept := env[:0]
	for _, kv := range env {
		if !strings.HasPrefix(kv, "RESTIC_") {
			kept = append(kept, kv)
		}
	}
	cmd.Env = kept

	return startForeground(cmd)
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/terminal_unix.go
internal/terminal/terminal_unix.go
//go:build !windows package terminal import ( "io" "os" "golang.org/x/term" ) // ClearCurrentLine removes all characters from the current line and resets the // cursor position to the first column. func ClearCurrentLine(_ uintptr) func(io.Writer, uintptr) error { return PosixClearCurrentLine } // MoveCursorUp moves the cursor to the line n lines above the current one. func MoveCursorUp(_ uintptr) func(io.Writer, uintptr, int) error { return PosixMoveCursorUp } // CanUpdateStatus returns true if status lines can be printed, the process // output is not redirected to a file or pipe. func CanUpdateStatus(fd uintptr) bool { if !term.IsTerminal(int(fd)) { return false } term := os.Getenv("TERM") if term == "" { return false } // TODO actually read termcap db and detect if terminal supports what we need return term != "dumb" }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/terminal_windows_test.go
internal/terminal/terminal_windows_test.go
package terminal

import (
	"syscall"
	"testing"

	"golang.org/x/sys/windows"

	rtest "github.com/restic/restic/internal/test"
)

// TestIsMinTTY checks that CanUpdateStatus recognizes mintty/cygwin pty pipe
// names while plain msys pipes are not treated as a terminal.
func TestIsMinTTY(t *testing.T) {
	for _, test := range []struct {
		path   string
		result bool
	}{
		{`\\.\pipe\msys-dd50a72ab4668b33-pty0-to-master`, true},
		{`\\.\pipe\msys-dd50a72ab4668b33-13244-pipe-0x16`, false},
	} {
		// Run each case in a closure so the pipe handle is closed at the end
		// of the iteration; a bare defer inside the loop would keep every
		// handle open until the whole test returns.
		func() {
			filename, err := syscall.UTF16FromString(test.path)
			rtest.OK(t, err)
			handle, err := windows.CreateNamedPipe(&filename[0], windows.PIPE_ACCESS_DUPLEX, windows.PIPE_TYPE_BYTE, 1, 1024, 1024, 0, nil)
			rtest.OK(t, err)
			defer windows.CloseHandle(handle)

			rtest.Assert(t, CanUpdateStatus(uintptr(handle)) == test.result, "expected CanUpdateStatus(%v) == %v", test.path, test.result)
		}()
	}
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/background_unix.go
internal/terminal/background_unix.go
//go:build unix package terminal import "github.com/restic/restic/internal/debug" // IsProcessBackground reports whether the current process is running in the // background. fd must be a file descriptor for the terminal. func IsProcessBackground(fd uintptr) bool { bg, err := isProcessBackground(int(fd)) if err != nil { debug.Log("Can't check if we are in the background. Using default behaviour. Error: %s\n", err.Error()) return false } return bg } func isProcessBackground(fd int) (bg bool, err error) { pgid, err := tcgetpgrp(fd) if err != nil { return false, err } return pgid != getpgrp(), nil }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/terminal_windows.go
internal/terminal/terminal_windows.go
//go:build windows package terminal import ( "io" "strings" "syscall" "unsafe" "golang.org/x/sys/windows" "golang.org/x/term" ) // clearCurrentLine removes all characters from the current line and resets the // cursor position to the first column. func ClearCurrentLine(fd uintptr) func(io.Writer, uintptr) error { // easy case, the terminal is cmd or psh, without redirection if isWindowsTerminal(fd) { return windowsClearCurrentLine } // assume we're running in mintty/cygwin return PosixClearCurrentLine } // moveCursorUp moves the cursor to the line n lines above the current one. func MoveCursorUp(fd uintptr) func(io.Writer, uintptr, int) error { // easy case, the terminal is cmd or psh, without redirection if isWindowsTerminal(fd) { return windowsMoveCursorUp } // assume we're running in mintty/cygwin return PosixMoveCursorUp } var kernel32 = syscall.NewLazyDLL("kernel32.dll") var ( procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") ) // windowsClearCurrentLine removes all characters from the current line and // resets the cursor position to the first column. func windowsClearCurrentLine(_ io.Writer, fd uintptr) error { var info windows.ConsoleScreenBufferInfo windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info) // clear the line cursor := windows.Coord{ X: info.Window.Left, Y: info.CursorPosition.Y, } var count, w uint32 count = uint32(info.Size.X) procFillConsoleOutputAttribute.Call(fd, uintptr(info.Attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&w))) procFillConsoleOutputCharacter.Call(fd, uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&w))) return nil } // windowsMoveCursorUp moves the cursor to the line n lines above the current one. 
func windowsMoveCursorUp(_ io.Writer, fd uintptr, n int) error { var info windows.ConsoleScreenBufferInfo windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info) // move cursor up by n lines and to the first column windows.SetConsoleCursorPosition(windows.Handle(fd), windows.Coord{ X: 0, Y: info.CursorPosition.Y - int16(n), }) return nil } // isWindowsTerminal return true if the file descriptor is a windows terminal (cmd, psh). func isWindowsTerminal(fd uintptr) bool { return term.IsTerminal(int(fd)) } func isPipe(fd uintptr) bool { typ, err := windows.GetFileType(windows.Handle(fd)) return err == nil && typ == windows.FILE_TYPE_PIPE } func getFileNameByHandle(fd uintptr) (string, error) { type FILE_NAME_INFO struct { FileNameLength int32 FileName [windows.MAX_LONG_PATH]uint16 } var fi FILE_NAME_INFO err := windows.GetFileInformationByHandleEx(windows.Handle(fd), windows.FileNameInfo, (*byte)(unsafe.Pointer(&fi)), uint32(unsafe.Sizeof(fi))) if err != nil { return "", err } filename := syscall.UTF16ToString(fi.FileName[:]) return filename, nil } // CanUpdateStatus returns true if status lines can be printed, the process // output is not redirected to a file or pipe. func CanUpdateStatus(fd uintptr) bool { // easy case, the terminal is cmd or psh, without redirection if isWindowsTerminal(fd) { return true } // pipes require special handling if !isPipe(fd) { return false } fn, err := getFileNameByHandle(fd) if err != nil { return false } // inspired by https://github.com/RyanGlScott/mintty/blob/master/src/System/Console/MinTTY/Win32.hsc // terminal: \msys-dd50a72ab4668b33-pty0-to-master // pipe to cat: \msys-dd50a72ab4668b33-13244-pipe-0x16 if (strings.HasPrefix(fn, "\\cygwin-") || strings.HasPrefix(fn, "\\msys-")) && strings.Contains(fn, "-pty") && strings.HasSuffix(fn, "-master") { return true } return false }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/getpgrp_unix.go
internal/terminal/getpgrp_unix.go
//go:build unix && !solaris package terminal import "golang.org/x/sys/unix" func getpgrp() int { return unix.Getpgrp() }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/background_unix_test.go
internal/terminal/background_unix_test.go
//go:build unix package terminal import ( "os" "testing" rtest "github.com/restic/restic/internal/test" ) func TestIsProcessBackground(t *testing.T) { tty, err := os.Open("/dev/tty") if err != nil { t.Skipf("can't open terminal: %v", err) } _, err = isProcessBackground(int(tty.Fd())) rtest.OK(t, err) _ = tty.Close() }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/background_windows.go
internal/terminal/background_windows.go
package terminal

// IsProcessBackground reports whether the current process is running in the
// background. Not implemented for this platform.
func IsProcessBackground(_ uintptr) bool {
	return false
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/tcgetpgrp_unix.go
internal/terminal/tcgetpgrp_unix.go
//go:build unix && !linux package terminal import "golang.org/x/sys/unix" func tcgetpgrp(ttyfd int) (int, error) { return unix.IoctlGetInt(ttyfd, unix.TIOCGPGRP) }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/foreground_test.go
internal/terminal/foreground_test.go
//go:build !windows package terminal_test import ( "bufio" "os" "os/exec" "strings" "testing" "github.com/restic/restic/internal/terminal" rtest "github.com/restic/restic/internal/test" ) func TestForeground(t *testing.T) { err := os.Setenv("RESTIC_PASSWORD", "supersecret") rtest.OK(t, err) cmd := exec.Command("env") stdout, err := cmd.StdoutPipe() rtest.OK(t, err) bg, err := terminal.StartForeground(cmd) rtest.OK(t, err) defer func() { rtest.OK(t, cmd.Wait()) }() err = bg() rtest.OK(t, err) sc := bufio.NewScanner(stdout) for sc.Scan() { if strings.HasPrefix(sc.Text(), "RESTIC_PASSWORD=") { t.Error("subprocess got to see the password") } } rtest.OK(t, err) }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/foreground_windows.go
internal/terminal/foreground_windows.go
package terminal

import (
	"os/exec"
	"syscall"

	"github.com/restic/restic/internal/errors"

	"golang.org/x/sys/windows"
)

// startForeground starts cmd in a new process group. There is no terminal
// process-group handling on Windows, so the returned bg function is a no-op.
func startForeground(cmd *exec.Cmd) (bg func() error, err error) {
	// just start the process and hope for the best
	cmd.SysProcAttr = &syscall.SysProcAttr{
		CreationFlags: windows.CREATE_NEW_PROCESS_GROUP,
	}

	if err := cmd.Start(); err != nil {
		return nil, errors.Wrap(err, "cmd.Start")
	}

	return func() error { return nil }, nil
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/stdio.go
internal/terminal/stdio.go
package terminal import ( "golang.org/x/term" ) func InputIsTerminal(fd uintptr) bool { return term.IsTerminal(int(fd)) } func OutputIsTerminal(fd uintptr) bool { // mintty on windows can use pipes which behave like a posix terminal, // but which are not a terminal handle. Thus also check `CanUpdateStatus`, // which is able to detect such pipes. return term.IsTerminal(int(fd)) || CanUpdateStatus(fd) } func Width(fd uintptr) int { w, _, err := term.GetSize(int(fd)) if err != nil { return 0 } return w }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/getpgrp_solaris.go
internal/terminal/getpgrp_solaris.go
package terminal import "golang.org/x/sys/unix" func getpgrp() int { pid, _ := unix.Getpgrp() return pid }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/tcsetpgrp_unix.go
internal/terminal/tcsetpgrp_unix.go
//go:build unix && !aix package terminal import "golang.org/x/sys/unix" func tcsetpgrp(fd int, pid int) error { return unix.IoctlSetPointerInt(fd, unix.TIOCSPGRP, pid) }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/terminal_posix.go
internal/terminal/terminal_posix.go
package terminal

import (
	"bytes"
	"fmt"
	"io"
)

const (
	// PosixControlMoveCursorHome moves cursor to the first column
	PosixControlMoveCursorHome = "\r"
	// PosixControlMoveCursorUp moves cursor up one line
	PosixControlMoveCursorUp = "\x1b[1A"
	// PosixControlClearLine clears the current line
	PosixControlClearLine = "\x1b[2K"
)

// PosixClearCurrentLine removes all characters from the current line and resets the
// cursor position to the first column.
func PosixClearCurrentLine(wr io.Writer, _ uintptr) error {
	// clear current line
	if _, err := wr.Write([]byte(PosixControlMoveCursorHome + PosixControlClearLine)); err != nil {
		return fmt.Errorf("write failed: %w", err)
	}
	return nil
}

// PosixMoveCursorUp moves the cursor to the line n lines above the current one.
func PosixMoveCursorUp(wr io.Writer, _ uintptr, n int) error {
	sequence := append([]byte(PosixControlMoveCursorHome), bytes.Repeat([]byte(PosixControlMoveCursorUp), n)...)
	if _, err := wr.Write(sequence); err != nil {
		return fmt.Errorf("write failed: %w", err)
	}
	return nil
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/foreground_unix.go
internal/terminal/foreground_unix.go
//go:build unix package terminal import ( "os" "os/exec" "os/signal" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "golang.org/x/sys/unix" ) func startForeground(cmd *exec.Cmd) (bg func() error, err error) { // run the command in its own process group // this ensures that sending ctrl-c to restic will not immediately stop the backend process. cmd.SysProcAttr = &unix.SysProcAttr{ Setpgid: true, } // open the TTY, we need the file descriptor tty, err := os.OpenFile("/dev/tty", os.O_RDWR, 0) if err != nil { debug.Log("unable to open tty: %v", err) return startFallback(cmd) } // only move child process to foreground if restic is in the foreground prev, err := tcgetpgrp(int(tty.Fd())) if err != nil { _ = tty.Close() return nil, err } self := getpgrp() if prev != self { debug.Log("restic is not controlling the tty; err = %v", err) if err := tty.Close(); err != nil { return nil, err } return startFallback(cmd) } // Prevent getting suspended when interacting with the tty signal.Ignore(unix.SIGTTIN) signal.Ignore(unix.SIGTTOU) // start the process err = cmd.Start() if err != nil { _ = tty.Close() return nil, errors.Wrap(err, "cmd.Start") } // move the command's process group into the foreground err = tcsetpgrp(int(tty.Fd()), cmd.Process.Pid) if err != nil { _ = tty.Close() return nil, err } bg = func() error { signal.Reset(unix.SIGTTIN) signal.Reset(unix.SIGTTOU) // reset the foreground process group err = tcsetpgrp(int(tty.Fd()), prev) if err != nil { _ = tty.Close() return err } return tty.Close() } return bg, nil } func startFallback(cmd *exec.Cmd) (bg func() error, err error) { bg = func() error { return nil } return bg, cmd.Start() }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/tcsetpgrp_aix.go
internal/terminal/tcsetpgrp_aix.go
package terminal import "golang.org/x/sys/unix" func tcsetpgrp(fd int, pid int) error { // The second argument to IoctlSetPointerInt has type int on AIX, // but the constant overflows 64-bit int, hence the two-step cast. req := uint(unix.TIOCSPGRP) return unix.IoctlSetPointerInt(fd, int(req), pid) }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/password.go
internal/terminal/password.go
package terminal import ( "context" "fmt" "io" "golang.org/x/term" ) // ReadPassword reads the password from the given reader which must be a // tty. Prompt is printed on the writer out before attempting to read the // password. If the context is canceled, the function leaks the password reading // goroutine. func ReadPassword(ctx context.Context, inFd int, out io.Writer, prompt string) (password string, err error) { state, err := term.GetState(inFd) if err != nil { _, _ = fmt.Fprintf(out, "unable to get terminal state: %v\n", err) return "", err } done := make(chan struct{}) var buf []byte go func() { defer close(done) _, err = fmt.Fprint(out, prompt) if err != nil { return } buf, err = term.ReadPassword(inFd) if err != nil { return } _, err = fmt.Fprintln(out) }() select { case <-ctx.Done(): err := term.Restore(inFd, state) if err != nil { _, _ = fmt.Fprintf(out, "unable to restore terminal state: %v\n", err) } return "", ctx.Err() case <-done: // clean shutdown, nothing to do } if err != nil { return "", fmt.Errorf("ReadPassword: %w", err) } return string(buf), nil }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/terminal/tcgetpgrp_linux.go
internal/terminal/tcgetpgrp_linux.go
package terminal import "golang.org/x/sys/unix" func tcgetpgrp(ttyfd int) (int, error) { // We need to use IoctlGetUint32 here, because pid_t is 32-bit even on // 64-bit Linux. IoctlGetInt doesn't work on big-endian platforms: // https://github.com/golang/go/issues/45585 // https://github.com/golang/go/issues/60429 pid, err := unix.IoctlGetUint32(ttyfd, unix.TIOCGPGRP) return int(pid), err }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/walker/walker.go
internal/walker/walker.go
package walker import ( "context" "path" "sort" "github.com/pkg/errors" "github.com/restic/restic/internal/data" "github.com/restic/restic/internal/restic" ) // ErrSkipNode is returned by WalkFunc when a dir node should not be walked. var ErrSkipNode = errors.New("skip this node") // WalkFunc is the type of the function called for each node visited by Walk. // Path is the slash-separated path from the root node. If there was a problem // loading a node, err is set to a non-nil error. WalkFunc can chose to ignore // it by returning nil. // // When the special value ErrSkipNode is returned and node is a dir node, it is // not walked. When the node is not a dir node, the remaining items in this // tree are skipped. type WalkFunc func(parentTreeID restic.ID, path string, node *data.Node, nodeErr error) (err error) type WalkVisitor struct { // If the node is a `dir`, it will be entered afterwards unless `ErrSkipNode` // was returned. This function is mandatory ProcessNode WalkFunc // Optional callback LeaveDir func(path string) error } // Walk calls walkFn recursively for each node in root. If walkFn returns an // error, it is passed up the call stack. The trees in ignoreTrees are not // walked. If walkFn ignores trees, these are added to the set. func Walk(ctx context.Context, repo restic.BlobLoader, root restic.ID, visitor WalkVisitor) error { tree, err := data.LoadTree(ctx, repo, root) err = visitor.ProcessNode(root, "/", nil, err) if err != nil { if err == ErrSkipNode { err = nil } return err } return walk(ctx, repo, "/", root, tree, visitor) } // walk recursively traverses the tree, ignoring subtrees when the ID of the // subtree is in ignoreTrees. If err is nil and ignore is true, the subtree ID // will be added to ignoreTrees by walk. 
func walk(ctx context.Context, repo restic.BlobLoader, prefix string, parentTreeID restic.ID, tree *data.Tree, visitor WalkVisitor) (err error) { sort.Slice(tree.Nodes, func(i, j int) bool { return tree.Nodes[i].Name < tree.Nodes[j].Name }) for _, node := range tree.Nodes { if ctx.Err() != nil { return ctx.Err() } p := path.Join(prefix, node.Name) if node.Type == data.NodeTypeInvalid { return errors.Errorf("node type is empty for node %q", node.Name) } if node.Type != data.NodeTypeDir { err := visitor.ProcessNode(parentTreeID, p, node, nil) if err != nil { if err == ErrSkipNode { // skip the remaining entries in this tree break } return err } continue } if node.Subtree == nil { return errors.Errorf("subtree for node %v in tree %v is nil", node.Name, p) } subtree, err := data.LoadTree(ctx, repo, *node.Subtree) err = visitor.ProcessNode(parentTreeID, p, node, err) if err != nil { if err == ErrSkipNode { continue } return err } err = walk(ctx, repo, p, *node.Subtree, subtree, visitor) if err != nil { return err } } if visitor.LeaveDir != nil { return visitor.LeaveDir(prefix) } return nil }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/walker/rewriter_test.go
internal/walker/rewriter_test.go
package walker import ( "context" "testing" "github.com/pkg/errors" "github.com/restic/restic/internal/data" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" ) // WritableTreeMap also support saving type WritableTreeMap struct { TreeMap } func (t WritableTreeMap) SaveBlob(_ context.Context, tpe restic.BlobType, buf []byte, id restic.ID, _ bool) (newID restic.ID, known bool, size int, err error) { if tpe != restic.TreeBlob { return restic.ID{}, false, 0, errors.New("can only save trees") } if id.IsNull() { id = restic.Hash(buf) } _, ok := t.TreeMap[id] if ok { return id, false, 0, nil } t.TreeMap[id] = append([]byte{}, buf...) return id, true, len(buf), nil } func (t WritableTreeMap) Dump(test testing.TB) { for k, v := range t.TreeMap { test.Logf("%v: %v", k, string(v)) } } type checkRewriteFunc func(t testing.TB) (rewriter *TreeRewriter, final func(testing.TB)) // checkRewriteItemOrder ensures that the order of the 'path' arguments is the one passed in as 'want'. func checkRewriteItemOrder(want []string) checkRewriteFunc { pos := 0 return func(t testing.TB) (rewriter *TreeRewriter, final func(testing.TB)) { rewriter = NewTreeRewriter(RewriteOpts{ RewriteNode: func(node *data.Node, path string) *data.Node { if pos >= len(want) { t.Errorf("additional unexpected path found: %v", path) return nil } if path != want[pos] { t.Errorf("wrong path found, want %q, got %q", want[pos], path) } pos++ return node }, }) final = func(t testing.TB) { if pos != len(want) { t.Errorf("not enough items returned, want %d, got %d", len(want), pos) } } return rewriter, final } } // checkRewriteSkips excludes nodes if path is in skipFor, it checks that rewriting proceeds in the correct order. 
func checkRewriteSkips(skipFor map[string]struct{}, want []string, disableCache bool) checkRewriteFunc { var pos int return func(t testing.TB) (rewriter *TreeRewriter, final func(testing.TB)) { rewriter = NewTreeRewriter(RewriteOpts{ RewriteNode: func(node *data.Node, path string) *data.Node { if pos >= len(want) { t.Errorf("additional unexpected path found: %v", path) return nil } if path != want[pos] { t.Errorf("wrong path found, want %q, got %q", want[pos], path) } pos++ _, skip := skipFor[path] if skip { return nil } return node }, DisableNodeCache: disableCache, }) final = func(t testing.TB) { if pos != len(want) { t.Errorf("not enough items returned, want %d, got %d", len(want), pos) } } return rewriter, final } } // checkIncreaseNodeSize modifies each node by changing its size. func checkIncreaseNodeSize(increase uint64) checkRewriteFunc { return func(t testing.TB) (rewriter *TreeRewriter, final func(testing.TB)) { rewriter = NewTreeRewriter(RewriteOpts{ RewriteNode: func(node *data.Node, path string) *data.Node { if node.Type == data.NodeTypeFile { node.Size += increase } return node }, }) final = func(t testing.TB) {} return rewriter, final } } func TestRewriter(t *testing.T) { var tests = []struct { tree TestTree newTree TestTree check checkRewriteFunc }{ { // don't change tree: TestTree{ "foo": TestFile{}, "subdir": TestTree{ "subfile": TestFile{}, }, }, check: checkRewriteItemOrder([]string{ "/foo", "/subdir", "/subdir/subfile", }), }, { // exclude file tree: TestTree{ "foo": TestFile{}, "subdir": TestTree{ "subfile": TestFile{}, }, }, newTree: TestTree{ "foo": TestFile{}, "subdir": TestTree{}, }, check: checkRewriteSkips( map[string]struct{}{ "/subdir/subfile": {}, }, []string{ "/foo", "/subdir", "/subdir/subfile", }, false, ), }, { // exclude dir tree: TestTree{ "foo": TestFile{}, "subdir": TestTree{ "subfile": TestFile{}, }, }, newTree: TestTree{ "foo": TestFile{}, }, check: checkRewriteSkips( map[string]struct{}{ "/subdir": {}, }, []string{ "/foo", 
"/subdir", }, false, ), }, { // modify node tree: TestTree{ "foo": TestFile{Size: 21}, "subdir": TestTree{ "subfile": TestFile{Size: 21}, }, }, newTree: TestTree{ "foo": TestFile{Size: 42}, "subdir": TestTree{ "subfile": TestFile{Size: 42}, }, }, check: checkIncreaseNodeSize(21), }, { // test cache tree: TestTree{ // both subdirs are identical "subdir1": TestTree{ "subfile": TestFile{}, "subfile2": TestFile{}, }, "subdir2": TestTree{ "subfile": TestFile{}, "subfile2": TestFile{}, }, }, newTree: TestTree{ "subdir1": TestTree{ "subfile2": TestFile{}, }, "subdir2": TestTree{ "subfile2": TestFile{}, }, }, check: checkRewriteSkips( map[string]struct{}{ "/subdir1/subfile": {}, }, []string{ "/subdir1", "/subdir1/subfile", "/subdir1/subfile2", "/subdir2", }, false, ), }, { // test disabled cache tree: TestTree{ // both subdirs are identical "subdir1": TestTree{ "subfile": TestFile{}, "subfile2": TestFile{}, }, "subdir2": TestTree{ "subfile": TestFile{}, "subfile2": TestFile{}, }, }, newTree: TestTree{ "subdir1": TestTree{ "subfile2": TestFile{}, }, "subdir2": TestTree{ "subfile": TestFile{}, "subfile2": TestFile{}, }, }, check: checkRewriteSkips( map[string]struct{}{ "/subdir1/subfile": {}, }, []string{ "/subdir1", "/subdir1/subfile", "/subdir1/subfile2", "/subdir2", "/subdir2/subfile", "/subdir2/subfile2", }, true, ), }, } for _, test := range tests { t.Run("", func(t *testing.T) { repo, root := BuildTreeMap(test.tree) if test.newTree == nil { test.newTree = test.tree } expRepo, expRoot := BuildTreeMap(test.newTree) modrepo := WritableTreeMap{repo} ctx, cancel := context.WithCancel(context.TODO()) defer cancel() rewriter, last := test.check(t) newRoot, err := rewriter.RewriteTree(ctx, modrepo, modrepo, "/", root) if err != nil { t.Error(err) } last(t) // verifying against the expected tree root also implicitly checks the structural integrity if newRoot != expRoot { t.Error("hash mismatch") t.Log("Got") modrepo.Dump(t) t.Log("Expected") WritableTreeMap{expRepo}.Dump(t) } 
}) } } func TestSnapshotSizeQuery(t *testing.T) { tree := TestTree{ "foo": TestFile{Size: 21}, "bar": TestFile{Size: 21}, "subdir": TestTree{ "subfile": TestFile{Size: 21}, }, } newTree := TestTree{ "foo": TestFile{Size: 42}, "subdir": TestTree{ "subfile": TestFile{Size: 42}, }, } t.Run("", func(t *testing.T) { repo, root := BuildTreeMap(tree) expRepo, expRoot := BuildTreeMap(newTree) modrepo := WritableTreeMap{repo} ctx, cancel := context.WithCancel(context.TODO()) defer cancel() rewriteNode := func(node *data.Node, path string) *data.Node { if path == "/bar" { return nil } if node.Type == data.NodeTypeFile { node.Size += 21 } return node } rewriter, querySize := NewSnapshotSizeRewriter(rewriteNode) newRoot, err := rewriter.RewriteTree(ctx, modrepo, modrepo, "/", root) if err != nil { t.Error(err) } ss := querySize() test.Equals(t, uint(2), ss.FileCount, "snapshot file count mismatch") test.Equals(t, uint64(84), ss.FileSize, "snapshot size mismatch") // verifying against the expected tree root also implicitly checks the structural integrity if newRoot != expRoot { t.Error("hash mismatch") t.Log("Got") modrepo.Dump(t) t.Log("Expected") WritableTreeMap{expRepo}.Dump(t) } }) } func TestRewriterFailOnUnknownFields(t *testing.T) { tm := WritableTreeMap{TreeMap{}} node := []byte(`{"nodes":[{"name":"subfile","type":"file","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","uid":0,"gid":0,"content":null,"unknown_field":42}]}`) id := restic.Hash(node) tm.TreeMap[id] = node ctx, cancel := context.WithCancel(context.TODO()) defer cancel() rewriter := NewTreeRewriter(RewriteOpts{ RewriteNode: func(node *data.Node, path string) *data.Node { // tree loading must not succeed t.Fail() return node }, }) _, err := rewriter.RewriteTree(ctx, tm, tm, "/", id) if err == nil { t.Error("missing error on unknown field") } // check that the serialization check can be disabled rewriter = NewTreeRewriter(RewriteOpts{ AllowUnstableSerialization: true, 
}) root, err := rewriter.RewriteTree(ctx, tm, tm, "/", id) test.OK(t, err) _, expRoot := BuildTreeMap(TestTree{ "subfile": TestFile{}, }) test.Assert(t, root == expRoot, "mismatched trees") } func TestRewriterTreeLoadError(t *testing.T) { tm := WritableTreeMap{TreeMap{}} id := restic.NewRandomID() ctx, cancel := context.WithCancel(context.TODO()) defer cancel() // also check that load error by default cause the operation to fail rewriter := NewTreeRewriter(RewriteOpts{}) _, err := rewriter.RewriteTree(ctx, tm, tm, "/", id) if err == nil { t.Fatal("missing error on unloadable tree") } replacementTree := &data.Tree{Nodes: []*data.Node{{Name: "replacement", Type: data.NodeTypeFile, Size: 42}}} replacementID, err := data.SaveTree(ctx, tm, replacementTree) test.OK(t, err) rewriter = NewTreeRewriter(RewriteOpts{ RewriteFailedTree: func(nodeID restic.ID, path string, err error) (*data.Tree, error) { if nodeID != id || path != "/" { t.Fail() } return replacementTree, nil }, }) newRoot, err := rewriter.RewriteTree(ctx, tm, tm, "/", id) test.OK(t, err) test.Equals(t, replacementID, newRoot) }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/walker/rewriter.go
internal/walker/rewriter.go
package walker import ( "context" "fmt" "path" "github.com/restic/restic/internal/data" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/restic" ) type NodeRewriteFunc func(node *data.Node, path string) *data.Node type FailedTreeRewriteFunc func(nodeID restic.ID, path string, err error) (*data.Tree, error) type QueryRewrittenSizeFunc func() SnapshotSize type SnapshotSize struct { FileCount uint FileSize uint64 } type RewriteOpts struct { // return nil to remove the node RewriteNode NodeRewriteFunc // decide what to do with a tree that could not be loaded. Return nil to remove the node. By default the load error is returned which causes the operation to fail. RewriteFailedTree FailedTreeRewriteFunc AllowUnstableSerialization bool DisableNodeCache bool } type idMap map[restic.ID]restic.ID type TreeRewriter struct { opts RewriteOpts replaces idMap } func NewTreeRewriter(opts RewriteOpts) *TreeRewriter { rw := &TreeRewriter{ opts: opts, } if !opts.DisableNodeCache { rw.replaces = make(idMap) } // setup default implementations if rw.opts.RewriteNode == nil { rw.opts.RewriteNode = func(node *data.Node, _ string) *data.Node { return node } } if rw.opts.RewriteFailedTree == nil { // fail with error by default rw.opts.RewriteFailedTree = func(_ restic.ID, _ string, err error) (*data.Tree, error) { return nil, err } } return rw } func NewSnapshotSizeRewriter(rewriteNode NodeRewriteFunc) (*TreeRewriter, QueryRewrittenSizeFunc) { var count uint var size uint64 t := NewTreeRewriter(RewriteOpts{ RewriteNode: func(node *data.Node, path string) *data.Node { node = rewriteNode(node, path) if node != nil && node.Type == data.NodeTypeFile { count++ size += node.Size } return node }, DisableNodeCache: true, }) ss := func() SnapshotSize { return SnapshotSize{count, size} } return t, ss } func (t *TreeRewriter) RewriteTree(ctx context.Context, loader restic.BlobLoader, saver restic.BlobSaver, nodepath string, nodeID restic.ID) (newNodeID restic.ID, err error) 
{ // check if tree was already changed newID, ok := t.replaces[nodeID] if ok { return newID, nil } // a nil nodeID will lead to a load error curTree, err := data.LoadTree(ctx, loader, nodeID) if err != nil { replacement, err := t.opts.RewriteFailedTree(nodeID, nodepath, err) if err != nil { return restic.ID{}, err } if replacement != nil { replacementID, err := data.SaveTree(ctx, saver, replacement) if err != nil { return restic.ID{}, err } return replacementID, nil } return restic.ID{}, nil } if !t.opts.AllowUnstableSerialization { // check that we can properly encode this tree without losing information // The alternative of using json/Decoder.DisallowUnknownFields() doesn't work as we use // a custom UnmarshalJSON to decode trees, see also https://github.com/golang/go/issues/41144 testID, err := data.SaveTree(ctx, saver, curTree) if err != nil { return restic.ID{}, err } if nodeID != testID { return restic.ID{}, fmt.Errorf("cannot encode tree at %q without losing information", nodepath) } } debug.Log("filterTree: %s, nodeId: %s\n", nodepath, nodeID.Str()) tb := data.NewTreeJSONBuilder() for _, node := range curTree.Nodes { if ctx.Err() != nil { return restic.ID{}, ctx.Err() } path := path.Join(nodepath, node.Name) node = t.opts.RewriteNode(node, path) if node == nil { continue } if node.Type != data.NodeTypeDir { err = tb.AddNode(node) if err != nil { return restic.ID{}, err } continue } // treat nil as null id var subtree restic.ID if node.Subtree != nil { subtree = *node.Subtree } newID, err := t.RewriteTree(ctx, loader, saver, path, subtree) if err != nil { return restic.ID{}, err } node.Subtree = &newID err = tb.AddNode(node) if err != nil { return restic.ID{}, err } } tree, err := tb.Finalize() if err != nil { return restic.ID{}, err } // Save new tree newTreeID, _, _, err := saver.SaveBlob(ctx, restic.TreeBlob, tree, restic.ID{}, false) if t.replaces != nil { t.replaces[nodeID] = newTreeID } if !newTreeID.Equal(nodeID) { debug.Log("filterTree: save new 
tree for %s as %v\n", nodepath, newTreeID) } return newTreeID, err }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/walker/testing.go
internal/walker/testing.go
package walker
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/walker/walker_test.go
internal/walker/walker_test.go
package walker import ( "context" "fmt" "sort" "testing" "github.com/pkg/errors" "github.com/restic/restic/internal/data" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) // TestTree is used to construct a list of trees for testing the walker. type TestTree map[string]interface{} // TestFile is used to test the walker. type TestFile struct { Size uint64 } func BuildTreeMap(tree TestTree) (m TreeMap, root restic.ID) { m = TreeMap{} id := buildTreeMap(tree, m) return m, id } func buildTreeMap(tree TestTree, m TreeMap) restic.ID { tb := data.NewTreeJSONBuilder() var names []string for name := range tree { names = append(names, name) } sort.Strings(names) for _, name := range names { item := tree[name] switch elem := item.(type) { case TestFile: err := tb.AddNode(&data.Node{ Name: name, Type: data.NodeTypeFile, Size: elem.Size, }) if err != nil { panic(err) } case TestTree: id := buildTreeMap(elem, m) err := tb.AddNode(&data.Node{ Name: name, Subtree: &id, Type: data.NodeTypeDir, }) if err != nil { panic(err) } default: panic(fmt.Sprintf("invalid type %T", elem)) } } buf, err := tb.Finalize() if err != nil { panic(err) } id := restic.Hash(buf) if _, ok := m[id]; !ok { m[id] = buf } return id } // TreeMap returns the trees from the map on LoadTree. type TreeMap map[restic.ID][]byte func (t TreeMap) LoadBlob(_ context.Context, tpe restic.BlobType, id restic.ID, _ []byte) ([]byte, error) { if tpe != restic.TreeBlob { return nil, errors.New("can only load trees") } tree, ok := t[id] if !ok { return nil, errors.New("tree not found") } return tree, nil } func (t TreeMap) Connections() uint { return 2 } // checkFunc returns a function suitable for walking the tree to check // something, and a function which will check the final result. 
type checkFunc func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB, error)) // checkItemOrder ensures that the order of the 'path' arguments is the one passed in as 'want'. func checkItemOrder(want []string) checkFunc { pos := 0 return func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB, error)) { walker = func(treeID restic.ID, path string, node *data.Node, err error) error { if err != nil { t.Errorf("error walking %v: %v", path, err) return err } if pos >= len(want) { t.Errorf("additional unexpected path found: %v", path) return nil } if path != want[pos] { t.Errorf("wrong path found, want %q, got %q", want[pos], path) } pos++ return nil } leaveDir = func(path string) error { return walker(restic.ID{}, "leave: "+path, nil, nil) } final = func(t testing.TB, err error) { rtest.OK(t, err) if pos != len(want) { t.Errorf("not enough items returned, want %d, got %d", len(want), pos) } } return walker, leaveDir, final } } // checkParentTreeOrder ensures that the order of the 'parentID' arguments is the one passed in as 'want'. 
func checkParentTreeOrder(want []string) checkFunc { pos := 0 return func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB, error)) { walker = func(treeID restic.ID, path string, node *data.Node, err error) error { if err != nil { t.Errorf("error walking %v: %v", path, err) return err } if pos >= len(want) { t.Errorf("additional unexpected parent tree ID found: %v", treeID) return nil } if treeID.String() != want[pos] { t.Errorf("wrong parent tree ID found, want %q, got %q", want[pos], treeID.String()) } pos++ return nil } final = func(t testing.TB, err error) { rtest.OK(t, err) if pos != len(want) { t.Errorf("not enough items returned, want %d, got %d", len(want), pos) } } return walker, nil, final } } // checkSkipFor returns ErrSkipNode if path is in skipFor, it checks that the // paths the walk func is called for are exactly the ones in wantPaths. func checkSkipFor(skipFor map[string]struct{}, wantPaths []string) checkFunc { var pos int return func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB, error)) { walker = func(treeID restic.ID, path string, node *data.Node, err error) error { if err != nil { t.Errorf("error walking %v: %v", path, err) return err } if pos >= len(wantPaths) { t.Errorf("additional unexpected path found: %v", path) return nil } if path != wantPaths[pos] { t.Errorf("wrong path found, want %q, got %q", wantPaths[pos], path) } pos++ if _, ok := skipFor[path]; ok { return ErrSkipNode } return nil } leaveDir = func(path string) error { return walker(restic.ID{}, "leave: "+path, nil, nil) } final = func(t testing.TB, err error) { rtest.OK(t, err) if pos != len(wantPaths) { t.Errorf("wrong number of paths returned, want %d, got %d", len(wantPaths), pos) } } return walker, leaveDir, final } } func checkErrorReturned(errForPath string) checkFunc { expectedErr := fmt.Errorf("error for %v", errForPath) return func(t testing.TB) (walker WalkFunc, leaveDir func(path string) 
error, final func(testing.TB, error)) { walker = func(treeID restic.ID, path string, node *data.Node, err error) error { if path == errForPath { return expectedErr } return nil } leaveDir = func(path string) error { return walker(restic.ID{}, "leave: "+path, nil, nil) } final = func(t testing.TB, err error) { if err == nil { t.Errorf("expected error for %v, got nil", errForPath) } rtest.Assert(t, err == expectedErr, "expected error for %v, got %v", errForPath, err) } return walker, leaveDir, final } } func TestWalker(t *testing.T) { var tests = []struct { tree TestTree checks []checkFunc }{ { tree: TestTree{ "foo": TestFile{}, "subdir": TestTree{ "subfile": TestFile{}, }, }, checks: []checkFunc{ checkItemOrder([]string{ "/", "/foo", "/subdir", "/subdir/subfile", "leave: /subdir", "leave: /", }), checkParentTreeOrder([]string{ "a760536a8fd64dd63f8dd95d85d788d71fd1bee6828619350daf6959dcb499a0", // tree / "a760536a8fd64dd63f8dd95d85d788d71fd1bee6828619350daf6959dcb499a0", // tree / "a760536a8fd64dd63f8dd95d85d788d71fd1bee6828619350daf6959dcb499a0", // tree / "670046b44353a89b7cd6ef84c78422232438f10eb225c29c07989ae05283d797", // tree /subdir }), checkSkipFor( map[string]struct{}{ "/subdir": {}, }, []string{ "/", "/foo", "/subdir", "leave: /", }, ), checkSkipFor( map[string]struct{}{ "/": {}, }, []string{ "/", }, ), }, }, { tree: TestTree{ "foo": TestFile{}, "subdir1": TestTree{ "subfile1": TestFile{}, }, "subdir2": TestTree{ "subfile2": TestFile{}, "subsubdir2": TestTree{ "subsubfile3": TestFile{}, }, }, }, checks: []checkFunc{ checkItemOrder([]string{ "/", "/foo", "/subdir1", "/subdir1/subfile1", "leave: /subdir1", "/subdir2", "/subdir2/subfile2", "/subdir2/subsubdir2", "/subdir2/subsubdir2/subsubfile3", "leave: /subdir2/subsubdir2", "leave: /subdir2", "leave: /", }), checkParentTreeOrder([]string{ "7a0e59b986cc83167d9fbeeefc54e4629770124c5825d391f7ee0598667fcdf1", // tree / "7a0e59b986cc83167d9fbeeefc54e4629770124c5825d391f7ee0598667fcdf1", // tree / 
"7a0e59b986cc83167d9fbeeefc54e4629770124c5825d391f7ee0598667fcdf1", // tree / "22c9feefa0b9fabc7ec5383c90cfe84ba714babbe4d2968fcb78f0ec7612e82f", // tree /subdir1 "7a0e59b986cc83167d9fbeeefc54e4629770124c5825d391f7ee0598667fcdf1", // tree / "9bfe4aab3ac0ad7a81909355d7221801441fb20f7ed06c0142196b3f10358493", // tree /subdir2 "9bfe4aab3ac0ad7a81909355d7221801441fb20f7ed06c0142196b3f10358493", // tree /subdir2 "6b962fef064ef9beecc27dfcd6e0f2e7beeebc9c1f1f4f477d4af59fc45f411d", // tree /subdir2/subsubdir2 }), checkSkipFor( map[string]struct{}{ "/subdir1": {}, }, []string{ "/", "/foo", "/subdir1", "/subdir2", "/subdir2/subfile2", "/subdir2/subsubdir2", "/subdir2/subsubdir2/subsubfile3", "leave: /subdir2/subsubdir2", "leave: /subdir2", "leave: /", }, ), checkSkipFor( map[string]struct{}{ "/subdir1": {}, "/subdir2/subsubdir2": {}, }, []string{ "/", "/foo", "/subdir1", "/subdir2", "/subdir2/subfile2", "/subdir2/subsubdir2", "leave: /subdir2", "leave: /", }, ), checkSkipFor( map[string]struct{}{ "/foo": {}, }, []string{ "/", "/foo", "leave: /", }, ), }, }, { tree: TestTree{ "foo": TestFile{}, "subdir1": TestTree{ "subfile1": TestFile{}, "subfile2": TestFile{}, "subfile3": TestFile{}, }, "subdir2": TestTree{ "subfile1": TestFile{}, "subfile2": TestFile{}, "subfile3": TestFile{}, }, "subdir3": TestTree{ "subfile1": TestFile{}, "subfile2": TestFile{}, "subfile3": TestFile{}, }, "zzz other": TestFile{}, }, checks: []checkFunc{ checkItemOrder([]string{ "/", "/foo", "/subdir1", "/subdir1/subfile1", "/subdir1/subfile2", "/subdir1/subfile3", "leave: /subdir1", "/subdir2", "/subdir2/subfile1", "/subdir2/subfile2", "/subdir2/subfile3", "leave: /subdir2", "/subdir3", "/subdir3/subfile1", "/subdir3/subfile2", "/subdir3/subfile3", "leave: /subdir3", "/zzz other", "leave: /", }), checkParentTreeOrder([]string{ "c2efeff7f217a4dfa12a16e8bb3cefedd37c00873605c29e5271c6061030672f", // tree / "c2efeff7f217a4dfa12a16e8bb3cefedd37c00873605c29e5271c6061030672f", // tree / 
"c2efeff7f217a4dfa12a16e8bb3cefedd37c00873605c29e5271c6061030672f", // tree / "57ee8960c7a86859b090a76e5d013f83d10c0ce11d5460076ca8468706f784ab", // tree /subdir1 "57ee8960c7a86859b090a76e5d013f83d10c0ce11d5460076ca8468706f784ab", // tree /subdir1 "57ee8960c7a86859b090a76e5d013f83d10c0ce11d5460076ca8468706f784ab", // tree /subdir1 "c2efeff7f217a4dfa12a16e8bb3cefedd37c00873605c29e5271c6061030672f", // tree / "57ee8960c7a86859b090a76e5d013f83d10c0ce11d5460076ca8468706f784ab", // tree /subdir2 "57ee8960c7a86859b090a76e5d013f83d10c0ce11d5460076ca8468706f784ab", // tree /subdir2 "57ee8960c7a86859b090a76e5d013f83d10c0ce11d5460076ca8468706f784ab", // tree /subdir2 "c2efeff7f217a4dfa12a16e8bb3cefedd37c00873605c29e5271c6061030672f", // tree / "57ee8960c7a86859b090a76e5d013f83d10c0ce11d5460076ca8468706f784ab", // tree /subdir3 "57ee8960c7a86859b090a76e5d013f83d10c0ce11d5460076ca8468706f784ab", // tree /subdir3 "57ee8960c7a86859b090a76e5d013f83d10c0ce11d5460076ca8468706f784ab", // tree /subdir3 "c2efeff7f217a4dfa12a16e8bb3cefedd37c00873605c29e5271c6061030672f", // tree / }), }, }, { tree: TestTree{ "subdir1": TestTree{}, "subdir2": TestTree{}, "subdir3": TestTree{ "file": TestFile{}, }, "subdir4": TestTree{ "file": TestFile{}, }, "subdir5": TestTree{}, "subdir6": TestTree{}, }, checks: []checkFunc{ checkItemOrder([]string{ "/", "/subdir1", "leave: /subdir1", "/subdir2", "leave: /subdir2", "/subdir3", "/subdir3/file", "leave: /subdir3", "/subdir4", "/subdir4/file", "leave: /subdir4", "/subdir5", "leave: /subdir5", "/subdir6", "leave: /subdir6", "leave: /", }), }, }, { tree: TestTree{ "subdir1": TestTree{ "file": TestFile{}, }, "subdir2": TestTree{}, }, checks: []checkFunc{ checkErrorReturned("/subdir1"), checkErrorReturned("/subdir2"), checkErrorReturned("/subdir1/file"), checkErrorReturned("leave: /subdir1"), checkErrorReturned("leave: /subdir2"), }, }, } for _, test := range tests { t.Run("", func(t *testing.T) { repo, root := BuildTreeMap(test.tree) for _, check := range 
test.checks { t.Run("", func(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) defer cancel() fn, leaveDir, last := check(t) err := Walk(ctx, repo, root, WalkVisitor{ ProcessNode: fn, LeaveDir: leaveDir, }) last(t, err) }) } }) } }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/dump/common_test.go
internal/dump/common_test.go
package dump

import (
	"bytes"
	"context"
	"testing"

	"github.com/restic/restic/internal/archiver"
	"github.com/restic/restic/internal/data"
	"github.com/restic/restic/internal/fs"
	"github.com/restic/restic/internal/repository"
	"github.com/restic/restic/internal/restic"
	rtest "github.com/restic/restic/internal/test"
)

// prepareTempdirRepoSrc creates a temporary directory populated with the
// files described by src, plus a fresh test repository. Cleanup of both is
// registered with the testing framework by the helpers used here.
func prepareTempdirRepoSrc(t testing.TB, src archiver.TestDir) (string, restic.Repository) {
	tempdir := rtest.TempDir(t)
	repo := repository.TestRepository(t)
	archiver.TestCreateFiles(t, tempdir, src)
	return tempdir, repo
}

// CheckDump compares a produced archive (testDump) against the on-disk tree
// rooted at testDir and returns an error describing the first mismatch.
type CheckDump func(t *testing.T, testDir string, testDump *bytes.Buffer) error

// WriteTest runs a shared table of dump scenarios (single files, multiple
// files, nested directories, a symlink) for the given archive format. Each
// case is snapshotted into a fresh repository, dumped via Dumper, and the
// resulting archive is validated by cd.
func WriteTest(t *testing.T, format string, cd CheckDump) {
	tests := []struct {
		name   string
		args   archiver.TestDir
		target string
	}{
		{
			name: "single file in root",
			args: archiver.TestDir{
				"file": archiver.TestFile{Content: "string"},
			},
			target: "/",
		},
		{
			name: "multiple files in root",
			args: archiver.TestDir{
				"file1": archiver.TestFile{Content: "string"},
				"file2": archiver.TestFile{Content: "string"},
			},
			target: "/",
		},
		{
			name: "multiple files and folders in root",
			args: archiver.TestDir{
				"file1": archiver.TestFile{Content: "string"},
				"file2": archiver.TestFile{Content: "string"},
				"firstDir": archiver.TestDir{
					"another": archiver.TestFile{Content: "string"},
				},
				"secondDir": archiver.TestDir{
					"another2": archiver.TestFile{Content: "string"},
				},
			},
			target: "/",
		},
		{
			name: "file and symlink in root",
			args: archiver.TestDir{
				"file1": archiver.TestFile{Content: "string"},
				"file2": archiver.TestSymlink{Target: "file1"},
			},
			target: "/",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			tmpdir, repo := prepareTempdirRepoSrc(t, tt.args)

			arch := archiver.New(repo, fs.Track{FS: fs.Local{}}, archiver.Options{})

			// Snapshot "." from inside tmpdir so node paths in the
			// snapshot are relative to the test tree's root.
			back := rtest.Chdir(t, tmpdir)
			defer back()

			sn, _, _, err := arch.Snapshot(ctx, []string{"."}, archiver.SnapshotOptions{})
			rtest.OK(t, err)

			tree, err := data.LoadTree(ctx, repo, *sn.Tree)
			rtest.OK(t, err)

			dst := &bytes.Buffer{}
			d := New(format, repo, dst)

			if err := d.DumpTree(ctx, tree, tt.target); err != nil {
				t.Fatalf("Dumper.Run error = %v", err)
			}

			if err := cd(t, tmpdir, dst); err != nil {
				t.Errorf("WriteDump() = does not match: %v", err)
			}
		})
	}
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/dump/zip_test.go
internal/dump/zip_test.go
package dump

import (
	"archive/zip"
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"
)

func TestWriteZip(t *testing.T) {
	WriteTest(t, "zip", checkZip)
}

// readZipFile returns the full decompressed contents of one zip entry.
func readZipFile(f *zip.File) ([]byte, error) {
	rc, err := f.Open()
	if err != nil {
		return nil, err
	}

	b := &bytes.Buffer{}
	_, err = b.ReadFrom(rc)
	if err != nil {
		// ignore subsequent errors
		_ = rc.Close()
		return nil, err
	}

	err = rc.Close()
	if err != nil {
		return nil, err
	}

	return b.Bytes(), nil
}

// checkZip verifies that the zip archive in srcZip matches the on-disk tree
// rooted at testDir: same number of entries, and matching modification
// time, mode, name, symlink target, compression method, size and contents
// for each entry.
//
// NOTE(review): mismatches are reported via returned errors, while read
// failures use t.Fatal — presumably intentional (data mismatch vs. broken
// test setup), but worth confirming.
func checkZip(t *testing.T, testDir string, srcZip *bytes.Buffer) error {
	z, err := zip.NewReader(bytes.NewReader(srcZip.Bytes()), int64(srcZip.Len()))
	if err != nil {
		return err
	}

	fileNumber := 0
	zipFiles := len(z.File)

	// Count filesystem entries below testDir, excluding the root itself,
	// to compare against the number of zip entries afterwards.
	err = filepath.Walk(testDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.Name() != filepath.Base(testDir) {
			fileNumber++
		}
		return nil
	})
	if err != nil {
		return err
	}

	for _, f := range z.File {
		matchPath := filepath.Join(testDir, f.Name)
		match, err := os.Lstat(matchPath)
		if err != nil {
			return err
		}

		// check metadata, zip header contains time rounded to seconds
		fileTime := match.ModTime().Truncate(time.Second)
		zipTime := f.Modified
		if !fileTime.Equal(zipTime) {
			return fmt.Errorf("modTime does not match, got: %s, want: %s", zipTime, fileTime)
		}

		if f.Mode() != match.Mode() {
			return fmt.Errorf("mode does not match, got: %v [%08x], want: %v [%08x]", f.Mode(), uint32(f.Mode()), match.Mode(), uint32(match.Mode()))
		}
		t.Logf("Mode is %v [%08x] for %s", f.Mode(), uint32(f.Mode()), f.Name)

		switch {
		case f.FileInfo().IsDir():
			// Directory entries must carry the folder name and a
			// trailing slash, per the zip convention.
			filebase := filepath.ToSlash(match.Name())
			if filepath.Base(f.Name) != filebase {
				return fmt.Errorf("foldernames don't match got %v want %v", filepath.Base(f.Name), filebase)
			}
			if !strings.HasSuffix(f.Name, "/") {
				return fmt.Errorf("foldernames must end with separator got %v", f.Name)
			}
		case f.Mode()&os.ModeSymlink != 0:
			// Symlinks store their target as the entry's contents.
			target, err := os.Readlink(matchPath)
			if err != nil {
				return err
			}
			linkName, err := readZipFile(f)
			if err != nil {
				t.Fatal(err)
			}
			if target != string(linkName) {
				return fmt.Errorf("symlink target does not match, got %s want %s", string(linkName), target)
			}
		default:
			// Regular file: must be deflate-compressed and match
			// size and contents exactly.
			if f.Method != zip.Deflate {
				return fmt.Errorf("expected compression method got %v want %v", f.Method, zip.Deflate)
			}

			if uint64(match.Size()) != f.UncompressedSize64 {
				return fmt.Errorf("size does not match got %v want %v", f.UncompressedSize64, match.Size())
			}
			contentsFile, err := os.ReadFile(matchPath)
			if err != nil {
				t.Fatal(err)
			}
			contentsZip, err := readZipFile(f)
			if err != nil {
				t.Fatal(err)
			}
			if string(contentsZip) != string(contentsFile) {
				return fmt.Errorf("contents does not match, got %s want %s", contentsZip, contentsFile)
			}
		}
	}

	if zipFiles != fileNumber {
		return fmt.Errorf("not the same amount of files got %v want %v", zipFiles, fileNumber)
	}

	return nil
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/dump/acl.go
internal/dump/acl.go
package dump import ( "encoding/binary" "errors" "strconv" ) const ( // Permissions aclPermRead = 0x4 aclPermWrite = 0x2 aclPermExecute = 0x1 // Tags aclTagUserObj = 0x01 // Owner. aclTagUser = 0x02 aclTagGroupObj = 0x04 // Owning group. aclTagGroup = 0x08 aclTagMask = 0x10 aclTagOther = 0x20 ) // formatLinuxACL converts a Linux ACL from its binary format to the POSIX.1e // long text format. // // User and group IDs are printed in decimal, because we may be dumping // a snapshot from a different machine. // // https://man7.org/linux/man-pages/man5/acl.5.html // https://savannah.nongnu.org/projects/acl // https://simson.net/ref/1997/posix_1003.1e-990310.pdf func formatLinuxACL(acl []byte) (string, error) { if len(acl)-4 < 0 || (len(acl)-4)%8 != 0 { return "", errors.New("wrong length") } version := binary.LittleEndian.Uint32(acl) if version != 2 { return "", errors.New("unsupported ACL format version") } acl = acl[4:] text := make([]byte, 0, 2*len(acl)) for ; len(acl) >= 8; acl = acl[8:] { tag := binary.LittleEndian.Uint16(acl) perm := binary.LittleEndian.Uint16(acl[2:]) id := binary.LittleEndian.Uint32(acl[4:]) switch tag { case aclTagUserObj: text = append(text, "user:"...) case aclTagUser: text = append(text, "user:"...) text = strconv.AppendUint(text, uint64(id), 10) case aclTagGroupObj: text = append(text, "group:"...) case aclTagGroup: text = append(text, "group:"...) text = strconv.AppendUint(text, uint64(id), 10) case aclTagMask: text = append(text, "mask:"...) case aclTagOther: text = append(text, "other:"...) default: return "", errors.New("unknown tag") } text = append(text, ':') text = append(text, aclPermText(perm)...) text = append(text, '\n') } return string(text), nil } func aclPermText(p uint16) []byte { s := []byte("---") if p&aclPermRead != 0 { s[0] = 'r' } if p&aclPermWrite != 0 { s[1] = 'w' } if p&aclPermExecute != 0 { s[2] = 'x' } return s }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/dump/acl_test.go
internal/dump/acl_test.go
package dump import ( "testing" rtest "github.com/restic/restic/internal/test" ) func TestFormatLinuxACL(t *testing.T) { for _, c := range []struct { in, out, err string }{ { in: "\x02\x00\x00\x00\x01\x00\x06\x00\xff\xff\xff\xff\x02\x00" + "\x04\x00\x03\x00\x00\x00\x02\x00\x04\x00\xe9\x03\x00\x00" + "\x04\x00\x02\x00\xff\xff\xff\xff\b\x00\x01\x00'\x00\x00\x00" + "\x10\x00\a\x00\xff\xff\xff\xff \x00\x04\x00\xff\xff\xff\xff", out: "user::rw-\nuser:3:r--\nuser:1001:r--\ngroup::-w-\n" + "group:39:--x\nmask::rwx\nother::r--\n", }, { in: "\x02\x00\x00\x00\x00\x00\x06\x00\xff\xff\xff\xff\x02\x00" + "\x04\x00\x03\x00\x00\x00\x02\x00\x04\x00\xe9\x03\x00\x00" + "\x04\x00\x06\x00\xff\xff\xff\xff\b\x00\x05\x00'\x00\x00\x00" + "\x10\x00\a\x00\xff\xff\xff\xff \x00\x04\x00\xff\xff\xff\xff", err: "unknown tag", }, { in: "\x01\x00\x00\x00\x01\x00\x06\x00\xff\xff\xff\xff\x02\x00" + "\x04\x00\x03\x00\x00\x00\x02\x00\x04\x00\xe9\x03\x00\x00" + "\x04\x00\x06\x00\xff\xff\xff\xff\b\x00\x05\x00'\x00\x00\x00" + "\x10\x00\a\x00\xff\xff\xff\xff \x00\x04\x00\xff\xff\xff\xff", err: "unsupported ACL format version", }, {in: "\x02\x00", err: "wrong length"}, {in: "", err: "wrong length"}, } { out, err := formatLinuxACL([]byte(c.in)) if c.err == "" { rtest.Equals(t, c.out, out) } else { rtest.Assert(t, err != nil, "wanted %q but got nil", c.err) rtest.Equals(t, c.err, err.Error()) } } }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/dump/zip.go
internal/dump/zip.go
package dump import ( "archive/zip" "context" "path/filepath" "github.com/restic/restic/internal/data" "github.com/restic/restic/internal/errors" ) func (d *Dumper) dumpZip(ctx context.Context, ch <-chan *data.Node) (err error) { w := zip.NewWriter(d.w) defer func() { if err == nil { err = w.Close() err = errors.Wrap(err, "Close") } }() for node := range ch { if err := d.dumpNodeZip(ctx, node, w); err != nil { return err } } return nil } func (d *Dumper) dumpNodeZip(ctx context.Context, node *data.Node, zw *zip.Writer) error { relPath, err := filepath.Rel("/", node.Path) if err != nil { return err } header := &zip.FileHeader{ Name: filepath.ToSlash(relPath), UncompressedSize64: node.Size, Modified: node.ModTime, } header.SetMode(node.Mode) if node.Type == data.NodeTypeFile { header.Method = zip.Deflate } if node.Type == data.NodeTypeDir { header.Name += "/" } w, err := zw.CreateHeader(header) if err != nil { return errors.Wrap(err, "ZipHeader") } if node.Type == data.NodeTypeSymlink { if _, err = w.Write([]byte(node.LinkTarget)); err != nil { return errors.Wrap(err, "Write") } return nil } return d.writeNode(ctx, w, node) }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/dump/tar_test.go
internal/dump/tar_test.go
package dump import ( "archive/tar" "bytes" "context" "errors" "fmt" "io" "os" "path/filepath" "strings" "testing" "time" "github.com/restic/restic/internal/data" rtest "github.com/restic/restic/internal/test" ) func TestWriteTar(t *testing.T) { WriteTest(t, "tar", checkTar) } func checkTar(t *testing.T, testDir string, srcTar *bytes.Buffer) error { tr := tar.NewReader(srcTar) fileNumber := 0 tarFiles := 0 err := filepath.Walk(testDir, func(path string, info os.FileInfo, err error) error { if err != nil { return err } if info.Name() != filepath.Base(testDir) { fileNumber++ } return nil }) if err != nil { return err } for { hdr, err := tr.Next() if err == io.EOF { break } if err != nil { t.Fatal(err) } matchPath := filepath.Join(testDir, hdr.Name) match, err := os.Lstat(matchPath) if err != nil { return err } // check metadata, tar header contains time rounded to seconds fileTime := match.ModTime().Round(time.Second) tarTime := hdr.ModTime if !fileTime.Equal(tarTime) { return fmt.Errorf("modTime does not match, got: %s, want: %s", fileTime, tarTime) } if os.FileMode(hdr.Mode).Perm() != match.Mode().Perm() || os.FileMode(hdr.Mode)&^os.ModePerm != 0 { return fmt.Errorf("mode does not match, got: %v, want: %v", os.FileMode(hdr.Mode), match.Mode()) } switch hdr.Typeflag { case tar.TypeDir: // this is a folder if hdr.Name == "." 
{ // we don't need to check the root folder continue } filebase := filepath.ToSlash(match.Name()) if filepath.Base(hdr.Name) != filebase { return fmt.Errorf("foldernames don't match got %v want %v", filepath.Base(hdr.Name), filebase) } if !strings.HasSuffix(hdr.Name, "/") { return fmt.Errorf("foldernames must end with separator got %v", hdr.Name) } case tar.TypeSymlink: target, err := os.Readlink(matchPath) if err != nil { return err } if target != hdr.Linkname { return fmt.Errorf("symlink target does not match, got %s want %s", target, hdr.Linkname) } default: if match.Size() != hdr.Size { return fmt.Errorf("size does not match got %v want %v", hdr.Size, match.Size()) } contentsFile, err := os.ReadFile(matchPath) if err != nil { t.Fatal(err) } contentsTar := &bytes.Buffer{} _, err = io.Copy(contentsTar, tr) if err != nil { t.Fatal(err) } if contentsTar.String() != string(contentsFile) { return fmt.Errorf("contents does not match, got %s want %s", contentsTar, contentsFile) } } tarFiles++ } if tarFiles != fileNumber { return fmt.Errorf("not the same amount of files got %v want %v", tarFiles, fileNumber) } return nil } // #4307. func TestFieldTooLong(t *testing.T) { const maxSpecialFileSize = 1 << 20 // Unexported limit in archive/tar. node := data.Node{ Name: "file_with_xattr", Path: "/file_with_xattr", Type: data.NodeTypeFile, Mode: 0644, ExtendedAttributes: []data.ExtendedAttribute{ { Name: "user.way_too_large", Value: make([]byte, 2*maxSpecialFileSize), }, }, } d := Dumper{format: "tar"} err := d.dumpNodeTar(context.Background(), &node, tar.NewWriter(io.Discard)) // We want a tar.ErrFieldTooLong that has the filename. rtest.Assert(t, errors.Is(err, tar.ErrFieldTooLong), "wrong type %T", err) rtest.Assert(t, strings.Contains(err.Error(), node.Path), "no filename in %q", err) }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/dump/common.go
internal/dump/common.go
package dump

import (
	"context"
	"io"
	"path"

	"github.com/restic/restic/internal/bloblru"
	"github.com/restic/restic/internal/data"
	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/walker"

	"golang.org/x/sync/errgroup"
)

// A Dumper writes trees and files from a repository to a Writer
// in an archive format.
type Dumper struct {
	cache  *bloblru.Cache // LRU of data blobs, shared across all dumped files
	format string         // "tar" or "zip"
	repo   restic.Loader
	w      io.Writer
}

// New returns a Dumper that writes archives in the given format ("tar" or
// "zip") to w, loading blobs from repo through a 64 MiB LRU cache.
func New(format string, repo restic.Loader, w io.Writer) *Dumper {
	return &Dumper{
		cache:  bloblru.New(64 << 20),
		format: format,
		repo:   repo,
		w:      w,
	}
}

// DumpTree archives the given tree, rooted at rootPath inside the archive,
// to the Dumper's writer. Node traversal runs in a separate goroutine and
// feeds the format-specific writer through a buffered channel; cancelling
// ctx stops both sides. Panics if the Dumper was built with an unknown
// format.
func (d *Dumper) DumpTree(ctx context.Context, tree *data.Tree, rootPath string) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// ch is buffered to deal with variable download/write speeds.
	ch := make(chan *data.Node, 10)
	go sendTrees(ctx, d.repo, tree, rootPath, ch)

	switch d.format {
	case "tar":
		return d.dumpTar(ctx, ch)
	case "zip":
		return d.dumpZip(ctx, ch)
	default:
		panic("unknown dump format")
	}
}

// sendTrees walks each top-level node of tree (prefixed with rootPath) and
// streams all reachable nodes to ch. The channel is closed when traversal
// finishes or is aborted by context cancellation.
func sendTrees(ctx context.Context, repo restic.BlobLoader, tree *data.Tree, rootPath string, ch chan *data.Node) {
	defer close(ch)

	for _, root := range tree.Nodes {
		root.Path = path.Join(rootPath, root.Name)
		if sendNodes(ctx, repo, root, ch) != nil {
			// an error here means the context was cancelled or the
			// walk failed; stop sending further roots
			break
		}
	}
}

// sendNodes sends root itself and, if it is a directory, every file,
// directory and symlink below it (other node types are skipped). Each
// node's Path is rewritten to be relative to root.Path before sending.
func sendNodes(ctx context.Context, repo restic.BlobLoader, root *data.Node, ch chan *data.Node) error {
	select {
	case ch <- root:
	case <-ctx.Done():
		return ctx.Err()
	}

	// If this is no directory we are finished
	if root.Type != data.NodeTypeDir {
		return nil
	}

	err := walker.Walk(ctx, repo, *root.Subtree, walker.WalkVisitor{ProcessNode: func(_ restic.ID, nodepath string, node *data.Node, err error) error {
		if err != nil {
			return err
		}
		if node == nil {
			return nil
		}

		node.Path = path.Join(root.Path, nodepath)

		if node.Type != data.NodeTypeFile && node.Type != data.NodeTypeDir && node.Type != data.NodeTypeSymlink {
			return nil
		}

		select {
		case ch <- node:
		case <-ctx.Done():
			return ctx.Err()
		}

		return nil
	}})

	return err
}

// WriteNode writes a file node's contents directly to d's Writer,
// without caring about d's format.
func (d *Dumper) WriteNode(ctx context.Context, node *data.Node) error {
	return d.writeNode(ctx, d.w, node)
}

// writeNode streams the file node's data blobs to w in order. Blobs are
// loaded concurrently (bounded by the repository's connection count) but
// written sequentially: for each blob a one-element channel is created and
// queued on `blobs`, so the writer consumes results in submission order
// regardless of load completion order.
func (d *Dumper) writeNode(ctx context.Context, w io.Writer, node *data.Node) error {
	wg, ctx := errgroup.WithContext(ctx)
	limit := int(d.repo.Connections())
	wg.SetLimit(1 + limit) // +1 for the writer.
	blobs := make(chan (<-chan []byte), limit)

	// Writer.
	wg.Go(func() error {
		for ch := range blobs {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case blob := <-ch:
				if _, err := w.Write(blob); err != nil {
					return err
				}
			}
		}
		return nil
	})

	// Start short-lived goroutines to load blobs.
loop:
	for _, id := range node.Content {
		// This needs to be buffered, so that loaders can quit
		// without waiting for the writer.
		ch := make(chan []byte, 1)

		wg.Go(func() error {
			blob, err := d.cache.GetOrCompute(id, func() ([]byte, error) {
				return d.repo.LoadBlob(ctx, restic.DataBlob, id, nil)
			})
			if err == nil {
				ch <- blob
			}
			return err
		})

		select {
		case blobs <- ch:
		case <-ctx.Done():
			break loop
		}
	}

	close(blobs)
	return wg.Wait()
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/dump/tar.go
internal/dump/tar.go
package dump

import (
	"archive/tar"
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/restic/restic/internal/data"
	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"
)

// dumpTar writes every node received on ch into a tar archive on d.w.
// On success the archive trailer is written by closing the tar writer;
// an error from a node takes precedence over the Close result.
func (d *Dumper) dumpTar(ctx context.Context, ch <-chan *data.Node) (err error) {
	w := tar.NewWriter(d.w)

	defer func() {
		if err == nil {
			err = w.Close()
			err = errors.Wrap(err, "Close")
		}
	}()

	for node := range ch {
		if err := d.dumpNodeTar(ctx, node, w); err != nil {
			return err
		}
	}
	return nil
}

// copied from archive/tar.FileInfoHeader
const (
	// Mode constants from the USTAR spec:
	// See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
	cISUID = 0o4000 // Set uid
	cISGID = 0o2000 // Set gid
	cISVTX = 0o1000 // Save text (sticky bit)
)

// in a 32-bit build of restic:
// substitute a uid or gid of -1 (which was converted to 2^32 - 1) with 0
//
// Note: int(id) == -1 can only be true where int is 32 bits wide; on
// 64-bit builds this check never fires, which is the intended behavior.
func tarIdentifier(id uint32) int {
	if int(id) == -1 {
		return 0
	}
	return int(id)
}

// dumpNodeTar writes a single node as one tar entry: header (including
// ownership, timestamps, setuid/setgid/sticky bits and xattr/ACL PAX
// records) followed by the file contents for regular files. Symlinks store
// their target in the header; directories get a trailing slash.
func (d *Dumper) dumpNodeTar(ctx context.Context, node *data.Node, w *tar.Writer) error {
	relPath, err := filepath.Rel("/", node.Path)
	if err != nil {
		return err
	}

	header := &tar.Header{
		Name:       filepath.ToSlash(relPath),
		Size:       int64(node.Size),
		Mode:       int64(node.Mode.Perm()), // cIS* constants are added later
		Uid:        tarIdentifier(node.UID),
		Gid:        tarIdentifier(node.GID),
		Uname:      node.User,
		Gname:      node.Group,
		ModTime:    node.ModTime,
		AccessTime: node.AccessTime,
		ChangeTime: node.ChangeTime,
		PAXRecords: parseXattrs(node.ExtendedAttributes),
	}

	// adapted from archive/tar.FileInfoHeader
	if node.Mode&os.ModeSetuid != 0 {
		header.Mode |= cISUID
	}
	if node.Mode&os.ModeSetgid != 0 {
		header.Mode |= cISGID
	}
	if node.Mode&os.ModeSticky != 0 {
		header.Mode |= cISVTX
	}

	if node.Type == data.NodeTypeFile {
		header.Typeflag = tar.TypeReg
	}

	if node.Type == data.NodeTypeSymlink {
		header.Typeflag = tar.TypeSymlink
		header.Linkname = node.LinkTarget
	}

	if node.Type == data.NodeTypeDir {
		header.Typeflag = tar.TypeDir
		header.Name += "/"
	}

	err = w.WriteHeader(header)
	if err != nil {
		// include the path so e.g. tar.ErrFieldTooLong names the file
		return fmt.Errorf("writing header for %q: %w", node.Path, err)
	}

	return d.writeNode(ctx, w, node)
}

// parseXattrs converts a node's extended attributes into tar PAX records.
// Linux POSIX.1e ACL xattrs are translated to the SCHILY.acl.* long text
// form; unparseable ACLs are skipped with a debug log. All other xattrs are
// stored verbatim under SCHILY.xattr.<name>.
func parseXattrs(xattrs []data.ExtendedAttribute) map[string]string {
	tmpMap := make(map[string]string)

	for _, attr := range xattrs {
		// Check for Linux POSIX.1e ACLs.
		//
		// TODO support ACLs from other operating systems.
		// FreeBSD ACLs have names "posix1e.acl_(access|default)",
		// but their binary format may not match the Linux format.
		aclKey := ""
		switch attr.Name {
		case "system.posix_acl_access":
			aclKey = "SCHILY.acl.access"
		case "system.posix_acl_default":
			aclKey = "SCHILY.acl.default"
		}

		if aclKey != "" {
			text, err := formatLinuxACL(attr.Value)
			if err != nil {
				debug.Log("parsing Linux ACL: %v, skipping", err)
				continue
			}
			tmpMap[aclKey] = text
		} else {
			tmpMap["SCHILY.xattr."+attr.Name] = string(attr.Value)
		}
	}

	return tmpMap
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/tree.go
internal/data/tree.go
package data

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"path"
	"sort"
	"strings"

	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/restic"

	"github.com/restic/restic/internal/debug"
)

// Tree is an ordered list of nodes.
type Tree struct {
	Nodes []*Node `json:"nodes"`
}

// NewTree creates a new tree object with the given initial capacity.
func NewTree(capacity int) *Tree {
	return &Tree{
		Nodes: make([]*Node, 0, capacity),
	}
}

// String returns a short summary of the tree.
func (t *Tree) String() string {
	return fmt.Sprintf("Tree<%d nodes>", len(t.Nodes))
}

// Equals returns true if t and other have exactly the same nodes.
func (t *Tree) Equals(other *Tree) bool {
	if len(t.Nodes) != len(other.Nodes) {
		debug.Log("tree.Equals(): trees have different number of nodes")
		return false
	}

	for i := 0; i < len(t.Nodes); i++ {
		if !t.Nodes[i].Equals(*other.Nodes[i]) {
			debug.Log("tree.Equals(): node %d is different:", i)
			debug.Log("  %#v", t.Nodes[i])
			debug.Log("  %#v", other.Nodes[i])
			return false
		}
	}

	return true
}

// Insert adds a new node at the correct place in the tree. The nodes slice
// must already be sorted by name; duplicate names are rejected.
func (t *Tree) Insert(node *Node) error {
	pos, found := t.find(node.Name)
	if found != nil {
		return errors.Errorf("node %q already present", node.Name)
	}

	// https://github.com/golang/go/wiki/SliceTricks
	t.Nodes = append(t.Nodes, nil)
	copy(t.Nodes[pos+1:], t.Nodes[pos:])
	t.Nodes[pos] = node

	return nil
}

// find binary-searches the (name-sorted) nodes and returns the position
// where name belongs, plus the node itself if it is present.
func (t *Tree) find(name string) (int, *Node) {
	pos := sort.Search(len(t.Nodes), func(i int) bool {
		return t.Nodes[i].Name >= name
	})

	if pos < len(t.Nodes) && t.Nodes[pos].Name == name {
		return pos, t.Nodes[pos]
	}

	return pos, nil
}

// Find returns a node with the given name, or nil if none could be found.
func (t *Tree) Find(name string) *Node {
	if t == nil {
		return nil
	}

	_, node := t.find(name)
	return node
}

// Sort sorts the nodes by name.
func (t *Tree) Sort() {
	list := Nodes(t.Nodes)
	sort.Sort(list)
	t.Nodes = list
}

// Subtrees returns a slice of all subtree IDs of the tree.
func (t *Tree) Subtrees() (trees restic.IDs) {
	for _, node := range t.Nodes {
		if node.Type == NodeTypeDir && node.Subtree != nil {
			trees = append(trees, *node.Subtree)
		}
	}

	return trees
}

// LoadTree loads a tree from the repository.
func LoadTree(ctx context.Context, r restic.BlobLoader, id restic.ID) (*Tree, error) {
	debug.Log("load tree %v", id)

	buf, err := r.LoadBlob(ctx, restic.TreeBlob, id, nil)
	if err != nil {
		return nil, err
	}

	t := &Tree{}
	err = json.Unmarshal(buf, t)
	if err != nil {
		return nil, err
	}

	return t, nil
}

// SaveTree stores a tree into the repository and returns the ID. The ID is
// checked against the index. The tree is only stored when the index does not
// contain the ID.
func SaveTree(ctx context.Context, r restic.BlobSaver, t *Tree) (restic.ID, error) {
	buf, err := json.Marshal(t)
	if err != nil {
		return restic.ID{}, errors.Wrap(err, "MarshalJSON")
	}

	// append a newline so that the data is always consistent (json.Encoder
	// adds a newline after each object)
	buf = append(buf, '\n')

	id, _, _, err := r.SaveBlob(ctx, restic.TreeBlob, buf, restic.ID{}, false)
	return id, err
}

// ErrTreeNotOrdered is returned by TreeJSONBuilder.AddNode when nodes are
// added out of name order or with duplicate names.
var ErrTreeNotOrdered = errors.New("nodes are not ordered or duplicate")

// TreeJSONBuilder builds the JSON serialization of a tree incrementally,
// without keeping all nodes in memory. Nodes must be added in strictly
// increasing name order so the result matches a marshalled Tree.
type TreeJSONBuilder struct {
	buf      bytes.Buffer
	lastName string // name of the most recently added node, for order checks
}

// NewTreeJSONBuilder returns a builder with the JSON prefix already written.
func NewTreeJSONBuilder() *TreeJSONBuilder {
	tb := &TreeJSONBuilder{}
	_, _ = tb.buf.WriteString(`{"nodes":[`)
	return tb
}

// AddNode appends the JSON serialization of node. It returns
// ErrTreeNotOrdered if node.Name is not strictly greater than the
// previously added name.
func (builder *TreeJSONBuilder) AddNode(node *Node) error {
	if node.Name <= builder.lastName {
		return fmt.Errorf("node %q, last %q: %w", node.Name, builder.lastName, ErrTreeNotOrdered)
	}
	if builder.lastName != "" {
		_ = builder.buf.WriteByte(',')
	}
	builder.lastName = node.Name

	val, err := json.Marshal(node)
	if err != nil {
		return err
	}

	_, _ = builder.buf.Write(val)
	return nil
}

// Finalize closes the JSON document and returns the serialized tree.
// The builder must not be used afterwards.
func (builder *TreeJSONBuilder) Finalize() ([]byte, error) {
	// append a newline so that the data is always consistent (json.Encoder
	// adds a newline after each object)
	_, _ = builder.buf.WriteString("]}\n")
	buf := builder.buf.Bytes()
	// drop reference to buffer
	builder.buf = bytes.Buffer{}
	return buf, nil
}

// FindTreeDirectory descends from the tree with the given id along the
// slash-separated path dir (empty and "." components are ignored) and
// returns the subtree ID of the final directory. It fails if any component
// is missing or is not a directory.
func FindTreeDirectory(ctx context.Context, repo restic.BlobLoader, id *restic.ID, dir string) (*restic.ID, error) {
	if id == nil {
		return nil, errors.New("tree id is null")
	}

	dirs := strings.Split(path.Clean(dir), "/")
	subfolder := ""

	for _, name := range dirs {
		if name == "" || name == "." {
			continue
		}
		subfolder = path.Join(subfolder, name)
		tree, err := LoadTree(ctx, repo, *id)
		if err != nil {
			return nil, fmt.Errorf("path %s: %w", subfolder, err)
		}
		node := tree.Find(name)
		if node == nil {
			return nil, fmt.Errorf("path %s: not found", subfolder)
		}
		if node.Type != NodeTypeDir || node.Subtree == nil {
			return nil, fmt.Errorf("path %s: not a directory", subfolder)
		}
		id = node.Subtree
	}
	return id, nil
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/tag_list.go
internal/data/tag_list.go
package data import ( "fmt" "strings" ) // TagList is a list of tags. type TagList []string // splitTagList splits a string into a list of tags. The tags in the string // need to be separated by commas. Whitespace is stripped around the individual // tags. func splitTagList(s string) (l TagList) { for _, t := range strings.Split(s, ",") { l = append(l, strings.TrimSpace(t)) } return l } func (l TagList) String() string { return "[" + strings.Join(l, ", ") + "]" } // Set updates the TagList's value. func (l *TagList) Set(s string) error { *l = splitTagList(s) return nil } // Type returns a description of the type. func (TagList) Type() string { return "TagList" } // TagLists consists of several TagList. type TagLists []TagList func (l TagLists) String() string { return fmt.Sprint([]TagList(l)) } // Flatten returns the list of all tags provided in TagLists func (l TagLists) Flatten() (tags TagList) { tags = make([]string, 0) for _, list := range l { for _, tag := range list { if tag != "" { tags = append(tags, tag) } } } return tags } // Set updates the TagList's value. func (l *TagLists) Set(s string) error { *l = append(*l, splitTagList(s)) return nil } // Type returns a description of the type. func (TagLists) Type() string { return "TagLists" }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/tree_stream.go
internal/data/tree_stream.go
package data

import (
	"context"
	"errors"
	"runtime"
	"sync"

	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/ui/progress"

	"golang.org/x/sync/errgroup"
)

// TreeItem is used to return either an error or the tree for a tree id
type TreeItem struct {
	restic.ID
	Error error
	*Tree
}

// trackedTreeItem is a TreeItem tagged with the index of the root tree it
// descends from, used for per-root progress accounting.
type trackedTreeItem struct {
	TreeItem
	rootIdx int
}

// trackedID is a tree ID tagged with the index of its root tree.
type trackedID struct {
	restic.ID
	rootIdx int
}

// loadTreeWorker loads trees from repo and sends them to out.
func loadTreeWorker(ctx context.Context, repo restic.Loader, in <-chan trackedID, out chan<- trackedTreeItem) {
	for treeID := range in {
		tree, err := LoadTree(ctx, repo, treeID.ID)
		debug.Log("load tree %v (%v) returned err: %v", tree, treeID, err)
		job := trackedTreeItem{TreeItem: TreeItem{ID: treeID.ID, Error: err, Tree: tree}, rootIdx: treeID.rootIdx}

		select {
		case <-ctx.Done():
			return
		case out <- job:
		}
	}
}

// filterTrees coordinates the tree traversal: it keeps a depth-first
// backlog of tree IDs to load, dispatches them to the loader workers
// (huge trees go to the dedicated hugeTreeLoaderChan), collects loaded
// trees from in, pushes their subtrees back onto the backlog, and forwards
// each result to out. rootCounter tracks the number of outstanding trees
// per root so the progress counter p is bumped exactly once per fully
// processed root.
//
// The select uses the nil-channel idiom: loadCh/outCh/inCh are set to nil
// to disable the corresponding case until there is something to send or
// room to receive.
func filterTrees(ctx context.Context, repo restic.Loader, trees restic.IDs, loaderChan chan<- trackedID, hugeTreeLoaderChan chan<- trackedID, in <-chan trackedTreeItem, out chan<- TreeItem, skip func(tree restic.ID) bool, p *progress.Counter) {
	var (
		inCh                    = in
		outCh                   chan<- TreeItem
		loadCh                  chan<- trackedID
		job                     TreeItem
		nextTreeID              trackedID
		outstandingLoadTreeJobs = 0
	)

	rootCounter := make([]int, len(trees))
	backlog := make([]trackedID, 0, len(trees))
	for idx, id := range trees {
		backlog = append(backlog, trackedID{ID: id, rootIdx: idx})
		rootCounter[idx] = 1
	}

	for {
		if loadCh == nil && len(backlog) > 0 {
			// process last added ids first, that is traverse the tree in depth-first order
			ln := len(backlog) - 1
			nextTreeID, backlog = backlog[ln], backlog[:ln]

			if skip(nextTreeID.ID) {
				rootCounter[nextTreeID.rootIdx]--
				if p != nil && rootCounter[nextTreeID.rootIdx] == 0 {
					p.Add(1)
				}
				continue
			}

			// route very large tree blobs to the dedicated worker so
			// they cannot stall all regular workers at once
			treeSize, found := repo.LookupBlobSize(restic.TreeBlob, nextTreeID.ID)
			if found && treeSize > 50*1024*1024 {
				loadCh = hugeTreeLoaderChan
			} else {
				loadCh = loaderChan
			}
		}

		if loadCh == nil && outCh == nil && outstandingLoadTreeJobs == 0 {
			debug.Log("backlog is empty, all channels nil, exiting")
			return
		}

		select {
		case <-ctx.Done():
			return

		case loadCh <- nextTreeID:
			outstandingLoadTreeJobs++
			loadCh = nil

		case j, ok := <-inCh:
			if !ok {
				debug.Log("input channel closed")
				inCh = nil
				in = nil
				continue
			}

			outstandingLoadTreeJobs--
			rootCounter[j.rootIdx]--

			debug.Log("input job tree %v", j.ID)

			if j.Error != nil {
				debug.Log("received job with error: %v (tree %v, ID %v)", j.Error, j.Tree, j.ID)
			} else if j.Tree == nil {
				debug.Log("received job with nil tree pointer: %v (ID %v)", j.Error, j.ID)
				// send a new job with the new error instead of the old one
				j = trackedTreeItem{TreeItem: TreeItem{ID: j.ID, Error: errors.New("tree is nil and error is nil")}, rootIdx: j.rootIdx}
			} else {
				subtrees := j.Tree.Subtrees()
				debug.Log("subtrees for tree %v: %v", j.ID, subtrees)
				// iterate backwards over subtree to compensate backwards traversal order of nextTreeID selection
				for i := len(subtrees) - 1; i >= 0; i-- {
					id := subtrees[i]
					if id.IsNull() {
						// We do not need to raise this error here, it is
						// checked when the tree is checked. Just make sure
						// that we do not add any null IDs to the backlog.
						debug.Log("tree %v has nil subtree", j.ID)
						continue
					}
					backlog = append(backlog, trackedID{ID: id, rootIdx: j.rootIdx})
					rootCounter[j.rootIdx]++
				}
			}
			if p != nil && rootCounter[j.rootIdx] == 0 {
				p.Add(1)
			}

			job = j.TreeItem
			outCh = out
			inCh = nil

		case outCh <- job:
			debug.Log("tree sent to process: %v", job.ID)
			outCh = nil
			inCh = in
		}
	}
}

// StreamTrees iteratively loads the given trees and their subtrees. The skip method
// is guaranteed to always be called from the same goroutine. To shutdown the started
// goroutines, either read all items from the channel or cancel the context. Then `Wait()`
// on the errgroup until all goroutines were stopped.
func StreamTrees(ctx context.Context, wg *errgroup.Group, repo restic.Loader, trees restic.IDs, skip func(tree restic.ID) bool, p *progress.Counter) <-chan TreeItem {
	loaderChan := make(chan trackedID)
	hugeTreeChan := make(chan trackedID, 10)
	loadedTreeChan := make(chan trackedTreeItem)
	treeStream := make(chan TreeItem)

	var loadTreeWg sync.WaitGroup

	// decoding a tree can take quite some time such that this can be both CPU- or IO-bound
	// one extra worker to handle huge tree blobs
	workerCount := int(repo.Connections()) + runtime.GOMAXPROCS(0) + 1
	for i := 0; i < workerCount; i++ {
		workerLoaderChan := loaderChan
		if i == 0 {
			// the first worker exclusively serves the huge-tree queue
			workerLoaderChan = hugeTreeChan
		}
		loadTreeWg.Add(1)
		wg.Go(func() error {
			defer loadTreeWg.Done()
			loadTreeWorker(ctx, repo, workerLoaderChan, loadedTreeChan)
			return nil
		})
	}

	// close once all loadTreeWorkers have completed
	wg.Go(func() error {
		loadTreeWg.Wait()
		close(loadedTreeChan)
		return nil
	})

	wg.Go(func() error {
		defer close(loaderChan)
		defer close(hugeTreeChan)
		defer close(treeStream)
		filterTrees(ctx, repo, trees, loaderChan, hugeTreeChan, loadedTreeChan, treeStream, skip, p)
		return nil
	})
	return treeStream
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/snapshot_policy_test.go
internal/data/snapshot_policy_test.go
package data_test

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"

	"github.com/restic/restic/internal/data"
)

// parseTimeUTC parses s in "2006-01-02 15:04:05" layout and returns it in UTC.
// It panics on malformed input; only used with literal test fixtures.
func parseTimeUTC(s string) time.Time {
	t, err := time.Parse("2006-01-02 15:04:05", s)
	if err != nil {
		panic(err)
	}

	return t.UTC()
}

// Returns the maximum number of snapshots to be kept according to this policy.
// If any of the counts is -1 it will return 0.
func policySum(e *data.ExpirePolicy) int {
	if e.Last == -1 || e.Hourly == -1 || e.Daily == -1 || e.Weekly == -1 || e.Monthly == -1 || e.Yearly == -1 {
		return 0
	}

	return e.Last + e.Hourly + e.Daily + e.Weekly + e.Monthly + e.Yearly
}

// TestExpireSnapshotOps checks ExpirePolicy.Empty() and the policySum helper
// against a small table of policies.
func TestExpireSnapshotOps(t *testing.T) {
	// note: the slice name shadows the imported package only after this
	// declaration; the field type data.ExpirePolicy still resolves to the package
	data := []struct {
		expectEmpty bool
		expectSum   int
		p           *data.ExpirePolicy
	}{
		{true, 0, &data.ExpirePolicy{}},
		{true, 0, &data.ExpirePolicy{Tags: []data.TagList{}}},
		{false, 22, &data.ExpirePolicy{Daily: 7, Weekly: 2, Monthly: 3, Yearly: 10}},
	}
	for i, d := range data {
		isEmpty := d.p.Empty()
		if isEmpty != d.expectEmpty {
			t.Errorf("empty test %v: wrong result, want:\n %#v\ngot:\n %#v", i, d.expectEmpty, isEmpty)
		}
		hasSum := policySum(d.p)
		if hasSum != d.expectSum {
			t.Errorf("sum test %v: wrong result, want:\n %#v\ngot:\n %#v", i, d.expectSum, hasSum)
		}
	}
}

// ApplyPolicyResult is used to marshal/unmarshal the golden files for
// TestApplyPolicy.
type ApplyPolicyResult struct {
	Keep    data.Snapshots    `json:"keep"`
	Reasons []data.KeepReason `json:"reasons,omitempty"`
}

// loadGoldenFile reads and unmarshals the golden file; it fails the test on
// any read or decode error.
func loadGoldenFile(t testing.TB, filename string) (res ApplyPolicyResult) {
	buf, err := os.ReadFile(filename)
	if err != nil {
		t.Fatalf("error loading golden file %v: %v", filename, err)
	}

	err = json.Unmarshal(buf, &res)
	if err != nil {
		t.Fatalf("error unmarshalling golden file %v: %v", filename, err)
	}

	return res
}

// saveGoldenFile writes the expected result as indented JSON; used when the
// golden files are regenerated via the update flag.
func saveGoldenFile(t testing.TB, filename string, keep data.Snapshots, reasons []data.KeepReason) {
	res := ApplyPolicyResult{
		Keep:    keep,
		Reasons: reasons,
	}

	buf, err := json.MarshalIndent(res, "", " ")
	if err != nil {
		t.Fatalf("error marshaling result: %v", err)
	}

	if err = os.WriteFile(filename, buf, 0644); err != nil {
		t.Fatalf("unable to update golden file: %v", err)
	}
}

// TestApplyPolicy runs data.ApplyPolicy with a fixed snapshot fixture against
// a table of policies and compares keep/reason results with golden files
// (testdata/policy_keep_snapshots_<i>).
func TestApplyPolicy(t *testing.T) {
	// fixture: snapshot timestamps spanning 2014-2016, some with tags/paths
	var testExpireSnapshots = data.Snapshots{
		{Time: parseTimeUTC("2014-09-01 10:20:30")},
		{Time: parseTimeUTC("2014-09-02 10:20:30")},
		{Time: parseTimeUTC("2014-09-05 10:20:30")},
		{Time: parseTimeUTC("2014-09-06 10:20:30")},
		{Time: parseTimeUTC("2014-09-08 10:20:30")},
		{Time: parseTimeUTC("2014-09-09 10:20:30")},
		{Time: parseTimeUTC("2014-09-10 10:20:30")},
		{Time: parseTimeUTC("2014-09-11 10:20:30")},
		{Time: parseTimeUTC("2014-09-20 10:20:30")},
		{Time: parseTimeUTC("2014-09-22 10:20:30")},
		{Time: parseTimeUTC("2014-08-08 10:20:30")},
		{Time: parseTimeUTC("2014-08-10 10:20:30")},
		{Time: parseTimeUTC("2014-08-12 10:20:30")},
		{Time: parseTimeUTC("2014-08-13 10:20:30")},
		{Time: parseTimeUTC("2014-08-13 10:20:30.1")},
		{Time: parseTimeUTC("2014-08-15 10:20:30")},
		{Time: parseTimeUTC("2014-08-18 10:20:30")},
		{Time: parseTimeUTC("2014-08-20 10:20:30")},
		{Time: parseTimeUTC("2014-08-21 10:20:30")},
		{Time: parseTimeUTC("2014-08-22 10:20:30")},
		{Time: parseTimeUTC("2014-10-01 10:20:30"), Tags: []string{"foo"}},
		{Time: parseTimeUTC("2014-10-02 10:20:30"), Tags: []string{"foo"}},
		{Time: parseTimeUTC("2014-10-05 10:20:30"), Tags: []string{"foo"}},
		{Time: parseTimeUTC("2014-10-06 10:20:30"), Tags: []string{"foo"}},
		{Time: parseTimeUTC("2014-10-08 10:20:30"), Tags: []string{"foo"}},
		{Time: parseTimeUTC("2014-10-09 10:20:30"), Tags: []string{"foo"}},
		{Time: parseTimeUTC("2014-10-10 10:20:30"), Tags: []string{"foo"}},
		{Time: parseTimeUTC("2014-10-11 10:20:30"), Tags: []string{"foo"}},
		{Time: parseTimeUTC("2014-10-20 10:20:30"), Tags: []string{"foo"}},
		{Time: parseTimeUTC("2014-10-22 10:20:30"), Tags: []string{"foo"}},
		{Time: parseTimeUTC("2014-11-08 10:20:30"), Tags: []string{"foo"}},
		{Time: parseTimeUTC("2014-11-10 10:20:30"), Tags: []string{"foo"}},
		{Time: parseTimeUTC("2014-11-12 10:20:30"), Tags: []string{"foo"}},
		{Time: parseTimeUTC("2014-11-13 10:20:30"), Tags: []string{"foo"}},
		{Time: parseTimeUTC("2014-11-13 10:20:30.1"), Tags: []string{"bar"}},
		{Time: parseTimeUTC("2014-11-15 10:20:30"), Tags: []string{"foo", "bar"}},
		{Time: parseTimeUTC("2014-11-18 10:20:30")},
		{Time: parseTimeUTC("2014-11-20 10:20:30")},
		{Time: parseTimeUTC("2014-11-21 10:20:30")},
		{Time: parseTimeUTC("2014-11-22 10:20:30")},
		{Time: parseTimeUTC("2015-09-01 10:20:30")},
		{Time: parseTimeUTC("2015-09-02 10:20:30")},
		{Time: parseTimeUTC("2015-09-05 10:20:30")},
		{Time: parseTimeUTC("2015-09-06 10:20:30")},
		{Time: parseTimeUTC("2015-09-08 10:20:30")},
		{Time: parseTimeUTC("2015-09-09 10:20:30")},
		{Time: parseTimeUTC("2015-09-10 10:20:30")},
		{Time: parseTimeUTC("2015-09-11 10:20:30")},
		{Time: parseTimeUTC("2015-09-20 10:20:30")},
		{Time: parseTimeUTC("2015-09-22 10:20:30")},
		{Time: parseTimeUTC("2015-08-08 10:20:30")},
		{Time: parseTimeUTC("2015-08-10 10:20:30")},
		{Time: parseTimeUTC("2015-08-12 10:20:30")},
		{Time: parseTimeUTC("2015-08-13 10:20:30")},
		{Time: parseTimeUTC("2015-08-13 10:20:30.1")},
		{Time: parseTimeUTC("2015-08-15 10:20:30")},
		{Time: parseTimeUTC("2015-08-18 10:20:30")},
		{Time: parseTimeUTC("2015-08-20 10:20:30")},
		{Time: parseTimeUTC("2015-08-21 10:20:30")},
		{Time: parseTimeUTC("2015-08-22 10:20:30")},
		{Time: parseTimeUTC("2015-10-01 10:20:30")},
		{Time: parseTimeUTC("2015-10-02 10:20:30")},
		{Time: parseTimeUTC("2015-10-05 10:20:30")},
		{Time: parseTimeUTC("2015-10-06 10:20:30")},
		{Time: parseTimeUTC("2015-10-08 10:20:30")},
		{Time: parseTimeUTC("2015-10-09 10:20:30")},
		{Time: parseTimeUTC("2015-10-10 10:20:30")},
		{Time: parseTimeUTC("2015-10-11 10:20:30")},
		{Time: parseTimeUTC("2015-10-20 10:20:30")},
		{Time: parseTimeUTC("2015-10-22 10:20:30")},
		{Time: parseTimeUTC("2015-10-22 10:20:30")},
		{Time: parseTimeUTC("2015-10-22 10:20:30"), Tags: []string{"foo", "bar"}},
		{Time: parseTimeUTC("2015-10-22 10:20:30"), Tags: []string{"foo", "bar"}},
		{Time: parseTimeUTC("2015-10-22 10:20:30"), Tags: []string{"foo", "bar"}, Paths: []string{"path1", "path2"}},
		{Time: parseTimeUTC("2015-11-08 10:20:30")},
		{Time: parseTimeUTC("2015-11-10 10:20:30")},
		{Time: parseTimeUTC("2015-11-12 10:20:30")},
		{Time: parseTimeUTC("2015-11-13 10:20:30")},
		{Time: parseTimeUTC("2015-11-13 10:20:30.1")},
		{Time: parseTimeUTC("2015-11-15 10:20:30")},
		{Time: parseTimeUTC("2015-11-18 10:20:30")},
		{Time: parseTimeUTC("2015-11-20 10:20:30")},
		{Time: parseTimeUTC("2015-11-21 10:20:30")},
		{Time: parseTimeUTC("2015-11-22 10:20:30")},
		{Time: parseTimeUTC("2016-01-01 01:02:03")},
		{Time: parseTimeUTC("2016-01-01 01:03:03")},
		{Time: parseTimeUTC("2016-01-01 07:08:03")},
		{Time: parseTimeUTC("2016-01-03 07:02:03")},
		{Time: parseTimeUTC("2016-01-04 10:23:03")},
		{Time: parseTimeUTC("2016-01-04 11:23:03")},
		{Time: parseTimeUTC("2016-01-04 12:23:03")},
		{Time: parseTimeUTC("2016-01-04 12:24:03")},
		{Time: parseTimeUTC("2016-01-04 12:28:03")},
		{Time: parseTimeUTC("2016-01-04 12:30:03")},
		{Time: parseTimeUTC("2016-01-04 16:23:03")},
		{Time: parseTimeUTC("2016-01-05 09:02:03")},
		{Time: parseTimeUTC("2016-01-06 08:02:03")},
		{Time: parseTimeUTC("2016-01-07 10:02:03")},
		{Time: parseTimeUTC("2016-01-08 20:02:03")},
		{Time: parseTimeUTC("2016-01-09 21:02:03")},
		{Time: parseTimeUTC("2016-01-12 21:02:03")},
		{Time: parseTimeUTC("2016-01-12 21:08:03")},
		{Time: parseTimeUTC("2016-01-18 12:02:03")},
	}

	// one golden file per policy, indexed by position in this table
	var tests = []data.ExpirePolicy{
		{},
		{Last: 10},
		{Last: 15},
		{Last: 99},
		{Last: 200},
		{Hourly: 20},
		{Daily: 3},
		{Daily: 10},
		{Daily: 30},
		{Last: 5, Daily: 5},
		{Last: 2, Daily: 10},
		{Weekly: 2},
		{Weekly: 4},
		{Daily: 3, Weekly: 4},
		{Monthly: 6},
		{Daily: 2, Weekly: 2, Monthly: 6},
		{Yearly: 10},
		{Daily: 7, Weekly: 2, Monthly: 3, Yearly: 10},
		{Tags: []data.TagList{{"foo"}}},
		{Tags: []data.TagList{{"foo", "bar"}}},
		{Tags: []data.TagList{{"foo"}, {"bar"}}},
		{Within: data.ParseDurationOrPanic("1d")},
		{Within: data.ParseDurationOrPanic("2d")},
		{Within: data.ParseDurationOrPanic("7d")},
		{Within: data.ParseDurationOrPanic("1m")},
		{Within: data.ParseDurationOrPanic("1m14d")},
		{Within: data.ParseDurationOrPanic("1y1d1m")},
		{Within: data.ParseDurationOrPanic("13d23h")},
		{Within: data.ParseDurationOrPanic("2m2h")},
		{Within: data.ParseDurationOrPanic("1y2m3d3h")},
		{WithinHourly: data.ParseDurationOrPanic("1y2m3d3h")},
		{WithinDaily: data.ParseDurationOrPanic("1y2m3d3h")},
		{WithinWeekly: data.ParseDurationOrPanic("1y2m3d3h")},
		{WithinMonthly: data.ParseDurationOrPanic("1y2m3d3h")},
		{WithinYearly: data.ParseDurationOrPanic("1y2m3d3h")},
		{Within: data.ParseDurationOrPanic("1h"),
			WithinHourly:  data.ParseDurationOrPanic("1d"),
			WithinDaily:   data.ParseDurationOrPanic("7d"),
			WithinWeekly:  data.ParseDurationOrPanic("1m"),
			WithinMonthly: data.ParseDurationOrPanic("1y"),
			WithinYearly:  data.ParseDurationOrPanic("9999y")},
		{Last: -1}, // keep all
		{Last: -1, Hourly: -1}, // keep all (Last overrides Hourly)
		{Hourly: -1}, // keep all hourlies
		{Daily: 3, Weekly: 2, Monthly: -1, Yearly: -1},
	}

	for i, p := range tests {
		t.Run("", func(t *testing.T) {
			keep, remove, reasons := data.ApplyPolicy(testExpireSnapshots, p)

			// every snapshot must land in exactly one of keep/remove
			if len(keep)+len(remove) != len(testExpireSnapshots) {
				t.Errorf("len(keep)+len(remove) = %d != len(testExpireSnapshots) = %d", len(keep)+len(remove), len(testExpireSnapshots))
			}

			if policySum(&p) > 0 && len(keep) > policySum(&p) {
				t.Errorf("not enough snapshots removed: policy allows %v snapshots to remain, but ended up with %v", policySum(&p), len(keep))
			}

			if len(keep) != len(reasons) {
				t.Errorf("got %d keep reasons for %d snapshots to keep, these must be equal", len(reasons), len(keep))
			}

			goldenFilename := filepath.Join("testdata", fmt.Sprintf("policy_keep_snapshots_%d", i))

			// updateGoldenFiles is a flag declared elsewhere in this test package
			if *updateGoldenFiles {
				saveGoldenFile(t, goldenFilename, keep, reasons)
			}

			want := loadGoldenFile(t, goldenFilename)

			cmpOpts := cmpopts.IgnoreUnexported(data.Snapshot{})

			if !cmp.Equal(want.Keep, keep, cmpOpts) {
				t.Error(cmp.Diff(want.Keep, keep, cmpOpts))
			}

			if !cmp.Equal(want.Reasons, reasons, cmpOpts) {
				t.Error(cmp.Diff(want.Reasons, reasons, cmpOpts))
			}
		})
	}
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/snapshot.go
internal/data/snapshot.go
package data

import (
	"context"
	"fmt"
	"os/user"
	"path/filepath"
	"sync"
	"time"

	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/restic"
)

// Snapshot is the state of a resource at one point in time.
type Snapshot struct {
	Time           time.Time        `json:"time"`
	Parent         *restic.ID       `json:"parent,omitempty"`
	Tree           *restic.ID       `json:"tree"`
	Paths          []string         `json:"paths"`
	Hostname       string           `json:"hostname,omitempty"`
	Username       string           `json:"username,omitempty"`
	UID            uint32           `json:"uid,omitempty"`
	GID            uint32           `json:"gid,omitempty"`
	Excludes       []string         `json:"excludes,omitempty"`
	Tags           []string         `json:"tags,omitempty"`
	Original       *restic.ID       `json:"original,omitempty"`
	ProgramVersion string           `json:"program_version,omitempty"`
	Summary        *SnapshotSummary `json:"summary,omitempty"`

	id *restic.ID // plaintext ID, used during restore
}

// SnapshotSummary holds the backup statistics recorded for a snapshot.
type SnapshotSummary struct {
	BackupStart time.Time `json:"backup_start"`
	BackupEnd   time.Time `json:"backup_end"`

	// statistics from the backup json output
	FilesNew            uint   `json:"files_new"`
	FilesChanged        uint   `json:"files_changed"`
	FilesUnmodified     uint   `json:"files_unmodified"`
	DirsNew             uint   `json:"dirs_new"`
	DirsChanged         uint   `json:"dirs_changed"`
	DirsUnmodified      uint   `json:"dirs_unmodified"`
	DataBlobs           int    `json:"data_blobs"`
	TreeBlobs           int    `json:"tree_blobs"`
	DataAdded           uint64 `json:"data_added"`
	DataAddedPacked     uint64 `json:"data_added_packed"`
	TotalFilesProcessed uint   `json:"total_files_processed"`
	TotalBytesProcessed uint64 `json:"total_bytes_processed"`
}

// NewSnapshot returns an initialized snapshot struct for the current user and
// time.
func NewSnapshot(paths []string, tags []string, hostname string, time time.Time) (*Snapshot, error) {
	// store absolute paths where possible; fall back to the given path if
	// Abs fails
	absPaths := make([]string, 0, len(paths))
	for _, path := range paths {
		p, err := filepath.Abs(path)
		if err == nil {
			absPaths = append(absPaths, p)
		} else {
			absPaths = append(absPaths, path)
		}
	}

	sn := &Snapshot{
		Paths:    absPaths,
		Time:     time,
		Tags:     tags,
		Hostname: hostname,
	}

	err := sn.fillUserInfo()
	if err != nil {
		return nil, err
	}

	return sn, nil
}

// LoadSnapshot loads the snapshot with the id and returns it.
func LoadSnapshot(ctx context.Context, loader restic.LoaderUnpacked, id restic.ID) (*Snapshot, error) {
	sn := &Snapshot{id: &id}
	err := restic.LoadJSONUnpacked(ctx, loader, restic.SnapshotFile, id, sn)
	if err != nil {
		return nil, fmt.Errorf("failed to load snapshot %v: %w", id.Str(), err)
	}

	return sn, nil
}

// SaveSnapshot saves the snapshot sn and returns its ID.
func SaveSnapshot(ctx context.Context, repo restic.SaverUnpacked[restic.WriteableFileType], sn *Snapshot) (restic.ID, error) {
	return restic.SaveJSONUnpacked(ctx, repo, restic.WriteableSnapshotFile, sn)
}

// ForAllSnapshots reads all snapshots in parallel and calls the
// given function. It is guaranteed that the function is not run concurrently.
// If the called function returns an error, this function is cancelled and
// also returns this error.
// If a snapshot ID is in excludeIDs, it will be ignored.
func ForAllSnapshots(ctx context.Context, be restic.Lister, loader restic.LoaderUnpacked, excludeIDs restic.IDSet, fn func(restic.ID, *Snapshot, error) error) error {
	// the mutex serializes the fn callbacks across the parallel loaders
	var m sync.Mutex

	// For most snapshots decoding is nearly for free, thus just assume we're only limited by IO
	return restic.ParallelList(ctx, be, restic.SnapshotFile, loader.Connections(), func(ctx context.Context, id restic.ID, _ int64) error {
		if excludeIDs.Has(id) {
			return nil
		}

		sn, err := LoadSnapshot(ctx, loader, id)
		m.Lock()
		defer m.Unlock()
		return fn(id, sn, err)
	})
}

// String returns a human-readable one-line description of the snapshot.
// NOTE(review): dereferences sn.id, so it presumably must only be called on
// snapshots that carry an ID — confirm against callers.
func (sn Snapshot) String() string {
	return fmt.Sprintf("snapshot %s of %v at %s by %s@%s",
		sn.id.Str(), sn.Paths, sn.Time, sn.Username, sn.Hostname)
}

// ID returns the snapshot's ID.
func (sn Snapshot) ID() *restic.ID {
	return sn.id
}

// fillUserInfo sets Username, UID and GID from the current OS user.
// If the current user cannot be determined, the fields are left empty
// (best-effort, deliberately not an error).
func (sn *Snapshot) fillUserInfo() error {
	usr, err := user.Current()
	if err != nil {
		return nil
	}
	sn.Username = usr.Username

	// set userid and groupid
	sn.UID, sn.GID, err = restic.UidGidInt(usr)
	return err
}

// AddTags adds the given tags to the snapshots tags, preventing duplicates.
// It returns true if any changes were made.
func (sn *Snapshot) AddTags(addTags []string) (changed bool) {
nextTag:
	for _, add := range addTags {
		for _, tag := range sn.Tags {
			if tag == add {
				continue nextTag
			}
		}
		sn.Tags = append(sn.Tags, add)
		changed = true
	}
	return
}

// RemoveTags removes the given tags from the snapshots tags and
// returns true if any changes were made.
func (sn *Snapshot) RemoveTags(removeTags []string) (changed bool) {
	for _, remove := range removeTags {
		for i, tag := range sn.Tags {
			if tag == remove {
				// https://github.com/golang/go/wiki/SliceTricks
				sn.Tags[i] = sn.Tags[len(sn.Tags)-1]
				sn.Tags[len(sn.Tags)-1] = ""
				sn.Tags = sn.Tags[:len(sn.Tags)-1]

				changed = true
				break
			}
		}
	}
	return
}

// hasTag reports whether the snapshot carries the given tag.
func (sn *Snapshot) hasTag(tag string) bool {
	for _, snTag := range sn.Tags {
		if tag == snTag {
			return true
		}
	}
	return false
}

// HasTags returns true if the snapshot has all the tags in l.
func (sn *Snapshot) HasTags(l []string) bool {
	for _, tag := range l {
		// the empty tag matches untagged snapshots
		if tag == "" && len(sn.Tags) == 0 {
			return true
		}
		if !sn.hasTag(tag) {
			return false
		}
	}

	return true
}

// HasTagList returns true if either
//   - the snapshot satisfies at least one TagList, so there is a TagList in l
//     for which all tags are included in sn, or
//   - l is empty
func (sn *Snapshot) HasTagList(l []TagList) bool {
	debug.Log("testing snapshot with tags %v against list: %v", sn.Tags, l)

	if len(l) == 0 {
		return true
	}

	for _, tags := range l {
		if sn.HasTags(tags) {
			debug.Log("  snapshot satisfies %v %v", tags, l)
			return true
		}
	}

	return false
}

// HasPaths returns true if the snapshot has all of the paths.
func (sn *Snapshot) HasPaths(paths []string) bool {
	m := make(map[string]struct{}, len(sn.Paths))
	for _, snPath := range sn.Paths {
		m[snPath] = struct{}{}
	}
	for _, path := range paths {
		if _, ok := m[path]; !ok {
			return false
		}
	}

	return true
}

// HasHostname returns true if either
// - the snapshot hostname is in the list of the given hostnames, or
// - the list of given hostnames is empty
func (sn *Snapshot) HasHostname(hostnames []string) bool {
	if len(hostnames) == 0 {
		return true
	}

	for _, hostname := range hostnames {
		if sn.Hostname == hostname {
			return true
		}
	}

	return false
}

// Snapshots is a list of snapshots.
type Snapshots []*Snapshot

// Len returns the number of snapshots in sn.
func (sn Snapshots) Len() int {
	return len(sn)
}

// Less returns true iff the ith snapshot has been made after the jth.
func (sn Snapshots) Less(i, j int) bool {
	return sn[i].Time.After(sn[j].Time)
}

// Swap exchanges the two snapshots.
func (sn Snapshots) Swap(i, j int) {
	sn[i], sn[j] = sn[j], sn[i]
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/node.go
internal/data/node.go
package data import ( "bytes" "encoding/json" "fmt" "os" "reflect" "strconv" "strings" "sync" "time" "unicode/utf8" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/debug" ) // ExtendedAttribute is a tuple storing the xattr name and value for various filesystems. type ExtendedAttribute struct { Name string `json:"name"` Value []byte `json:"value"` } // GenericAttributeType can be used for OS specific functionalities by defining specific types // in node.go to be used by the specific node_xx files. // OS specific attribute types should follow the convention <OS>Attributes. // GenericAttributeTypes should follow the convention <OS specific attribute type>.<attribute name> // The attributes in OS specific attribute types must be pointers as we want to distinguish nil values // and not create GenericAttributes for them. type GenericAttributeType string // OSType is the type created to represent each specific OS type OSType string const ( // When new GenericAttributeType are defined, they must be added in the init function as well. // Below are windows specific attributes. // TypeCreationTime is the GenericAttributeType used for storing creation time for windows files within the generic attributes map. TypeCreationTime GenericAttributeType = "windows.creation_time" // TypeFileAttributes is the GenericAttributeType used for storing file attributes for windows files within the generic attributes map. TypeFileAttributes GenericAttributeType = "windows.file_attributes" // TypeSecurityDescriptor is the GenericAttributeType used for storing security descriptors including owner, group, discretionary access control list (DACL), system access control list (SACL)) for windows files within the generic attributes map. TypeSecurityDescriptor GenericAttributeType = "windows.security_descriptor" // Generic Attributes for other OS types should be defined here. ) // init is called when the package is initialized. 
Any new GenericAttributeTypes being created must be added here as well. func init() { storeGenericAttributeType(TypeCreationTime, TypeFileAttributes, TypeSecurityDescriptor) } // genericAttributesForOS maintains a map of known genericAttributesForOS to the OSType var genericAttributesForOS = map[GenericAttributeType]OSType{} // storeGenericAttributeType adds and entry in genericAttributesForOS map func storeGenericAttributeType(attributeTypes ...GenericAttributeType) { for _, attributeType := range attributeTypes { // Get the OS attribute type from the GenericAttributeType osAttributeName := strings.Split(string(attributeType), ".")[0] genericAttributesForOS[attributeType] = OSType(osAttributeName) } } type NodeType string var ( NodeTypeFile = NodeType("file") NodeTypeDir = NodeType("dir") NodeTypeSymlink = NodeType("symlink") NodeTypeDev = NodeType("dev") NodeTypeCharDev = NodeType("chardev") NodeTypeFifo = NodeType("fifo") NodeTypeSocket = NodeType("socket") NodeTypeIrregular = NodeType("irregular") NodeTypeInvalid = NodeType("") ) // Node is a file, directory or other item in a backup. type Node struct { Name string `json:"name"` Type NodeType `json:"type"` Mode os.FileMode `json:"mode,omitempty"` ModTime time.Time `json:"mtime,omitempty"` AccessTime time.Time `json:"atime,omitempty"` ChangeTime time.Time `json:"ctime,omitempty"` UID uint32 `json:"uid"` GID uint32 `json:"gid"` User string `json:"user,omitempty"` Group string `json:"group,omitempty"` Inode uint64 `json:"inode,omitempty"` DeviceID uint64 `json:"device_id,omitempty"` // device id of the file, stat.st_dev, only stored for hardlinks Size uint64 `json:"size,omitempty"` Links uint64 `json:"links,omitempty"` LinkTarget string `json:"linktarget,omitempty"` // implicitly base64-encoded field. Only used while encoding, `linktarget_raw` will overwrite LinkTarget if present. 
// This allows storing arbitrary byte-sequences, which are possible as symlink targets on unix systems, // as LinkTarget without breaking backwards-compatibility. // Must only be set of the linktarget cannot be encoded as valid utf8. LinkTargetRaw []byte `json:"linktarget_raw,omitempty"` ExtendedAttributes []ExtendedAttribute `json:"extended_attributes,omitempty"` GenericAttributes map[GenericAttributeType]json.RawMessage `json:"generic_attributes,omitempty"` Device uint64 `json:"device,omitempty"` // in case of Type == "dev", stat.st_rdev Content restic.IDs `json:"content"` Subtree *restic.ID `json:"subtree,omitempty"` Error string `json:"error,omitempty"` Path string `json:"-"` } // Nodes is a slice of nodes that can be sorted. type Nodes []*Node func (n Nodes) Len() int { return len(n) } func (n Nodes) Less(i, j int) bool { return n[i].Name < n[j].Name } func (n Nodes) Swap(i, j int) { n[i], n[j] = n[j], n[i] } func (node Node) String() string { var mode os.FileMode switch node.Type { case NodeTypeFile: mode = 0 case NodeTypeDir: mode = os.ModeDir case NodeTypeSymlink: mode = os.ModeSymlink case NodeTypeDev: mode = os.ModeDevice case NodeTypeCharDev: mode = os.ModeDevice | os.ModeCharDevice case NodeTypeFifo: mode = os.ModeNamedPipe case NodeTypeSocket: mode = os.ModeSocket } return fmt.Sprintf("%s %5d %5d %6d %s %s", mode|node.Mode, node.UID, node.GID, node.Size, node.ModTime, node.Name) } // GetExtendedAttribute gets the extended attribute. func (node Node) GetExtendedAttribute(a string) []byte { for _, attr := range node.ExtendedAttributes { if attr.Name == a { return attr.Value } } return nil } // FixTime returns a time.Time which can safely be used to marshal as JSON. If // the timestamp is earlier than year zero, the year is set to zero. In the same // way, if the year is larger than 9999, the year is set to 9999. Other than // the year nothing is changed. 
func FixTime(t time.Time) time.Time { switch { case t.Year() < 0000: return t.AddDate(-t.Year(), 0, 0) case t.Year() > 9999: return t.AddDate(-(t.Year() - 9999), 0, 0) default: return t } } func (node Node) MarshalJSON() ([]byte, error) { // make sure invalid timestamps for mtime and atime are converted to // something we can actually save. node.ModTime = FixTime(node.ModTime) node.AccessTime = FixTime(node.AccessTime) node.ChangeTime = FixTime(node.ChangeTime) type nodeJSON Node nj := nodeJSON(node) name := strconv.Quote(node.Name) nj.Name = name[1 : len(name)-1] if nj.LinkTargetRaw != nil { panic("LinkTargetRaw must not be set manually") } if !utf8.ValidString(node.LinkTarget) { // store raw bytes if invalid utf8 nj.LinkTargetRaw = []byte(node.LinkTarget) } return json.Marshal(nj) } func (node *Node) UnmarshalJSON(data []byte) error { type nodeJSON Node nj := (*nodeJSON)(node) err := json.Unmarshal(data, nj) if err != nil { return errors.Wrap(err, "Unmarshal") } nj.Name, err = strconv.Unquote(`"` + nj.Name + `"`) if err != nil { return errors.Wrap(err, "Unquote") } if nj.LinkTargetRaw != nil { nj.LinkTarget = string(nj.LinkTargetRaw) nj.LinkTargetRaw = nil } return nil } func (node Node) Equals(other Node) bool { if node.Name != other.Name { return false } if node.Type != other.Type { return false } if node.Mode != other.Mode { return false } if !node.ModTime.Equal(other.ModTime) { return false } if !node.AccessTime.Equal(other.AccessTime) { return false } if !node.ChangeTime.Equal(other.ChangeTime) { return false } if node.UID != other.UID { return false } if node.GID != other.GID { return false } if node.User != other.User { return false } if node.Group != other.Group { return false } if node.Inode != other.Inode { return false } if node.DeviceID != other.DeviceID { return false } if node.Size != other.Size { return false } if node.Links != other.Links { return false } if node.LinkTarget != other.LinkTarget { return false } if node.Device != other.Device { 
return false } if !node.sameContent(other) { return false } if !node.sameExtendedAttributes(other) { return false } if !node.sameGenericAttributes(other) { return false } if node.Subtree != nil { if other.Subtree == nil { return false } if !node.Subtree.Equal(*other.Subtree) { return false } } else { if other.Subtree != nil { return false } } if node.Error != other.Error { return false } return true } func (node Node) sameContent(other Node) bool { if node.Content == nil { return other.Content == nil } if other.Content == nil { return false } if len(node.Content) != len(other.Content) { return false } for i := 0; i < len(node.Content); i++ { if !node.Content[i].Equal(other.Content[i]) { return false } } return true } func (node Node) sameExtendedAttributes(other Node) bool { ln := len(node.ExtendedAttributes) lo := len(other.ExtendedAttributes) if ln != lo { return false } else if ln == 0 { // This means lo is also of length 0 return true } // build a set of all attributes that node has type mapvalue struct { value []byte present bool } attributes := make(map[string]mapvalue) for _, attr := range node.ExtendedAttributes { attributes[attr.Name] = mapvalue{value: attr.Value} } for _, attr := range other.ExtendedAttributes { v, ok := attributes[attr.Name] if !ok { // extended attribute is not set for node debug.Log("other node has attribute %v, which is not present in node", attr.Name) return false } if !bytes.Equal(v.value, attr.Value) { // attribute has different value debug.Log("attribute %v has different value", attr.Name) return false } // remember that this attribute is present in other. 
v.present = true attributes[attr.Name] = v } // check for attributes that are not present in other for name, v := range attributes { if !v.present { debug.Log("attribute %v not present in other node", name) return false } } return true } func (node Node) sameGenericAttributes(other Node) bool { return deepEqual(node.GenericAttributes, other.GenericAttributes) } func deepEqual(map1, map2 map[GenericAttributeType]json.RawMessage) bool { // Check if the maps have the same number of keys if len(map1) != len(map2) { return false } // Iterate over each key-value pair in map1 for key, value1 := range map1 { // Check if the key exists in map2 value2, ok := map2[key] if !ok { return false } // Check if the JSON.RawMessage values are equal byte by byte if !bytes.Equal(value1, value2) { return false } } return true } // HandleUnknownGenericAttributesFound is used for handling and distinguing between scenarios related to future versions and cross-OS repositories func HandleUnknownGenericAttributesFound(unknownAttribs []GenericAttributeType, warn func(msg string)) { for _, unknownAttrib := range unknownAttribs { handleUnknownGenericAttributeFound(unknownAttrib, warn) } } // handleUnknownGenericAttributeFound is used for handling and distinguing between scenarios related to future versions and cross-OS repositories func handleUnknownGenericAttributeFound(genericAttributeType GenericAttributeType, warn func(msg string)) { if checkGenericAttributeNameNotHandledAndPut(genericAttributeType) { // Print the unique error only once for a given execution os, exists := genericAttributesForOS[genericAttributeType] if exists { // If genericAttributesForOS contains an entry but we still got here, it means the specific node_xx.go for the current OS did not handle it and the repository may have been originally created on a different OS. // The fact that node.go knows about the attribute, means it is not a new attribute. This may be a common situation if a repo is used across OSs. 
debug.Log("Ignoring a generic attribute found in the repository: %s which may not be compatible with your OS. Compatible OS: %s", genericAttributeType, os) } else { // If genericAttributesForOS in node.go does not know about this attribute, then the repository may have been created by a newer version which has a newer GenericAttributeType. warn(fmt.Sprintf("Found an unrecognized generic attribute in the repository: %s. You may need to upgrade to latest version of restic.", genericAttributeType)) } } } // HandleAllUnknownGenericAttributesFound performs validations for all generic attributes of a node. // This is not used on windows currently because windows has handling for generic attributes. func HandleAllUnknownGenericAttributesFound(attributes map[GenericAttributeType]json.RawMessage, warn func(msg string)) error { for name := range attributes { handleUnknownGenericAttributeFound(name, warn) } return nil } var unknownGenericAttributesHandlingHistory sync.Map // checkGenericAttributeNameNotHandledAndPut checks if the GenericAttributeType name entry // already exists and puts it in the map if not. func checkGenericAttributeNameNotHandledAndPut(value GenericAttributeType) bool { // If Key doesn't exist, put the value and return true because it is not already handled _, exists := unknownGenericAttributesHandlingHistory.LoadOrStore(value, "") // Key exists, then it is already handled so return false return !exists } // The functions below are common helper functions which can be used for generic attributes support // across different OS. 
// GenericAttributesToOSAttrs gets the os specific attribute from the generic attribute using reflection.
// Each map key is matched against the struct fields' `generic` tags (prefixed
// with keyPrefix); unmatched keys are returned in unknownAttribs rather than
// treated as an error.
func GenericAttributesToOSAttrs(attrs map[GenericAttributeType]json.RawMessage, attributeType reflect.Type, attributeValuePtr *reflect.Value, keyPrefix string) (unknownAttribs []GenericAttributeType, err error) {
	attributeValue := *attributeValuePtr

	for key, rawMsg := range attrs {
		found := false
		for i := 0; i < attributeType.NumField(); i++ {
			if getFQKeyByIndex(attributeType, i, keyPrefix) == key {
				found = true
				fieldValue := attributeValue.Field(i)
				// For directly supported types, use json.Unmarshal directly
				if err := json.Unmarshal(rawMsg, fieldValue.Addr().Interface()); err != nil {
					return unknownAttribs, errors.Wrap(err, "Unmarshal")
				}
				break
			}
		}
		if !found {
			unknownAttribs = append(unknownAttribs, key)
		}
	}
	return unknownAttribs, nil
}

// getFQKey gets the fully qualified key for the field, built from the key
// prefix and the field's `generic` struct tag.
func getFQKey(field reflect.StructField, keyPrefix string) GenericAttributeType {
	return GenericAttributeType(fmt.Sprintf("%s.%s", keyPrefix, field.Tag.Get("generic")))
}

// getFQKeyByIndex gets the fully qualified key for the field index
func getFQKeyByIndex(attributeType reflect.Type, index int, keyPrefix string) GenericAttributeType {
	return getFQKey(attributeType.Field(index), keyPrefix)
}

// OSAttrsToGenericAttributes gets the generic attribute from the os specific attribute using reflection.
// Nil fields are skipped, so only attributes actually present on the OS struct
// end up in the returned map. Fields are assumed to be of nilable kinds
// (pointer/slice/map); a non-nilable field would make IsNil panic.
func OSAttrsToGenericAttributes(attributeType reflect.Type, attributeValuePtr *reflect.Value, keyPrefix string) (attrs map[GenericAttributeType]json.RawMessage, err error) {
	attributeValue := *attributeValuePtr
	attrs = make(map[GenericAttributeType]json.RawMessage)

	// Iterate over the fields of the struct
	for i := 0; i < attributeType.NumField(); i++ {
		field := attributeType.Field(i)

		// Get the field value using reflection
		fieldValue := attributeValue.FieldByName(field.Name)

		// Check if the field is nil
		if fieldValue.IsNil() {
			// If it's nil, skip this field
			continue
		}

		// Marshal the field value into a json.RawMessage
		var fieldBytes []byte
		if fieldBytes, err = json.Marshal(fieldValue.Interface()); err != nil {
			return attrs, errors.Wrap(err, "Marshal")
		}

		// Insert the field into the map
		attrs[getFQKey(field, keyPrefix)] = fieldBytes
	}
	return attrs, nil
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/snapshot_policy.go
internal/data/snapshot_policy.go
package data

import (
	"fmt"
	"reflect"
	"sort"
	"strings"
	"time"

	"github.com/restic/restic/internal/debug"
)

// ExpirePolicy configures which snapshots should be automatically removed.
type ExpirePolicy struct {
	Last          int       // keep the last n snapshots
	Hourly        int       // keep the last n hourly snapshots
	Daily         int       // keep the last n daily snapshots
	Weekly        int       // keep the last n weekly snapshots
	Monthly       int       // keep the last n monthly snapshots
	Yearly        int       // keep the last n yearly snapshots
	Within        Duration  // keep snapshots made within this duration
	WithinHourly  Duration  // keep hourly snapshots made within this duration
	WithinDaily   Duration  // keep daily snapshots made within this duration
	WithinWeekly  Duration  // keep weekly snapshots made within this duration
	WithinMonthly Duration  // keep monthly snapshots made within this duration
	WithinYearly  Duration  // keep yearly snapshots made within this duration
	Tags          []TagList // keep all snapshots that include at least one of the tag lists.
}

// String returns a human-readable description of the policy, e.g.
// "keep 3 daily snapshots". A count of -1 means "keep all" of that kind.
func (e ExpirePolicy) String() (s string) {
	var keeps []string
	var keepw []string

	for _, opt := range []struct {
		count int
		descr string
	}{
		{e.Last, "latest"},
		{e.Hourly, "hourly"},
		{e.Daily, "daily"},
		{e.Weekly, "weekly"},
		{e.Monthly, "monthly"},
		{e.Yearly, "yearly"},
	} {
		if opt.count > 0 {
			keeps = append(keeps, fmt.Sprintf("%d %s", opt.count, opt.descr))
		} else if opt.count == -1 {
			keeps = append(keeps, fmt.Sprintf("all %s", opt.descr))
		}
	}

	if !e.WithinHourly.Zero() {
		keepw = append(keepw, fmt.Sprintf("hourly snapshots within %v", e.WithinHourly))
	}

	if !e.WithinDaily.Zero() {
		keepw = append(keepw, fmt.Sprintf("daily snapshots within %v", e.WithinDaily))
	}

	if !e.WithinWeekly.Zero() {
		keepw = append(keepw, fmt.Sprintf("weekly snapshots within %v", e.WithinWeekly))
	}

	if !e.WithinMonthly.Zero() {
		keepw = append(keepw, fmt.Sprintf("monthly snapshots within %v", e.WithinMonthly))
	}

	if !e.WithinYearly.Zero() {
		keepw = append(keepw, fmt.Sprintf("yearly snapshots within %v", e.WithinYearly))
	}

	if len(keeps) > 0 {
		s = fmt.Sprintf("%s snapshots", strings.Join(keeps, ", "))
	}

	if len(keepw) > 0 {
		if s != "" {
			s += ", "
		}
		s += strings.Join(keepw, ", ")
	}

	if len(e.Tags) > 0 {
		if s != "" {
			s += " and "
		}
		s += fmt.Sprintf("all snapshots with tags %s", e.Tags)
	}

	if !e.Within.Zero() {
		if s != "" {
			s += " and "
		}
		s += fmt.Sprintf("all snapshots within %s of the newest", e.Within)
	}

	if s == "" {
		s = "remove"
	} else {
		s = "keep " + s
	}

	return s
}

// Empty returns true if no policy has been configured (all values zero).
func (e ExpirePolicy) Empty() bool {
	if len(e.Tags) != 0 {
		return false
	}

	// compare against a zero policy; Tags is copied over so only the scalar
	// fields and durations are compared by DeepEqual
	empty := ExpirePolicy{Tags: e.Tags}
	return reflect.DeepEqual(e, empty)
}

// ymdh returns an integer in the form YYYYMMDDHH.
func ymdh(d time.Time, _ int) int {
	return d.Year()*1000000 + int(d.Month())*10000 + d.Day()*100 + d.Hour()
}

// ymd returns an integer in the form YYYYMMDD.
func ymd(d time.Time, _ int) int {
	return d.Year()*10000 + int(d.Month())*100 + d.Day()
}

// yw returns an integer in the form YYYYWW, where WW is the week number.
func yw(d time.Time, _ int) int {
	year, week := d.ISOWeek()
	return year*100 + week
}

// ym returns an integer in the form YYYYMM.
func ym(d time.Time, _ int) int {
	return d.Year()*100 + int(d.Month())
}

// y returns the year of d.
func y(d time.Time, _ int) int {
	return d.Year()
}

// always returns a unique number for d.
func always(_ time.Time, nr int) int {
	return nr
}

// findLatestTimestamp returns the time stamp for the latest (newest) snapshot,
// for use with policies based on time relative to latest.
// Snapshots with timestamps in the future are ignored; if all snapshots lie in
// the future the zero time is returned.
func findLatestTimestamp(list Snapshots) time.Time {
	if len(list) == 0 {
		panic("list of snapshots is empty")
	}

	var latest time.Time
	now := time.Now()
	for _, sn := range list {
		// Find the latest snapshot in the list
		// The latest snapshot must, however, not be in the future.
		if sn.Time.After(latest) && sn.Time.Before(now) {
			latest = sn.Time
		}
	}

	return latest
}

// KeepReason specifies why a particular snapshot was kept, and the counters at
// that point in the policy evaluation.
type KeepReason struct {
	Snapshot *Snapshot `json:"snapshot"`

	// description text which criteria match, e.g. "daily", "monthly"
	Matches []string `json:"matches"`

	// the counters after evaluating the current snapshot
	Counters struct {
		Last    int `json:"last,omitempty"`
		Hourly  int `json:"hourly,omitempty"`
		Daily   int `json:"daily,omitempty"`
		Weekly  int `json:"weekly,omitempty"`
		Monthly int `json:"monthly,omitempty"`
		Yearly  int `json:"yearly,omitempty"`
	} `json:"counters"`
}

// ApplyPolicy returns the snapshots from list that are to be kept and removed
// according to the policy p. list is sorted in the process. reasons contains
// the reasons to keep each snapshot, it is in the same order as keep.
func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reasons []KeepReason) {
	// sort newest snapshots first
	sort.Stable(list)

	if len(list) == 0 {
		return list, nil, nil
	}

	// These buckets are for keeping last n snapshots of given type.
	// Last tracks the previous bucket value so that only the first snapshot
	// falling into a new bucket is kept.
	var buckets = [6]struct {
		Count  int
		bucker func(d time.Time, nr int) int
		Last   int
		reason string
	}{
		{p.Last, always, -1, "last snapshot"},
		{p.Hourly, ymdh, -1, "hourly snapshot"},
		{p.Daily, ymd, -1, "daily snapshot"},
		{p.Weekly, yw, -1, "weekly snapshot"},
		{p.Monthly, ym, -1, "monthly snapshot"},
		{p.Yearly, y, -1, "yearly snapshot"},
	}

	// These buckets are for keeping snapshots of given type within duration
	var bucketsWithin = [5]struct {
		Within Duration
		bucker func(d time.Time, nr int) int
		Last   int
		reason string
	}{
		{p.WithinHourly, ymdh, -1, "hourly within"},
		{p.WithinDaily, ymd, -1, "daily within"},
		{p.WithinWeekly, yw, -1, "weekly within"},
		{p.WithinMonthly, ym, -1, "monthly within"},
		{p.WithinYearly, y, -1, "yearly within"},
	}

	latest := findLatestTimestamp(list)

	for nr, cur := range list {
		var keepSnap bool
		var keepSnapReasons []string

		// Tags are handled specially as they are not counted.
		for _, l := range p.Tags {
			if cur.HasTags(l) {
				keepSnap = true
				keepSnapReasons = append(keepSnapReasons, fmt.Sprintf("has tags %v", l))
			}
		}

		// If the timestamp of the snapshot is within the range, then keep it.
		if !p.Within.Zero() {
			t := latest.AddDate(-p.Within.Years, -p.Within.Months, -p.Within.Days).Add(time.Hour * time.Duration(-p.Within.Hours))
			if cur.Time.After(t) {
				keepSnap = true
				keepSnapReasons = append(keepSnapReasons, fmt.Sprintf("within %v", p.Within))
			}
		}

		// Now update the other buckets and see if they have some counts left.
		for i, b := range buckets {
			// -1 means "keep all"
			if b.Count > 0 || b.Count == -1 {
				val := b.bucker(cur.Time, nr)
				// also keep the oldest snapshot if the bucket has some counts left. This maximizes
				// the history length kept while some counts are left.
				if val != b.Last || nr == len(list)-1 {
					debug.Log("keep %v %v, bucker %v, val %v\n", cur.Time, cur.id.Str(), i, val)
					keepSnap = true
					if val == b.Last && nr == len(list)-1 {
						// b is a copy; mutating b.reason only affects the
						// reason string appended below, not the bucket state
						b.reason = fmt.Sprintf("oldest %v", b.reason)
					}
					buckets[i].Last = val
					if buckets[i].Count > 0 {
						buckets[i].Count--
					}
					keepSnapReasons = append(keepSnapReasons, b.reason)
				}
			}
		}

		// If the timestamp is within range, and the snapshot is an hourly/daily/weekly/monthly/yearly snapshot, then keep it
		for i, b := range bucketsWithin {
			if !b.Within.Zero() {
				t := latest.AddDate(-b.Within.Years, -b.Within.Months, -b.Within.Days).Add(time.Hour * time.Duration(-b.Within.Hours))

				if cur.Time.After(t) {
					val := b.bucker(cur.Time, nr)
					if val != b.Last || nr == len(list)-1 {
						debug.Log("keep %v, time %v, ID %v, bucker %v, val %v %v\n", b.reason, cur.Time, cur.id.Str(), i, val, b.Last)
						keepSnap = true
						if val == b.Last && nr == len(list)-1 {
							b.reason = fmt.Sprintf("oldest %v", b.reason)
						}
						bucketsWithin[i].Last = val
						keepSnapReasons = append(keepSnapReasons, fmt.Sprintf("%v %v", b.reason, b.Within))
					}
				}
			}
		}

		if keepSnap {
			keep = append(keep, cur)
			kr := KeepReason{
				Snapshot: cur,
				Matches:  keepSnapReasons,
			}
			kr.Counters.Last = buckets[0].Count
			kr.Counters.Hourly = buckets[1].Count
			kr.Counters.Daily = buckets[2].Count
			kr.Counters.Weekly = buckets[3].Count
			kr.Counters.Monthly = buckets[4].Count
			kr.Counters.Yearly = buckets[5].Count
			reasons = append(reasons, kr)
		} else {
			remove = append(remove, cur)
		}
	}

	return keep, remove, reasons
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/testing_test.go
internal/data/testing_test.go
package data_test import ( "context" "testing" "time" "github.com/restic/restic/internal/checker" "github.com/restic/restic/internal/data" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" ) var testSnapshotTime = time.Unix(1460289341, 207401672) const ( testCreateSnapshots = 3 testDepth = 2 ) func TestCreateSnapshot(t *testing.T) { repo := repository.TestRepository(t) for i := 0; i < testCreateSnapshots; i++ { data.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth) } snapshots, err := data.TestLoadAllSnapshots(context.TODO(), repo, restic.NewIDSet()) if err != nil { t.Fatal(err) } if len(snapshots) != testCreateSnapshots { t.Fatalf("got %d snapshots, expected %d", len(snapshots), 1) } sn := snapshots[0] if sn.Time.Before(testSnapshotTime) || sn.Time.After(testSnapshotTime.Add(testCreateSnapshots*time.Second)) { t.Fatalf("timestamp %v is outside of the allowed time range", sn.Time) } if sn.Tree == nil { t.Fatalf("tree id is nil") } if sn.Tree.IsNull() { t.Fatalf("snapshot has zero tree ID") } checker.TestCheckRepo(t, repo) } func BenchmarkTestCreateSnapshot(t *testing.B) { repo := repository.TestRepository(t) t.ResetTimer() for i := 0; i < t.N; i++ { data.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth) } }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/tag_list_test.go
internal/data/tag_list_test.go
package data import ( "testing" rtest "github.com/restic/restic/internal/test" ) func TestTagLists_Flatten(t *testing.T) { tests := []struct { name string l TagLists want TagList }{ { name: "4 tags", l: TagLists{ TagList{ "tag1", "tag2", }, TagList{ "tag3", "tag4", }, }, want: TagList{"tag1", "tag2", "tag3", "tag4"}, }, { name: "No tags", l: nil, want: TagList{}, }, { name: "Empty tags", l: TagLists{[]string{""}}, want: TagList{}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := tt.l.Flatten() rtest.Equals(t, got, tt.want) }) } }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/node_test.go
internal/data/node_test.go
package data

import (
	"encoding/json"
	"testing"
	"time"

	"github.com/restic/restic/internal/test"
)

// parseTimeNano parses an RFC3339Nano timestamp and fails the test on error.
func parseTimeNano(t testing.TB, s string) time.Time {
	// 2006-01-02T15:04:05.999999999Z07:00
	ts, err := time.Parse(time.RFC3339Nano, s)
	if err != nil {
		t.Fatalf("error parsing %q: %v", s, err)
	}
	return ts
}

// TestFixTime checks that FixTime clamps out-of-range years into [0000, 9999]
// and leaves valid timestamps unchanged.
func TestFixTime(t *testing.T) {
	// load UTC location
	utc, err := time.LoadLocation("")
	if err != nil {
		t.Fatal(err)
	}

	var tests = []struct {
		src, want time.Time
	}{
		{
			// valid time stays untouched
			src:  parseTimeNano(t, "2006-01-02T15:04:05.999999999+07:00"),
			want: parseTimeNano(t, "2006-01-02T15:04:05.999999999+07:00"),
		},
		{
			src:  time.Date(0, 1, 2, 3, 4, 5, 6, utc),
			want: parseTimeNano(t, "0000-01-02T03:04:05.000000006+00:00"),
		},
		{
			// negative year is clamped up to year 0
			src:  time.Date(-2, 1, 2, 3, 4, 5, 6, utc),
			want: parseTimeNano(t, "0000-01-02T03:04:05.000000006+00:00"),
		},
		{
			// far-future year is clamped down to 9999
			src:  time.Date(12345, 1, 2, 3, 4, 5, 6, utc),
			want: parseTimeNano(t, "9999-01-02T03:04:05.000000006+00:00"),
		},
		{
			src:  time.Date(9999, 1, 2, 3, 4, 5, 6, utc),
			want: parseTimeNano(t, "9999-01-02T03:04:05.000000006+00:00"),
		},
	}

	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			res := FixTime(test.src)
			if !res.Equal(test.want) {
				t.Fatalf("wrong result for %v, want:\n %v\ngot:\n %v", test.src, test.want, res)
			}
		})
	}
}

// TestSymlinkSerialization checks that link targets survive a JSON round trip,
// including targets that are not valid UTF-8.
func TestSymlinkSerialization(t *testing.T) {
	for _, link := range []string{
		"válîd \t Üñi¢òde \n śẗŕinǵ",
		string([]byte{0, 1, 2, 0xfa, 0xfb, 0xfc}),
	} {
		n := Node{
			LinkTarget: link,
		}
		ser, err := json.Marshal(n)
		test.OK(t, err)
		var n2 Node
		err = json.Unmarshal(ser, &n2)
		test.OK(t, err)
		t.Logf("serialized %q\n", string(ser))

		test.Equals(t, n.LinkTarget, n2.LinkTarget)
	}
}

// TestSymlinkSerializationFormat pins the on-disk JSON format for link
// targets, including the linktarget_raw fallback used for invalid UTF-8.
func TestSymlinkSerializationFormat(t *testing.T) {
	for _, d := range []struct {
		ser        string
		linkTarget string
	}{
		{`{"linktarget":"test"}`, "test"},
		{`{"linktarget":"\u0000\u0001\u0002\ufffd\ufffd\ufffd","linktarget_raw":"AAEC+vv8"}`, string([]byte{0, 1, 2, 0xfa, 0xfb, 0xfc})},
	} {
		var n2 Node
		err := json.Unmarshal([]byte(d.ser), &n2)
		test.OK(t, err)
		test.Equals(t, d.linkTarget, n2.LinkTarget)
		test.Assert(t, n2.LinkTargetRaw == nil, "quoted link target is just a helper field and must be unset after decoding")
	}
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/find_test.go
internal/data/find_test.go
package data_test

import (
	"bufio"
	"context"
	"encoding/json"
	"flag"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"testing"
	"time"

	"github.com/restic/restic/internal/data"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/repository"
	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/test"
	"github.com/restic/restic/internal/ui/progress"
)

// loadIDSet reads a golden file of line-delimited JSON blob handles. A missing
// file is not fatal (it yields an empty set) so new golden files can be
// created with -update.
func loadIDSet(t testing.TB, filename string) restic.BlobSet {
	f, err := os.Open(filename)
	if err != nil {
		t.Logf("unable to open golden file %v: %v", filename, err)
		return restic.NewBlobSet()
	}

	sc := bufio.NewScanner(f)

	blobs := restic.NewBlobSet()
	for sc.Scan() {
		var h restic.BlobHandle
		err := json.Unmarshal([]byte(sc.Text()), &h)
		if err != nil {
			t.Errorf("file %v contained invalid blob: %#v", filename, err)
			continue
		}

		blobs.Insert(h)
	}

	if err = f.Close(); err != nil {
		t.Errorf("closing file %v failed with error %v", filename, err)
	}

	return blobs
}

// saveIDSet writes the blob set to filename in sorted order so that golden
// files are deterministic.
func saveIDSet(t testing.TB, filename string, s restic.BlobSet) {
	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		t.Fatalf("unable to update golden file %v: %v", filename, err)
		return
	}

	var hs restic.BlobHandles
	for h := range s {
		hs = append(hs, h)
	}

	sort.Sort(hs)

	enc := json.NewEncoder(f)
	for _, h := range hs {
		err = enc.Encode(h)
		if err != nil {
			t.Fatalf("Encode() returned error: %v", err)
		}
	}

	if err = f.Close(); err != nil {
		t.Fatalf("close file %v returned error: %v", filename, err)
	}
}

var updateGoldenFiles = flag.Bool("update", false, "update golden files in testdata/")

const (
	findTestSnapshots = 3
	findTestDepth     = 2
)

var findTestTime = time.Unix(1469960361, 23)

// TestFindUsedBlobs checks FindUsedBlobs per snapshot against golden files and
// verifies the progress counter advances once per processed tree root.
func TestFindUsedBlobs(t *testing.T) {
	repo := repository.TestRepository(t)

	var snapshots []*data.Snapshot
	for i := 0; i < findTestSnapshots; i++ {
		sn := data.TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth)
		t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str())
		snapshots = append(snapshots, sn)
	}

	p := progress.NewCounter(time.Second, findTestSnapshots, func(value uint64, total uint64, runtime time.Duration, final bool) {})
	defer p.Done()

	for i, sn := range snapshots {
		usedBlobs := restic.NewBlobSet()
		err := data.FindUsedBlobs(context.TODO(), repo, restic.IDs{*sn.Tree}, usedBlobs, p)
		if err != nil {
			t.Errorf("FindUsedBlobs returned error: %v", err)
			continue
		}

		if len(usedBlobs) == 0 {
			t.Errorf("FindUsedBlobs returned an empty set")
			continue
		}

		v, _ := p.Get()
		// test.Equals takes the expected value first; the original call passed
		// (actual, expected) which garbles failure messages.
		test.Equals(t, uint64(i+1), v)

		goldenFilename := filepath.Join("testdata", fmt.Sprintf("used_blobs_snapshot%d", i))
		want := loadIDSet(t, goldenFilename)

		if !want.Equals(usedBlobs) {
			t.Errorf("snapshot %d: wrong list of blobs returned:\n missing blobs: %v\n extra blobs: %v", i, want.Sub(usedBlobs), usedBlobs.Sub(want))
		}

		if *updateGoldenFiles {
			saveIDSet(t, goldenFilename, usedBlobs)
		}
	}
}

// TestMultiFindUsedBlobs runs FindUsedBlobs over all snapshot trees at once,
// twice, to check that duplicate tree roots are still counted by the progress
// counter while the blob set stays stable.
func TestMultiFindUsedBlobs(t *testing.T) {
	repo := repository.TestRepository(t)

	var snapshotTrees restic.IDs
	for i := 0; i < findTestSnapshots; i++ {
		sn := data.TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth)
		t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str())
		snapshotTrees = append(snapshotTrees, *sn.Tree)
	}

	want := restic.NewBlobSet()
	for i := range snapshotTrees {
		goldenFilename := filepath.Join("testdata", fmt.Sprintf("used_blobs_snapshot%d", i))
		want.Merge(loadIDSet(t, goldenFilename))
	}

	p := progress.NewCounter(time.Second, findTestSnapshots, func(value uint64, total uint64, runtime time.Duration, final bool) {})
	defer p.Done()

	// run twice to check progress bar handling of duplicate tree roots
	usedBlobs := restic.NewBlobSet()
	for i := 1; i < 3; i++ {
		err := data.FindUsedBlobs(context.TODO(), repo, snapshotTrees, usedBlobs, p)
		test.OK(t, err)
		v, _ := p.Get()
		// expected value first, see above
		test.Equals(t, uint64(i*len(snapshotTrees)), v)

		if !want.Equals(usedBlobs) {
			t.Errorf("wrong list of blobs returned:\n missing blobs: %v\n extra blobs: %v", want.Sub(usedBlobs), usedBlobs.Sub(want))
		}
	}
}

// ForbiddenRepo fails every load; used to prove already-seen trees are skipped.
type ForbiddenRepo struct{}

func (r ForbiddenRepo) LoadBlob(context.Context, restic.BlobType, restic.ID, []byte) ([]byte, error) {
	return nil, errors.New("should not be called")
}

func (r ForbiddenRepo) LookupBlobSize(_ restic.BlobType, _ restic.ID) (uint, bool) {
	return 0, false
}

func (r ForbiddenRepo) Connections() uint {
	return 2
}

// TestFindUsedBlobsSkipsSeenBlobs checks that trees already present in the
// blob set are never loaded again.
func TestFindUsedBlobsSkipsSeenBlobs(t *testing.T) {
	repo := repository.TestRepository(t)

	snapshot := data.TestCreateSnapshot(t, repo, findTestTime, findTestDepth)
	t.Logf("snapshot %v saved, tree %v", snapshot.ID().Str(), snapshot.Tree.Str())

	usedBlobs := restic.NewBlobSet()
	err := data.FindUsedBlobs(context.TODO(), repo, restic.IDs{*snapshot.Tree}, usedBlobs, nil)
	if err != nil {
		t.Fatalf("FindUsedBlobs returned error: %v", err)
	}

	// second run must not touch the repository at all
	err = data.FindUsedBlobs(context.TODO(), ForbiddenRepo{}, restic.IDs{*snapshot.Tree}, usedBlobs, nil)
	if err != nil {
		t.Fatalf("FindUsedBlobs returned error: %v", err)
	}
}

// BenchmarkFindUsedBlobs measures a full blob-discovery pass over one snapshot.
func BenchmarkFindUsedBlobs(b *testing.B) {
	repo := repository.TestRepository(b)
	sn := data.TestCreateSnapshot(b, repo, findTestTime, findTestDepth)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		blobs := restic.NewBlobSet()
		err := data.FindUsedBlobs(context.TODO(), repo, restic.IDs{*sn.Tree}, blobs, nil)
		if err != nil {
			b.Error(err)
		}
		b.Logf("found %v blobs", len(blobs))
	}
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/find.go
internal/data/find.go
package data import ( "context" "sync" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/progress" "golang.org/x/sync/errgroup" ) // FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data // blobs) to the set blobs. Already seen tree blobs will not be visited again. func FindUsedBlobs(ctx context.Context, repo restic.Loader, treeIDs restic.IDs, blobs restic.FindBlobSet, p *progress.Counter) error { var lock sync.Mutex wg, ctx := errgroup.WithContext(ctx) treeStream := StreamTrees(ctx, wg, repo, treeIDs, func(treeID restic.ID) bool { // locking is necessary the goroutine below concurrently adds data blobs lock.Lock() h := restic.BlobHandle{ID: treeID, Type: restic.TreeBlob} blobReferenced := blobs.Has(h) // noop if already referenced blobs.Insert(h) lock.Unlock() return blobReferenced }, p) wg.Go(func() error { for tree := range treeStream { if tree.Error != nil { return tree.Error } lock.Lock() for _, node := range tree.Nodes { switch node.Type { case NodeTypeFile: for _, blob := range node.Content { blobs.Insert(restic.BlobHandle{ID: blob, Type: restic.DataBlob}) } } } lock.Unlock() } return nil }) return wg.Wait() }
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/snapshot_find_test.go
internal/data/snapshot_find_test.go
package data_test

import (
	"context"
	"testing"

	"github.com/restic/restic/internal/data"
	"github.com/restic/restic/internal/repository"
	"github.com/restic/restic/internal/test"
)

// TestFindLatestSnapshot checks that FindLatest("latest") picks the snapshot
// with the newest timestamp.
func TestFindLatestSnapshot(t *testing.T) {
	repo := repository.TestRepository(t)
	data.TestCreateSnapshot(t, repo, parseTimeUTC("2015-05-05 05:05:05"), 1)
	data.TestCreateSnapshot(t, repo, parseTimeUTC("2017-07-07 07:07:07"), 1)
	latestSnapshot := data.TestCreateSnapshot(t, repo, parseTimeUTC("2019-09-09 09:09:09"), 1)

	f := data.SnapshotFilter{Hosts: []string{"foo"}}
	sn, _, err := f.FindLatest(context.TODO(), repo, repo, "latest")
	if err != nil {
		t.Fatalf("FindLatest returned error: %v", err)
	}

	if *sn.ID() != *latestSnapshot.ID() {
		t.Errorf("FindLatest returned wrong snapshot ID: %v", *sn.ID())
	}
}

// TestFindLatestSnapshotWithMaxTimestamp checks that TimestampLimit excludes
// snapshots newer than the limit.
func TestFindLatestSnapshotWithMaxTimestamp(t *testing.T) {
	repo := repository.TestRepository(t)
	data.TestCreateSnapshot(t, repo, parseTimeUTC("2015-05-05 05:05:05"), 1)
	desiredSnapshot := data.TestCreateSnapshot(t, repo, parseTimeUTC("2017-07-07 07:07:07"), 1)
	data.TestCreateSnapshot(t, repo, parseTimeUTC("2019-09-09 09:09:09"), 1)

	sn, _, err := (&data.SnapshotFilter{
		Hosts:          []string{"foo"},
		TimestampLimit: parseTimeUTC("2018-08-08 08:08:08"),
	}).FindLatest(context.TODO(), repo, repo, "latest")
	if err != nil {
		t.Fatalf("FindLatest returned error: %v", err)
	}

	if *sn.ID() != *desiredSnapshot.ID() {
		t.Errorf("FindLatest returned wrong snapshot ID: %v", *sn.ID())
	}
}

// TestFindLatestWithSubpath checks the <snapshot>:<subfolder> query syntax for
// "latest", short IDs and full IDs.
func TestFindLatestWithSubpath(t *testing.T) {
	repo := repository.TestRepository(t)
	data.TestCreateSnapshot(t, repo, parseTimeUTC("2015-05-05 05:05:05"), 1)
	desiredSnapshot := data.TestCreateSnapshot(t, repo, parseTimeUTC("2017-07-07 07:07:07"), 1)

	for _, exp := range []struct {
		query     string
		subfolder string
	}{
		{"latest", ""},
		{"latest:subfolder", "subfolder"},
		{desiredSnapshot.ID().Str(), ""},
		{desiredSnapshot.ID().Str() + ":subfolder", "subfolder"},
		{desiredSnapshot.ID().String(), ""},
		{desiredSnapshot.ID().String() + ":subfolder", "subfolder"},
	} {
		t.Run("", func(t *testing.T) {
			sn, subfolder, err := (&data.SnapshotFilter{}).FindLatest(context.TODO(), repo, repo, exp.query)
			if err != nil {
				t.Fatalf("FindLatest returned error: %v", err)
			}

			test.Assert(t, *sn.ID() == *desiredSnapshot.ID(), "FindLatest returned wrong snapshot ID: %v", *sn.ID())
			test.Assert(t, subfolder == exp.subfolder, "FindLatest returned wrong path in snapshot: %v", subfolder)
		})
	}
}

// TestFindAllSubpathError checks that FindAll rejects the
// <snapshot>:<subfolder> syntax with ErrInvalidSnapshotSyntax for every entry.
func TestFindAllSubpathError(t *testing.T) {
	repo := repository.TestRepository(t)
	desiredSnapshot := data.TestCreateSnapshot(t, repo, parseTimeUTC("2017-07-07 07:07:07"), 1)

	count := 0
	test.OK(t, (&data.SnapshotFilter{}).FindAll(context.TODO(), repo, repo, []string{"latest:subfolder", desiredSnapshot.ID().Str() + ":subfolder"}, func(id string, sn *data.Snapshot, err error) error {
		if err == data.ErrInvalidSnapshotSyntax {
			count++
			return nil
		}
		return err
	}))
	test.Assert(t, count == 2, "unexpected number of subfolder errors: %v, wanted %v", count, 2)
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false
restic/restic
https://github.com/restic/restic/blob/9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59/internal/data/snapshot_find.go
internal/data/snapshot_find.go
package data

import (
	"context"
	"fmt"
	"path/filepath"
	"strings"
	"time"

	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/restic"
)

// ErrNoSnapshotFound is returned when no snapshot for the given criteria could be found.
var ErrNoSnapshotFound = errors.New("no snapshot found")

// A SnapshotFilter denotes a set of snapshots based on hosts, tags and paths.
type SnapshotFilter struct {
	_ struct{} // Force naming fields in literals.

	Hosts []string
	Tags  TagLists
	Paths []string
	// Match snapshots from before this timestamp. Zero for no limit.
	TimestampLimit time.Time
}

// Empty reports whether no host/tag/path criteria are set.
// Note that TimestampLimit is intentionally not considered here.
func (f *SnapshotFilter) Empty() bool {
	return len(f.Hosts)+len(f.Tags)+len(f.Paths) == 0
}

// matches reports whether sn satisfies all host, tag and path criteria.
func (f *SnapshotFilter) matches(sn *Snapshot) bool {
	return sn.HasHostname(f.Hosts) && sn.HasTagList(f.Tags) && sn.HasPaths(f.Paths)
}

// findLatest finds the latest snapshot with optional target/directory,
// tags, hostname, and timestamp filters.
// As a side effect, f.Paths is normalized to absolute, cleaned paths.
func (f *SnapshotFilter) findLatest(ctx context.Context, be restic.Lister, loader restic.LoaderUnpacked) (*Snapshot, error) {

	var err error
	absTargets := make([]string, 0, len(f.Paths))
	for _, target := range f.Paths {
		if !filepath.IsAbs(target) {
			target, err = filepath.Abs(target)
			if err != nil {
				return nil, errors.Wrap(err, "Abs")
			}
		}
		absTargets = append(absTargets, filepath.Clean(target))
	}
	f.Paths = absTargets

	var latest *Snapshot

	err = ForAllSnapshots(ctx, be, loader, nil, func(id restic.ID, snapshot *Snapshot, err error) error {
		if err != nil {
			return errors.Errorf("Error loading snapshot %v: %v", id.Str(), err)
		}

		if !f.TimestampLimit.IsZero() && snapshot.Time.After(f.TimestampLimit) {
			return nil
		}

		if latest != nil && snapshot.Time.Before(latest.Time) {
			return nil
		}

		if !f.matches(snapshot) {
			return nil
		}

		latest = snapshot
		return nil
	})

	if err != nil {
		return nil, err
	}

	if latest == nil {
		return nil, ErrNoSnapshotFound
	}

	return latest, nil
}

// splitSnapshotID splits a "<snapshot>:<subfolder>" query at the first colon;
// subfolder is empty when no colon is present.
func splitSnapshotID(s string) (id, subfolder string) {
	id, subfolder, _ = strings.Cut(s, ":")
	return
}

// FindSnapshot takes a string and tries to find a snapshot whose ID matches
// the string as closely as possible.
func FindSnapshot(ctx context.Context, be restic.Lister, loader restic.LoaderUnpacked, s string) (*Snapshot, string, error) {
	s, subfolder := splitSnapshotID(s)

	// no need to list snapshots if `s` is already a full id
	id, err := restic.ParseID(s)
	if err != nil {
		// find snapshot id with prefix
		id, err = restic.Find(ctx, be, restic.SnapshotFile, s)
		if err != nil {
			return nil, "", err
		}
	}
	sn, err := LoadSnapshot(ctx, loader, id)
	return sn, subfolder, err
}

// FindLatest returns either the latest of a filtered list of all snapshots
// or a snapshot specified by `snapshotID`.
func (f *SnapshotFilter) FindLatest(ctx context.Context, be restic.Lister, loader restic.LoaderUnpacked, snapshotID string) (*Snapshot, string, error) {
	id, subfolder := splitSnapshotID(snapshotID)
	if id == "latest" {
		sn, err := f.findLatest(ctx, be, loader)
		if err == ErrNoSnapshotFound {
			// enrich the error with the filter criteria for a useful message
			err = fmt.Errorf("snapshot filter (Paths:%v Tags:%v Hosts:%v): %w", f.Paths, f.Tags, f.Hosts, err)
		}
		return sn, subfolder, err
	}
	return FindSnapshot(ctx, be, loader, snapshotID)
}

// SnapshotFindCb is invoked once per found snapshot (or per lookup error).
type SnapshotFindCb func(string, *Snapshot, error) error

var ErrInvalidSnapshotSyntax = errors.New("<snapshot>:<subfolder> syntax not allowed")

// FindAll yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots.
// Explicit IDs are deduplicated; "latest" is resolved at most once using the
// filter; the subfolder syntax is rejected with ErrInvalidSnapshotSyntax.
func (f *SnapshotFilter) FindAll(ctx context.Context, be restic.Lister, loader restic.LoaderUnpacked, snapshotIDs []string, fn SnapshotFindCb) error {
	if len(snapshotIDs) != 0 {
		var err error
		usedFilter := false

		ids := restic.NewIDSet()
		// Process all snapshot IDs given as arguments.
		for _, s := range snapshotIDs {
			if ctx.Err() != nil {
				return ctx.Err()
			}
			var sn *Snapshot
			if s == "latest" {
				if usedFilter {
					continue
				}

				usedFilter = true

				sn, err = f.findLatest(ctx, be, loader)
				if err == ErrNoSnapshotFound {
					err = errors.Errorf("no snapshot matched given filter (Paths:%v Tags:%v Hosts:%v)", f.Paths, f.Tags, f.Hosts)
				}
				if sn != nil {
					ids.Insert(*sn.ID())
				}
			} else if strings.HasPrefix(s, "latest:") {
				err = ErrInvalidSnapshotSyntax
			} else {
				var subfolder string
				sn, subfolder, err = FindSnapshot(ctx, be, loader, s)
				if err == nil && subfolder != "" {
					err = ErrInvalidSnapshotSyntax
				} else if err == nil {
					if ids.Has(*sn.ID()) {
						continue
					}

					ids.Insert(*sn.ID())
					s = sn.ID().String()
				}
			}
			err = fn(s, sn, err)
			if err != nil {
				return err
			}
		}

		// Give the user some indication their filters are not used.
		if !usedFilter && !f.Empty() {
			return fn("filters", nil, errors.Errorf("explicit snapshot ids are given"))
		}
		return nil
	}

	return ForAllSnapshots(ctx, be, loader, nil, func(id restic.ID, sn *Snapshot, err error) error {
		if err == nil && !f.matches(sn) {
			return nil
		}

		return fn(id.String(), sn, err)
	})
}
go
BSD-2-Clause
9e2d60e28c662ee6e3a3d4f19e9f2d560abf9a59
2026-01-07T08:36:32.238827Z
false