repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/test/test.go
cmd/test/test.go
// Package test provides the test command.
package test

import (
	"github.com/rclone/rclone/cmd"

	"github.com/spf13/cobra"
)

// init registers the "test" parent command with rclone's root command.
// The individual test subcommands (memory, histogram, ...) attach
// themselves to Command from their own init functions.
func init() {
	cmd.Root.AddCommand(Command)
}

// Command definition for cobra
//
// Command is exported so sibling packages under cmd/test can call
// Command.AddCommand to register their subcommands.
var Command = &cobra.Command{
	Use:   "test <subcommand>",
	Short: `Run a test command`,
	Long: `Rclone test is used to run test commands. Select which test command you want with the subcommand, eg ` + "```console" + ` rclone test memory remote: ` + "```" + ` Each subcommand has its own options which you can see in their help. **NB** Be careful running these commands, they may do strange things so reading their documentation first is recommended.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.55",
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/test/histogram/histogram.go
cmd/test/histogram/histogram.go
// Package histogram provides the histogram test command. package histogram import ( "context" "encoding/json" "fmt" "os" "path" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/test" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/walk" "github.com/spf13/cobra" ) func init() { test.Command.AddCommand(commandDefinition) } var commandDefinition = &cobra.Command{ Use: "histogram [remote:path]", Short: `Makes a histogram of file name characters.`, Long: `This command outputs JSON which shows the histogram of characters used in filenames in the remote:path specified. The data doesn't contain any identifying information but is useful for the rclone developers when developing filename compression.`, Annotations: map[string]string{ "versionIntroduced": "v1.55", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) f := cmd.NewFsDir(args) ctx := context.Background() ci := fs.GetConfig(ctx) cmd.Run(false, false, command, func() error { var hist [256]int64 err := walk.ListR(ctx, f, "", false, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { for _, entry := range entries { base := path.Base(entry.Remote()) for i := range base { hist[base[i]]++ } } return nil }) if err != nil { return err } enc := json.NewEncoder(os.Stdout) // enc.SetIndent("", "\t") err = enc.Encode(&hist) if err != nil { return err } fmt.Println() return nil }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/test/memory/memory.go
cmd/test/memory/memory.go
// Package memory provides the memory test command. package memory import ( "context" "runtime" "sync" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/test" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) func init() { test.Command.AddCommand(commandDefinition) } var commandDefinition = &cobra.Command{ Use: "memory remote:path", Short: `Load all the objects at remote:path into memory and report memory stats.`, Annotations: map[string]string{ "versionIntroduced": "v1.55", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) fsrc := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { ctx := context.Background() ci := fs.GetConfig(context.Background()) metadata := ci.Metadata && fsrc.Features().ReadMetadata objects, _, _, err := operations.Count(ctx, fsrc) if err != nil { return err } objs := make([]fs.Object, 0, objects) var before, after runtime.MemStats runtime.GC() runtime.ReadMemStats(&before) var mu sync.Mutex err = operations.ListFn(ctx, fsrc, func(o fs.Object) { // Read the metadata so it gets cached in the object if metadata { _, err := fs.GetMetadata(ctx, o) if err != nil { fs.Errorf(o, "Failed to read metadata: %v", err) } } mu.Lock() objs = append(objs, o) mu.Unlock() }) if err != nil { return err } runtime.GC() runtime.ReadMemStats(&after) var allocChange int64 if after.Alloc >= before.Alloc { allocChange = int64(after.Alloc - before.Alloc) } else { allocChange = -int64(before.Alloc - after.Alloc) } var sysChange int64 if after.Sys >= before.Sys { sysChange = int64(after.Sys - before.Sys) } else { sysChange = -int64(before.Sys - after.Sys) } if ci.HumanReadable { objString := fs.CountSuffix(int64(len(objs))) var usedString string if after.Alloc >= before.Alloc { usedString = fs.SizeSuffix(int64(after.Alloc - before.Alloc)).ByteUnit() } else { usedString = "-" + fs.SizeSuffix(int64(before.Alloc-after.Alloc)).ByteUnit() } avgString := 
fs.SizeSuffix(allocChange / int64(len(objs))).ByteUnit() fs.Logf(nil, "%s objects took %s, %s/object", objString, usedString, avgString) var sysBeforeString string if before.Sys <= fs.SizeSuffixMaxValue { sysBeforeString = fs.SizeSuffix(int64(before.Sys)).String() } else { sysBeforeString = ">" + fs.SizeSuffixMax.String() } var sysAfterString string if after.Sys <= fs.SizeSuffixMaxValue { sysAfterString = fs.SizeSuffix(int64(after.Sys)).ByteUnit() } else { sysAfterString = ">" + fs.SizeSuffixMax.ByteUnit() } var sysUsedString string if after.Sys >= before.Sys { sysUsedString = fs.SizeSuffix(int64(after.Sys - before.Sys)).ByteUnit() } else { sysUsedString = "-" + fs.SizeSuffix(int64(before.Sys-after.Sys)).ByteUnit() } fs.Logf(nil, "System memory changed from %s to %s a change of %s", sysBeforeString, sysAfterString, sysUsedString) } else { fs.Logf(nil, "%d objects took %d bytes, %.1f bytes/object", len(objs), allocChange, float64(allocChange)/float64(len(objs))) fs.Logf(nil, "System memory changed from %d to %d bytes a change of %d bytes", before.Sys, after.Sys, sysChange) } return nil }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/test/makefiles/speed.go
cmd/test/makefiles/speed.go
package makefiles

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path"
	"time"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/test"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/sync"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/random"
	"github.com/spf13/cobra"
)

var (
	// Flags
	testTime = fs.Duration(15 * time.Second) // target duration for each test
	fcap     = 100                           // cap on the number of files per test
	small    = fs.SizeSuffix(1024)
	medium   = fs.SizeSuffix(10 * 1024 * 1024)
	large    = fs.SizeSuffix(1024 * 1024 * 1024)
	useJSON  = false
)

// init registers the speed subcommand and its flags.
func init() {
	test.Command.AddCommand(speedCmd)
	speedFlags := speedCmd.Flags()
	flags.FVarP(speedFlags, &testTime, "test-time", "", "Length for each test to run", "")
	flags.IntVarP(speedFlags, &fcap, "file-cap", "", fcap, "Maximum number of files to use in each test", "")
	flags.FVarP(speedFlags, &small, "small", "", "Size of small files", "")
	flags.FVarP(speedFlags, &medium, "medium", "", "Size of medium files", "")
	flags.FVarP(speedFlags, &large, "large", "", "Size of large files", "")
	flags.BoolVarP(speedFlags, &useJSON, "json", "", useJSON, "Output only results in JSON format", "")
	addCommonFlags(speedFlags)
}

// logf prints progress output unless --json was requested.
func logf(text string, args ...any) {
	if !useJSON {
		fmt.Printf(text, args...)
	}
}

var speedCmd = &cobra.Command{
	Use:   "speed <remote> [flags]",
	Short: `Run a speed test to the remote`,
	// FIX: corrected typo "innaccurate" -> "inaccurate" in the help text.
	Long: `Run a speed test to the remote. This command runs a series of uploads and downloads to the remote, measuring and printing the speed of each test using varying file sizes and numbers of files. Test time can be inaccurate with small file caps and large files. As it uses the results of an initial test to determine how many files to use in each subsequent test. It is recommended to use -q flag for a simpler output. e.g.: rclone test speed remote: -q **NB** This command will create and delete files on the remote in a randomly named directory which will be automatically removed on a clean exit. You can use the --json flag to only print the results in JSON format.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.72",
	},
	RunE: func(command *cobra.Command, args []string) error {
		ctx := command.Context()
		cmd.CheckArgs(1, 1, command, args)
		commonInit()
		// Initial calibration run - its measured speeds are used to size
		// the subsequent tests so they run for about --test-time.
		size := fs.SizeSuffix(1024 * 1024)
		logf("Running initial test for 4 files of size %v\n", size)
		stats, err := speedTest(ctx, 4, size, args[0])
		if err != nil {
			return fmt.Errorf("speed test failed: %w", err)
		}
		var results []*Stats
		// main tests
		logf("\nTest Time: %v, File cap: %d\n", testTime, fcap)
		for _, size := range []fs.SizeSuffix{small, medium, large} {
			// Number of files that should take about --test-time at the
			// calibrated speed, bounded below by the slower direction.
			numberOfFilesUpload := int((float64(stats.Upload.Speed) * time.Duration(testTime).Seconds()) / float64(size))
			numberOfFilesDownload := int((float64(stats.Download.Speed) * time.Duration(testTime).Seconds()) / float64(size))
			numberOfFiles := min(numberOfFilesUpload, numberOfFilesDownload)
			logf("\nNumber of files for upload and download: %v\n", numberOfFiles)
			if numberOfFiles < 1 {
				logf("Skipping test for file size %v as calculated number of files is 0\n", size)
				continue
			} else if numberOfFiles > fcap {
				numberOfFiles = fcap
				logf("Capping test for file size %v to %v files\n", size, fcap)
			}
			logf("Running test for %d files of size %v\n", numberOfFiles, size)
			s, err := speedTest(ctx, numberOfFiles, size, args[0])
			if err != nil {
				return fmt.Errorf("speed test failed: %w", err)
			}
			results = append(results, s)
		}
		if useJSON {
			b, err := json.MarshalIndent(results, "", " ")
			if err != nil {
				return fmt.Errorf("failed to marshal results to JSON: %w", err)
			}
			fmt.Println(string(b))
		}
		return nil
	},
}

// Stats of a speed test
type Stats struct {
	Size          fs.SizeSuffix
	NumberOfFiles int
	Upload        TestResult
	Download      TestResult
}

// TestResult of a speed test operation
type TestResult struct {
	Bytes    int64
	Duration time.Duration
	Speed    fs.SizeSuffix
}

// measure times f, records the bytes transferred, duration and speed of
// the run in tr, and logs a one line summary.
func measure(desc string, f func() error, size fs.SizeSuffix, numberOfFiles int, tr *TestResult) error {
	start := time.Now()
	err := f()
	dt := time.Since(start)
	if err != nil {
		return err
	}
	tr.Duration = dt
	tr.Bytes = int64(size) * int64(numberOfFiles)
	tr.Speed = fs.SizeSuffix(float64(tr.Bytes) / dt.Seconds())
	logf("%-20s: %vB in %v at %vB/s\n", desc, tr.Bytes, dt.Round(time.Millisecond), tr.Speed)
	return err
}

// speedTest creates numberOfFiles files of the given size locally,
// uploads them to a randomly named temporary directory on the remote,
// downloads them again and checks they round-tripped intact, returning
// timing stats for the upload and download.
//
// The temporary local and remote directories are cleaned up on exit via
// atexit handlers; aErr is deliberately left non-nil so the OnError
// cleanups always fire, even on a clean return.
func speedTest(ctx context.Context, numberOfFiles int, size fs.SizeSuffix, remote string) (*Stats, error) {
	stats := Stats{
		Size:          size,
		NumberOfFiles: numberOfFiles,
	}
	tempDirName := "rclone-speed-test-" + random.String(8)
	tempDirPath := path.Join(remote, tempDirName)
	fremote := cmd.NewFsDir([]string{tempDirPath})
	aErr := io.EOF
	defer atexit.OnError(&aErr, func() {
		err := operations.Purge(ctx, fremote, "")
		if err != nil {
			fs.Debugf(fremote, "Failed to remove temp dir %q: %v", tempDirPath, err)
		}
	})()
	flocalDir, err := os.MkdirTemp("", "rclone-speedtest-local-")
	if err != nil {
		return nil, fmt.Errorf("failed to create local temp dir: %w", err)
	}
	defer atexit.OnError(&aErr, func() { _ = os.RemoveAll(flocalDir) })()
	flocal, err := cache.Get(ctx, flocalDir)
	if err != nil {
		return nil, fmt.Errorf("failed to create local fs: %w", err)
	}
	fdownloadDir, err := os.MkdirTemp("", "rclone-speedtest-download-")
	if err != nil {
		return nil, fmt.Errorf("failed to create download temp dir: %w", err)
	}
	defer atexit.OnError(&aErr, func() { _ = os.RemoveAll(fdownloadDir) })()
	fdownload, err := cache.Get(ctx, fdownloadDir)
	if err != nil {
		return nil, fmt.Errorf("failed to create download fs: %w", err)
	}
	// make the largest amount of files we will need
	files := make([]string, numberOfFiles)
	for i := range files {
		files[i] = path.Join(flocalDir, fmt.Sprintf("file%03d-%v.bin", i, size))
	}
	makefiles(size, files)
	// upload files
	err = measure("Upload", func() error {
		return sync.CopyDir(ctx, fremote, flocal, false)
	}, size, numberOfFiles, &stats.Upload)
	if err != nil {
		return nil, fmt.Errorf("failed to Copy to remote: %w", err)
	}
	// download files
	err = measure("Download", func() error {
		return sync.CopyDir(ctx, fdownload, fremote, false)
	}, size, numberOfFiles, &stats.Download)
	if err != nil {
		return nil, fmt.Errorf("failed to Copy from remote: %w", err)
	}
	// check files
	opt := operations.CheckOpt{
		Fsrc:   flocal,
		Fdst:   fdownload,
		OneWay: false,
	}
	logf("Checking file integrity\n")
	err = operations.CheckDownload(ctx, &opt)
	if err != nil {
		return nil, fmt.Errorf("failed to check redownloaded files were identical: %w", err)
	}
	return &stats, nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/test/makefiles/makefiles.go
cmd/test/makefiles/makefiles.go
// Package makefiles builds a directory structure with the required
// number of files in of the required size.
package makefiles

import (
	"io"
	"math"
	"math/rand"
	"os"
	"path/filepath"
	"time"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/test"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/lib/file"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/lib/readers"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

var (
	// Flags
	numberOfFiles            = 1000
	averageFilesPerDirectory = 10
	maxDepth                 = 10
	minFileSize              = fs.SizeSuffix(0)
	maxFileSize              = fs.SizeSuffix(100)
	minFileNameLength        = 4
	maxFileNameLength        = 12
	flat                     = false
	seed                     = int64(1)
	zero                     = false
	sparse                   = false
	ascii                    = false
	pattern                  = false
	chargen                  = false
	// Globals
	randSource          *rand.Rand // seeded RNG shared by all generation - set in commonInit
	source              io.Reader  // file content source selected by commonInit
	directoriesToCreate int        // target number of directories for makefilesCmd
	totalDirectories    int        // number of directories created so far
	fileNames           = map[string]struct{}{} // keep a note of which file name we've used already
)

// init registers the makefiles and makefile subcommands and their flags.
func init() {
	test.Command.AddCommand(makefilesCmd)
	makefilesFlags := makefilesCmd.Flags()
	flags.IntVarP(makefilesFlags, &numberOfFiles, "files", "", numberOfFiles, "Number of files to create", "")
	flags.IntVarP(makefilesFlags, &averageFilesPerDirectory, "files-per-directory", "", averageFilesPerDirectory, "Average number of files per directory", "")
	flags.IntVarP(makefilesFlags, &maxDepth, "max-depth", "", maxDepth, "Maximum depth of directory hierarchy", "")
	flags.FVarP(makefilesFlags, &minFileSize, "min-file-size", "", "Minimum size of file to create", "")
	flags.FVarP(makefilesFlags, &maxFileSize, "max-file-size", "", "Maximum size of files to create", "")
	flags.IntVarP(makefilesFlags, &minFileNameLength, "min-name-length", "", minFileNameLength, "Minimum size of file names", "")
	flags.IntVarP(makefilesFlags, &maxFileNameLength, "max-name-length", "", maxFileNameLength, "Maximum size of file names", "")
	flags.BoolVarP(makefilesFlags, &flat, "flat", "", false, "If set create all files in the root directory", "")
	test.Command.AddCommand(makefileCmd)
	makefileFlags := makefileCmd.Flags()
	addCommonFlags(makefilesFlags)
	addCommonFlags(makefileFlags)
}

// Common flags for makefiles and makefile
func addCommonFlags(f *pflag.FlagSet) {
	flags.Int64VarP(f, &seed, "seed", "", seed, "Seed for the random number generator (0 for random)", "")
	flags.BoolVarP(f, &zero, "zero", "", zero, "Fill files with ASCII 0x00", "")
	flags.BoolVarP(f, &sparse, "sparse", "", sparse, "Make the files sparse (appear to be filled with ASCII 0x00)", "")
	flags.BoolVarP(f, &ascii, "ascii", "", ascii, "Fill files with random ASCII printable bytes only", "")
	flags.BoolVarP(f, &pattern, "pattern", "", pattern, "Fill files with a periodic pattern", "")
	flags.BoolVarP(f, &chargen, "chargen", "", chargen, "Fill files with a ASCII chargen pattern", "")
}

var makefilesCmd = &cobra.Command{
	Use:   "makefiles <dir>",
	Short: `Make a random file hierarchy in a directory`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.55",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		commonInit()
		outputDirectory := args[0]
		directoriesToCreate = numberOfFiles / averageFilesPerDirectory
		if flat {
			// --flat: no subdirectories, everything goes in the root.
			directoriesToCreate = 0
		}
		averageSize := (minFileSize + maxFileSize) / 2
		start := time.Now()
		fs.Logf(nil, "Creating %d files of average size %v in %d directories in %q.", numberOfFiles, averageSize, directoriesToCreate, outputDirectory)
		root := &dir{name: outputDirectory, depth: 1}
		// Keep growing the tree until enough directories exist.
		for totalDirectories < directoriesToCreate {
			root.createDirectories()
		}
		dirs := root.list("", []string{})
		totalBytes := int64(0)
		// Scatter the files over the directory tree with random sizes
		// in [minFileSize, maxFileSize).
		for range numberOfFiles {
			dir := dirs[randSource.Intn(len(dirs))]
			size := int64(minFileSize)
			if maxFileSize > minFileSize {
				size += randSource.Int63n(int64(maxFileSize - minFileSize))
			}
			writeFile(dir, fileName(), size)
			totalBytes += size
		}
		dt := time.Since(start)
		fs.Logf(nil, "Written %vB in %v at %vB/s.", fs.SizeSuffix(totalBytes), dt.Round(time.Millisecond), fs.SizeSuffix((totalBytes*int64(time.Second))/int64(dt)))
	},
}

var makefileCmd = &cobra.Command{
	Use:   "makefile <size> [<file>]+ [flags]",
	Short: `Make files with random contents of the size given`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.59",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1e6, command, args)
		commonInit()
		var size fs.SizeSuffix
		err := size.Set(args[0])
		if err != nil {
			fs.Fatalf(nil, "Failed to parse size %q: %v", args[0], err)
		}
		makefiles(size, args[1:])
	},
}

// makefiles creates each of the given files with the given size,
// logging overall throughput when done.
func makefiles(size fs.SizeSuffix, files []string) {
	start := time.Now()
	fs.Logf(nil, "Creating %d files of size %v.", len(files), size)
	totalBytes := int64(0)
	for _, filePath := range files {
		dir := filepath.Dir(filePath)
		name := filepath.Base(filePath)
		writeFile(dir, name, int64(size))
		totalBytes += int64(size)
	}
	dt := time.Since(start)
	fs.Logf(nil, "Written %vB in %v at %vB/s.", fs.SizeSuffix(totalBytes), dt.Round(time.Millisecond), fs.SizeSuffix((totalBytes*int64(time.Second))/int64(dt)))
}

// bool2int returns 1 for true and 0 for false - used to count how many
// mutually exclusive content flags are set.
func bool2int(b bool) int {
	if b {
		return 1
	}
	return 0
}

// common initialisation for makefiles and makefile
func commonInit() {
	if seed == 0 {
		// --seed 0 means pick a fresh seed and report it.
		seed = time.Now().UnixNano()
		fs.Logf(nil, "Using random seed = %d", seed)
	}
	randSource = rand.New(rand.NewSource(seed))
	// The content flags are mutually exclusive - at most one may be set.
	if bool2int(zero)+bool2int(sparse)+bool2int(ascii)+bool2int(pattern)+bool2int(chargen) > 1 {
		fs.Fatal(nil, "Can only supply one of --zero, --sparse, --ascii, --pattern or --chargen")
	}
	// Choose the file content source based on the flags.
	switch {
	case zero, sparse:
		source = zeroReader{}
	case ascii:
		source = asciiReader{}
	case pattern:
		source = readers.NewPatternReader(math.MaxInt64)
	case chargen:
		source = &chargenReader{}
	default:
		source = randSource
	}
	if minFileSize > maxFileSize {
		maxFileSize = minFileSize
	}
}

type zeroReader struct{}

// Read a chunk of zeroes
func (zeroReader) Read(p []byte) (n int, err error) {
	for i := range p {
		p[i] = 0
	}
	return len(p), nil
}

type asciiReader struct{}

// Read a chunk of printable ASCII characters
func (asciiReader) Read(p []byte) (n int, err error) {
	n, err = randSource.Read(p)
	// Map each random byte into the printable range 0x20..0x7E.
	for i := range p[:n] {
		p[i] = (p[i] % (0x7F - 0x20)) + 0x20
	}
	return n, err
}

type chargenReader struct {
	start   byte // offset from startChar to start line with
	written byte // chars in line so far
}

// Read a chunk of printable ASCII characters in chargen format
func (r *chargenReader) Read(p []byte) (n int, err error) {
	const (
		startChar    = 0x20 // ' '
		endChar      = 0x7E // '~' inclusive
		charsPerLine = 72
	)
	for i := range p {
		if r.written >= charsPerLine {
			// End of line: emit a newline and rotate the starting
			// character for the next line.
			r.start++
			if r.start > endChar-startChar {
				r.start = 0
			}
			p[i] = '\n'
			r.written = 0
		} else {
			c := r.start + r.written + startChar
			if c > endChar {
				// Wrap from '~' back round to ' '.
				c -= endChar - startChar + 1
			}
			p[i] = c
			r.written++
		}
	}
	return len(p), err
}

// fileName creates a unique random file or directory name
func fileName() (name string) {
	// Loop until a name not seen before is generated.
	for {
		length := randSource.Intn(maxFileNameLength-minFileNameLength) + minFileNameLength
		name = random.StringFn(length, randSource)
		if _, found := fileNames[name]; !found {
			break
		}
	}
	fileNames[name] = struct{}{}
	return name
}

// dir is a directory in the directory hierarchy being built up
type dir struct {
	name     string
	depth    int
	children []*dir
	parent   *dir
}

// Create a random directory hierarchy under d
func (d *dir) createDirectories() {
	for totalDirectories < directoriesToCreate {
		newDir := &dir{
			name:   fileName(),
			depth:  d.depth + 1,
			parent: d,
		}
		d.children = append(d.children, newDir)
		totalDirectories++
		// Randomly recurse deeper (1 in 4, depth permitting), return to
		// the parent (1 in 4), or keep adding siblings (2 in 4).
		switch randSource.Intn(4) {
		case 0:
			if d.depth < maxDepth {
				newDir.createDirectories()
			}
		case 1:
			return
		}
	}
}

// list the directory hierarchy
func (d *dir) list(path string, output []string) []string {
	dirPath := filepath.Join(path, d.name)
	output = append(output, dirPath)
	for _, subDir := range d.children {
		output = subDir.list(dirPath, output)
	}
	return output
}

// writeFile writes a random file at dir/name
func writeFile(dir, name string, size int64) {
	err := file.MkdirAll(dir, 0777)
	if err != nil {
		fs.Fatalf(nil, "Failed to make directory %q: %v", dir, err)
	}
	path := filepath.Join(dir, name)
	fd, err := os.Create(path)
	if err != nil {
		fs.Fatalf(nil, "Failed to open file %q: %v", path, err)
	}
	if sparse {
		// Sparse files only have their length set - no data is written.
		err = fd.Truncate(size)
	} else {
		_, err = io.CopyN(fd, source, size)
	}
	if err != nil {
		fs.Fatalf(nil, "Failed to write %v bytes to file %q: %v", size, path, err)
	}
	err = fd.Close()
	if err != nil {
		fs.Fatalf(nil, "Failed to close file %q: %v", path, err)
	}
	fs.Infof(path, "Written file size %v", fs.SizeSuffix(size))
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/test/changenotify/changenotify.go
cmd/test/changenotify/changenotify.go
// Package changenotify tests rclone's changenotify support package changenotify import ( "context" "errors" "time" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/test" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/spf13/cobra" ) var ( pollInterval = fs.Duration(10 * time.Second) ) func init() { test.Command.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.FVarP(cmdFlags, &pollInterval, "poll-interval", "", "Time to wait between polling for changes", "") } var commandDefinition = &cobra.Command{ Use: "changenotify remote:", Short: `Log any change notify requests for the remote passed in.`, Annotations: map[string]string{ "versionIntroduced": "v1.56", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(1, 1, command, args) f := cmd.NewFsSrc(args) ctx := context.Background() // Start polling function features := f.Features() if do := features.ChangeNotify; do != nil { pollChan := make(chan time.Duration) do(ctx, changeNotify, pollChan) pollChan <- time.Duration(pollInterval) fs.Logf(nil, "Waiting for changes, polling every %v", pollInterval) } else { return errors.New("poll-interval is not supported by this remote") } select {} }, } // changeNotify invalidates the directory cache for the relativePath // passed in. // // if entryType is a directory it invalidates the parent of the directory too. func changeNotify(relativePath string, entryType fs.EntryType) { fs.Logf(nil, "%q: %v", relativePath, entryType) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/test/info/base32768.go
cmd/test/info/base32768.go
package info

// Create files with all possible base 32768 file names

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/sync"
)

// safeAlphabet lists one starting rune per block - each rune below is
// expanded into a run of 32 consecutive runes when the test files are
// created (presumably covering the full base32768 alphabet - confirm
// against the encoding's spec).
const safeAlphabet = "ƀɀɠʀҠԀڀڠݠހ߀ကႠᄀᄠᅀᆀᇠሀሠበዠጠᎠᏀᐠᑀᑠᒀᒠᓀᓠᔀᔠᕀᕠᖀᖠᗀᗠᘀᘠᙀᚠᛀកᠠᡀᣀᦀ᧠ᨠᯀᰀᴀ⇠⋀⍀⍠⎀⎠⏀␀─┠╀╠▀■◀◠☀☠♀♠⚀⚠⛀⛠✀✠❀➀➠⠀⠠⡀⡠⢀⢠⣀⣠⤀⤠⥀⥠⦠⨠⩀⪀⪠⫠⬀⬠⭀ⰀⲀⲠⳀⴀⵀ⺠⻀㇀㐀㐠㑀㑠㒀㒠㓀㓠㔀㔠㕀㕠㖀㖠㗀㗠㘀㘠㙀㙠㚀㚠㛀㛠㜀㜠㝀㝠㞀㞠㟀㟠㠀㠠㡀㡠㢀㢠㣀㣠㤀㤠㥀㥠㦀㦠㧀㧠㨀㨠㩀㩠㪀㪠㫀㫠㬀㬠㭀㭠㮀㮠㯀㯠㰀㰠㱀㱠㲀㲠㳀㳠㴀㴠㵀㵠㶀㶠㷀㷠㸀㸠㹀㹠㺀㺠㻀㻠㼀㼠㽀㽠㾀㾠㿀㿠䀀䀠䁀䁠䂀䂠䃀䃠䄀䄠䅀䅠䆀䆠䇀䇠䈀䈠䉀䉠䊀䊠䋀䋠䌀䌠䍀䍠䎀䎠䏀䏠䐀䐠䑀䑠䒀䒠䓀䓠䔀䔠䕀䕠䖀䖠䗀䗠䘀䘠䙀䙠䚀䚠䛀䛠䜀䜠䝀䝠䞀䞠䟀䟠䠀䠠䡀䡠䢀䢠䣀䣠䤀䤠䥀䥠䦀䦠䧀䧠䨀䨠䩀䩠䪀䪠䫀䫠䬀䬠䭀䭠䮀䮠䯀䯠䰀䰠䱀䱠䲀䲠䳀䳠䴀䴠䵀䵠䶀䷀䷠一丠乀习亀亠什仠伀传佀你侀侠俀俠倀倠偀偠傀傠僀僠儀儠兀兠冀冠净几刀删剀剠劀加勀勠匀匠區占厀厠叀叠吀吠呀呠咀咠哀哠唀唠啀啠喀喠嗀嗠嘀嘠噀噠嚀嚠囀因圀圠址坠垀垠埀埠堀堠塀塠墀墠壀壠夀夠奀奠妀妠姀姠娀娠婀婠媀媠嫀嫠嬀嬠孀孠宀宠寀寠尀尠局屠岀岠峀峠崀崠嵀嵠嶀嶠巀巠帀帠幀幠庀庠廀廠开张彀彠往徠忀忠怀怠恀恠悀悠惀惠愀愠慀慠憀憠懀懠戀戠所扠技抠拀拠挀挠捀捠掀掠揀揠搀搠摀摠撀撠擀擠攀攠敀敠斀斠旀无昀映晀晠暀暠曀曠最朠杀杠枀枠柀柠栀栠桀桠梀梠检棠椀椠楀楠榀榠槀槠樀樠橀橠檀檠櫀櫠欀欠歀歠殀殠毀毠氀氠汀池沀沠泀泠洀洠浀浠涀涠淀淠渀渠湀湠満溠滀滠漀漠潀潠澀澠激濠瀀瀠灀灠炀炠烀烠焀焠煀煠熀熠燀燠爀爠牀牠犀犠狀狠猀猠獀獠玀玠珀珠琀琠瑀瑠璀璠瓀瓠甀甠畀畠疀疠痀痠瘀瘠癀癠皀皠盀盠眀眠着睠瞀瞠矀矠砀砠础硠碀碠磀磠礀礠祀祠禀禠秀秠稀稠穀穠窀窠竀章笀笠筀筠简箠節篠簀簠籀籠粀粠糀糠紀素絀絠綀綠緀締縀縠繀繠纀纠绀绠缀缠罀罠羀羠翀翠耀耠聀聠肀肠胀胠脀脠腀腠膀膠臀臠舀舠艀艠芀芠苀苠茀茠荀荠莀莠菀菠萀萠葀葠蒀蒠蓀蓠蔀蔠蕀蕠薀薠藀藠蘀蘠虀虠蚀蚠蛀蛠蜀蜠蝀蝠螀螠蟀蟠蠀蠠血衠袀袠裀裠褀褠襀襠覀覠觀觠言訠詀詠誀誠諀諠謀謠譀譠讀讠诀诠谀谠豀豠貀負賀賠贀贠赀赠趀趠跀跠踀踠蹀蹠躀躠軀軠輀輠轀轠辀辠迀迠退造遀遠邀邠郀郠鄀鄠酀酠醀醠釀釠鈀鈠鉀鉠銀銠鋀鋠錀錠鍀鍠鎀鎠鏀鏠鐀鐠鑀鑠钀钠铀铠销锠镀镠門閠闀闠阀阠陀陠隀隠雀雠需霠靀靠鞀鞠韀韠頀頠顀顠颀颠飀飠餀餠饀饠馀馠駀駠騀騠驀驠骀骠髀髠鬀鬠魀魠鮀鮠鯀鯠鰀鰠鱀鱠鲀鲠鳀鳠鴀鴠鵀鵠鶀鶠鷀鷠鸀鸠鹀鹠麀麠黀黠鼀鼠齀齠龀龠ꀀꀠꁀꁠꂀꂠꃀꃠꄀꄠꅀꅠꆀꆠꇀꇠꈀꈠꉀꉠꊀꊠꋀꋠꌀꌠꍀꍠꎀꎠꏀꏠꐀꐠꑀꑠ꒠ꔀꔠꕀꕠꖀꖠꗀꗠꙀꚠꛀ꜀꜠ꝀꞀꡀ"

// checkBase32768 tests whether the remote can store and round-trip file
// names containing every character block in safeAlphabet, setting
// r.canBase32768 to true only if all files sync and check cleanly.
//
// On any failure it logs the problem and returns with canBase32768
// left false.
func (r *results) checkBase32768() {
	r.canBase32768 = false
	ctx := context.Background()
	n := 0
	dir, err := os.MkdirTemp("", "rclone-base32768-files")
	if err != nil {
		fs.Logf(nil, "Failed to make temp dir: %v", err)
		return
	}
	defer func() {
		_ = os.RemoveAll(dir)
	}()
	// Create test files
	// One file per alphabet block: its name contains the block's 32
	// consecutive runes, and its content is its own file name.
	for _, c := range safeAlphabet {
		var out strings.Builder
		for i := range rune(32) {
			out.WriteRune(c + i)
		}
		fileName := filepath.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))
		err = os.WriteFile(fileName, []byte(fileName), 0666)
		if err != nil {
			fs.Logf(nil, "write %q failed: %v", fileName, err)
			return
		}
		n++
	}
	// Make a local fs
	fLocal, err := fs.NewFs(ctx, dir)
	if err != nil {
		fs.Logf(nil, "Failed to make local fs: %v", err)
		return
	}
	testDir := "test-base32768"
	// Make a remote fs
	s := fs.ConfigStringFull(r.f)
	s = fspath.JoinRootPath(s, testDir)
	fRemote, err := fs.NewFs(ctx, s)
	if err != nil {
		fs.Logf(nil, "Failed to make remote fs: %v", err)
		return
	}
	// Remove the test directory from the remote when done.
	defer func() {
		err := operations.Purge(ctx, r.f, testDir)
		if err != nil {
			fs.Logf(nil, "Failed to purge test directory: %v", err)
			return
		}
	}()
	// Sync local to remote
	err = sync.Sync(ctx, fRemote, fLocal, false)
	if err != nil {
		fs.Logf(nil, "Failed to sync remote fs: %v", err)
		return
	}
	// Check local to remote
	err = operations.Check(ctx, &operations.CheckOpt{
		Fdst: fRemote,
		Fsrc: fLocal,
	})
	if err != nil {
		fs.Logf(nil, "Failed to check remote fs: %v", err)
		return
	}
	r.canBase32768 = true
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/test/info/info.go
cmd/test/info/info.go
// Package info provides the info test command. package info // FIXME once translations are implemented will need a no-escape // option for Put so we can make these tests work again import ( "bytes" "context" "encoding/json" "fmt" "io" "os" "path" "regexp" "sort" "strconv" "strings" "sync" "time" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/test" "github.com/rclone/rclone/cmd/test/info/internal" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/random" "github.com/spf13/cobra" ) var ( writeJSON string keepTestFiles bool checkNormalization bool checkControl bool checkLength bool checkStreaming bool checkBase32768 bool all bool uploadWait fs.Duration positionLeftRe = regexp.MustCompile(`(?s)^(.*)-position-left-([[:xdigit:]]+)$`) positionMiddleRe = regexp.MustCompile(`(?s)^position-middle-([[:xdigit:]]+)-(.*)-$`) positionRightRe = regexp.MustCompile(`(?s)^position-right-([[:xdigit:]]+)-(.*)$`) ) func init() { test.Command.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.StringVarP(cmdFlags, &writeJSON, "write-json", "", "", "Write results to file", "") flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", false, "Check UTF-8 Normalization", "") flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", false, "Check control characters", "") flags.FVarP(cmdFlags, &uploadWait, "upload-wait", "", "Wait after writing a file", "") flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", false, "Check max filename length", "") flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", false, "Check uploads with indeterminate file size", "") flags.BoolVarP(cmdFlags, &checkBase32768, "check-base32768", "", false, "Check can store all possible base32768 characters", "") flags.BoolVarP(cmdFlags, &all, "all", "", false, "Run all tests", "") 
flags.BoolVarP(cmdFlags, &keepTestFiles, "keep-test-files", "", false, "Keep test files after execution", "") } var commandDefinition = &cobra.Command{ Use: "info [remote:path]+", Short: `Discovers file name or other limitations for paths.`, Long: `Discovers what filenames and upload methods are possible to write to the paths passed in and how long they can be. It can take some time. It will write test files into the remote:path passed in. It outputs a bit of go code for each one. **NB** this can create undeletable files and other hazards - use with care!`, Annotations: map[string]string{ "versionIntroduced": "v1.55", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1e6, command, args) if !checkNormalization && !checkControl && !checkLength && !checkStreaming && !checkBase32768 && !all { fs.Fatalf(nil, "no tests selected - select a test or use --all") } if all { checkNormalization = true checkControl = true checkLength = true checkStreaming = true checkBase32768 = true } for i := range args { tempDirName := "rclone-test-info-" + random.String(8) tempDirPath := path.Join(args[i], tempDirName) f := cmd.NewFsDir([]string{tempDirPath}) fs.Infof(f, "Created temporary directory for test files: %s", tempDirPath) err := f.Mkdir(context.Background(), "") if err != nil { fs.Fatalf(nil, "couldn't create temporary directory: %v", err) } cmd.Run(false, false, command, func() error { return readInfo(context.Background(), f) }) } }, } type results struct { ctx context.Context f fs.Fs mu sync.Mutex stringNeedsEscaping map[string]internal.Position controlResults map[string]internal.ControlResult maxFileLength [4]int canWriteUnnormalized bool canReadUnnormalized bool canReadRenormalized bool canStream bool canBase32768 bool } func newResults(ctx context.Context, f fs.Fs) *results { return &results{ ctx: ctx, f: f, stringNeedsEscaping: make(map[string]internal.Position), controlResults: make(map[string]internal.ControlResult), } } // Print the results to stdout 
func (r *results) Print() { fmt.Printf("// %s\n", r.f.Name()) if checkControl { escape := []string{} for c, needsEscape := range r.stringNeedsEscaping { if needsEscape != internal.PositionNone { k := strconv.Quote(c) k = k[1 : len(k)-1] escape = append(escape, fmt.Sprintf("'%s'", k)) } } sort.Strings(escape) fmt.Printf("stringNeedsEscaping = []rune{\n") fmt.Printf("\t%s\n", strings.Join(escape, ", ")) fmt.Printf("}\n") } if checkLength { for i := range r.maxFileLength { fmt.Printf("maxFileLength = %d // for %d byte unicode characters\n", r.maxFileLength[i], i+1) } } if checkNormalization { fmt.Printf("canWriteUnnormalized = %v\n", r.canWriteUnnormalized) fmt.Printf("canReadUnnormalized = %v\n", r.canReadUnnormalized) fmt.Printf("canReadRenormalized = %v\n", r.canReadRenormalized) } if checkStreaming { fmt.Printf("canStream = %v\n", r.canStream) } if checkBase32768 { fmt.Printf("base32768isOK = %v // make sure maxFileLength for 2 byte unicode chars is the same as for 1 byte characters\n", r.canBase32768) } } // WriteJSON writes the results to a JSON file when requested func (r *results) WriteJSON() { if writeJSON == "" { return } report := internal.InfoReport{ Remote: r.f.Name(), } if checkControl { report.ControlCharacters = &r.controlResults } if checkLength { report.MaxFileLength = &r.maxFileLength[0] } if checkNormalization { report.CanWriteUnnormalized = &r.canWriteUnnormalized report.CanReadUnnormalized = &r.canReadUnnormalized report.CanReadRenormalized = &r.canReadRenormalized } if checkStreaming { report.CanStream = &r.canStream } if f, err := os.Create(writeJSON); err != nil { fs.Errorf(r.f, "Creating JSON file failed: %s", err) } else { defer fs.CheckClose(f, &err) enc := json.NewEncoder(f) enc.SetIndent("", " ") err := enc.Encode(report) if err != nil { fs.Errorf(r.f, "Writing JSON file failed: %s", err) } } fs.Infof(r.f, "Wrote JSON file: %s", writeJSON) } // writeFile writes a file with some random contents func (r *results) writeFile(path string) 
(fs.Object, error) { contents := random.String(50) src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f) obj, err := r.f.Put(r.ctx, bytes.NewBufferString(contents), src) if uploadWait > 0 { time.Sleep(time.Duration(uploadWait)) } return obj, err } // check whether normalization is enforced and check whether it is // done on the files anyway func (r *results) checkUTF8Normalization() { unnormalized := "Héroique" normalized := "Héroique" _, err := r.writeFile(unnormalized) if err != nil { r.canWriteUnnormalized = false return } r.canWriteUnnormalized = true _, err = r.f.NewObject(r.ctx, unnormalized) if err == nil { r.canReadUnnormalized = true } _, err = r.f.NewObject(r.ctx, normalized) if err == nil { r.canReadRenormalized = true } } func (r *results) checkStringPositions(k, s string) { fs.Infof(r.f, "Writing position file 0x%0X", s) positionError := internal.PositionNone res := internal.ControlResult{ WriteError: make(map[internal.Position]string, 3), GetError: make(map[internal.Position]string, 3), InList: make(map[internal.Position]internal.Presence, 3), } for _, pos := range internal.PositionList { path := "" switch pos { case internal.PositionMiddle: path = fmt.Sprintf("position-middle-%0X-%s-", s, s) case internal.PositionLeft: path = fmt.Sprintf("%s-position-left-%0X", s, s) case internal.PositionRight: path = fmt.Sprintf("position-right-%0X-%s", s, s) default: panic("invalid position: " + pos.String()) } _, writeError := r.writeFile(path) if writeError != nil { res.WriteError[pos] = writeError.Error() fs.Infof(r.f, "Writing %s position file 0x%0X Error: %s", pos.String(), s, writeError) } else { fs.Infof(r.f, "Writing %s position file 0x%0X OK", pos.String(), s) } obj, getErr := r.f.NewObject(r.ctx, path) if getErr != nil { res.GetError[pos] = getErr.Error() fs.Infof(r.f, "Getting %s position file 0x%0X Error: %s", pos.String(), s, getErr) } else { if obj.Size() != 50 { res.GetError[pos] = fmt.Sprintf("invalid size %d", 
obj.Size()) fs.Infof(r.f, "Getting %s position file 0x%0X Invalid Size: %d", pos.String(), s, obj.Size()) } else { fs.Infof(r.f, "Getting %s position file 0x%0X OK", pos.String(), s) } } if writeError != nil || getErr != nil { positionError += pos } } r.mu.Lock() r.stringNeedsEscaping[k] = positionError r.controlResults[k] = res r.mu.Unlock() } // check we can write a file with the control chars func (r *results) checkControls() { fs.Infof(r.f, "Trying to create control character file names") ci := fs.GetConfig(context.Background()) // Concurrency control tokens := make(chan struct{}, ci.Checkers) for range ci.Checkers { tokens <- struct{}{} } var wg sync.WaitGroup for i := range rune(128) { s := string(i) if i == 0 || i == '/' { // We're not even going to check NULL or / r.stringNeedsEscaping[s] = internal.PositionAll continue } wg.Add(1) go func(s string) { defer wg.Done() token := <-tokens k := s r.checkStringPositions(k, s) tokens <- token }(s) } for _, s := range []string{"\", "\u00A0", "\xBF", "\xFE"} { wg.Add(1) go func(s string) { defer wg.Done() token := <-tokens k := s r.checkStringPositions(k, s) tokens <- token }(s) } wg.Wait() r.checkControlsList() fs.Infof(r.f, "Done trying to create control character file names") } func (r *results) checkControlsList() { l, err := r.f.List(context.TODO(), "") if err != nil { fs.Errorf(r.f, "Listing control character file names failed: %s", err) return } namesMap := make(map[string]struct{}, len(l)) for _, s := range l { namesMap[path.Base(s.Remote())] = struct{}{} } for path := range namesMap { var pos internal.Position var hex, value string if g := positionLeftRe.FindStringSubmatch(path); g != nil { pos, hex, value = internal.PositionLeft, g[2], g[1] } else if g := positionMiddleRe.FindStringSubmatch(path); g != nil { pos, hex, value = internal.PositionMiddle, g[1], g[2] } else if g := positionRightRe.FindStringSubmatch(path); g != nil { pos, hex, value = internal.PositionRight, g[1], g[2] } else { fs.Infof(r.f, 
"Unknown path %q", path) continue } var hexValue []byte for ; len(hex) >= 2; hex = hex[2:] { if b, err := strconv.ParseUint(hex[:2], 16, 8); err != nil { fs.Infof(r.f, "Invalid path %q: %s", path, err) continue } else { hexValue = append(hexValue, byte(b)) } } if hex != "" { fs.Infof(r.f, "Invalid path %q", path) continue } hexStr := string(hexValue) k := hexStr switch r.controlResults[k].InList[pos] { case internal.Absent: if hexStr == value { r.controlResults[k].InList[pos] = internal.Present } else { r.controlResults[k].InList[pos] = internal.Renamed } case internal.Present: r.controlResults[k].InList[pos] = internal.Multiple case internal.Renamed: r.controlResults[k].InList[pos] = internal.Multiple } delete(namesMap, path) } if len(namesMap) > 0 { fs.Infof(r.f, "Found additional control character file names:") for name := range namesMap { fs.Infof(r.f, "%q", name) } } } // find the max file name size we can use func (r *results) findMaxLength(characterLength int) { var character rune switch characterLength { case 1: character = 'a' case 2: character = 'á' case 3: character = '世' case 4: character = '🙂' default: panic("Bad characterLength") } if characterLength != len(string(character)) { panic(fmt.Sprintf("Chose the wrong character length %q is %d not %d", character, len(string(character)), characterLength)) } const maxLen = 16 * 1024 name := make([]rune, maxLen) for i := range name { name[i] = character } // Find the first size of filename we can't write i := sort.Search(len(name), func(i int) (fail bool) { defer func() { if err := recover(); err != nil { fs.Infof(r.f, "Couldn't write file with name length %d: %v", i, err) fail = true } }() path := string(name[:i]) o, err := r.writeFile(path) if err != nil { fs.Infof(r.f, "Couldn't write file with name length %d: %v", i, err) return true } fs.Infof(r.f, "Wrote file with name length %d", i) err = o.Remove(context.Background()) if err != nil { fs.Errorf(o, "Failed to remove test file") } return false }) 
r.maxFileLength[characterLength-1] = i - 1 fs.Infof(r.f, "Max file length is %d when writing %d byte characters %q", r.maxFileLength[characterLength-1], characterLength, character) } func (r *results) checkStreaming() { putter := r.f.Put if r.f.Features().PutStream != nil { fs.Infof(r.f, "Given remote has specialized streaming function. Using that to test streaming.") putter = r.f.Features().PutStream } contents := "thinking of test strings is hard" buf := bytes.NewBufferString(contents) hashIn := hash.NewMultiHasher() in := io.TeeReader(buf, hashIn) objIn := object.NewStaticObjectInfo("checkStreamingTest", time.Now(), -1, true, nil, r.f) objR, err := putter(r.ctx, in, objIn) if err != nil { fs.Infof(r.f, "Streamed file failed to upload (%v)", err) r.canStream = false return } hashes := hashIn.Sums() types := objR.Fs().Hashes().Array() for _, Hash := range types { sum, err := objR.Hash(r.ctx, Hash) if err != nil { fs.Infof(r.f, "Streamed file failed when getting hash %v (%v)", Hash, err) r.canStream = false return } if !hash.Equals(hashes[Hash], sum) { fs.Infof(r.f, "Streamed file has incorrect hash %v: expecting %q got %q", Hash, hashes[Hash], sum) r.canStream = false return } } if int64(len(contents)) != objR.Size() { fs.Infof(r.f, "Streamed file has incorrect file size: expecting %d got %d", len(contents), objR.Size()) r.canStream = false return } r.canStream = true } func readInfo(ctx context.Context, f fs.Fs) error { // Ensure cleanup unless --keep-test-files is specified if !keepTestFiles { defer func() { err := operations.Purge(ctx, f, "") if err != nil { fs.Errorf(f, "Failed to purge temporary directory: %v", err) } else { fs.Infof(f, "Removed temporary directory for test files: %s", f.Root()) } }() } r := newResults(ctx, f) if checkControl { r.checkControls() } if checkLength { for i := range r.maxFileLength { r.findMaxLength(i + 1) } } if checkNormalization { r.checkUTF8Normalization() } if checkStreaming { r.checkStreaming() } if checkBase32768 { 
r.checkBase32768() } r.Print() r.WriteJSON() return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/test/info/internal/internal.go
cmd/test/info/internal/internal.go
// Package internal provides internal implementation for the info test command. package internal import ( "bytes" "encoding/json" "fmt" "strings" ) // Presence describes the presence of a filename in file listing type Presence int // Possible Presence states const ( Absent Presence = iota Present Renamed Multiple ) // Position is the placement of the test character in the filename type Position int // Predefined positions const ( PositionMiddle Position = 1 << iota PositionLeft PositionRight PositionNone Position = 0 PositionAll Position = PositionRight<<1 - 1 ) // PositionList contains all valid positions var PositionList = []Position{PositionMiddle, PositionLeft, PositionRight} // ControlResult contains the result of a single character test type ControlResult struct { WriteError map[Position]string GetError map[Position]string InList map[Position]Presence } // InfoReport is the structure of the JSON output type InfoReport struct { Remote string ControlCharacters *map[string]ControlResult MaxFileLength *int CanStream *bool CanWriteUnnormalized *bool CanReadUnnormalized *bool CanReadRenormalized *bool } func (e Position) String() string { switch e { case PositionNone: return "none" case PositionAll: return "all" } var buf bytes.Buffer if e&PositionMiddle != 0 { buf.WriteString("middle") e &= ^PositionMiddle } if e&PositionLeft != 0 { if buf.Len() != 0 { buf.WriteRune(',') } buf.WriteString("left") e &= ^PositionLeft } if e&PositionRight != 0 { if buf.Len() != 0 { buf.WriteRune(',') } buf.WriteString("right") e &= ^PositionRight } if e != PositionNone { panic("invalid position") } return buf.String() } // MarshalText encodes the position when used as a map key func (e Position) MarshalText() ([]byte, error) { return []byte(e.String()), nil } // UnmarshalText decodes a position when used as a map key func (e *Position) UnmarshalText(text []byte) error { switch s := strings.ToLower(string(text)); s { default: *e = PositionNone for p := range strings.SplitSeq(s, ",") { 
switch p { case "left": *e |= PositionLeft case "middle": *e |= PositionMiddle case "right": *e |= PositionRight default: return fmt.Errorf("unknown position: %s", e) } } case "none": *e = PositionNone case "all": *e = PositionAll } return nil } func (e Presence) String() string { switch e { case Absent: return "absent" case Present: return "present" case Renamed: return "renamed" case Multiple: return "multiple" default: panic("invalid presence") } } // MarshalJSON encodes the presence when used as a JSON value func (e Presence) MarshalJSON() ([]byte, error) { return json.Marshal(e.String()) } // UnmarshalJSON decodes a presence when used as a JSON value func (e *Presence) UnmarshalJSON(text []byte) error { var s string if err := json.Unmarshal(text, &s); err != nil { return err } switch s := strings.ToLower(s); s { case "absent": *e = Absent case "present": *e = Present case "renamed": *e = Renamed case "multiple": *e = Multiple default: return fmt.Errorf("unknown presence: %s", e) } return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/test/info/internal/build_csv/main.go
cmd/test/info/internal/build_csv/main.go
// Package main provides utilities for the info test command.
package main

import (
	"encoding/csv"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"os"
	"sort"
	"strconv"

	"github.com/rclone/rclone/cmd/test/info/internal"
	"github.com/rclone/rclone/fs"
)

// main merges one or more JSON reports (written by the info test command)
// into a single CSV: one row per tested character, nine result columns
// (Write/Get/List x Left/Middle/Right) per remote.
func main() {
	fOut := flag.String("o", "out.csv", "Output file")
	flag.Parse()
	args := flag.Args()
	// Load every report given on the command line, skipping any that
	// carry no control-character data.
	remotes := make([]internal.InfoReport, 0, len(args))
	for _, fn := range args {
		f, err := os.Open(fn)
		if err != nil {
			fs.Fatalf(nil, "Unable to open %q: %s", fn, err)
		}
		var remote internal.InfoReport
		dec := json.NewDecoder(f)
		err = dec.Decode(&remote)
		if err != nil {
			fs.Fatalf(nil, "Unable to decode %q: %s", fn, err)
		}
		if remote.ControlCharacters == nil {
			fs.Logf(nil, "Skipping remote %s: no ControlCharacters", remote.Remote)
		} else {
			remotes = append(remotes, remote)
		}
		if err := f.Close(); err != nil {
			fs.Fatalf(nil, "Closing %q failed: %s", fn, err)
		}
	}
	// charsMap maps each tested character to a printable (quote-escaped) form.
	charsMap := make(map[string]string)
	var remoteNames []string
	for _, r := range remotes {
		remoteNames = append(remoteNames, r.Remote)
		for k := range *r.ControlCharacters {
			quoted := strconv.Quote(k)
			// strip the surrounding double quotes added by Quote
			charsMap[k] = quoted[1 : len(quoted)-1]
		}
	}
	sort.Strings(remoteNames)
	chars := make([]string, 0, len(charsMap))
	for k := range charsMap {
		chars = append(chars, k)
	}
	sort.Strings(chars)
	// char -> remote -> the nine output cells
	recordsMap := make(map[string]map[string][]string)
	// remote -> first header row (remote name followed by 8 blanks)
	hRemoteMap := make(map[string][]string)
	hOperation := []string{"Write", "Write", "Write", "Get", "Get", "Get", "List", "List", "List"}
	hPosition := []string{"L", "M", "R", "L", "M", "R", "L", "M", "R"}
	// Per remote the columns are:
	//   write get list
	//   left middle right left middle right left middle right
	for _, r := range remotes {
		hRemoteMap[r.Remote] = []string{r.Remote, "", "", "", "", "", "", "", ""}
		for k, v := range *r.ControlCharacters {
			cMap, ok := recordsMap[k]
			if !ok {
				cMap = make(map[string][]string, 1)
				recordsMap[k] = cMap
			}
			cMap[r.Remote] = []string{
				sok(v.WriteError[internal.PositionLeft]),
				sok(v.WriteError[internal.PositionMiddle]),
				sok(v.WriteError[internal.PositionRight]),
				sok(v.GetError[internal.PositionLeft]),
				sok(v.GetError[internal.PositionMiddle]),
				sok(v.GetError[internal.PositionRight]),
				pok(v.InList[internal.PositionLeft]),
				pok(v.InList[internal.PositionMiddle]),
				pok(v.InList[internal.PositionRight]),
			}
		}
	}
	// Three header rows, then one row per character.
	records := [][]string{
		{"", ""},
		{"", ""},
		{"Bytes", "Char"},
	}
	for _, r := range remoteNames {
		records[0] = append(records[0], hRemoteMap[r]...)
		records[1] = append(records[1], hOperation...)
		records[2] = append(records[2], hPosition...)
	}
	for _, c := range chars {
		k := charsMap[c]
		// %X renders the raw bytes of the character as hex
		row := []string{fmt.Sprintf("%X", c), k}
		for _, r := range remoteNames {
			if m, ok := recordsMap[c][r]; ok {
				row = append(row, m...)
			} else {
				// this remote has no data for this character
				row = append(row, "", "", "", "", "", "", "", "", "")
			}
		}
		records = append(records, row)
	}
	// "-" writes to stdout, anything else to the named file.
	var writer io.Writer
	if *fOut == "-" {
		writer = os.Stdout
	} else {
		f, err := os.Create(*fOut)
		if err != nil {
			fs.Fatalf(nil, "Unable to create %q: %s", *fOut, err)
		}
		defer func() {
			if err := f.Close(); err != nil {
				fs.Fatal(nil, fmt.Sprint("Error writing csv:", err))
			}
		}()
		writer = f
	}
	w := csv.NewWriter(writer)
	err := w.WriteAll(records)
	if err != nil {
		fs.Fatal(nil, fmt.Sprint("Error writing csv:", err))
	} else if err := w.Error(); err != nil {
		fs.Fatal(nil, fmt.Sprint("Error writing csv:", err))
	}
}

// sok converts an error message into a short OK/ERR cell value.
func sok(s string) string {
	if s != "" {
		return "ERR"
	}
	return "OK"
}

// pok converts a listing Presence into a short cell value.
func pok(p internal.Presence) string {
	switch p {
	case internal.Absent:
		return "MIS"
	case internal.Present:
		return "OK"
	case internal.Renamed:
		return "REN"
	case internal.Multiple:
		return "MUL"
	default:
		return "ERR"
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/copy/copy.go
cmd/copy/copy.go
// Package copy provides the copy command.
package copy

import (
	"context"
	"strings"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/operations/operationsflags"
	"github.com/rclone/rclone/fs/sync"
	"github.com/spf13/cobra"
)

var (
	createEmptySrcDirs = false // set by --create-empty-src-dirs
	loggerOpt          = operations.LoggerOpt{}
	loggerFlagsOpt     = operationsflags.AddLoggerFlagsOptions{}
)

// init registers the copy command and its flags with the root command.
func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy", "")
	operationsflags.AddLoggerFlags(cmdFlags, &loggerOpt, &loggerFlagsOpt)
	loggerOpt.LoggerFn = operations.NewDefaultLoggerFn(&loggerOpt)
}

// commandDefinition implements `rclone copy source:path dest:path`.
var commandDefinition = &cobra.Command{
	Use:   "copy source:path dest:path",
	Short: `Copy files from source to dest, skipping identical files.`,
	// Note: "|" will be replaced by backticks below
	Long: strings.ReplaceAll(`Copy the source to the destination. Does not transfer files that are identical on source and destination, testing by size and modification time or MD5SUM. Doesn't delete files from the destination. If you want to also delete files from destination, to make it match source, use the [sync](/commands/rclone_sync/) command instead. Note that it is always the contents of the directory that is synced, not the directory itself. So when source:path is a directory, it's the contents of source:path that are copied, not the directory name and contents. To copy single files, use the [copyto](/commands/rclone_copyto/) command instead. If dest:path doesn't exist, it is created and the source:path contents go there. For example |||sh rclone copy source:sourcepath dest:destpath ||| Let's say there are two files in sourcepath |||text sourcepath/one.txt sourcepath/two.txt ||| This copies them to |||text destpath/one.txt destpath/two.txt ||| Not to |||text destpath/sourcepath/one.txt destpath/sourcepath/two.txt ||| If you are familiar with |rsync|, rclone always works as if you had written a trailing |/| - meaning "copy the contents of this directory". This applies to all commands and whether you are talking about the source or destination. See the [--no-traverse](/docs/#no-traverse) option for controlling whether rclone lists the destination directory or not. Supplying this option when copying a small number of files into a large destination can speed transfers up greatly. For example, if you have many files in /path/to/src but only a few of them change every day, you can copy all the files which have changed recently very efficiently like this: |||sh rclone copy --max-age 24h --no-traverse /path/to/src remote: ||| Rclone will sync the modification times of files and directories if the backend supports it. If metadata syncing is required then use the |--metadata| flag. Note that the modification time and metadata for the root directory will **not** be synced. See [issue #7652](https://github.com/rclone/rclone/issues/7652) for more info. **Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics. **Note**: Use the |--dry-run| or the |--interactive|/|-i| flag to test without copying anything. `, "|", "`") + operationsflags.Help(),
	Annotations: map[string]string{
		"groups": "Copy,Filter,Listing,Important",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(2, 2, command, args)
		fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
		cmd.Run(true, true, command, func() error {
			ctx := context.Background()
			close, err := operationsflags.ConfigureLoggers(ctx, fdst, command, &loggerOpt, loggerFlagsOpt)
			if err != nil {
				return err
			}
			defer close()
			if loggerFlagsOpt.AnySet() {
				ctx = operations.WithSyncLogger(ctx, loggerOpt)
			}
			// An empty srcFileName means the source was a directory:
			// copy the whole tree; otherwise copy the single file.
			if srcFileName == "" {
				return sync.CopyDir(ctx, fdst, fsrc, createEmptySrcDirs)
			}
			return operations.CopyFile(ctx, fdst, fsrc, srcFileName, srcFileName)
		})
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/march.go
cmd/bisync/march.go
package bisync

import (
	"context"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/march"
)

// bisyncMarch holds the state shared by the march callbacks while the two
// paths are being listed in parallel.
type bisyncMarch struct {
	ls1      *fileList // listing being built for Path1
	ls2      *fileList // listing being built for Path2
	err      error     // overall march error
	firstErr error     // first per-file (e.g. hashing) error seen

	marchAliasLock sync.Mutex // guards b.aliases
	marchLsLock    sync.Mutex // guards writes to ls1/ls2
	marchErrLock   sync.Mutex // guards firstErr
	marchCtx       context.Context
}

// makeMarchListing lists Path1 and Path2 simultaneously via march and
// returns (and saves) the two resulting file listings.
func (b *bisyncRun) makeMarchListing(ctx context.Context) (*fileList, *fileList, error) {
	ci := fs.GetConfig(ctx)
	b.march.marchCtx = ctx
	b.setupListing()
	fs.Debugf(b, "starting to march!")

	// set up a march over fdst (Path2) and fsrc (Path1)
	m := &march.March{
		Ctx:                    ctx,
		Fdst:                   b.fs2,
		Fsrc:                   b.fs1,
		Dir:                    "",
		NoTraverse:             false,
		Callback:               b,
		DstIncludeAll:          false,
		NoCheckDest:            false,
		NoUnicodeNormalization: ci.NoUnicodeNormalization,
	}
	b.march.err = m.Run(ctx)
	fs.Debugf(b, "march completed. err: %v", b.march.err)
	if b.march.err == nil {
		b.march.err = b.march.firstErr
	}
	if b.march.err != nil {
		b.handleErr("march", "error during march", b.march.err, true, true)
		b.abort = true
		return b.march.ls1, b.march.ls2, b.march.err
	}

	// save files
	// --download-hash falls back to MD5 when the backend has no hash
	if b.opt.Compare.DownloadHash && b.march.ls1.hash == hash.None {
		b.march.ls1.hash = hash.MD5
	}
	if b.opt.Compare.DownloadHash && b.march.ls2.hash == hash.None {
		b.march.ls2.hash = hash.MD5
	}
	b.march.err = b.march.ls1.save(b.newListing1)
	b.handleErr(b.march.ls1, "error saving b.march.ls1 from march", b.march.err, true, true)
	b.march.err = b.march.ls2.save(b.newListing2)
	b.handleErr(b.march.ls2, "error saving b.march.ls2 from march", b.march.err, true, true)

	return b.march.ls1, b.march.ls2, b.march.err
}

// SrcOnly have an object which is on path1 only
func (b *bisyncRun) SrcOnly(o fs.DirEntry) (recurse bool) {
	fs.Debugf(o, "path1 only")
	b.parse(o, true)
	return isDir(o)
}

// DstOnly have an object which is on path2 only
func (b *bisyncRun) DstOnly(o fs.DirEntry) (recurse bool) {
	fs.Debugf(o, "path2 only")
	b.parse(o, false)
	return isDir(o)
}

// Match is called when object exists on both path1 and path2 (whether equal or not)
func (b *bisyncRun) Match(ctx context.Context, o2, o1 fs.DirEntry) (recurse bool) {
	fs.Debugf(o1, "both path1 and path2")
	// record the pairing so differently-normalized names can be matched later
	b.march.marchAliasLock.Lock()
	b.aliases.Add(o1.Remote(), o2.Remote())
	b.march.marchAliasLock.Unlock()
	b.parse(o1, true)
	b.parse(o2, false)
	return isDir(o1)
}

// isDir reports whether the entry is a directory (and should be recursed into).
func isDir(e fs.DirEntry) bool {
	switch x := e.(type) {
	case fs.Object:
		fs.Debugf(x, "is Object")
		return false
	case fs.Directory:
		fs.Debugf(x, "is Dir")
		return true
	default:
		fs.Debugf(e, "is unknown")
	}
	return false
}

// parse records a single entry into the listing for the given side.
func (b *bisyncRun) parse(e fs.DirEntry, isPath1 bool) {
	switch x := e.(type) {
	case fs.Object:
		b.ForObject(x, isPath1)
	case fs.Directory:
		// directories are only listed when --create-empty-src-dirs is set
		if b.opt.CreateEmptySrcDirs {
			b.ForDir(x, isPath1)
		}
	default:
		fs.Debugf(e, "is unknown")
	}
}

// setupListing resets the two listings before a march run.
func (b *bisyncRun) setupListing() {
	b.march.ls1 = newFileList()
	b.march.ls2 = newFileList()

	// note that --ignore-listing-checksum is different from --ignore-checksum
	// and we already checked it when we set b.opt.Compare.HashType1 and 2
	b.march.ls1.hash = b.opt.Compare.HashType1
	b.march.ls2.hash = b.opt.Compare.HashType2
}

// ForObject records a file entry (with optional hash and modtime) into the
// listing for the given side.
func (b *bisyncRun) ForObject(o fs.Object, isPath1 bool) {
	tr := accounting.Stats(b.march.marchCtx).NewCheckingTransfer(o, "listing file - "+whichPath(isPath1))
	defer func() {
		tr.Done(b.march.marchCtx, nil)
	}()
	var (
		hashVal string
		hashErr error
	)
	ls := b.whichLs(isPath1)
	hashType := ls.hash
	if hashType != hash.None {
		hashVal, hashErr = o.Hash(b.march.marchCtx, hashType)
		b.march.marchErrLock.Lock()
		if b.march.firstErr == nil {
			b.march.firstErr = hashErr
		}
		b.march.marchErrLock.Unlock()
	}
	hashVal, hashErr = b.tryDownloadHash(b.march.marchCtx, o, hashVal)
	b.march.marchErrLock.Lock()
	if b.march.firstErr == nil {
		b.march.firstErr = hashErr
	}
	if b.march.firstErr != nil {
		b.handleErr(hashType, "error hashing during march", b.march.firstErr, false, true)
	}
	b.march.marchErrLock.Unlock()

	var modtime time.Time
	if b.opt.Compare.Modtime {
		modtime = o.ModTime(b.march.marchCtx).In(TZ)
	}
	id := ""     // TODO: ID(o)
	flags := "-" // "-" for a file and "d" for a directory
	b.march.marchLsLock.Lock()
	ls.put(o.Remote(), o.Size(), modtime, hashVal, id, flags)
	b.march.marchLsLock.Unlock()
}

// ForDir records a directory entry (size -1, no hash) into the listing for
// the given side.
func (b *bisyncRun) ForDir(o fs.Directory, isPath1 bool) {
	tr := accounting.Stats(b.march.marchCtx).NewCheckingTransfer(o, "listing dir - "+whichPath(isPath1))
	defer func() {
		tr.Done(b.march.marchCtx, nil)
	}()
	ls := b.whichLs(isPath1)
	var modtime time.Time
	if b.opt.Compare.Modtime {
		modtime = o.ModTime(b.march.marchCtx).In(TZ)
	}
	id := ""     // TODO
	flags := "d" // "-" for a file and "d" for a directory
	b.march.marchLsLock.Lock()
	ls.put(o.Remote(), -1, modtime, "", id, flags)
	b.march.marchLsLock.Unlock()
}

// whichLs returns the listing for Path1 or Path2.
func (b *bisyncRun) whichLs(isPath1 bool) *fileList {
	ls := b.march.ls1
	if !isPath1 {
		ls = b.march.ls2
	}
	return ls
}

// whichPath returns a human-readable side name for logging.
func whichPath(isPath1 bool) string {
	s := "Path1"
	if !isPath1 {
		s = "Path2"
	}
	return s
}

// findCheckFiles marches both paths with a filter that matches only the
// --check-access filename and returns the resulting listings.
func (b *bisyncRun) findCheckFiles(ctx context.Context) (*fileList, *fileList, error) {
	ctxCheckFile, filterCheckFile := filter.AddConfig(ctx)
	b.handleErr(b.opt.CheckFilename, "error adding CheckFilename to filter", filterCheckFile.Add(true, b.opt.CheckFilename), true, true)
	b.handleErr(b.opt.CheckFilename, "error adding ** exclusion to filter", filterCheckFile.Add(false, "**"), true, true)
	ci := fs.GetConfig(ctxCheckFile)
	b.march.marchCtx = ctxCheckFile

	b.setupListing()
	fs.Debugf(b, "starting to march!")

	// set up a march over fdst (Path2) and fsrc (Path1)
	m := &march.March{
		Ctx:                    ctxCheckFile,
		Fdst:                   b.fs2,
		Fsrc:                   b.fs1,
		Dir:                    "",
		NoTraverse:             false,
		Callback:               b,
		DstIncludeAll:          false,
		NoCheckDest:            false,
		NoUnicodeNormalization: ci.NoUnicodeNormalization,
	}
	b.march.err = m.Run(ctxCheckFile)
	fs.Debugf(b, "march completed. err: %v", b.march.err)
	if b.march.err == nil {
		b.march.err = b.march.firstErr
	}
	if b.march.err != nil {
		b.handleErr("march", "error during findCheckFiles", b.march.err, true, true)
		b.abort = true
	}

	return b.march.ls1, b.march.ls2, b.march.err
}

// ID returns the ID of the Object if known, or "" if not
func ID(o fs.Object) string {
	do, ok := o.(fs.IDer)
	if !ok {
		return ""
	}
	return do.ID()
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/rc.go
cmd/bisync/rc.go
package bisync

import (
	"context"
	"errors"
	"log"

	"github.com/rclone/rclone/cmd/bisync/bilib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/rc"
)

// init registers the "sync/bisync" remote-control call.
func init() {
	rc.Add(rc.Call{
		Path:         "sync/bisync",
		AuthRequired: true,
		Fn:           rcBisync,
		Title:        shortHelp,
		Help:         rcHelp,
	})
}

// rcBisync implements the "sync/bisync" rc call: it decodes the optional
// parameters from in into an Options struct, resolves path1/path2, runs
// Bisync and returns its captured console output.
//
// The repeated `if opt.X, err = in.GetXxx(...); rc.NotErrParamNotFound(err)`
// pattern returns early (via the named results: out == nil plus the decode
// error) on any error other than "parameter not found"; absent parameters
// simply leave the option at its zero value.
func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
	opt := &Options{}
	octx, ci := fs.AddConfig(ctx)

	// dryRun is applied both to the global config and the bisync options
	if dryRun, err := in.GetBool("dryRun"); err == nil {
		ci.DryRun = dryRun
		opt.DryRun = dryRun
	} else if rc.NotErrParamNotFound(err) {
		return nil, err
	}

	if maxDelete, err := in.GetInt64("maxDelete"); err == nil {
		if maxDelete < 0 || maxDelete > 100 {
			return nil, rc.NewErrParamInvalid(errors.New("maxDelete must be a percentage between 0 and 100"))
		}
		opt.MaxDelete = int(maxDelete)
	} else if rc.NotErrParamNotFound(err) {
		return nil, err
	}

	if opt.Resync, err = in.GetBool("resync"); rc.NotErrParamNotFound(err) {
		return
	}
	if opt.CheckAccess, err = in.GetBool("checkAccess"); rc.NotErrParamNotFound(err) {
		return
	}
	if opt.Force, err = in.GetBool("force"); rc.NotErrParamNotFound(err) {
		return
	}
	if opt.CreateEmptySrcDirs, err = in.GetBool("createEmptySrcDirs"); rc.NotErrParamNotFound(err) {
		return
	}
	if opt.RemoveEmptyDirs, err = in.GetBool("removeEmptyDirs"); rc.NotErrParamNotFound(err) {
		return
	}
	if opt.NoCleanup, err = in.GetBool("noCleanup"); rc.NotErrParamNotFound(err) {
		return
	}
	if opt.IgnoreListingChecksum, err = in.GetBool("ignoreListingChecksum"); rc.NotErrParamNotFound(err) {
		return
	}
	if opt.Resilient, err = in.GetBool("resilient"); rc.NotErrParamNotFound(err) {
		return
	}

	if opt.CheckFilename, err = in.GetString("checkFilename"); rc.NotErrParamNotFound(err) {
		return
	}
	if opt.FiltersFile, err = in.GetString("filtersFile"); rc.NotErrParamNotFound(err) {
		return
	}
	if opt.Workdir, err = in.GetString("workdir"); rc.NotErrParamNotFound(err) {
		return
	}
	if opt.BackupDir1, err = in.GetString("backupdir1"); rc.NotErrParamNotFound(err) {
		return
	}
	if opt.BackupDir2, err = in.GetString("backupdir2"); rc.NotErrParamNotFound(err) {
		return
	}

	// checkSync defaults to "true" when not supplied
	checkSync, err := in.GetString("checkSync")
	if rc.NotErrParamNotFound(err) {
		return nil, err
	}
	if checkSync == "" {
		checkSync = "true"
	}
	if err := opt.CheckSync.Set(checkSync); err != nil {
		return nil, err
	}

	fs1, err := rc.GetFsNamed(octx, in, "path1")
	if err != nil {
		return nil, err
	}

	fs2, err := rc.GetFsNamed(octx, in, "path2")
	if err != nil {
		return nil, err
	}

	// Capture everything Bisync prints so it can be both logged and
	// returned in the rc response.
	output := bilib.CaptureOutput(func() {
		err = Bisync(octx, fs1, fs2, opt)
	})
	_, _ = log.Writer().Write(output)
	return rc.Params{"output": string(output)}, err
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/deltas.go
cmd/bisync/deltas.go
// Package bisync implements bisync
// Copyright (c) 2017-2020 Chris Nelson
package bisync

import (
	"context"
	"fmt"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"github.com/rclone/rclone/cmd/bisync/bilib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/lib/terminal"
	"golang.org/x/text/unicode/norm"
)

// delta is a bitmask describing how a file changed on one side since the
// prior sync (new / newer / older / larger / smaller / hash changed /
// deleted). Multiple bits may be set at once.
type delta uint8

const (
	deltaZero delta = 0
	deltaNew  delta = 1 << iota
	deltaNewer
	deltaOlder
	deltaLarger
	deltaSmaller
	deltaHash
	deltaDeleted
)

// Composite masks for the individual bits above.
const (
	deltaSize     delta = deltaLarger | deltaSmaller
	deltaTime     delta = deltaNewer | deltaOlder
	deltaModified delta = deltaTime | deltaSize | deltaHash
	deltaOther    delta = deltaNew | deltaTime | deltaSize | deltaHash
)

// is reports whether any bit of cond is set in d.
func (d delta) is(cond delta) bool {
	return d&cond != 0
}

// deltaSet holds all per-file deltas computed for one side of the sync,
// together with the metadata (size/time/hash) of the changed files.
type deltaSet struct {
	deltas     map[string]delta
	size       map[string]int64
	time       map[string]time.Time
	hash       map[string]string
	opt        *Options
	fs         fs.Fs       // base filesystem
	msg        string      // filesystem name for logging
	oldCount   int         // original number of files (for "excess deletes" check)
	deleted    int         // number of deleted files (for "excess deletes" check)
	foundSame  bool        // true if found at least one unchanged file
	checkFiles bilib.Names // paths matching --check-filename (when --check-access)
}

// empty reports whether no deltas were recorded.
func (ds *deltaSet) empty() bool {
	return len(ds.deltas) == 0
}

// sort returns the delta file names in sorted order (empty slice if none).
func (ds *deltaSet) sort() (sorted []string) {
	if ds.empty() {
		return
	}
	sorted = make([]string, 0, len(ds.deltas))
	for file := range ds.deltas {
		sorted = append(sorted, file)
	}
	sort.Strings(sorted)
	return
}

// printStats logs a summary of the deltas (new/modified/deleted counts plus a
// breakdown of what changed), and logs an error if the counts are internally
// inconsistent.
func (ds *deltaSet) printStats() {
	if ds.empty() {
		return
	}
	nAll := len(ds.deltas)
	nNew := 0
	nMod := 0
	nTime := 0
	nNewer := 0
	nOlder := 0
	nSize := 0
	nLarger := 0
	nSmaller := 0
	nHash := 0
	nDeleted := 0
	for _, d := range ds.deltas {
		if d.is(deltaNew) {
			nNew++
		}
		if d.is(deltaModified) {
			nMod++
		}
		if d.is(deltaTime) {
			nTime++
		}
		if d.is(deltaNewer) {
			nNewer++
		}
		if d.is(deltaOlder) {
			nOlder++
		}
		if d.is(deltaSize) {
			nSize++
		}
		if d.is(deltaLarger) {
			nLarger++
		}
		if d.is(deltaSmaller) {
			nSmaller++
		}
		if d.is(deltaHash) {
			nHash++
		}
		if d.is(deltaDeleted) {
			nDeleted++
		}
	}
	// sanity check: each delta is exactly one of new, modified, deleted
	if nAll != nNew+nMod+nDeleted {
		fs.Errorf(nil, "something doesn't add up! %4d != %4d + %4d + %4d", nAll, nNew, nMod, nDeleted)
	}
	fs.Infof(nil, "%s: %4d changes: "+Color(terminal.GreenFg, "%4d new")+", "+Color(terminal.YellowFg, "%4d modified")+", "+Color(terminal.RedFg, "%4d deleted"), ds.msg, nAll, nNew, nMod, nDeleted)
	if nMod > 0 {
		details := []string{}
		if nTime > 0 {
			details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d newer"), nNewer))
			details = append(details, fmt.Sprintf(Color(terminal.BlueFg, "%4d older"), nOlder))
		}
		if nSize > 0 {
			details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d larger"), nLarger))
			details = append(details, fmt.Sprintf(Color(terminal.BlueFg, "%4d smaller"), nSmaller))
		}
		if nHash > 0 {
			details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d hash differs"), nHash))
		}
		if (nNewer+nOlder != nTime) || (nLarger+nSmaller != nSize) || (nMod > nTime+nSize+nHash) {
			fs.Errorf(nil, "something doesn't add up!")
		}
		fs.Infof(nil, "(%s: %s)", Color(terminal.YellowFg, "Modified"), strings.Join(details, ", "))
	}
}

// findDeltas compares the prior listing (loaded from oldListing) against the
// current listing `now` for ONE side of the sync, and returns the resulting
// deltaSet. Only the comparison options enabled in b.opt.Compare (size,
// modtime, checksum) are evaluated. On failure to load or validate a listing
// the run is aborted (b.abort set) and the error returned.
func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string, now *fileList, msg string) (ds *deltaSet, err error) {
	var old *fileList
	newListing := oldListing + "-new"

	old, err = b.loadListing(oldListing)
	if err != nil {
		fs.Errorf(nil, "Failed loading prior %s listing: %s", msg, oldListing)
		b.abort = true
		return
	}
	if err = b.checkListing(old, oldListing, "prior "+msg); err != nil {
		return
	}
	err = b.checkListing(now, newListing, "current "+msg)
	if err != nil {
		return
	}

	ds = &deltaSet{
		deltas:     map[string]delta{},
		size:       map[string]int64{},
		time:       map[string]time.Time{},
		hash:       map[string]string{},
		fs:         f,
		msg:        msg,
		oldCount:   len(old.list),
		opt:        b.opt,
		checkFiles: bilib.Names{},
	}

	// Pass 1: walk the OLD listing to find deleted and modified files.
	for _, file := range old.list {
		// REMEMBER: this section is only concerned with comparing listings from the same side (not different sides)
		d := deltaZero
		s := int64(0)
		h := ""
		var t time.Time
		if !now.has(file) {
			b.indent(msg, file, Color(terminal.RedFg, "File was deleted"))
			ds.deleted++
			d |= deltaDeleted
		} else if !now.isDir(file) {
			// skip dirs here, as we only care if they are new/deleted, not newer/older
			whatchanged := []string{}
			if b.opt.Compare.Size {
				if sizeDiffers(old.getSize(file), now.getSize(file)) {
					fs.Debugf(file, "(old: %v current: %v)", old.getSize(file), now.getSize(file))
					if now.getSize(file) > old.getSize(file) {
						whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (larger)"))
						d |= deltaLarger
					} else {
						whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (smaller)"))
						d |= deltaSmaller
					}
					s = now.getSize(file)
				}
			}
			if b.opt.Compare.Modtime {
				if timeDiffers(fctx, old.getTime(file), now.getTime(file), f, f) {
					if old.beforeOther(now, file) {
						fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
						whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (newer)"))
						d |= deltaNewer
					} else {
						// Current version is older than prior sync.
						fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
						whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (older)"))
						d |= deltaOlder
					}
					t = now.getTime(file)
				}
			}
			if b.opt.Compare.Checksum {
				if b.hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
					fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
					whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
					d |= deltaHash
					h = now.getHash(file)
				}
			}
			// concat changes and print log
			if d.is(deltaModified) {
				summary := fmt.Sprintf(Color(terminal.YellowFg, "File changed: %s"), strings.Join(whatchanged, ", "))
				b.indent(msg, file, summary)
			}
		}
		// Record the delta plus only the metadata relevant to the enabled
		// comparison modes.
		if d.is(deltaModified) {
			ds.deltas[file] = d
			if b.opt.Compare.Size {
				ds.size[file] = s
			}
			if b.opt.Compare.Modtime {
				ds.time[file] = t
			}
			if b.opt.Compare.Checksum {
				ds.hash[file] = h
			}
		} else if d.is(deltaDeleted) {
			ds.deltas[file] = d
		} else {
			// Once we've found at least one unchanged file,
			// we know that not everything has changed,
			// as with a DST time change
			ds.foundSame = true
		}
	}

	// Pass 2: walk the NEW listing to find files that did not exist before.
	for _, file := range now.list {
		if !old.has(file) {
			b.indent(msg, file, Color(terminal.GreenFg, "File is new"))
			ds.deltas[file] = deltaNew
			if b.opt.Compare.Size {
				ds.size[file] = now.getSize(file)
			}
			if b.opt.Compare.Modtime {
				ds.time[file] = now.getTime(file)
			}
			if b.opt.Compare.Checksum {
				ds.hash[file] = now.getHash(file)
			}
		}
	}

	if b.opt.CheckAccess {
		// checkFiles is a small structure compared with the `now`, so we
		// return it alone and let the full delta map be garbage collected.
		for _, file := range now.list {
			if filepath.Base(file) == b.opt.CheckFilename {
				ds.checkFiles.Add(file)
			}
		}
	}

	return
}

// applyDeltas reconciles the two deltaSets into concrete copy/delete queues,
// detects and resolves conflicts (files changed on both sides), and performs
// the queued transfers. It returns the per-direction results and the queues
// used (for later reporting), with err set on the first fatal failure.
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (results2to1, results1to2 []Results, queues queues, err error) {
	path1 := bilib.FsPath(b.fs1)
	path2 := bilib.FsPath(b.fs2)

	copy1to2 := bilib.Names{}
	copy2to1 := bilib.Names{}
	delete1 := bilib.Names{}
	delete2 := bilib.Names{}
	handled := bilib.Names{}
	renameSkipped := bilib.Names{}
	deletedonboth := bilib.Names{}
	skippedDirs1 := newFileList()
	skippedDirs2 := newFileList()
	b.renames = renames{}

	ctxMove := b.opt.setDryRun(ctx)

	// update AliasMap for deleted files, as march does not know about them
	b.updateAliases(ctx, ds1, ds2)

	// efficient isDir check
	// we load the listing just once and store only the dirs
	dirs1, dirs1Err := b.listDirsOnly(1)
	if dirs1Err != nil {
		b.critical = true
		b.retryable = true
		fs.Debugf(nil, "Error generating dirsonly list for path1: %v", dirs1Err)
		return
	}
	dirs2, dirs2Err := b.listDirsOnly(2)
	if dirs2Err != nil {
		b.critical = true
		b.retryable = true
		fs.Debugf(nil, "Error generating dirsonly list for path2: %v", dirs2Err)
		return
	}

	// build a list of only the "deltaOther"s so we don't have to check more files than necessary
	// this is essentially the same as running rclone check with a --files-from filter, then exempting the --match results from being renamed
	// we therefore avoid having to list the same directory more than once.

	// we are intentionally overriding DryRun here because we need to perform the check, even during a dry run, or the results would be inaccurate.
	// check is a read-only operation by its nature, so it's already "dry" in that sense.
	ctxNew, ciCheck := fs.AddConfig(ctx)
	ciCheck.DryRun = false
	ctxCheck, filterCheck := filter.AddConfig(ctxNew)

	// First sweep: collect the files changed on BOTH sides whose equality is
	// not already ruled out by size/hash, so they can be checked in one batch.
	for _, file := range ds1.sort() {
		alias := b.aliases.Alias(file)
		d1 := ds1.deltas[file]
		if d1.is(deltaOther) {
			d2, in2 := ds2.deltas[file]
			file2 := file
			if !in2 && file != alias {
				d2 = ds2.deltas[alias]
				file2 = alias
			}
			if d2.is(deltaOther) {
				// if size or hash differ, skip this, as we already know they're not equal
				if (b.opt.Compare.Size && sizeDiffers(ds1.size[file], ds2.size[file2])) ||
					(b.opt.Compare.Checksum && b.hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) {
					fs.Debugf(file, "skipping equality check as size/hash definitely differ")
				} else {
					checkit := func(filename string) {
						if err := filterCheck.AddFile(filename); err != nil {
							fs.Debugf(nil, "Non-critical error adding file to list of potential conflicts to check: %s", err)
						} else {
							fs.Debugf(nil, "Added file to list of potential conflicts to check: %s", filename)
						}
					}
					checkit(file)
					if file != alias {
						checkit(alias)
					}
				}
			}
		}
	}

	// if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
	matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)

	// Second sweep: walk Path1's deltas and queue the appropriate action for
	// each file, consulting Path2's delta (directly or via alias) as needed.
	for _, file := range ds1.sort() {
		alias := b.aliases.Alias(file)
		p1 := path1 + file
		p2 := path2 + alias
		d1 := ds1.deltas[file]
		if d1.is(deltaOther) {
			d2, in2 := ds2.deltas[file]
			// try looking under alternate name
			if !in2 && file != alias {
				d2, in2 = ds2.deltas[alias]
			}
			if !in2 {
				b.indent("Path1", p2, "Queue copy to Path2")
				copy1to2.Add(file)
			} else if d2.is(deltaDeleted) {
				b.indent("Path1", p2, "Queue copy to Path2")
				copy1to2.Add(file)
				handled.Add(file)
			} else if d2.is(deltaOther) {
				b.indent("!WARNING", file, "New or changed in both paths")

				// if files are identical, leave them alone instead of renaming
				if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
					fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
					b.march.ls1.getPut(file, skippedDirs1)
					b.march.ls2.getPut(file, skippedDirs2)
					b.debugFn(file, func() {
						b.debug(file, fmt.Sprintf("deltas dir: %s, ls1 has name?: %v, ls2 has name?: %v", file, b.march.ls1.has(b.DebugName), b.march.ls2.has(b.DebugName)))
					})
				} else {
					equal := matches.Has(file)
					if !equal {
						equal = matches.Has(alias)
					}
					if equal {
						if ciCheck.FixCase && file != alias {
							// the content is equal but filename still needs to be FixCase'd, so copy1to2
							// the Path1 version is deemed "correct" in this scenario
							fs.Infof(alias, "Files are equal but will copy anyway to fix case to %s", file)
							copy1to2.Add(file)
						} else if b.opt.Compare.Modtime && timeDiffers(ctx, b.march.ls1.getTime(b.march.ls1.getTryAlias(file, alias)), b.march.ls2.getTime(b.march.ls2.getTryAlias(file, alias)), b.fs1, b.fs2) {
							fs.Infof(file, "Files are equal but will copy anyway to update modtime (will not rename)")
							if b.march.ls1.getTime(b.march.ls1.getTryAlias(file, alias)).Before(b.march.ls2.getTime(b.march.ls2.getTryAlias(file, alias))) {
								// Path2 is newer
								b.indent("Path2", p1, "Queue copy to Path1")
								copy2to1.Add(b.march.ls2.getTryAlias(file, alias))
							} else {
								// Path1 is newer
								b.indent("Path1", p2, "Queue copy to Path2")
								copy1to2.Add(b.march.ls1.getTryAlias(file, alias))
							}
						} else {
							fs.Infof(nil, "Files are equal! Skipping: %s", file)
							renameSkipped.Add(file)
							renameSkipped.Add(alias)
						}
					} else {
						fs.Debugf(nil, "Files are NOT equal: %s", file)
						err = b.resolve(ctxMove, path1, path2, file, alias, &renameSkipped, &copy1to2, &copy2to1, ds1, ds2)
						if err != nil {
							return
						}
					}
				}
				handled.Add(file)
			}
		} else {
			// Path1 deleted
			d2, in2 := ds2.deltas[file]
			// try looking under alternate name
			fs.Debugf(file, "alias: %s, in2: %v", alias, in2)
			if !in2 && file != alias {
				fs.Debugf(file, "looking for alias: %s", alias)
				d2, in2 = ds2.deltas[alias]
				if in2 {
					fs.Debugf(file, "detected alias: %s", alias)
				}
			}
			if !in2 {
				b.indent("Path2", p2, "Queue delete")
				delete2.Add(file)
				copy1to2.Add(file)
			} else if d2.is(deltaOther) {
				b.indent("Path2", p1, "Queue copy to Path1")
				copy2to1.Add(file)
				handled.Add(file)
			} else if d2.is(deltaDeleted) {
				handled.Add(file)
				deletedonboth.Add(file)
				deletedonboth.Add(alias)
			}
		}
	}

	// Third sweep: anything in Path2's deltas not already handled above.
	for _, file := range ds2.sort() {
		alias := b.aliases.Alias(file)
		p1 := path1 + alias
		d2 := ds2.deltas[file]
		if handled.Has(file) || handled.Has(alias) {
			continue
		}
		if d2.is(deltaOther) {
			b.indent("Path2", p1, "Queue copy to Path1")
			copy2to1.Add(file)
		} else {
			// Deleted
			b.indent("Path1", p1, "Queue delete")
			delete1.Add(file)
			copy2to1.Add(file)
		}
	}

	// Do the batch operation
	if copy2to1.NotEmpty() && !b.InGracefulShutdown {
		b.indent("Path2", "Path1", "Do queued copies to")
		ctx = b.setBackupDir(ctx, 1)
		results2to1, err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
		// retries, if any
		results2to1, err = b.retryFastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1", results2to1, err)
		if !b.InGracefulShutdown && err != nil {
			return
		}
		// copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
		b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, &results2to1, "make")
	}

	if copy1to2.NotEmpty() && !b.InGracefulShutdown {
		b.indent("Path1", "Path2", "Do queued copies to")
		ctx = b.setBackupDir(ctx, 2)
		results1to2, err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
		// retries, if any
		results1to2, err = b.retryFastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2", results1to2, err)
		if !b.InGracefulShutdown && err != nil {
			return
		}
		// copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
		b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, &results1to2, "make")
	}

	if delete1.NotEmpty() && !b.InGracefulShutdown {
		if err = b.saveQueue(delete1, "delete1"); err != nil {
			return
		}
		// propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
		b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, &results2to1, "remove")
	}

	if delete2.NotEmpty() && !b.InGracefulShutdown {
		if err = b.saveQueue(delete2, "delete2"); err != nil {
			return
		}
		// propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
		b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, &results1to2, "remove")
	}

	// Hand the queues back to the caller for reporting/bookkeeping.
	queues.copy1to2 = copy1to2
	queues.copy2to1 = copy2to1
	queues.renameSkipped = renameSkipped
	queues.deletedonboth = deletedonboth
	queues.skippedDirs1 = skippedDirs1
	queues.skippedDirs2 = skippedDirs2

	return
}

// excessDeletes checks whether number of deletes is within allowed range
func (ds *deltaSet) excessDeletes() bool {
	maxDelete := ds.opt.MaxDelete
	maxRatio := float64(maxDelete) / 100.0
	curRatio := 0.0
	if ds.deleted > 0 && ds.oldCount > 0 {
		curRatio = float64(ds.deleted) / float64(ds.oldCount)
	}
	if curRatio <= maxRatio {
		return false
	}
	fs.Errorf("Safety abort", "too many deletes (>%d%%, %d of %d) on %s %s. Run with --force if desired.", maxDelete, ds.deleted, ds.oldCount, ds.msg, quotePath(bilib.FsPath(ds.fs)))
	return true
}

// normally we build the AliasMap from march results,
// however, march does not know about deleted files, so need to manually check them for aliases
func (b *bisyncRun) updateAliases(ctx context.Context, ds1, ds2 *deltaSet) {
	ci := fs.GetConfig(ctx)
	// skip if not needed
	if ci.NoUnicodeNormalization && !ci.IgnoreCaseSync && !b.fs1.Features().CaseInsensitive && !b.fs2.Features().CaseInsensitive {
		return
	}
	if ds1.deleted < 1 && ds2.deleted < 1 {
		return
	}

	fs.Debugf(nil, "Updating AliasMap")

	// transform canonicalizes a name (NFC-normalize and/or lowercase,
	// depending on config and remote case-sensitivity) for alias matching.
	transform := func(s string) string {
		if !ci.NoUnicodeNormalization {
			s = norm.NFC.String(s)
		}
		// note: march only checks the dest, but we check both here
		if ci.IgnoreCaseSync || b.fs1.Features().CaseInsensitive || b.fs2.Features().CaseInsensitive {
			s = strings.ToLower(s)
		}
		return s
	}

	delMap1 := map[string]string{}  // [transformedname]originalname
	delMap2 := map[string]string{}  // [transformedname]originalname
	fullMap1 := map[string]string{} // [transformedname]originalname
	fullMap2 := map[string]string{} // [transformedname]originalname

	for _, name := range b.march.ls1.list {
		fullMap1[transform(name)] = name
	}
	for _, name := range b.march.ls2.list {
		fullMap2[transform(name)] = name
	}

	// addDeletes adds the deleted files of a side into its maps (march would
	// never have seen them, since they no longer exist).
	addDeletes := func(ds *deltaSet, delMap, fullMap map[string]string) {
		for _, file := range ds.sort() {
			d := ds.deltas[file]
			if d.is(deltaDeleted) {
				delMap[transform(file)] = file
				fullMap[transform(file)] = file
			}
		}
	}
	addDeletes(ds1, delMap1, fullMap1)
	addDeletes(ds2, delMap2, fullMap2)

	// addAliases registers an alias wherever a deleted name on one side maps
	// (after transformation) to a differently-spelled name on the other side.
	addAliases := func(delMap, fullMap map[string]string) {
		for transformedname, name := range delMap {
			matchedName, found := fullMap[transformedname]
			if found && name != matchedName {
				fs.Debugf(name, "adding alias %s", matchedName)
				b.aliases.Add(name, matchedName)
			}
		}
	}
	addAliases(delMap1, fullMap2)
	addAliases(delMap2, fullMap1)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/compare.go
cmd/bisync/compare.go
package bisync

import (
	"context"
	"errors"
	"fmt"
	"strings"
	mutex "sync" // aliased to avoid clashing with the sync verb used throughout bisync
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/lib/terminal"
)

// CompareOpt describes the Compare options in force
type CompareOpt = struct {
	Modtime          bool
	Size             bool
	Checksum         bool
	HashType1        hash.Type
	HashType2        hash.Type
	NoSlowHash       bool
	SlowHashSyncOnly bool
	SlowHashDetected bool
	DownloadHash     bool
}

// setCompareDefaults derives the effective Compare settings from the global
// config flags (--size-only, --checksum, --ignore-size), the --compare flag,
// and the capabilities of the two remotes (hash support, slow hashes, modtime
// precision). It mutates both b.opt.Compare and the per-call fs.ConfigInfo,
// logging warnings for any combination it has to override. Returns an error
// if no comparison method at all remains enabled.
func (b *bisyncRun) setCompareDefaults(ctx context.Context) (err error) {
	ci := fs.GetConfig(ctx)

	// defaults
	b.opt.Compare.Size = true
	b.opt.Compare.Modtime = true
	b.opt.Compare.Checksum = false

	if ci.SizeOnly {
		b.opt.Compare.Size = true
		b.opt.Compare.Modtime = false
		b.opt.Compare.Checksum = false
	} else if ci.CheckSum && !b.opt.IgnoreListingChecksum {
		b.opt.Compare.Size = true
		b.opt.Compare.Modtime = false
		b.opt.Compare.Checksum = true
	}

	if ci.IgnoreSize {
		b.opt.Compare.Size = false
	}

	// --compare flag (if any) overrides the defaults computed above.
	err = b.setFromCompareFlag(ctx)
	if err != nil {
		return err
	}

	if b.fs1.Features().SlowHash || b.fs2.Features().SlowHash {
		b.opt.Compare.SlowHashDetected = true
	}
	if b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
		b.setHashType(ci)
	}

	if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected && b.opt.Resync {
		fs.Log(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set."))
		ci.CheckSum = false
		// note not setting b.opt.Compare.Checksum = false as we still want to build listings on the non-slow side, if any
	} else if b.opt.Compare.Checksum && !ci.CheckSum {
		fs.Log(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set."))
	}

	if b.opt.Compare.Modtime && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
		fs.Log(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead."))
	}

	if (ci.CheckSum || b.opt.Compare.Checksum) && b.opt.IgnoreListingChecksum {
		if (b.opt.Compare.HashType1 == hash.None || b.opt.Compare.HashType2 == hash.None) && !b.opt.Compare.DownloadHash {
			// Checksum compare is impossible: fall back to modtime+size.
			fs.Logf(nil, Color(terminal.YellowFg, `WARNING: Checksum compare was requested but at least one remote does not support checksums (or checksums are being ignored) and --ignore-listing-checksum is set. Ignoring Checksums globally and falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime). Path1 (%s): %s, Path2 (%s): %s`), b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String())
			b.opt.Compare.Modtime = true
			b.opt.Compare.Size = true
			ci.CheckSum = false
			b.opt.Compare.Checksum = false
		} else {
			fs.Log(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set"))
			// note: --checksum will still affect the internal sync calls
		}
	}
	if !ci.CheckSum && !b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
		fs.Infoc(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set."))
		b.opt.IgnoreListingChecksum = true
	}
	if !b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
		return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)"))
	}

	// notSupported warns about and disables flags bisync cannot honor.
	notSupported := func(label string, value bool, opt *bool) {
		if value {
			fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label)
			*opt = false
		}
	}
	notSupported("--update", ci.UpdateOlder, &ci.UpdateOlder)
	notSupported("--no-check-dest", ci.NoCheckDest, &ci.NoCheckDest)
	notSupported("--no-traverse", ci.NoTraverse, &ci.NoTraverse)
	// TODO: thorough search for other flags that should be on this list...

	prettyprint(b.opt.Compare, "Bisyncing with Comparison Settings", fs.LogLevelInfo)
	return nil
}

// returns true if the sizes are definitely different.
// returns false if equal, or if either is unknown.
func sizeDiffers(a, b int64) bool {
	if a < 0 || b < 0 {
		// negative size means size is unknown
		return false
	}
	return a != b
}

// returns true if the hashes are definitely different.
// returns false if equal, or if either is unknown.
func (b *bisyncRun) hashDiffers(stringA, stringB string, ht1, ht2 hash.Type, size1, size2 int64) bool {
	if stringA == "" || stringB == "" {
		// a blank hash when the Fs claims hash support (and the file is
		// non-empty) suggests stale listings, hence the --resync hint
		if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
			fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), stringA, stringB)
		}
		return false
	}
	if ht1 != ht2 {
		// different hash types are only comparable in the --download-hash
		// case where the missing side is computed as MD5
		if !(b.downloadHashOpt.downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
			fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
			return false
		}
	}
	return stringA != stringB
}

// chooses hash type, giving priority to types both sides have in common
func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
	b.downloadHashOpt.downloadHash = b.opt.Compare.DownloadHash
	if b.opt.Compare.NoSlowHash && b.opt.Compare.SlowHashDetected {
		fs.Infof(nil, "Not checking for common hash as at least one slow hash detected.")
	} else {
		common := b.fs1.Hashes().Overlap(b.fs2.Hashes())
		if common.Count() > 0 && common.GetOne() != hash.None {
			// found a shared hash type -- use it on both sides
			ht := common.GetOne()
			b.opt.Compare.HashType1 = ht
			b.opt.Compare.HashType2 = ht
			if !b.opt.Compare.SlowHashSyncOnly || !b.opt.Compare.SlowHashDetected {
				return
			}
		} else if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected {
			fs.Log(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common."))
			b.opt.Compare.SlowHashSyncOnly = false
			b.opt.Compare.NoSlowHash = true
			ci.CheckSum = false
		}
	}

	// no common hash type: fall back for sync, but may still use per-side
	// hashes for same-side delta detection below
	if !b.opt.Compare.DownloadHash && !b.opt.Compare.SlowHashSyncOnly {
		fs.Log(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)"))
		fs.Infof("Path1 hashes", "%v", b.fs1.Hashes().String())
		fs.Infof("Path2 hashes", "%v", b.fs2.Hashes().String())
		b.opt.Compare.Modtime = true
		b.opt.Compare.Size = true
		ci.CheckSum = false
	}
	if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs1.Features().SlowHash {
		fs.Infoc(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings"))
		b.opt.Compare.HashType1 = hash.None
	} else {
		b.opt.Compare.HashType1 = b.fs1.Hashes().GetOne()
		if b.opt.Compare.HashType1 != hash.None {
			fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1)
		}
	}
	if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs2.Features().SlowHash {
		fs.Infoc(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings"))
		b.opt.Compare.HashType2 = hash.None
	} else {
		b.opt.Compare.HashType2 = b.fs2.Hashes().GetOne()
		if b.opt.Compare.HashType2 != hash.None {
			fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2)
		}
	}
	if b.opt.Compare.HashType1 == hash.None && b.opt.Compare.HashType2 == hash.None && !b.opt.Compare.DownloadHash {
		fs.Log(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides."))
		b.opt.Compare.Checksum = false
		ci.CheckSum = false
		b.opt.IgnoreListingChecksum = true
	}
}

// returns true if the times are definitely different (by more than the modify window).
// returns false if equal, within modify window, or if either is unknown.
// considers precision per-Fs.
func timeDiffers(ctx context.Context, a, b time.Time, fsA, fsB fs.Info) bool {
	modifyWindow := fs.GetModifyWindow(ctx, fsA, fsB)
	if modifyWindow == fs.ModTimeNotSupported {
		return false
	}
	if a.IsZero() || b.IsZero() {
		fs.Logf(fsA, "Fs supports modtime, but modtime is missing")
		return false
	}
	dt := b.Sub(a)
	if dt < modifyWindow && dt > -modifyWindow {
		fs.Debugf(a, "modification time the same (differ by %s, within tolerance %s)", dt, modifyWindow)
		return false
	}
	fs.Debugf(a, "Modification times differ by %s: %v, %v", dt, a, b)
	return true
}

// setFromCompareFlag parses the comma-separated --compare flag value (size,
// modtime, checksum). Anything not mentioned in the flag is turned OFF, and
// the related global sync flags (CheckSum, IgnoreSize, UseServerModTime,
// SizeOnly) are adjusted to match. Returns an error for an unknown option.
func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
	if b.opt.CompareFlag == "" {
		return nil
	}
	var CompareFlag CompareOpt // for exclusions
	opts := strings.SplitSeq(b.opt.CompareFlag, ",")
	for opt := range opts {
		switch strings.ToLower(strings.TrimSpace(opt)) {
		case "size":
			b.opt.Compare.Size = true
			CompareFlag.Size = true
		case "modtime":
			b.opt.Compare.Modtime = true
			CompareFlag.Modtime = true
		case "checksum":
			b.opt.Compare.Checksum = true
			CompareFlag.Checksum = true
		default:
			return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt)
		}
	}

	// exclusions (override defaults, only if --compare != "")
	if !CompareFlag.Size {
		b.opt.Compare.Size = false
	}
	if !CompareFlag.Modtime {
		b.opt.Compare.Modtime = false
	}
	if !CompareFlag.Checksum {
		b.opt.Compare.Checksum = false
	}

	// override sync flags to match
	ci := fs.GetConfig(ctx)
	if b.opt.Compare.Checksum {
		ci.CheckSum = true
	}
	if b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
		ci.CheckSum = false
	}
	if !b.opt.Compare.Size {
		ci.IgnoreSize = true
	}
	if !b.opt.Compare.Modtime {
		ci.UseServerModTime = true
	}
	if b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
		ci.SizeOnly = true
	}
	return nil
}

// b.downloadHashOpt.downloadHash is true if we should attempt to compute hash by downloading when otherwise unavailable
type downloadHashOpt struct {
	downloadHash      bool
	downloadHashWarn  mutex.Once // warn only once about unknown-length files
	firstDownloadHash mutex.Once // announce "Downloading hashes..." only once
}

// tryDownloadHash returns hashVal unchanged when it is already set or
// --download-hash is off; otherwise it downloads the object and computes an
// MD5 sum for it, accounting the work as a checking transfer. Objects of
// unknown length are skipped (hash.ErrUnsupported).
func (b *bisyncRun) tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string, error) {
	if hashVal != "" || !b.downloadHashOpt.downloadHash {
		return hashVal, nil
	}
	obj, ok := o.(fs.Object)
	if !ok {
		fs.Infof(o, "failed to download hash -- not an fs.Object")
		return hashVal, fs.ErrorObjectNotFound
	}
	if o.Size() < 0 {
		b.downloadHashOpt.downloadHashWarn.Do(func() {
			fs.Log(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
		})
		fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
		return hashVal, hash.ErrUnsupported
	}

	b.downloadHashOpt.firstDownloadHash.Do(func() {
		fs.Infoc(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
	})
	tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")
	defer func() {
		tr.Done(ctx, nil)
	}()

	sum, err := operations.HashSum(ctx, hash.MD5, false, true, obj)
	if err != nil {
		fs.Infof(o, "DownloadHash -- hash: %v, err: %v", sum, err)
	} else {
		fs.Debugf(o, "DownloadHash -- hash: %v", sum)
	}
	return sum, err
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/resolve.go
cmd/bisync/resolve.go
package bisync import ( "context" "fmt" "math" "strings" "time" "github.com/rclone/rclone/cmd/bisync/bilib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/terminal" "github.com/rclone/rclone/lib/transform" ) // Prefer describes strategies for resolving sync conflicts type Prefer = fs.Enum[preferChoices] // Supported --conflict-resolve strategies const ( PreferNone Prefer = iota PreferPath1 PreferPath2 PreferNewer PreferOlder PreferLarger PreferSmaller ) type preferChoices struct{} func (preferChoices) Choices() []string { return []string{ PreferNone: "none", PreferNewer: "newer", PreferOlder: "older", PreferLarger: "larger", PreferSmaller: "smaller", PreferPath1: "path1", PreferPath2: "path2", } } func (preferChoices) Type() string { return "string" } // ConflictResolveList is a list of --conflict-resolve flag choices used in the help var ConflictResolveList = Opt.ConflictResolve.Help() // ConflictLoserAction describes possible actions to take on the loser of a sync conflict type ConflictLoserAction = fs.Enum[conflictLoserChoices] // Supported --conflict-loser actions const ( ConflictLoserSkip ConflictLoserAction = iota // Reserved as zero but currently unused ConflictLoserNumber // file.conflict1, file.conflict2, file.conflict3, etc. 
ConflictLoserPathname // file.path1, file.path2 ConflictLoserDelete // delete the loser, keep winner only ) type conflictLoserChoices struct{} func (conflictLoserChoices) Choices() []string { return []string{ ConflictLoserNumber: "num", ConflictLoserPathname: "pathname", ConflictLoserDelete: "delete", } } func (conflictLoserChoices) Type() string { return "ConflictLoserAction" } // ConflictLoserList is a list of --conflict-loser flag choices used in the help var ConflictLoserList = Opt.ConflictLoser.Help() func (b *bisyncRun) setResolveDefaults() error { if b.opt.ConflictLoser == ConflictLoserSkip { b.opt.ConflictLoser = ConflictLoserNumber } if b.opt.ConflictSuffixFlag == "" { b.opt.ConflictSuffixFlag = "conflict" } suffixes := strings.Split(b.opt.ConflictSuffixFlag, ",") if len(suffixes) == 1 { b.opt.ConflictSuffix1 = suffixes[0] b.opt.ConflictSuffix2 = suffixes[0] } else if len(suffixes) == 2 { b.opt.ConflictSuffix1 = suffixes[0] b.opt.ConflictSuffix2 = suffixes[1] } else { return fmt.Errorf("--conflict-suffix cannot have more than 2 comma-separated values. Received %v: %v", len(suffixes), suffixes) } // replace glob variables, if any t := time.Now() // capture static time here so it is the same for all files throughout this run b.opt.ConflictSuffix1 = transform.AppyTimeGlobs(b.opt.ConflictSuffix1, t) b.opt.ConflictSuffix2 = transform.AppyTimeGlobs(b.opt.ConflictSuffix2, t) // append dot (intentionally allow more than one) b.opt.ConflictSuffix1 = "." + b.opt.ConflictSuffix1 b.opt.ConflictSuffix2 = "." 
+ b.opt.ConflictSuffix2 // checks and warnings if (b.opt.ConflictResolve == PreferNewer || b.opt.ConflictResolve == PreferOlder) && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) { fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --conflict-resolve %s as at least one remote does not support modtimes."), b.opt.ConflictResolve.String()) b.opt.ConflictResolve = PreferNone } else if (b.opt.ConflictResolve == PreferNewer || b.opt.ConflictResolve == PreferOlder) && !b.opt.Compare.Modtime { fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --conflict-resolve %s as --compare does not include modtime."), b.opt.ConflictResolve.String()) b.opt.ConflictResolve = PreferNone } if (b.opt.ConflictResolve == PreferLarger || b.opt.ConflictResolve == PreferSmaller) && !b.opt.Compare.Size { fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --conflict-resolve %s as --compare does not include size."), b.opt.ConflictResolve.String()) b.opt.ConflictResolve = PreferNone } return nil } type ( renames map[string]renamesInfo // [originalName]newName (remember the originalName may have an alias) // the newName may be the same as the old name (if winner), but should not be blank, unless we're deleting. 
// the oldNames may not match each other, if we're normalizing case or unicode // all names should be "remotes" (relative names, without base path) renamesInfo struct { path1 namePair path2 namePair } ) type namePair struct { oldName string newName string } func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias string, renameSkipped, copy1to2, copy2to1 *bilib.Names, ds1, ds2 *deltaSet) (err error) { winningPath := 0 if b.opt.ConflictResolve != PreferNone { winningPath = b.conflictWinner(ds1, ds2, file, alias) if winningPath > 0 { fs.Infof(file, Color(terminal.GreenFg, "The winner is: Path%d"), winningPath) } else { fs.Infoc(file, Color(terminal.RedFg, "A winner could not be determined.")) } } suff1 := b.opt.ConflictSuffix1 // copy to new var to make sure our changes here don't persist suff2 := b.opt.ConflictSuffix2 if b.opt.ConflictLoser == ConflictLoserPathname && b.opt.ConflictSuffix1 == b.opt.ConflictSuffix2 { // numerate, but not if user supplied two different suffixes suff1 += "1" suff2 += "2" } r := renamesInfo{ path1: namePair{ oldName: file, newName: SuffixName(ctxMove, file, suff1), }, path2: namePair{ oldName: alias, newName: SuffixName(ctxMove, alias, suff2), }, } // handle auto-numbering // note that we still queue copies for both files, whether or not we renamed // we also set these for ConflictLoserDelete in case there is no winner. 
if b.opt.ConflictLoser == ConflictLoserNumber || b.opt.ConflictLoser == ConflictLoserDelete { num := b.numerate(ctxMove, 1, file, alias) switch winningPath { case 1: // keep path1, rename path2 r.path1.newName = r.path1.oldName r.path2.newName = SuffixName(ctxMove, r.path2.oldName, b.opt.ConflictSuffix2+fmt.Sprint(num)) case 2: // keep path2, rename path1 r.path1.newName = SuffixName(ctxMove, r.path1.oldName, b.opt.ConflictSuffix1+fmt.Sprint(num)) r.path2.newName = r.path2.oldName default: // no winner, so rename both to different numbers (unless suffixes are already different) if b.opt.ConflictSuffix1 == b.opt.ConflictSuffix2 { r.path1.newName = SuffixName(ctxMove, r.path1.oldName, b.opt.ConflictSuffix1+fmt.Sprint(num)) // let's just make sure num + 1 is available... num2 := b.numerate(ctxMove, num+1, file, alias) r.path2.newName = SuffixName(ctxMove, r.path2.oldName, b.opt.ConflictSuffix2+fmt.Sprint(num2)) } else { // suffixes are different, so numerate independently num = b.numerateSingle(ctxMove, 1, file, alias, 1) r.path1.newName = SuffixName(ctxMove, r.path1.oldName, b.opt.ConflictSuffix1+fmt.Sprint(num)) num = b.numerateSingle(ctxMove, 1, file, alias, 2) r.path2.newName = SuffixName(ctxMove, r.path2.oldName, b.opt.ConflictSuffix2+fmt.Sprint(num)) } } } // when winningPath == 0 (no winner), we ignore settings and rename both, do not delete // note also that deletes and renames are mutually exclusive -- we never delete one path and rename the other. 
if b.opt.ConflictLoser == ConflictLoserDelete && winningPath == 1 { // delete 2, copy 1 to 2 err = b.delete(ctxMove, r.path2, path2, b.fs2, 2, renameSkipped) if err != nil { return err } r.path2.newName = "" // copy the one that wasn't deleted b.indent("Path1", r.path1.oldName, "Queue copy to Path2") copy1to2.Add(r.path1.oldName) } else if b.opt.ConflictLoser == ConflictLoserDelete && winningPath == 2 { // delete 1, copy 2 to 1 err = b.delete(ctxMove, r.path1, path1, b.fs1, 1, renameSkipped) if err != nil { return err } r.path1.newName = "" // copy the one that wasn't deleted b.indent("Path2", r.path2.oldName, "Queue copy to Path1") copy2to1.Add(r.path2.oldName) } else { err = b.rename(ctxMove, r.path1, path1, path2, b.fs1, 1, 2, winningPath, copy1to2, renameSkipped) if err != nil { return err } err = b.rename(ctxMove, r.path2, path2, path1, b.fs2, 2, 1, winningPath, copy2to1, renameSkipped) if err != nil { return err } } b.renames[r.path1.oldName] = r // note map index is path1's oldName, which may be different from path2 if aliases return nil } // SuffixName adds the current --conflict-suffix to the remote, obeying // --suffix-keep-extension if set // It is a close cousin of operations.SuffixName, but we don't want to // use ci.Suffix for this because it might be used for --backup-dir. 
func SuffixName(ctx context.Context, remote, suffix string) string { if suffix == "" { return remote } ci := fs.GetConfig(ctx) if ci.SuffixKeepExtension { return transform.SuffixKeepExtension(remote, suffix) } return remote + suffix } // NotEmpty checks whether set is not empty func (r renames) NotEmpty() bool { return len(r) > 0 } func (ri *renamesInfo) getNames(is1to2 bool) (srcOldName, srcNewName, dstOldName, dstNewName string) { if is1to2 { return ri.path1.oldName, ri.path1.newName, ri.path2.oldName, ri.path2.newName } return ri.path2.oldName, ri.path2.newName, ri.path1.oldName, ri.path1.newName } // work out the lowest number that neither side has, return it for suffix func (b *bisyncRun) numerate(ctx context.Context, startnum int, file, alias string) int { for i := startnum; i < math.MaxInt; i++ { iStr := fmt.Sprint(i) if !b.march.ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) && !b.march.ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) && !b.march.ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) && !b.march.ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) { // make sure it still holds true with suffixes switched (it should) if !b.march.ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) && !b.march.ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) && !b.march.ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) && !b.march.ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) { fs.Debugf(file, "The first available suffix is: %s", iStr) return i } } } return 0 // not really possible, as no one has 9223372036854775807 conflicts, and if they do, they have bigger problems } // like numerate, but consider only one side's suffix (for when suffixes are different) func (b *bisyncRun) numerateSingle(ctx context.Context, startnum int, file, alias string, path int) int { lsA, lsB := b.march.ls1, b.march.ls2 suffix := b.opt.ConflictSuffix1 if path == 2 { lsA, lsB = b.march.ls2, b.march.ls1 suffix = 
b.opt.ConflictSuffix2 } for i := startnum; i < math.MaxInt; i++ { iStr := fmt.Sprint(i) if !lsA.has(SuffixName(ctx, file, suffix+iStr)) && !lsA.has(SuffixName(ctx, alias, suffix+iStr)) && !lsB.has(SuffixName(ctx, file, suffix+iStr)) && !lsB.has(SuffixName(ctx, alias, suffix+iStr)) { fs.Debugf(file, "The first available suffix is: %s", iStr) return i } } return 0 // not really possible, as no one has 9223372036854775807 conflicts, and if they do, they have bigger problems } func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum, winningPath int, q, renameSkipped *bilib.Names) (err error) { if winningPath == thisPathNum { b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.newName, fmt.Sprintf("Not renaming Path%d copy, as it was determined the winner", thisPathNum)) } else { skip := operations.SkipDestructive(ctx, thisNamePair.oldName, "rename") if !skip { b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.newName, fmt.Sprintf("Renaming Path%d copy", thisPathNum)) ctx = b.setBackupDir(ctx, thisPathNum) // in case already a file with new name if err = operations.MoveFile(ctx, thisFs, thisFs, thisNamePair.newName, thisNamePair.oldName); err != nil { err = fmt.Errorf("%s rename failed for %s: %w", thisPath, thisPath+thisNamePair.oldName, err) b.critical = true return err } } else { renameSkipped.Add(thisNamePair.oldName) // (due to dry-run, not equality) } } b.indent(fmt.Sprintf("!Path%d", thisPathNum), thatPath+thisNamePair.newName, fmt.Sprintf("Queue copy to Path%d", thatPathNum)) q.Add(thisNamePair.newName) return nil } func (b *bisyncRun) delete(ctx context.Context, thisNamePair namePair, thisPath string, thisFs fs.Fs, thisPathNum int, renameSkipped *bilib.Names) (err error) { skip := operations.SkipDestructive(ctx, thisNamePair.oldName, "delete") if !skip { b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.oldName, 
fmt.Sprintf("Deleting Path%d copy", thisPathNum)) ctx = b.setBackupDir(ctx, thisPathNum) ci := fs.GetConfig(ctx) var backupDir fs.Fs if ci.BackupDir != "" { backupDir, err = operations.BackupDir(ctx, thisFs, thisFs, thisNamePair.oldName) if err != nil { b.critical = true return err } } obj, err := thisFs.NewObject(ctx, thisNamePair.oldName) if err != nil { b.critical = true return err } if err = operations.DeleteFileWithBackupDir(ctx, obj, backupDir); err != nil { err = fmt.Errorf("%s delete failed for %s: %w", thisPath, thisPath+thisNamePair.oldName, err) b.critical = true return err } } else { renameSkipped.Add(thisNamePair.oldName) // (due to dry-run, not equality) } return nil } func (b *bisyncRun) conflictWinner(ds1, ds2 *deltaSet, remote1, remote2 string) int { switch b.opt.ConflictResolve { case PreferPath1: return 1 case PreferPath2: return 2 case PreferNewer, PreferOlder: t1, t2 := ds1.time[remote1], ds2.time[remote2] return b.resolveNewerOlder(t1, t2, remote1, b.opt.ConflictResolve) case PreferLarger, PreferSmaller: s1, s2 := ds1.size[remote1], ds2.size[remote2] return b.resolveLargerSmaller(s1, s2, remote1, b.opt.ConflictResolve) default: return 0 } } // returns the winning path number, or 0 if winner can't be determined func (b *bisyncRun) resolveNewerOlder(t1, t2 time.Time, remote1 string, prefer Prefer) int { if fs.GetModifyWindow(b.octx, b.fs1, b.fs2) == fs.ModTimeNotSupported { fs.Infof(remote1, "Winner cannot be determined as at least one path lacks modtime support.") return 0 } if t1.IsZero() || t2.IsZero() { fs.Infof(remote1, "Winner cannot be determined as at least one modtime is missing. Path1: %v, Path2: %v", t1, t2) return 0 } if t1.After(t2) { if prefer == PreferNewer { fs.Infof(remote1, "Path1 is newer. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t1.Sub(t2)) return 1 } else if prefer == PreferOlder { fs.Infof(remote1, "Path2 is older. 
Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t1.Sub(t2)) return 2 } } else if t1.Before(t2) { if prefer == PreferNewer { fs.Infof(remote1, "Path2 is newer. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t2.Sub(t1)) return 2 } else if prefer == PreferOlder { fs.Infof(remote1, "Path1 is older. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t2.Sub(t1)) return 1 } } if t1.Equal(t2) { fs.Infof(remote1, "Winner cannot be determined as times are equal. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t2.Sub(t1)) return 0 } fs.Errorf(remote1, "Winner cannot be determined. Path1: %v, Path2: %v", t1.In(LogTZ), t2.In(LogTZ)) // shouldn't happen unless prefer is of wrong type return 0 } // returns the winning path number, or 0 if winner can't be determined func (b *bisyncRun) resolveLargerSmaller(s1, s2 int64, remote1 string, prefer Prefer) int { if s1 < 0 || s2 < 0 { fs.Infof(remote1, "Winner cannot be determined as at least one size is unknown. Path1: %v, Path2: %v", s1, s2) return 0 } if s1 > s2 { if prefer == PreferLarger { fs.Infof(remote1, "Path1 is larger. Path1: %v, Path2: %v, Difference: %v", s1, s2, s1-s2) return 1 } else if prefer == PreferSmaller { fs.Infof(remote1, "Path2 is smaller. Path1: %v, Path2: %v, Difference: %v", s1, s2, s1-s2) return 2 } } else if s1 < s2 { if prefer == PreferLarger { fs.Infof(remote1, "Path2 is larger. Path1: %v, Path2: %v, Difference: %v", s1, s2, s2-s1) return 2 } else if prefer == PreferSmaller { fs.Infof(remote1, "Path1 is smaller. Path1: %v, Path2: %v, Difference: %v", s1, s2, s2-s1) return 1 } } if s1 == s2 { fs.Infof(remote1, "Winner cannot be determined as sizes are equal. Path1: %v, Path2: %v, Difference: %v", s1, s2, s1-s2) return 0 } fs.Errorf(remote1, "Winner cannot be determined. Path1: %v, Path2: %v", s1, s2) // shouldn't happen unless prefer is of wrong type return 0 }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/log.go
cmd/bisync/log.go
package bisync import ( "encoding/json" "fmt" "runtime" "strconv" "strings" "sync" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/terminal" ) func (b *bisyncRun) indentf(tag, file, format string, args ...any) { b.indent(tag, file, fmt.Sprintf(format, args...)) } func (b *bisyncRun) indent(tag, file, msg string) { logf := fs.Infof switch { case tag == "ERROR": tag = "" logf = fs.Errorf case tag == "INFO": tag = "" case strings.HasPrefix(tag, "!"): tag = tag[1:] logf = fs.Logf } if b.opt.DryRun { logf = fs.Logf } if tag == "Path1" { tag = Color(terminal.CyanFg, "Path1") } else { tag = Color(terminal.BlueFg, tag) } msg = Color(terminal.MagentaFg, msg) msg = strings.ReplaceAll(msg, "Queue copy to", Color(terminal.GreenFg, "Queue copy to")) msg = strings.ReplaceAll(msg, "Queue delete", Color(terminal.RedFg, "Queue delete")) file = Color(terminal.CyanFg, escapePath(file, false)) logf(nil, "- %-18s%-43s - %s", tag, msg, file) } // escapePath will escape control characters in path. // It won't quote just due to backslashes on Windows. 
func escapePath(path string, forceQuotes bool) string { path = encode(path) test := path if runtime.GOOS == "windows" { test = strings.ReplaceAll(path, "\\", "/") } if strconv.Quote(test) != `"`+test+`"` { return strconv.Quote(path) } if forceQuotes { return `"` + path + `"` } return path } func quotePath(path string) string { return escapePath(path, true) } // Colors controls whether terminal colors are enabled var ( Colors bool ColorsLock sync.Mutex ) // Color handles terminal colors for bisync func Color(style string, s string) string { ColorsLock.Lock() defer ColorsLock.Unlock() if !Colors { return s } terminal.Start() return style + s + terminal.Reset } // ColorX handles terminal colors for bisync func ColorX(style string, s string) string { ColorsLock.Lock() defer ColorsLock.Unlock() if !Colors { return s } terminal.Start() return style + s + terminal.Reset } func encode(s string) string { return encoder.OS.ToStandardPath(encoder.OS.FromStandardPath(s)) } // prettyprint formats JSON for improved readability in debug logs func prettyprint(in any, label string, level fs.LogLevel) { inBytes, err := json.MarshalIndent(in, "", "\t") if err != nil { fs.Debugf(nil, "failed to marshal input: %v", err) } if level == fs.LogLevelDebug { fs.Debugf(nil, "%s: \n%s\n", label, string(inBytes)) } else if level == fs.LogLevelInfo { fs.Infof(nil, "%s: \n%s\n", label, string(inBytes)) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/cmd.go
cmd/bisync/cmd.go
// Package bisync implements bisync // Copyright (c) 2017-2020 Chris Nelson package bisync import ( "context" "crypto/md5" "encoding/hex" "errors" "fmt" "io" "os" "path/filepath" "strings" "time" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/bisync/bilib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/hash" "github.com/spf13/cobra" ) // TestFunc allows mocking errors during tests type TestFunc func() // Options keep bisync options type Options struct { Resync bool // whether or not this is a resync ResyncMode Prefer // which mode to use for resync CheckAccess bool CheckFilename string CheckSync CheckSyncMode CreateEmptySrcDirs bool RemoveEmptyDirs bool MaxDelete int // percentage from 0 to 100 Force bool FiltersFile string Workdir string OrigBackupDir string BackupDir1 string BackupDir2 string DryRun bool NoCleanup bool SaveQueues bool // save extra debugging files (test only flag) IgnoreListingChecksum bool Resilient bool Recover bool TestFn TestFunc // test-only option, for mocking errors Compare CompareOpt CompareFlag string DebugName string MaxLock fs.Duration ConflictResolve Prefer ConflictLoser ConflictLoserAction ConflictSuffixFlag string ConflictSuffix1 string ConflictSuffix2 string } // Default values const ( DefaultMaxDelete int = 50 DefaultCheckFilename string = "RCLONE_TEST" ) // DefaultWorkdir is default working directory var DefaultWorkdir = filepath.Join(config.GetCacheDir(), "bisync") // CheckSyncMode controls when to compare final listings type CheckSyncMode int // CheckSync modes const ( CheckSyncTrue CheckSyncMode = iota // Compare final listings (default) CheckSyncFalse // Disable comparison of final listings CheckSyncOnly // Only compare listings from the last run, do not sync ) func (x CheckSyncMode) String() string { switch x { case CheckSyncTrue: return "true" 
case CheckSyncFalse: return "false" case CheckSyncOnly: return "only" } return "unknown" } // Set a CheckSync mode from a string func (x *CheckSyncMode) Set(s string) error { switch strings.ToLower(s) { case "true": *x = CheckSyncTrue case "false": *x = CheckSyncFalse case "only": *x = CheckSyncOnly default: return fmt.Errorf("unknown check-sync mode for bisync: %q", s) } return nil } // Type of the CheckSync value func (x *CheckSyncMode) Type() string { return "string" } // Opt keeps command line options // internal functions should use b.opt instead var Opt Options func init() { Opt.MaxLock = 0 cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() // when adding new flags, remember to also update the rc params: // cmd/bisync/rc.go cmd/bisync/help.go (not docs/content/rc.md) // and the Command line syntax section of docs/content/bisync.md (it doesn't update automatically) flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Equivalent to --resync-mode path1. Consider using --verbose or --dry-run first.", "") flags.FVarP(cmdFlags, &Opt.ResyncMode, "resync-mode", "", "During resync, prefer the version that is: path1, path2, newer, older, larger, smaller (default: path1 if --resync, otherwise none for no resync.)", "") flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."), "") flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"), "") flags.BoolVarP(cmdFlags, &Opt.Force, "force", "", Opt.Force, "Bypass --max-delete safety check and run the sync. 
Consider using with --verbose", "") flags.FVarP(cmdFlags, &Opt.CheckSync, "check-sync", "", "Controls comparison of final listings: true|false|only (default: true)", "") flags.BoolVarP(cmdFlags, &Opt.CreateEmptySrcDirs, "create-empty-src-dirs", "", Opt.CreateEmptySrcDirs, "Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)", "") flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove ALL empty directories at the final cleanup step.", "") flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file", "") flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"), "") flags.StringVarP(cmdFlags, &Opt.BackupDir1, "backup-dir1", "", Opt.BackupDir1, "--backup-dir for Path1. Must be a non-overlapping path on the same remote.", "") flags.StringVarP(cmdFlags, &Opt.BackupDir2, "backup-dir2", "", Opt.BackupDir2, "--backup-dir for Path2. 
Must be a non-overlapping path on the same remote.", "") flags.StringVarP(cmdFlags, &Opt.DebugName, "debugname", "", Opt.DebugName, "Debug by tracking one file at various points throughout a bisync run (when -v or -vv)", "") flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "") flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "") flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "") flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync.", "") flags.BoolVarP(cmdFlags, &Opt.Recover, "recover", "", Opt.Recover, "Automatically recover from interruptions without requiring --resync.", "") flags.StringVarP(cmdFlags, &Opt.CompareFlag, "compare", "", Opt.CompareFlag, "Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')", "") flags.BoolVarP(cmdFlags, &Opt.Compare.NoSlowHash, "no-slow-hash", "", Opt.Compare.NoSlowHash, "Ignore listing checksums only on backends where they are slow", "") flags.BoolVarP(cmdFlags, &Opt.Compare.SlowHashSyncOnly, "slow-hash-sync-only", "", Opt.Compare.SlowHashSyncOnly, "Ignore slow checksums for listings and deltas, but still consider them during sync calls.", "") flags.BoolVarP(cmdFlags, &Opt.Compare.DownloadHash, "download-hash", "", Opt.Compare.DownloadHash, "Compute hash by downloading when otherwise unavailable. 
(warning: may be slow and use lots of data!)", "") flags.FVarP(cmdFlags, &Opt.MaxLock, "max-lock", "", "Consider lock files older than this to be expired (default: 0 (never expire)) (minimum: 2m)", "") flags.FVarP(cmdFlags, &Opt.ConflictResolve, "conflict-resolve", "", "Automatically resolve conflicts by preferring the version that is: "+ConflictResolveList+" (default: none)", "") flags.FVarP(cmdFlags, &Opt.ConflictLoser, "conflict-loser", "", "Action to take on the loser of a sync conflict (when there is a winner) or on both files (when there is no winner): "+ConflictLoserList+" (default: num)", "") flags.StringVarP(cmdFlags, &Opt.ConflictSuffixFlag, "conflict-suffix", "", Opt.ConflictSuffixFlag, "Suffix to use when renaming a --conflict-loser. Can be either one string or two comma-separated strings to assign different suffixes to Path1/Path2. (default: 'conflict')", "") _ = cmdFlags.MarkHidden("debugname") _ = cmdFlags.MarkHidden("localtime") } // bisync command definition var commandDefinition = &cobra.Command{ Use: "bisync remote1:path1 remote2:path2", Short: shortHelp, Long: longHelp, Annotations: map[string]string{ "versionIntroduced": "v1.58", "groups": "Filter,Copy,Important", }, RunE: func(command *cobra.Command, args []string) error { // NOTE: avoid putting too much handling here, as it won't apply to the rc. 
// Generally it's best to put init-type stuff in Bisync() (operations.go) cmd.CheckArgs(2, 2, command, args) fs1, file1, fs2, file2 := cmd.NewFsSrcDstFiles(args) if file1 != "" || file2 != "" { return errors.New("paths must be existing directories") } ctx := context.Background() opt := Opt opt.applyContext(ctx) if tzLocal { TZ = time.Local } commonHashes := fs1.Hashes().Overlap(fs2.Hashes()) isDropbox1 := strings.HasPrefix(fs1.String(), "Dropbox") isDropbox2 := strings.HasPrefix(fs2.String(), "Dropbox") if commonHashes == hash.Set(0) && (isDropbox1 || isDropbox2) { ci := fs.GetConfig(ctx) if !ci.DryRun && !ci.RefreshTimes { fs.Debugf(nil, "Using flag --refresh-times is recommended") } } cmd.Run(false, true, command, func() error { err := Bisync(ctx, fs1, fs2, &opt) if err == ErrBisyncAborted { return fserrors.FatalError(err) } return err }) return nil }, } func (opt *Options) applyContext(ctx context.Context) { maxDelete := DefaultMaxDelete ci := fs.GetConfig(ctx) if ci.MaxDelete >= 0 { maxDelete = int(ci.MaxDelete) } if maxDelete < 0 { maxDelete = 0 } if maxDelete > 100 { maxDelete = 100 } opt.MaxDelete = maxDelete // reset MaxDelete for fs/operations, bisync handles this parameter specially ci.MaxDelete = -1 opt.DryRun = ci.DryRun } func (opt *Options) setDryRun(ctx context.Context) context.Context { ctxNew, ci := fs.AddConfig(ctx) ci.DryRun = opt.DryRun return ctxNew } func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) { filtersFile := opt.FiltersFile if filtersFile == "" { return ctx, nil } f, err := os.Open(filtersFile) if err != nil { return ctx, fmt.Errorf("specified filters file does not exist: %s", filtersFile) } fs.Infof(nil, "Using filters file %s", filtersFile) hasher := md5.New() if _, err := io.Copy(hasher, f); err != nil { _ = f.Close() return ctx, err } gotHash := hex.EncodeToString(hasher.Sum(nil)) _ = f.Close() hashFile := filtersFile + ".md5" wantHash, err := os.ReadFile(hashFile) if err != nil && !opt.Resync { return 
ctx, fmt.Errorf("filters file md5 hash not found (must run --resync): %s", filtersFile) } if gotHash != string(wantHash) && !opt.Resync { return ctx, fmt.Errorf("filters file has changed (must run --resync): %s", filtersFile) } if opt.Resync { if opt.DryRun { fs.Infof(nil, "Skipped storing filters file hash to %s as --dry-run is set", hashFile) } else { fs.Infof(nil, "Storing filters file hash to %s", hashFile) if err := os.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil { return ctx, err } } } // Prepend our filter file first in the list filterOpt := filter.GetConfig(ctx).Opt filterOpt.FilterFrom = append([]string{filtersFile}, filterOpt.FilterFrom...) newFilter, err := filter.NewFilter(&filterOpt) if err != nil { return ctx, fmt.Errorf("invalid filters file: %s: %w", filtersFile, err) } return filter.ReplaceConfig(ctx, newFilter), nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/operations.go
cmd/bisync/operations.go
// Package bisync implements bisync // Copyright (c) 2017-2020 Chris Nelson // Contributions to original python version: Hildo G. Jr., e2t, kalemas, silenceleaf package bisync import ( "context" "errors" "fmt" "os" "path/filepath" "runtime" "strings" gosync "sync" "time" "github.com/rclone/rclone/cmd/bisync/bilib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/terminal" ) // ErrBisyncAborted signals that bisync is aborted and forces non-zero exit code var ErrBisyncAborted = errors.New("bisync aborted") // bisyncRun keeps bisync runtime state type bisyncRun struct { fs1 fs.Fs fs2 fs.Fs abort bool critical bool retryable bool basePath string workDir string listing1 string listing2 string newListing1 string newListing2 string aliases bilib.AliasMap opt *Options octx context.Context fctx context.Context InGracefulShutdown bool CleanupCompleted bool SyncCI *fs.ConfigInfo CancelSync context.CancelFunc DebugName string lockFile string renames renames resyncIs1to2 bool march bisyncMarch check bisyncCheck queueOpt bisyncQueueOpt downloadHashOpt downloadHashOpt lockFileOpt lockFileOpt } type queues struct { copy1to2 bilib.Names copy2to1 bilib.Names renameSkipped bilib.Names // not renamed because it was equal skippedDirs1 *fileList skippedDirs2 *fileList deletedonboth bilib.Names } // Bisync handles lock file, performs bisync run and checks exit status func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) { opt := *optArg // ensure that input is never changed b := &bisyncRun{ fs1: fs1, fs2: fs2, opt: &opt, DebugName: opt.DebugName, } if opt.CheckFilename == "" { opt.CheckFilename = DefaultCheckFilename } if opt.Workdir == "" { opt.Workdir = DefaultWorkdir } ci := fs.GetConfig(ctx) opt.OrigBackupDir = ci.BackupDir if ci.TerminalColorMode == fs.TerminalColorModeAlways || 
(ci.TerminalColorMode == fs.TerminalColorModeAuto && !log.Redirected()) { ColorsLock.Lock() Colors = true ColorsLock.Unlock() } err = b.setCompareDefaults(ctx) if err != nil { return err } b.setResyncDefaults() err = b.setResolveDefaults() if err != nil { return err } if b.workDir, err = filepath.Abs(opt.Workdir); err != nil { return fmt.Errorf("failed to make workdir absolute: %w", err) } if err = os.MkdirAll(b.workDir, os.ModePerm); err != nil { return fmt.Errorf("failed to create workdir: %w", err) } // Produce a unique name for the sync operation b.basePath = bilib.BasePath(ctx, b.workDir, b.fs1, b.fs2) b.listing1 = b.basePath + ".path1.lst" b.listing2 = b.basePath + ".path2.lst" b.newListing1 = b.listing1 + "-new" b.newListing2 = b.listing2 + "-new" b.aliases = bilib.AliasMap{} err = b.checkSyntax() if err != nil { return err } // Handle lock file err = b.setLockFile() if err != nil { return err } b.queueOpt.logger = operations.NewLoggerOpt() // Handle SIGINT var finaliseOnce gosync.Once finalise := func() { finaliseOnce.Do(func() { if atexit.Signalled() { if b.opt.Resync { fs.Log(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)")) } else { fs.Log(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)")) b.InGracefulShutdown = true if b.SyncCI != nil { fs.Infoc(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early.")) b.SyncCI.MaxTransfer = 1 b.SyncCI.MaxDuration = fs.Duration(1 * time.Second) b.SyncCI.CutoffMode = fs.CutoffModeSoft gracePeriod := 30 * time.Second // TODO: flag to customize this? 
if !waitFor("Canceling Sync if not done in", gracePeriod, func() bool { return b.CleanupCompleted }) { fs.Log(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up")) b.CancelSync() waitFor("Aborting Bisync if not done in", 60*time.Second, func() bool { return b.CleanupCompleted }) } } else { // we haven't started to sync yet, so we're good. // no need to worry about the listing files, as we haven't overwritten them yet. b.CleanupCompleted = true fs.Log(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) } } if !b.CleanupCompleted { if !b.opt.Resync { fs.Log(nil, Color(terminal.HiRedFg, "Graceful shutdown failed.")) fs.Log(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover.")) } markFailed(b.listing1) markFailed(b.listing2) } err = b.removeLockFile() } }) } fnHandle := atexit.Register(finalise) defer atexit.Unregister(fnHandle) // run bisync err = b.runLocked(ctx) removeLockErr := b.removeLockFile() if err == nil { err = removeLockErr } b.CleanupCompleted = true if b.InGracefulShutdown { if err == context.Canceled || err == accounting.ErrorMaxTransferLimitReachedGraceful { err = nil b.critical = false } if err == nil { fs.Log(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) } } if b.critical { if b.retryable && b.opt.Resilient { fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err) fs.Error(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode.")) } else { if bilib.FileExists(b.listing1) { _ = os.Rename(b.listing1, b.listing1+"-err") } if bilib.FileExists(b.listing2) { _ = os.Rename(b.listing2, b.listing2+"-err") } fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err) fs.Error(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover.")) } return ErrBisyncAborted } if b.abort && !b.InGracefulShutdown { fs.Log(nil, Color(terminal.RedFg, "Bisync aborted. 
Please try again.")) } if err == nil { fs.Infoc(nil, Color(terminal.GreenFg, "Bisync successful")) } return err } // runLocked performs a full bisync run func (b *bisyncRun) runLocked(octx context.Context) (err error) { opt := b.opt path1 := bilib.FsPath(b.fs1) path2 := bilib.FsPath(b.fs2) if opt.CheckSync == CheckSyncOnly { fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2)) if err = b.checkSync(b.listing1, b.listing2); err != nil { b.critical = true b.retryable = true } return err } fs.Infof(nil, "Synching Path1 %s with Path2 %s", quotePath(path1), quotePath(path2)) if opt.DryRun { // In --dry-run mode, preserve original listings and save updates to the .lst-dry files origListing1 := b.listing1 origListing2 := b.listing2 b.listing1 += "-dry" b.listing2 += "-dry" b.newListing1 = b.listing1 + "-new" b.newListing2 = b.listing2 + "-new" if err := bilib.CopyFileIfExists(origListing1, b.listing1); err != nil { return err } if err := bilib.CopyFileIfExists(origListing2, b.listing2); err != nil { return err } } // Create second context with filters var fctx context.Context if fctx, err = b.opt.applyFilters(octx); err != nil { b.critical = true b.retryable = true return } b.octx = octx b.fctx = fctx // overlapping paths check err = b.overlappingPathsCheck(fctx, b.fs1, b.fs2) if err != nil { b.critical = true b.retryable = true return err } // Generate Path1 and Path2 listings and copy any unique Path2 files to Path1 if opt.Resync { return b.resync(fctx) } // Check for existence of prior Path1 and Path2 listings if !bilib.FileExists(b.listing1) || !bilib.FileExists(b.listing2) { if b.opt.Recover && bilib.FileExists(b.listing1+"-old") && bilib.FileExists(b.listing2+"-old") { errTip := fmt.Sprintf(Color(terminal.CyanFg, "Path1: %s\n"), Color(terminal.HiBlueFg, b.listing1)) errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path2: %s"), Color(terminal.HiBlueFg, b.listing2)) fs.Log(nil, Color(terminal.YellowFg, "Listings not found. 
Reverting to prior backup as --recover is set. \n")+errTip) if opt.CheckSync != CheckSyncFalse { // Run CheckSync to ensure old listing is valid (garbage in, garbage out!) fs.Infof(nil, "Validating backup listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2)) if err = b.checkSync(b.listing1+"-old", b.listing2+"-old"); err != nil { b.critical = true b.retryable = true return err } fs.Infoc(nil, Color(terminal.GreenFg, "Backup listing is valid.")) } b.revertToOldListings() } else { // On prior critical error abort, the prior listings are renamed to .lst-err to lock out further runs b.critical = true b.retryable = true errTip := Color(terminal.MagentaFg, "Tip: here are the filenames we were looking for. Do they exist? \n") errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path1: %s\n"), Color(terminal.HiBlueFg, b.listing1)) errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path2: %s\n"), Color(terminal.HiBlueFg, b.listing2)) errTip += Color(terminal.MagentaFg, "Try running this command to inspect the work dir: \n") errTip += fmt.Sprintf(Color(terminal.HiCyanFg, "rclone lsl \"%s\""), b.workDir) return errors.New("cannot find prior Path1 or Path2 listings, likely due to critical error on prior run \n" + errTip) } } fs.Infof(nil, "Building Path1 and Path2 listings") b.march.ls1, b.march.ls2, err = b.makeMarchListing(fctx) if err != nil || accounting.Stats(fctx).Errored() { fs.Error(nil, Color(terminal.RedFg, "There were errors while building listings. 
Aborting as it is too dangerous to continue.")) b.critical = true b.retryable = true return err } // Check for Path1 deltas relative to the prior sync fs.Infof(nil, "Path1 checking for diffs") ds1, err := b.findDeltas(fctx, b.fs1, b.listing1, b.march.ls1, "Path1") if err != nil { return err } ds1.printStats() // Check for Path2 deltas relative to the prior sync fs.Infof(nil, "Path2 checking for diffs") ds2, err := b.findDeltas(fctx, b.fs2, b.listing2, b.march.ls2, "Path2") if err != nil { return err } ds2.printStats() // Check access health on the Path1 and Path2 filesystems if opt.CheckAccess { fs.Infof(nil, "Checking access health") err = b.checkAccess(ds1.checkFiles, ds2.checkFiles) if err != nil { b.critical = true b.retryable = true return } } // Check for too many deleted files - possible error condition. // Don't want to start deleting on the other side! if !opt.Force { if ds1.excessDeletes() || ds2.excessDeletes() { b.abort = true return errors.New("too many deletes") } } // Check for all files changed such as all dates changed due to DST change // to avoid errant copy everything. if !opt.Force { msg := "Safety abort: all files were changed on %s %s. Run with --force if desired." 
if !ds1.foundSame { fs.Errorf(nil, msg, ds1.msg, quotePath(path1)) } if !ds2.foundSame { fs.Errorf(nil, msg, ds2.msg, quotePath(path2)) } if !ds1.foundSame || !ds2.foundSame { b.abort = true return errors.New("all files were changed") } } // Determine and apply changes to Path1 and Path2 noChanges := ds1.empty() && ds2.empty() results2to1 := []Results{} results1to2 := []Results{} queues := queues{} if noChanges { fs.Infof(nil, "No changes found") } else { fs.Infof(nil, "Applying changes") results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2) if err != nil { if b.InGracefulShutdown && (err == context.Canceled || err == accounting.ErrorMaxTransferLimitReachedGraceful || strings.Contains(err.Error(), "context canceled")) { fs.Infof(nil, "Ignoring sync error due to Graceful Shutdown: %v", err) } else { b.critical = true // b.retryable = true // not sure about this one return err } } } // Clean up and check listings integrity fs.Infof(nil, "Updating listings") var err1, err2 error if b.DebugName != "" { l1, _ := b.loadListing(b.listing1) l2, _ := b.loadListing(b.listing2) newl1, _ := b.loadListing(b.newListing1) newl2, _ := b.loadListing(b.newListing2) b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, ls1 has name?: %v, ls2 has name?: %v", l1.has(b.DebugName), l2.has(b.DebugName))) b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, newls1 has name?: %v, ls2 has name?: %v", newl1.has(b.DebugName), newl2.has(b.DebugName))) } b.saveOldListings() // save new listings if noChanges { b.replaceCurrentListings() } else { err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false) // 2to1 err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true) // 1to2 } if b.DebugName != "" { l1, _ := b.loadListing(b.listing1) l2, _ := b.loadListing(b.listing2) b.debug(b.DebugName, fmt.Sprintf("post-modifyListing, ls1 has name?: %v, ls2 has name?: %v", l1.has(b.DebugName), l2.has(b.DebugName))) } err = err1 if err == nil { err = err2 } if err != 
nil { b.critical = true b.retryable = true return err } if !opt.NoCleanup { _ = os.Remove(b.newListing1) _ = os.Remove(b.newListing2) } if opt.CheckSync == CheckSyncTrue && !opt.DryRun { fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2)) if err := b.checkSync(b.listing1, b.listing2); err != nil { b.critical = true return err } } // Optional rmdirs for empty directories if opt.RemoveEmptyDirs { fs.Infof(nil, "Removing empty directories") fctx = b.setBackupDir(fctx, 1) err1 := operations.Rmdirs(fctx, b.fs1, "", true) fctx = b.setBackupDir(fctx, 2) err2 := operations.Rmdirs(fctx, b.fs2, "", true) err := err1 if err == nil { err = err2 } if err != nil { b.critical = true b.retryable = true return err } } return nil } // checkSync validates listings func (b *bisyncRun) checkSync(listing1, listing2 string) error { files1, err := b.loadListing(listing1) if err != nil { return fmt.Errorf("cannot read prior listing of Path1: %w", err) } files2, err := b.loadListing(listing2) if err != nil { return fmt.Errorf("cannot read prior listing of Path2: %w", err) } ok := true for _, file := range files1.list { if !files2.has(file) && !files2.has(b.aliases.Alias(file)) { b.indent("ERROR", file, "Path1 file not found in Path2") ok = false } else if !b.fileInfoEqual(file, files2.getTryAlias(file, b.aliases.Alias(file)), files1, files2) { ok = false } } for _, file := range files2.list { if !files1.has(file) && !files1.has(b.aliases.Alias(file)) { b.indent("ERROR", file, "Path2 file not found in Path1") ok = false } } if !ok { return errors.New("path1 and path2 are out of sync, run --resync to recover") } return nil } // checkAccess validates access health func (b *bisyncRun) checkAccess(checkFiles1, checkFiles2 bilib.Names) error { ok := true opt := b.opt prefix := "Access test failed:" numChecks1 := len(checkFiles1) numChecks2 := len(checkFiles2) if numChecks1 == 0 || numChecks1 != numChecks2 { if numChecks1 == 0 && numChecks2 == 0 { 
fs.Logf("--check-access", Color(terminal.RedFg, "Failed to find any files named %s\n More info: %s"), Color(terminal.CyanFg, opt.CheckFilename), Color(terminal.BlueFg, "https://rclone.org/bisync/#check-access")) } fs.Errorf(nil, "%s Path1 count %d, Path2 count %d - %s", prefix, numChecks1, numChecks2, opt.CheckFilename) ok = false } for file := range checkFiles1 { if !checkFiles2.Has(file) { b.indentf("ERROR", file, "%s Path1 file not found in Path2", prefix) ok = false } } for file := range checkFiles2 { if !checkFiles1.Has(file) { b.indentf("ERROR", file, "%s Path2 file not found in Path1", prefix) ok = false } } if !ok { return errors.New("check file check failed") } fs.Infof(nil, "Found %d matching %q files on both paths", numChecks1, opt.CheckFilename) return nil } func (b *bisyncRun) testFn() { if b.opt.TestFn != nil { b.opt.TestFn() } } func (b *bisyncRun) handleErr(o any, msg string, err error, critical, retryable bool) { if err != nil { if retryable { b.retryable = true } if critical { b.critical = true b.abort = true fs.Errorf(o, "%s: %v", msg, err) } else { fs.Infof(o, "%s: %v", msg, err) } } } // setBackupDir overrides --backup-dir with path-specific version, if set, in each direction func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Context { ci := fs.GetConfig(ctx) ci.BackupDir = b.opt.OrigBackupDir if destPath == 1 && b.opt.BackupDir1 != "" { ci.BackupDir = b.opt.BackupDir1 } if destPath == 2 && b.opt.BackupDir2 != "" { ci.BackupDir = b.opt.BackupDir2 } fs.Debugf(ci.BackupDir, "updated backup-dir for Path%d", destPath) return ctx } func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) (err error) { if operations.OverlappingFilterCheck(fctx, fs2, fs1) { err = errors.New(Color(terminal.RedFg, "Overlapping paths detected. 
Cannot bisync between paths that overlap, unless excluded by filters.")) return err } // need to test our BackupDirs too, as sync will be fooled by our --files-from filters testBackupDir := func(ctx context.Context, destPath int) error { src := fs1 dst := fs2 if destPath == 1 { src = fs2 dst = fs1 } ctxBackupDir := b.setBackupDir(ctx, destPath) ci := fs.GetConfig(ctxBackupDir) if ci.BackupDir != "" { // operations.BackupDir should return an error if not properly excluded _, err = operations.BackupDir(fctx, dst, src, "") return err } return nil } err = testBackupDir(fctx, 1) if err != nil { return err } err = testBackupDir(fctx, 2) if err != nil { return err } return nil } func (b *bisyncRun) checkSyntax() (err error) { // check for odd number of quotes in path, usually indicating an escaping issue path1 := bilib.FsPath(b.fs1) path2 := bilib.FsPath(b.fs2) if strings.Count(path1, `"`)%2 != 0 || strings.Count(path2, `"`)%2 != 0 { return fmt.Errorf(Color(terminal.RedFg, `detected an odd number of quotes in your path(s). This is usually a mistake indicating incorrect escaping. Please check your command and try again. Note that on Windows, quoted paths must not have a trailing slash, or it will be interpreted as escaping the quote. path1: %v path2: %v`), path1, path2) } // check for other syntax issues _, err = os.Stat(b.basePath) if err != nil { if strings.Contains(err.Error(), "syntax is incorrect") { return fmt.Errorf(Color(terminal.RedFg, `syntax error detected in your path(s). Please check your command and try again. Note that on Windows, quoted paths must not have a trailing slash, or it will be interpreted as escaping the quote. path1: %v path2: %v error: %v`), path1, path2, err) } } if runtime.GOOS == "windows" && (strings.Contains(path1, " --") || strings.Contains(path2, " --")) { return fmt.Errorf(Color(terminal.RedFg, `detected possible flags in your path(s). 
This is usually a mistake indicating incorrect escaping or quoting (possibly closing quote is missing?). Please check your command and try again. Note that on Windows, quoted paths must not have a trailing slash, or it will be interpreted as escaping the quote. path1: %v path2: %v`), path1, path2) } return nil } func (b *bisyncRun) debug(nametocheck, msgiftrue string) { if b.DebugName != "" && b.DebugName == nametocheck { fs.Infoc(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue)) } } func (b *bisyncRun) debugFn(nametocheck string, fn func()) { if b.DebugName != "" && b.DebugName == nametocheck { fn() } } // waitFor runs fn() until it returns true or the timeout expires func waitFor(msg string, totalWait time.Duration, fn func() bool) (ok bool) { const individualWait = 1 * time.Second for i := range int(totalWait / individualWait) { ok = fn() if ok { return ok } fs.Infof(nil, Color(terminal.YellowFg, "%s: %vs"), msg, int(totalWait/individualWait)-i) time.Sleep(individualWait) } return false }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/resync.go
cmd/bisync/resync.go
package bisync import ( "context" "os" "path/filepath" "github.com/rclone/rclone/cmd/bisync/bilib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/lib/terminal" ) // for backward compatibility, --resync is now equivalent to --resync-mode path1 // and either flag is sufficient without the other. func (b *bisyncRun) setResyncDefaults() { if b.opt.Resync && b.opt.ResyncMode == PreferNone { fs.Debug(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set")) b.opt.ResyncMode = PreferPath1 } if b.opt.ResyncMode != PreferNone { b.opt.Resync = true } // checks and warnings if (b.opt.ResyncMode == PreferNewer || b.opt.ResyncMode == PreferOlder) && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) { fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --resync-mode %s as at least one remote does not support modtimes."), b.opt.ResyncMode.String()) b.opt.ResyncMode = PreferPath1 } else if (b.opt.ResyncMode == PreferNewer || b.opt.ResyncMode == PreferOlder) && !b.opt.Compare.Modtime { fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --resync-mode %s as --compare does not include modtime."), b.opt.ResyncMode.String()) b.opt.ResyncMode = PreferPath1 } if (b.opt.ResyncMode == PreferLarger || b.opt.ResyncMode == PreferSmaller) && !b.opt.Compare.Size { fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --resync-mode %s as --compare does not include size."), b.opt.ResyncMode.String()) b.opt.ResyncMode = PreferPath1 } } // resync implements the --resync mode. // It will generate path1 and path2 listings, // copy any unique files to the opposite path, // and resolve any differing files according to the --resync-mode. 
func (b *bisyncRun) resync(fctx context.Context) (err error) { fs.Infof(nil, "Copying Path2 files to Path1") // Save blank filelists (will be filled from sync results) ls1 := newFileList() ls2 := newFileList() err = ls1.save(b.newListing1) if err != nil { b.handleErr(ls1, "error saving ls1 from resync", err, true, true) b.abort = true } err = ls2.save(b.newListing2) if err != nil { b.handleErr(ls2, "error saving ls2 from resync", err, true, true) b.abort = true } // Check access health on the Path1 and Path2 filesystems // enforce even though this is --resync if b.opt.CheckAccess { fs.Infof(nil, "Checking access health") filesNow1, filesNow2, err := b.findCheckFiles(fctx) if err != nil { b.critical = true b.retryable = true return err } ds1 := &deltaSet{ checkFiles: bilib.Names{}, } ds2 := &deltaSet{ checkFiles: bilib.Names{}, } for _, file := range filesNow1.list { if filepath.Base(file) == b.opt.CheckFilename { ds1.checkFiles.Add(file) } } for _, file := range filesNow2.list { if filepath.Base(file) == b.opt.CheckFilename { ds2.checkFiles.Add(file) } } err = b.checkAccess(ds1.checkFiles, ds2.checkFiles) if err != nil { b.critical = true b.retryable = true return err } } var results2to1 []Results var results1to2 []Results queues := queues{} b.indent("Path2", "Path1", "Resync is copying files to") ctxRun := b.opt.setDryRun(fctx) // fctx has our extra filters added! 
ctxSync, filterSync := filter.AddConfig(ctxRun) if filterSync.Opt.MinSize == -1 { fs.Debugf(nil, "filterSync.Opt.MinSize: %v", filterSync.Opt.MinSize) } b.resyncIs1to2 = false ctxSync = b.setResyncConfig(ctxSync) ctxSync = b.setBackupDir(ctxSync, 1) // 2 to 1 if results2to1, err = b.resyncDir(ctxSync, b.fs2, b.fs1); err != nil { b.critical = true return err } b.indent("Path1", "Path2", "Resync is copying files to") b.resyncIs1to2 = true ctxSync = b.setResyncConfig(ctxSync) ctxSync = b.setBackupDir(ctxSync, 2) // 1 to 2 if results1to2, err = b.resyncDir(ctxSync, b.fs1, b.fs2); err != nil { b.critical = true return err } fs.Infof(nil, "Resync updating listings") b.saveOldListings() // may not exist, as this is --resync b.replaceCurrentListings() resultsToQueue := func(results []Results) bilib.Names { names := bilib.Names{} for _, result := range results { if result.Name != "" && (result.Flags != "d" || b.opt.CreateEmptySrcDirs) && result.IsSrc && result.Src != "" && (result.Winner.Err == nil || result.Flags == "d") { names.Add(result.Name) } } return names } // resync 2to1 queues.copy2to1 = resultsToQueue(results2to1) if err = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false); err != nil { b.critical = true return err } // resync 1to2 queues.copy1to2 = resultsToQueue(results1to2) if err = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true); err != nil { b.critical = true return err } if b.opt.CheckSync == CheckSyncTrue && !b.opt.DryRun { path1 := bilib.FsPath(b.fs1) path2 := bilib.FsPath(b.fs2) fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2)) if err := b.checkSync(b.listing1, b.listing2); err != nil { b.critical = true return err } } if !b.opt.NoCleanup { _ = os.Remove(b.newListing1) _ = os.Remove(b.newListing2) } return nil } /* --resync-mode implementation: PreferPath1: set ci.IgnoreExisting true, then false PreferPath2: set ci.IgnoreExisting false, then true PreferNewer: set ci.UpdateOlder 
in both directions PreferOlder: override EqualFn to implement custom logic PreferLarger: override EqualFn to implement custom logic PreferSmaller: override EqualFn to implement custom logic */ func (b *bisyncRun) setResyncConfig(ctx context.Context) context.Context { ci := fs.GetConfig(ctx) switch b.opt.ResyncMode { case PreferPath1: if !b.resyncIs1to2 { // 2to1 (remember 2to1 is first) ci.IgnoreExisting = true } else { // 1to2 ci.IgnoreExisting = false } case PreferPath2: if !b.resyncIs1to2 { // 2to1 (remember 2to1 is first) ci.IgnoreExisting = false } else { // 1to2 ci.IgnoreExisting = true } case PreferNewer: ci.UpdateOlder = true } // for older, larger, and smaller, we return it unchanged and handle it later return ctx } func (b *bisyncRun) resyncWhichIsWhich(src, dst fs.ObjectInfo) (path1, path2 fs.ObjectInfo) { if b.resyncIs1to2 { return src, dst } return dst, src } // equal in this context really means "don't transfer", so we should // return true if the files are actually equal or if dest is winner, // false if src is winner // When can't determine, we end up running the normal Equal() to tie-break (due to our differ functions). func (b *bisyncRun) resyncWinningPathToEqual(winningPath int) bool { if b.resyncIs1to2 { return winningPath != 1 } return winningPath != 2 }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/lockfile.go
cmd/bisync/lockfile.go
package bisync import ( "encoding/json" "fmt" "io" "os" "strconv" "sync" "time" "github.com/rclone/rclone/cmd/bisync/bilib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/lib/terminal" ) const basicallyforever = fs.Duration(200 * 365 * 24 * time.Hour) type lockFileOpt struct { stopRenewal func() data struct { Session string PID string TimeRenewed time.Time TimeExpires time.Time } } func (b *bisyncRun) setLockFile() (err error) { b.lockFile = "" b.setLockFileExpiration() if !b.opt.DryRun { b.lockFile = b.basePath + ".lck" if bilib.FileExists(b.lockFile) { if !b.lockFileIsExpired() { errTip := Color(terminal.MagentaFg, "Tip: this indicates that another bisync run (of these same paths) either is still running or was interrupted before completion. \n") errTip += Color(terminal.MagentaFg, "If you're SURE you want to override this safety feature, you can delete the lock file with the following command, then run bisync again: \n") errTip += fmt.Sprintf(Color(terminal.HiRedFg, "rclone deletefile \"%s\""), b.lockFile) return fmt.Errorf(Color(terminal.RedFg, "prior lock file found: %s \n")+errTip, Color(terminal.HiYellowFg, b.lockFile)) } } pidStr := []byte(strconv.Itoa(os.Getpid())) if err = os.WriteFile(b.lockFile, pidStr, bilib.PermSecure); err != nil { return fmt.Errorf(Color(terminal.RedFg, "cannot create lock file: %s: %w"), b.lockFile, err) } fs.Debugf(nil, "Lock file created: %s", b.lockFile) b.renewLockFile() b.lockFileOpt.stopRenewal = b.startLockRenewal() } return nil } func (b *bisyncRun) removeLockFile() (err error) { if b.lockFile != "" { b.lockFileOpt.stopRenewal() err = os.Remove(b.lockFile) if err == nil { fs.Debugf(nil, "Lock file removed: %s", b.lockFile) } else { fs.Errorf(nil, "cannot remove lockfile %s: %v", b.lockFile, err) } b.lockFile = "" // block removing it again } return err } func (b *bisyncRun) setLockFileExpiration() { if b.opt.MaxLock > 0 && b.opt.MaxLock < fs.Duration(2*time.Minute) { fs.Logf(nil, Color(terminal.YellowFg, 
"--max-lock cannot be shorter than 2 minutes (unless 0.) Changing --max-lock from %v to %v"), b.opt.MaxLock, 2*time.Minute) b.opt.MaxLock = fs.Duration(2 * time.Minute) } else if b.opt.MaxLock <= 0 { b.opt.MaxLock = basicallyforever } } func (b *bisyncRun) renewLockFile() { if b.lockFile != "" && bilib.FileExists(b.lockFile) { b.lockFileOpt.data.Session = b.basePath b.lockFileOpt.data.PID = strconv.Itoa(os.Getpid()) b.lockFileOpt.data.TimeRenewed = time.Now() b.lockFileOpt.data.TimeExpires = time.Now().Add(time.Duration(b.opt.MaxLock)) // save data file df, err := os.Create(b.lockFile) b.handleErr(b.lockFile, "error renewing lock file", err, true, true) b.handleErr(b.lockFile, "error encoding JSON to lock file", json.NewEncoder(df).Encode(b.lockFileOpt.data), true, true) b.handleErr(b.lockFile, "error closing lock file", df.Close(), true, true) if b.opt.MaxLock < basicallyforever { fs.Infof(nil, Color(terminal.HiBlueFg, "lock file renewed for %v. New expiration: %v"), b.opt.MaxLock, b.lockFileOpt.data.TimeExpires) } } } func (b *bisyncRun) lockFileIsExpired() bool { if b.lockFile != "" && bilib.FileExists(b.lockFile) { rdf, err := os.Open(b.lockFile) b.handleErr(b.lockFile, "error reading lock file", err, true, true) dec := json.NewDecoder(rdf) for { if err := dec.Decode(&b.lockFileOpt.data); err != nil { if err != io.EOF { fs.Errorf(b.lockFile, "err: %v", err) } break } } b.handleErr(b.lockFile, "error closing file", rdf.Close(), true, true) if !b.lockFileOpt.data.TimeExpires.IsZero() && b.lockFileOpt.data.TimeExpires.Before(time.Now()) { fs.Infof(b.lockFile, Color(terminal.GreenFg, "Lock file found, but it expired at %v. Will delete it and proceed."), b.lockFileOpt.data.TimeExpires) markFailed(b.listing1) // listing is untrusted so force revert to prior (if --recover) or create new ones (if --resync) markFailed(b.listing2) return true } fs.Infof(b.lockFile, Color(terminal.RedFg, "Valid lock file found. Expires at %v. 
(%v from now)"), b.lockFileOpt.data.TimeExpires, time.Since(b.lockFileOpt.data.TimeExpires).Abs().Round(time.Second)) prettyprint(b.lockFileOpt.data, "Lockfile info", fs.LogLevelInfo) } return false } // StartLockRenewal renews the lockfile every --max-lock minus one minute. // // It returns a func which should be called to stop the renewal. func (b *bisyncRun) startLockRenewal() func() { if b.opt.MaxLock <= 0 || b.opt.MaxLock >= basicallyforever || b.lockFile == "" { return func() {} } stopLockRenewal := make(chan struct{}) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() ticker := time.NewTicker(time.Duration(b.opt.MaxLock) - time.Minute) for { select { case <-ticker.C: b.renewLockFile() case <-stopLockRenewal: ticker.Stop() return } } }() return func() { close(stopLockRenewal) wg.Wait() } } func markFailed(file string) { failFile := file + "-err" if bilib.FileExists(file) { _ = os.Remove(failFile) _ = os.Rename(file, failFile) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/checkfn.go
cmd/bisync/checkfn.go
// Check-function selection and equality helpers for bisync.
//
// WhichCheck picks the most reliable comparison strategy (plain hash check,
// cryptcheck, or download-and-compare) for a given pair of remotes, and
// EqualFn wires a checksum-aware equality function into the sync context.
package bisync

import (
	"bytes"
	"context"
	"fmt"
	"strings"

	"github.com/rclone/rclone/backend/crypt"
	"github.com/rclone/rclone/cmd/bisync/bilib"
	"github.com/rclone/rclone/cmd/check"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
)

// bisyncCheck carries the state needed by the crypt-aware check functions.
type bisyncCheck = struct {
	hashType hash.Type // hash type supported by the unwrapped (underlying) remote
	fsrc, fdst fs.Fs   // source and destination as seen by the check fn (swapped for reverse cryptcheck)
	fcrypt *crypt.Fs   // the crypt remote used to compute encrypted hashes
}

// WhichCheck determines which CheckFn we should use based on the Fs types
// It is more robust and accurate than Check because
// it will fallback to CryptCheck or DownloadCheck instead of --size-only!
// it returns the *operations.CheckOpt with the CheckFn set.
func (b *bisyncRun) WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.CheckOpt {
	ci := fs.GetConfig(ctx)
	common := opt.Fsrc.Hashes().Overlap(opt.Fdst.Hashes())

	// note that ci.IgnoreChecksum doesn't change the behavior of Check -- it's just a way to opt-out of cryptcheck/download
	if common.Count() > 0 || ci.SizeOnly || ci.IgnoreChecksum {
		// use normal check
		opt.Check = CheckFn
		return opt
	}

	FsrcCrypt, srcIsCrypt := opt.Fsrc.(*crypt.Fs)
	FdstCrypt, dstIsCrypt := opt.Fdst.(*crypt.Fs)

	if (srcIsCrypt && dstIsCrypt) || (!srcIsCrypt && dstIsCrypt) {
		// if both are crypt or only dst is crypt
		b.check.hashType = FdstCrypt.UnWrap().Hashes().GetOne()
		if b.check.hashType != hash.None {
			// use cryptcheck
			b.check.fsrc = opt.Fsrc
			b.check.fdst = opt.Fdst
			b.check.fcrypt = FdstCrypt
			fs.Infof(b.check.fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
			opt.Check = b.CryptCheckFn
			return opt
		}
	} else if srcIsCrypt && !dstIsCrypt {
		// if only src is crypt
		b.check.hashType = FsrcCrypt.UnWrap().Hashes().GetOne()
		if b.check.hashType != hash.None {
			// use reverse cryptcheck
			// NOTE: fsrc/fdst are deliberately swapped here so CryptCheckFn can
			// treat the crypt side uniformly; ReverseCryptCheckFn swaps the args back.
			b.check.fsrc = opt.Fdst
			b.check.fdst = opt.Fsrc
			b.check.fcrypt = FsrcCrypt
			fs.Infof(b.check.fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
			opt.Check = b.ReverseCryptCheckFn
			return opt
		}
	}

	// if we've gotten this far, neither check or cryptcheck will work, so use --download
	fs.Infof(b.check.fdst, "Can't compare hashes, so using check --download for safety. (Use --size-only or --ignore-checksum to disable)")
	opt.Check = DownloadCheckFn
	return opt
}

// CheckFn is a slightly modified version of Check
// It compares src and dst by hash, reporting noHash when no common hash exists.
func CheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
	same, ht, err := operations.CheckHashes(ctx, src, dst)
	if err != nil {
		return true, false, err
	}
	if ht == hash.None {
		return false, true, nil
	}
	if !same {
		err = fmt.Errorf("%v differ", ht)
		fs.Errorf(src, "%v", err)
		// note: the mismatch is reported via differ=true, not via err
		return true, false, nil
	}
	return false, false, nil
}

// CryptCheckFn is a slightly modified version of CryptCheck
// It compares the hash of the underlying (encrypted) dst object against the
// hash computed by encrypting src with the same crypt settings.
func (b *bisyncRun) CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
	cryptDst := dst.(*crypt.Object)
	underlyingDst := cryptDst.UnWrap()
	underlyingHash, err := underlyingDst.Hash(ctx, b.check.hashType)
	if err != nil {
		return true, false, fmt.Errorf("error reading hash from underlying %v: %w", underlyingDst, err)
	}
	if underlyingHash == "" {
		return false, true, nil
	}
	cryptHash, err := b.check.fcrypt.ComputeHash(ctx, cryptDst, src, b.check.hashType)
	if err != nil {
		return true, false, fmt.Errorf("error computing hash: %w", err)
	}
	if cryptHash == "" {
		return false, true, nil
	}
	if cryptHash != underlyingHash {
		err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", b.check.fdst.Name(), b.check.fdst.Root(), cryptHash, b.check.fsrc.Name(), b.check.fsrc.Root(), underlyingHash)
		fs.Debugf(src, "%s", err.Error())
		// using same error msg as CheckFn so integration tests match
		err = fmt.Errorf("%v differ", b.check.hashType)
		fs.Errorf(src, "%s", err.Error())
		return true, false, nil
	}
	return false, false, nil
}

// ReverseCryptCheckFn is like CryptCheckFn except src and dst are switched
// result: src is crypt, dst is non-crypt
func (b *bisyncRun) ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
	return b.CryptCheckFn(ctx, src, dst)
}

// DownloadCheckFn is a slightly modified version of Check with --download
// It downloads both objects and compares their contents byte-by-byte.
func DownloadCheckFn(ctx context.Context, dst, src fs.Object) (equal bool, noHash bool, err error) {
	equal, err = operations.CheckIdenticalDownload(ctx, src, dst)
	if err != nil {
		return true, true, fmt.Errorf("failed to download: %w", err)
	}
	return !equal, false, nil
}

// check potential conflicts (to avoid renaming if already identical)
// Returns the set of names that turned out to be identical on both sides.
func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter.Filter, fs1, fs2 fs.Fs) (bilib.Names, error) {
	matches := bilib.Names{}
	if filterCheck.HaveFilesFrom() {
		fs.Debugf(nil, "There are potential conflicts to check.")
		// NOTE(review): the local names `close` and (below) `check` shadow the
		// builtin close and the imported check package within this scope.
		opt, close, checkopterr := check.GetCheckOpt(fs1, fs2)
		if checkopterr != nil {
			b.critical = true
			b.retryable = true
			fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
			return matches, checkopterr
		}
		defer close()
		opt.Match = new(bytes.Buffer)
		opt = b.WhichCheck(ctxCheck, opt)
		fs.Infof(nil, "Checking potential conflicts...")
		check := operations.CheckFn(ctxCheck, opt)
		fs.Infof(nil, "Finished checking the potential conflicts. %s", check)

		// reset error count, because we don't want to count check errors as bisync errors
		accounting.Stats(ctxCheck).ResetErrors()

		// return the list of identical files to check against later
		if len(fmt.Sprint(opt.Match)) > 0 {
			matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
		}
		if matches.NotEmpty() {
			fs.Debugf(nil, "The following potential conflicts were determined to be identical. %v", matches)
		} else {
			fs.Debugf(nil, "None of the conflicts were determined to be identical.")
		}
	}
	return matches, nil
}

// WhichEqual is similar to WhichCheck, but checks a single object.
// Returns true if the objects are equal, false if they differ or if we don't know
func (b *bisyncRun) WhichEqual(ctx context.Context, src, dst fs.Object, Fsrc, Fdst fs.Fs) bool {
	// NOTE(review): checkopterr is only logged here (not returned); the check
	// proceeds regardless -- confirm GetCheckOpt still yields usable opt/close on error.
	opt, close, checkopterr := check.GetCheckOpt(Fsrc, Fdst)
	if checkopterr != nil {
		fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
	}
	defer close()
	opt = b.WhichCheck(ctx, opt)
	differ, noHash, err := opt.Check(ctx, dst, src)
	if err != nil {
		fs.Errorf(src, "failed to check: %v", err)
		return false
	}
	if noHash {
		fs.Errorf(src, "failed to check as hash is missing")
		return false
	}
	return !differ
}

// EqualFn replaces the standard Equal func with one that also considers checksum
// Note that it also updates the modtime the same way as Sync
func (b *bisyncRun) EqualFn(ctx context.Context) context.Context {
	ci := fs.GetConfig(ctx)
	ci.CheckSum = false // force checksum off so modtime is evaluated if needed
	// modtime and size settings should already be set correctly for Equal
	var equalFn operations.EqualFn = func(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool {
		fs.Debugf(src, "evaluating...")
		equal := false
		logger, _ := operations.GetLogger(ctx)
		// temporarily unset logger, we don't want Equal to duplicate it
		noop := func(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) {
			fs.Debugf(src, "equal skipped")
		}
		ctxNoLogger := operations.WithLogger(ctx, noop)

		timeSizeEqualFn := func() (equal bool, skipHash bool) { return operations.Equal(ctxNoLogger, src, dst), false } // normally use Equal()
		if b.opt.ResyncMode == PreferOlder || b.opt.ResyncMode == PreferLarger || b.opt.ResyncMode == PreferSmaller {
			timeSizeEqualFn = func() (equal bool, skipHash bool) { return b.resyncTimeSizeEqual(ctxNoLogger, src, dst) } // but override for --resync-mode older, larger, smaller
		}
		skipHash := false // (note that we might skip it anyway based on compare/ht settings)
		equal, skipHash = timeSizeEqualFn()
		if equal && !skipHash {
			// time/size matched, so fall through to a hash comparison
			whichHashType := func(f fs.Info) hash.Type {
				ht := b.getHashType(f.Name())
				if ht == hash.None && b.opt.Compare.SlowHashSyncOnly && !b.opt.Resync {
					ht = f.Hashes().GetOne()
				}
				return ht
			}
			srcHash, _ := src.Hash(ctx, whichHashType(src.Fs()))
			dstHash, _ := dst.Hash(ctx, whichHashType(dst.Fs()))
			srcHash, _ = b.tryDownloadHash(ctx, src, srcHash)
			dstHash, _ = b.tryDownloadHash(ctx, dst, dstHash)
			equal = !b.hashDiffers(srcHash, dstHash, whichHashType(src.Fs()), whichHashType(dst.Fs()), src.Size(), dst.Size())
		}
		if equal {
			// report the verdict through the real logger we stashed earlier
			logger(ctx, operations.Match, src, dst, nil)
			fs.Debugf(src, "EqualFn: files are equal")
			return true
		}
		logger(ctx, operations.Differ, src, dst, nil)
		fs.Debugf(src, "EqualFn: files are NOT equal")
		return false
	}
	return operations.WithEqualFn(ctx, equalFn)
}

// resyncTimeSizeEqual implements the time/size portion of equality for
// --resync-mode older/larger/smaller, deciding the winner before hashing.
func (b *bisyncRun) resyncTimeSizeEqual(ctxNoLogger context.Context, src fs.ObjectInfo, dst fs.Object) (equal bool, skipHash bool) {
	switch b.opt.ResyncMode {
	case PreferLarger, PreferSmaller:
		// note that arg order is path1, path2, regardless of src/dst
		path1, path2 := b.resyncWhichIsWhich(src, dst)
		if sizeDiffers(path1.Size(), path2.Size()) {
			winningPath := b.resolveLargerSmaller(path1.Size(), path2.Size(), path1.Remote(), b.opt.ResyncMode)
			// don't need to check/update modtime here, as sizes definitely differ and something will be transferred
			return b.resyncWinningPathToEqual(winningPath), b.resyncWinningPathToEqual(winningPath) // skip hash check if true
		}
		// sizes equal or don't know, so continue to checking time/hash, if applicable
		return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2
	case PreferOlder:
		// note that arg order is path1, path2, regardless of src/dst
		path1, path2 := b.resyncWhichIsWhich(src, dst)
		if timeDiffers(ctxNoLogger, path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Fs(), path2.Fs()) {
			winningPath := b.resolveNewerOlder(path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Remote(), b.opt.ResyncMode)
			// if src is winner, proceed with equal to check size/hash and possibly just update dest modtime instead of transferring
			if !b.resyncWinningPathToEqual(winningPath) {
				return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2
			}
			// if dst is winner (and definitely unequal), do not proceed further as we want dst to overwrite src regardless of size difference, and we do not want dest modtime updated
			return true, true
		}
		// times equal or don't know, so continue to checking size/hash, if applicable
	}
	return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/bisync_debug_test.go
cmd/bisync/bisync_debug_test.go
// Generates VS Code launch.json debugger entries for every combination of
// backend, bisync test case, and remote/local variation, driven by the
// test_all config file.
package bisync_test

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/rclone/rclone/cmd/bisync/bilib"
	"github.com/rclone/rclone/fs"
	"github.com/stretchr/testify/assert"
	"gopkg.in/yaml.v3"
)

// configFile is the test_all config read to discover the backends to generate debuggers for.
const configFile = "../../fstest/test_all/config.yaml"

// Config describes the config for this program
type Config struct {
	Tests    []Test
	Backends []Backend
}

// Test describes an integration test to run with `go test`
type Test struct {
	Path       string // path to the source directory
	FastList   bool   // if it is possible to add -fast-list to tests
	Short      bool   // if it is possible to run the test with -short
	AddBackend bool   // set if Path needs the current backend appending
	NoRetries  bool   // set if no retries should be performed
	NoBinary   bool   // set to not build a binary in advance
	LocalOnly  bool   // if set only run with the local backend
}

// Backend describes a backend test
//
// FIXME make bucket-based remotes set sub-dir automatically???
type Backend struct {
	Backend     string   // name of the backend directory
	Remote      string   // name of the test remote
	FastList    bool     // set to test with -fast-list
	Short       bool     // set to test with -short
	OneOnly     bool     // set to run only one backend test at once
	MaxFile     string   // file size limit
	CleanUp     bool     // when running clean, run cleanup first
	Ignore      []string // test names to ignore the failure of
	Tests       []string // paths of tests to run, blank for all
	ListRetries int      // -list-retries if > 0
	ExtraTime   float64  // factor to multiply the timeout by
}

// parseConfig reads and unmarshals the YAML config at configFile.
func parseConfig() (*Config, error) {
	d, err := os.ReadFile(configFile)
	if err != nil {
		return nil, fmt.Errorf("failed to read config file: %w", err)
	}
	config := &Config{}
	err = yaml.Unmarshal(d, &config)
	if err != nil {
		return nil, fmt.Errorf("failed to parse config file: %w", err)
	}
	return config, nil
}

// debugFormat is the template for one launch.json configuration entry
// (filled via fmt.Sprintf with name, remote, remote2, case).
const debugFormat = ` { "name": %q, "type": "go", "request": "launch", "mode": "test", "program": "./cmd/bisync", "args": ["-remote", %q, "-remote2", %q, "-case", %q, "-no-cleanup"] }, `

// docFormat is the outer launch.json document wrapping all generated entries.
const docFormat = `{ "version": "0.2.0", "configurations": [ %s ] }`

// generates a launch.json file for debugging in VS Code.
// note: just copy the ones you need into your real launch.json file, as VS Code will crash if there are too many!
func (b *bisyncTest) generateDebuggers() {
	config, err := parseConfig()
	if err != nil {
		// NOTE(review): parse failure is only logged; the nil-config loop below
		// simply iterates zero backends, so an empty document is still written.
		fs.Errorf(config, "failed to parse config: %v", err)
	}
	testList := []string{}
	for _, testCase := range b.listDir(b.dataRoot) {
		if strings.HasPrefix(testCase, "test_") {
			// if dir is empty, skip it (can happen due to gitignored files/dirs when checking out branch)
			if len(b.listDir(filepath.Join(b.dataRoot, testCase))) == 0 {
				continue
			}
			testList = append(testList, testCase)
		}
	}
	variations := []string{"LocalRemote", "RemoteLocal", "RemoteRemote"}
	var debuggers strings.Builder

	for _, backend := range config.Backends {
		if backend.Remote == "" {
			backend.Remote = "local"
		}
		for _, testcase := range testList {
			for _, variation := range variations {
				// local/local only makes sense as "RemoteRemote"; skip the redundant variants
				if variation != "RemoteRemote" && backend.Remote == "local" {
					continue
				}
				name := fmt.Sprintf("Test %s %s %s", backend.Remote, testcase, variation)
				switch variation {
				case "LocalRemote":
					debuggers.WriteString(fmt.Sprintf(debugFormat, name, "local", backend.Remote, testcase))
				case "RemoteLocal":
					debuggers.WriteString(fmt.Sprintf(debugFormat, name, backend.Remote, "local", testcase))
				case "RemoteRemote":
					debuggers.WriteString(fmt.Sprintf(debugFormat, name, backend.Remote, backend.Remote, testcase))
				}
			}
		}
	}
	out := fmt.Sprintf(docFormat, debuggers.String())
	outpath := "./testdata/bisync_vscode_debuggers_launch.json"
	err = os.WriteFile(outpath, []byte(out), bilib.PermSecure)
	assert.NoError(b.t, err, "writing golden file %s", outpath)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/listing.go
cmd/bisync/listing.go
// Package bisync implements bisync // Copyright (c) 2017-2020 Chris Nelson package bisync import ( "bufio" "context" "errors" "fmt" "io" "os" "regexp" "slices" "sort" "strconv" "strings" "time" "github.com/rclone/rclone/cmd/bisync/bilib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" ) // ListingHeader defines first line of a listing const ListingHeader = "# bisync listing v1 from" // lineRegex and lineFormat define listing line format // // flags <- size -> <- hash -> id <------------ modtime -----------> "<----- remote" // - 3009805 md5:xxxxxx - 2006-01-02T15:04:05.000000000-0700 "12 - Wait.mp3" // // flags: "-" for a file and "d" for a directory (reserved) // hash: "type:value" or "-" (example: "md5:378840336ab14afa9c6b8d887e68a340") // id: "-" (reserved) const lineFormat = "%s %8d %s %s %s %q\n" var lineRegex = regexp.MustCompile(`^(\S) +(-?\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d{9}[+-]\d{4}) (".+")$`) // timeFormat defines time format used in listings const timeFormat = "2006-01-02T15:04:05.000000000-0700" var ( // TZ defines time zone used in listings TZ = time.UTC tzLocal = false // LogTZ defines time zone used in logs (which may be different than that used in listings). // time.Local by default, but we force UTC on tests to make them deterministic regardless of tester's location. 
LogTZ = time.Local ) // fileInfo describes a file type fileInfo struct { size int64 time time.Time hash string id string flags string } // fileList represents a listing type fileList struct { list []string info map[string]*fileInfo hash hash.Type } func newFileList() *fileList { return &fileList{ info: map[string]*fileInfo{}, list: []string{}, } } func (ls *fileList) empty() bool { if ls == nil { return true } return len(ls.list) == 0 } func (ls *fileList) has(file string) bool { if file == "" { fs.Debugf(nil, "called ls.has() with blank string") return false } _, found := ls.info[file] if !found { // try unquoting file, _ = strconv.Unquote(`"` + file + `"`) _, found = ls.info[file] } return found } func (ls *fileList) get(file string) *fileInfo { info, found := ls.info[file] if !found { // try unquoting file, _ = strconv.Unquote(`"` + file + `"`) info = ls.info[fmt.Sprint(file)] } return info } // copy file from ls to dest func (ls *fileList) getPut(file string, dest *fileList) { f := ls.get(file) dest.put(file, f.size, f.time, f.hash, f.id, f.flags) } func (ls *fileList) getPutAll(dest *fileList) { for file, f := range ls.info { dest.put(file, f.size, f.time, f.hash, f.id, f.flags) } } func (ls *fileList) remove(file string) { if ls.has(file) { ls.list = slices.Delete(ls.list, slices.Index(ls.list, file), slices.Index(ls.list, file)+1) delete(ls.info, file) } } func (ls *fileList) put(file string, size int64, modtime time.Time, hash, id string, flags string) { fi := ls.get(file) if fi != nil { fi.size = size // if already have higher precision of same time, avoid overwriting it if fi.time != modtime { if modtime.Before(fi.time) && fi.time.Sub(modtime) < time.Second { modtime = fi.time } } fi.time = modtime fi.hash = hash fi.id = id fi.flags = flags } else { fi = &fileInfo{ size: size, time: modtime, hash: hash, id: id, flags: flags, } ls.info[file] = fi ls.list = append(ls.list, file) } } func (ls *fileList) getTryAlias(file, alias string) string { if 
ls.has(file) { return file } else if ls.has(alias) { return alias } return "" } func (ls *fileList) getTime(file string) time.Time { fi := ls.get(file) if fi == nil { return time.Time{} } return fi.time } func (ls *fileList) getSize(file string) int64 { fi := ls.get(file) if fi == nil { return 0 } return fi.size } func (ls *fileList) getHash(file string) string { fi := ls.get(file) if fi == nil { return "" } return fi.hash } func (b *bisyncRun) fileInfoEqual(file1, file2 string, ls1, ls2 *fileList) bool { equal := true if ls1.isDir(file1) && ls2.isDir(file2) { return equal } if b.opt.Compare.Size { if sizeDiffers(ls1.getSize(file1), ls2.getSize(file2)) { b.indent("ERROR", file1, fmt.Sprintf("Size not equal in listing. Path1: %v, Path2: %v", ls1.getSize(file1), ls2.getSize(file2))) equal = false } } if b.opt.Compare.Modtime { if timeDiffers(b.fctx, ls1.getTime(file1), ls2.getTime(file2), b.fs1, b.fs2) { b.indent("ERROR", file1, fmt.Sprintf("Modtime not equal in listing. Path1: %v, Path2: %v", ls1.getTime(file1), ls2.getTime(file2))) equal = false } } if b.opt.Compare.Checksum && !b.queueOpt.ignoreListingChecksum { if b.hashDiffers(ls1.getHash(file1), ls2.getHash(file2), b.opt.Compare.HashType1, b.opt.Compare.HashType2, ls1.getSize(file1), ls2.getSize(file2)) { b.indent("ERROR", file1, fmt.Sprintf("Checksum not equal in listing. 
Path1: %v, Path2: %v", ls1.getHash(file1), ls2.getHash(file2))) equal = false } } return equal } // also returns false if not found func (ls *fileList) isDir(file string) bool { fi := ls.get(file) if fi != nil { if fi.flags == "d" { return true } } return false } func (ls *fileList) beforeOther(other *fileList, file string) bool { thisTime := ls.getTime(file) thatTime := other.getTime(file) if thisTime.IsZero() || thatTime.IsZero() { return false } return thisTime.Before(thatTime) } func (ls *fileList) afterTime(file string, time time.Time) bool { fi := ls.get(file) if fi == nil { return false } return fi.time.After(time) } // sort by path name func (ls *fileList) sort() { sort.SliceStable(ls.list, func(i, j int) bool { return ls.list[i] < ls.list[j] }) } // save will save listing to a file. func (ls *fileList) save(listing string) error { file, err := os.Create(listing) if err != nil { return err } ls.sort() hashName := "" if ls.hash != hash.None { hashName = ls.hash.String() } _, err = fmt.Fprintf(file, "%s %s\n", ListingHeader, time.Now().In(TZ).Format(timeFormat)) if err != nil { _ = file.Close() _ = os.Remove(listing) return err } for _, remote := range ls.list { fi := ls.get(remote) time := fi.time.In(TZ).Format(timeFormat) hash := "-" if hashName != "" && fi.hash != "" { hash = hashName + ":" + fi.hash } id := fi.id if id == "" { id = "-" } flags := fi.flags if flags == "" { flags = "-" } _, err = fmt.Fprintf(file, lineFormat, flags, fi.size, hash, id, time, remote) if err != nil { _ = file.Close() _ = os.Remove(listing) return err } } return file.Close() } // loadListing will load listing from a file. // The key is the path to the file relative to the Path1/Path2 base. 
func (b *bisyncRun) loadListing(listing string) (*fileList, error) { file, err := os.Open(listing) if err != nil { return nil, err } defer func() { _ = file.Close() }() reader := bufio.NewReader(file) ls := newFileList() lastHashName := "" for { line, err := reader.ReadString('\n') if err == io.EOF { break } if err != nil { return nil, err } line = strings.TrimSuffix(line, "\n") if line == "" || line[0] == '#' { continue } match := lineRegex.FindStringSubmatch(line) if match == nil { fs.Logf(listing, "Ignoring incorrect line: %q", line) continue } flags, sizeStr, hashStr := match[1], match[2], match[3] id, timeStr, nameStr := match[4], match[5], match[6] sizeVal, sizeErr := strconv.ParseInt(sizeStr, 10, 64) timeVal, timeErr := time.ParseInLocation(timeFormat, timeStr, TZ) nameVal, nameErr := strconv.Unquote(nameStr) hashName, hashVal, hashErr := parseHash(hashStr) if hashErr == nil && hashName != "" { if lastHashName == "" { lastHashName = hashName hashErr = ls.hash.Set(hashName) } else if hashName != lastHashName { fs.Logf(listing, "Inconsistent hash type in line: %q", line) continue } } if (flags != "-" && flags != "d") || id != "-" || sizeErr != nil || timeErr != nil || hashErr != nil || nameErr != nil { fs.Logf(listing, "Ignoring incorrect line: %q", line) continue } if ls.has(nameVal) { fs.Logf(listing, "Duplicate line (keeping latest): %q", line) if ls.afterTime(nameVal, timeVal) { continue } } ls.put(nameVal, sizeVal, timeVal.In(TZ), hashVal, id, flags) } return ls, nil } // saveOldListings saves the most recent successful listing, in case we need to rollback on error func (b *bisyncRun) saveOldListings() { b.handleErr(b.listing1, "error saving old Path1 listing", bilib.CopyFileIfExists(b.listing1, b.listing1+"-old"), true, true) b.handleErr(b.listing2, "error saving old Path2 listing", bilib.CopyFileIfExists(b.listing2, b.listing2+"-old"), true, true) } // replaceCurrentListings saves both ".lst-new" listings as ".lst" func (b *bisyncRun) 
replaceCurrentListings() { b.handleErr(b.newListing1, "error replacing Path1 listing", bilib.CopyFileIfExists(b.newListing1, b.listing1), true, true) b.handleErr(b.newListing2, "error replacing Path2 listing", bilib.CopyFileIfExists(b.newListing2, b.listing2), true, true) } // revertToOldListings reverts to the most recent successful listing func (b *bisyncRun) revertToOldListings() { b.handleErr(b.listing1, "error reverting to old Path1 listing", bilib.CopyFileIfExists(b.listing1+"-old", b.listing1), true, true) b.handleErr(b.listing2, "error reverting to old Path2 listing", bilib.CopyFileIfExists(b.listing2+"-old", b.listing2), true, true) } func parseHash(str string) (string, string, error) { if str == "-" { return "", "", nil } if before, after, ok := strings.Cut(str, ":"); ok { name, val := before, after if name != "" && val != "" { return name, val, nil } } return "", "", fmt.Errorf("invalid hash %q", str) } // checkListing verifies that listing is not empty (unless resyncing) func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error { if b.opt.Resync || !ls.empty() { return nil } fs.Errorf(nil, "Empty %s listing. 
Cannot sync to an empty directory: %s", msg, listing) b.critical = true b.retryable = true return fmt.Errorf("empty %s listing: %s", msg, listing) } // listingNum should be 1 for path1 or 2 for path2 func (b *bisyncRun) loadListingNum(listingNum int) (*fileList, error) { listingpath := b.basePath + ".path1.lst-new" if listingNum == 2 { listingpath = b.basePath + ".path2.lst-new" } if b.opt.DryRun { listingpath = strings.Replace(listingpath, ".lst-", ".lst-dry-", 1) } fs.Debugf(nil, "loading listing for path %d at: %s", listingNum, listingpath) return b.loadListing(listingpath) } func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) { var fulllisting *fileList dirsonly := newFileList() var err error if !b.opt.CreateEmptySrcDirs { return dirsonly, err } fulllisting, err = b.loadListingNum(listingNum) if err != nil { b.critical = true b.retryable = true fs.Debugf(nil, "Error loading listing to generate dirsonly list: %v", err) return dirsonly, err } for _, obj := range fulllisting.list { info := fulllisting.get(obj) if info.flags == "d" { fs.Debugf(nil, "found a dir: %s", obj) dirsonly.put(obj, info.size, info.time, info.hash, info.id, info.flags) } else { fs.Debugf(nil, "not a dir: %s", obj) } } return dirsonly, err } // modifyListing will modify the listing based on the results of the sync func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, results []Results, queues queues, is1to2 bool) (err error) { queue := queues.copy2to1 direction := "2to1" if is1to2 { queue = queues.copy1to2 direction = "1to2" } fs.Debugf(nil, "updating %s", direction) prettyprint(results, "results", fs.LogLevelDebug) prettyprint(queue, "queue", fs.LogLevelDebug) srcListing, dstListing := b.getListingNames(is1to2) srcList, err := b.loadListing(srcListing) if err != nil { return fmt.Errorf("cannot read prior listing: %w", err) } dstList, err := b.loadListing(dstListing) if err != nil { return fmt.Errorf("cannot read prior listing: %w", err) } // set list 
hash type if b.opt.Resync && !b.opt.IgnoreListingChecksum { if is1to2 { srcList.hash = b.opt.Compare.HashType1 dstList.hash = b.opt.Compare.HashType2 } else { srcList.hash = b.opt.Compare.HashType2 dstList.hash = b.opt.Compare.HashType1 } if b.opt.Compare.DownloadHash && srcList.hash == hash.None { srcList.hash = hash.MD5 } if b.opt.Compare.DownloadHash && dstList.hash == hash.None { dstList.hash = hash.MD5 } } b.debugFn(b.DebugName, func() { var rs ResultsSlice = results b.debug(b.DebugName, fmt.Sprintf("modifyListing direction: %s, results has name?: %v", direction, rs.has(b.DebugName))) b.debug(b.DebugName, fmt.Sprintf("modifyListing direction: %s, srcList has name?: %v, dstList has name?: %v", direction, srcList.has(b.DebugName), dstList.has(b.DebugName))) }) srcWinners := newFileList() dstWinners := newFileList() errors := newFileList() ctxRecheck, filterRecheck := filter.AddConfig(ctx) for _, result := range results { if result.Name == "" { continue } if result.AltName != "" { b.aliases.Add(result.Name, result.AltName) } if result.Flags == "d" && !b.opt.CreateEmptySrcDirs { continue } // build src winners list if result.IsSrc && result.Src != "" && (result.Winner.Err == nil || result.Flags == "d") { srcWinners.put(result.Name, result.Size, result.Modtime, result.Hash, "-", result.Flags) prettyprint(result, "winner: copy to src", fs.LogLevelDebug) } // build dst winners list if result.IsWinner && result.Winner.Side != "none" && (result.Winner.Err == nil || result.Flags == "d") { dstWinners.put(result.Name, result.Size, result.Modtime, result.Hash, "-", result.Flags) prettyprint(result, "winner: copy to dst", fs.LogLevelDebug) } // build errors list if result.Err != nil || result.Winner.Err != nil { errors.put(result.Name, result.Size, result.Modtime, result.Hash, "-", result.Flags) if err := filterRecheck.AddFile(result.Name); err != nil { fs.Debugf(result.Name, "error adding file to recheck filter: %v", err) } } } ci := fs.GetConfig(ctx) updateLists := 
func(side string, winners, list *fileList) { for _, queueFile := range queue.ToList() { if !winners.has(queueFile) && list.has(queueFile) && !errors.has(queueFile) { // removals from side list.remove(queueFile) fs.Debugf(nil, "decision: removed from %s: %v", side, queueFile) } else if winners.has(queueFile) { // copies to side new := winners.get(queueFile) // handle normalization if side == "dst" { alias := b.aliases.Alias(queueFile) if alias != queueFile { // use the (non-identical) existing name, unless --fix-case if ci.FixCase { fs.Debugf(direction, "removing %s and adding %s as --fix-case was specified", alias, queueFile) list.remove(alias) } else { fs.Debugf(direction, "casing/unicode difference detected. using %s instead of %s", alias, queueFile) queueFile = alias } } } list.put(queueFile, new.size, new.time, new.hash, new.id, new.flags) fs.Debugf(nil, "decision: copied to %s: %v", side, queueFile) } else { fs.Debugf(queueFile, "file in queue but missing from %s transfers", side) if err := filterRecheck.AddFile(queueFile); err != nil { fs.Debugf(queueFile, "error adding file to recheck filter: %v", err) } } } } updateLists("src", srcWinners, srcList) updateLists("dst", dstWinners, dstList) // account for "deltaOthers" we handled separately if queues.deletedonboth.NotEmpty() { for file := range queues.deletedonboth { srcList.remove(file) dstList.remove(file) } } if b.renames.NotEmpty() && !b.opt.DryRun { // renamed on src and copied to dst for _, rename := range b.renames { srcOldName, srcNewName, dstOldName, dstNewName := rename.getNames(is1to2) fs.Debugf(nil, "%s: srcOldName: %v srcNewName: %v dstOldName: %v dstNewName: %v", direction, srcOldName, srcNewName, dstOldName, dstNewName) // we'll handle the other side when we go the other direction var new *fileInfo // we prefer to get the info from the newNamed versions // since they were actually copied as opposed to operations.MoveFile()'d. 
// the size/time/hash info is therefore fresher on the renames // but we'll settle for the original if we have to. if srcList.has(srcNewName) { new = srcList.get(srcNewName) } else if srcList.has(dstNewName) { new = srcList.get(dstNewName) } else if srcList.has(srcOldName) { new = srcList.get(srcOldName) } else { // something's odd, so let's recheck if err := filterRecheck.AddFile(srcOldName); err != nil { fs.Debugf(srcOldName, "error adding file to recheck filter: %v", err) } } if srcNewName != "" { // if it was renamed and not deleted if new == nil { // should not happen. log error and debug info b.handleErr(b.renames, "internal error", fmt.Errorf("missing info for %q. Please report a bug at https://github.com/rclone/rclone/issues", srcNewName), true, true) fs.PrettyPrint(srcList, "srcList for debugging", fs.LogLevelNotice) continue } srcList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags) dstList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags) } if srcNewName != srcOldName { srcList.remove(srcOldName) } if srcNewName != dstOldName { dstList.remove(dstOldName) } } } // recheck the ones we skipped because they were equal // we never got their info because they were never synced. // TODO: add flag to skip this? 
(since it re-lists) if queues.renameSkipped.NotEmpty() { skippedList := queues.renameSkipped.ToList() for _, file := range skippedList { if err := filterRecheck.AddFile(file); err != nil { fs.Debugf(file, "error adding file to recheck filter: %v", err) } } } // skipped dirs -- nothing to recheck, just add them // (they are not necessarily there already, if they are new) path1List := srcList path2List := dstList if !is1to2 { path1List = dstList path2List = srcList } if !queues.skippedDirs1.empty() { queues.skippedDirs1.getPutAll(path1List) } if !queues.skippedDirs2.empty() { queues.skippedDirs2.getPutAll(path2List) } if filterRecheck.HaveFilesFrom() { // also include any aliases recheckFiles := filterRecheck.Files() for recheckFile := range recheckFiles { alias := b.aliases.Alias(recheckFile) if recheckFile != alias { if err := filterRecheck.AddFile(alias); err != nil { fs.Debugf(alias, "error adding file to recheck filter: %v", err) } } } b.recheck(ctxRecheck, src, dst, srcList, dstList, is1to2) } if b.InGracefulShutdown { var toKeep []string var toRollback []string fs.Debugf(direction, "stats for %s", direction) trs := accounting.Stats(ctx).Transferred() for _, tr := range trs { b.debugFn(tr.Name, func() { prettyprint(tr, tr.Name, fs.LogLevelInfo) }) if tr.Error == nil && tr.Bytes > 0 || tr.Size <= 0 { prettyprint(tr, "keeping: "+tr.Name, fs.LogLevelDebug) toKeep = append(toKeep, tr.Name) } } // Dirs (for the unlikely event that the shutdown was triggered post-sync during syncEmptyDirs) for _, r := range results { if r.Origin == "syncEmptyDirs" { if srcWinners.has(r.Name) || dstWinners.has(r.Name) { toKeep = append(toKeep, r.Name) fs.Infof(r.Name, "keeping empty dir") } } } oldSrc, oldDst := b.getOldLists(is1to2) prettyprint(oldSrc.list, "oldSrc", fs.LogLevelDebug) prettyprint(oldDst.list, "oldDst", fs.LogLevelDebug) prettyprint(srcList.list, "srcList", fs.LogLevelDebug) prettyprint(dstList.list, "dstList", fs.LogLevelDebug) combinedList := Concat(oldSrc.list, 
			oldDst.list, srcList.list, dstList.list)
		// anything not in the to-keep set (directly or under an alias) gets
		// queued for rollback, except in --dry-run
		for _, f := range combinedList {
			if !slices.Contains(toKeep, f) && !slices.Contains(toKeep, b.aliases.Alias(f)) && !b.opt.DryRun {
				toRollback = append(toRollback, f)
			}
		}
		b.prepareRollback(toRollback, srcList, dstList, is1to2)
		prettyprint(oldSrc.list, "oldSrc", fs.LogLevelDebug)
		prettyprint(oldDst.list, "oldDst", fs.LogLevelDebug)
		prettyprint(srcList.list, "srcList", fs.LogLevelDebug)
		prettyprint(dstList.list, "dstList", fs.LogLevelDebug)

		// clear stats so we only do this once
		accounting.Stats(ctx).RemoveDoneTransfers()
	}
	if b.DebugName != "" {
		b.debug(b.DebugName, fmt.Sprintf("%s pre-save srcList has it?: %v", direction, srcList.has(b.DebugName)))
		b.debug(b.DebugName, fmt.Sprintf("%s pre-save dstList has it?: %v", direction, dstList.has(b.DebugName)))
	}
	// update files
	err = srcList.save(srcListing)
	b.handleErr(srcList, "error saving srcList from modifyListing", err, true, true)
	err = dstList.save(dstListing)
	b.handleErr(dstList, "error saving dstList from modifyListing", err, true, true)
	return err
}

// recheck the ones we're not sure about
//
// recheck re-lists src and dst, and for each src object that matches a dst
// object (by name or by casing/unicode alias) and compares equal, records
// fresh size/modtime/hash info in both listings. Src objects that remain
// unresolved are rolled back to their old listing entries so they get retried
// on the next run; during --resync rollback is not possible, so an error is
// raised via handleErr instead.
// NOTE(review): "recchecking" in the two debug messages below is a typo in a
// runtime string, deliberately left untouched here.
func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList, dstList *fileList, is1to2 bool) {
	var srcObjs []fs.Object
	var dstObjs []fs.Object
	var resolved []string
	var toRollback []string
	if err := operations.ListFn(ctxRecheck, src, func(obj fs.Object) {
		srcObjs = append(srcObjs, obj)
	}); err != nil {
		fs.Debugf(src, "error recchecking src obj: %v", err)
	}
	if err := operations.ListFn(ctxRecheck, dst, func(obj fs.Object) {
		dstObjs = append(dstObjs, obj)
	}); err != nil {
		fs.Debugf(dst, "error recchecking dst obj: %v", err)
	}
	// putObj records obj in list with fresh size/modtime/hash, honouring the
	// configured compare options (IgnoreListingChecksum, DownloadHash, Modtime).
	putObj := func(obj fs.Object, list *fileList) {
		hashVal := ""
		if !b.opt.IgnoreListingChecksum {
			hashType := list.hash
			if hashType != hash.None {
				hashVal, _ = obj.Hash(ctxRecheck, hashType)
			}
			hashVal, _ = b.tryDownloadHash(ctxRecheck, obj, hashVal)
		}
		var modtime time.Time
		if b.opt.Compare.Modtime {
			modtime = obj.ModTime(ctxRecheck).In(TZ)
		}
		list.put(obj.Remote(), obj.Size(), modtime, hashVal, "-", "-")
	}
	for _, srcObj := range srcObjs {
		fs.Debugf(srcObj, "rechecking")
		for _, dstObj := range dstObjs {
			if srcObj.Remote() == dstObj.Remote() || srcObj.Remote() == b.aliases.Alias(dstObj.Remote()) {
				// note: unlike Equal(), WhichEqual() does not update the modtime in dest if sums match but modtimes don't.
				if b.opt.DryRun || b.WhichEqual(ctxRecheck, srcObj, dstObj, src, dst) {
					putObj(srcObj, srcList)
					putObj(dstObj, dstList)
					resolved = append(resolved, srcObj.Remote())
				} else {
					fs.Infof(srcObj, "files not equal on recheck: %v %v", srcObj, dstObj)
				}
			}
		}
		// if srcObj not resolved by now (either because no dstObj match or files not equal),
		// roll it back to old version, so it gets retried next time.
		// skip and error during --resync, as rollback is not possible
		if !slices.Contains(resolved, srcObj.Remote()) && !b.opt.DryRun {
			if b.opt.Resync {
				err := errors.New("no dstObj match or files not equal")
				b.handleErr(srcObj, "Unable to rollback during --resync", err, true, false)
			} else {
				toRollback = append(toRollback, srcObj.Remote())
			}
		}
	}
	if len(toRollback) > 0 {
		srcListing, dstListing := b.getListingNames(is1to2)
		oldSrc, err := b.loadListing(srcListing + "-old")
		b.handleErr(oldSrc, "error loading old src listing", err, true, true)
		oldDst, err := b.loadListing(dstListing + "-old")
		b.handleErr(oldDst, "error loading old dst listing", err, true, true)
		// bail out before mutating the listings if loading flagged a critical error
		if b.critical {
			return
		}
		for _, item := range toRollback {
			b.rollback(item, oldSrc, srcList)
			b.rollback(item, oldDst, dstList)
		}
	}
}

// getListingNames returns the (srcListing, dstListing) file names for the
// given direction: (listing1, listing2) when is1to2, otherwise swapped.
func (b *bisyncRun) getListingNames(is1to2 bool) (srcListing string, dstListing string) {
	if is1to2 {
		return b.listing1, b.listing2
	}
	return b.listing2, b.listing1
}

// rollback reverts item in newList to its entry from oldList: if the old
// listing has the item (or its alias), that entry is copied into newList;
// otherwise the item and its alias are removed from newList.
func (b *bisyncRun) rollback(item string, oldList, newList *fileList) {
	alias := b.aliases.Alias(item)
	if oldList.has(item) {
		oldList.getPut(item, newList)
		fs.Debugf(nil, "adding to newlist: %s", item)
	} else if oldList.has(alias) {
		oldList.getPut(alias, newList)
		fs.Debugf(nil, "adding to newlist: %s", alias)
	} else {
		fs.Debugf(nil, "removing from newlist: %s (has it?: %v)", item, newList.has(item))
		prettyprint(newList.list, "newList", fs.LogLevelDebug)
		newList.remove(item)
		newList.remove(alias)
	}
}

// prepareRollback loads the old listings for both sides and rolls back every
// item in toRollback in both srcList and dstList (with per-item debug
// before/after state when debugging is enabled). It is a no-op when toRollback
// is empty and returns early if b.critical has been set while loading.
func (b *bisyncRun) prepareRollback(toRollback []string, srcList, dstList *fileList, is1to2 bool) {
	if len(toRollback) > 0 {
		oldSrc, oldDst := b.getOldLists(is1to2)
		if b.critical {
			return
		}
		fs.Debugf("new lists", "src: (%v), dest: (%v)", len(srcList.list), len(dstList.list))
		for _, item := range toRollback {
			b.debugFn(item, func() {
				b.debug(item, fmt.Sprintf("pre-rollback oldSrc has it?: %v", oldSrc.has(item)))
				b.debug(item, fmt.Sprintf("pre-rollback oldDst has it?: %v", oldDst.has(item)))
				b.debug(item, fmt.Sprintf("pre-rollback srcList has it?: %v", srcList.has(item)))
				b.debug(item, fmt.Sprintf("pre-rollback dstList has it?: %v", dstList.has(item)))
			})
			b.rollback(item, oldSrc, srcList)
			b.rollback(item, oldDst, dstList)
			b.debugFn(item, func() {
				b.debug(item, fmt.Sprintf("post-rollback oldSrc has it?: %v", oldSrc.has(item)))
				b.debug(item, fmt.Sprintf("post-rollback oldDst has it?: %v", oldDst.has(item)))
				b.debug(item, fmt.Sprintf("post-rollback srcList has it?: %v", srcList.has(item)))
				b.debug(item, fmt.Sprintf("post-rollback dstList has it?: %v", dstList.has(item)))
			})
		}
	}
}

// getOldLists loads the saved "-old" listings for both sides, returned in
// (src, dst) order for the given direction. Load failures are routed through
// handleErr — NOTE(review): presumably handleErr sets b.critical on fatal
// errors, which callers check after this returns; confirm against handleErr.
func (b *bisyncRun) getOldLists(is1to2 bool) (*fileList, *fileList) {
	srcListing, dstListing := b.getListingNames(is1to2)
	oldSrc, err := b.loadListing(srcListing + "-old")
	b.handleErr(oldSrc, "error loading old src listing", err, true, true)
	oldDst, err := b.loadListing(dstListing + "-old")
	b.handleErr(oldDst, "error loading old dst listing", err, true, true)
	fs.Debugf("get old lists", "is1to2: %v, oldsrc: %s (%v), olddest: %s (%v)", is1to2, srcListing+"-old", len(oldSrc.list), dstListing+"-old", len(oldDst.list))
	return oldSrc, oldDst
}

// Concat returns a new slice concatenating the passed in slices.
func Concat[S ~[]E, E any](ss ...S) S { size := 0 for _, s := range ss { size += len(s) if size < 0 { panic("len out of range") } } newslice := slices.Grow[S](nil, size) for _, s := range ss { newslice = append(newslice, s...) } return newslice }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/help.go
cmd/bisync/help.go
package bisync

import (
	"strconv"
	"strings"
)

// makeHelp expands the help text template: each "|" becomes a backtick, and
// the {MAXDELETE} / {CHECKFILE} placeholders are replaced with their default
// values.
func makeHelp(help string) string {
	replacer := strings.NewReplacer(
		"|", "`",
		"{MAXDELETE}", strconv.Itoa(DefaultMaxDelete),
		"{CHECKFILE}", DefaultCheckFilename,
		// "{WORKDIR}", DefaultWorkdir,
	)
	return replacer.Replace(help)
}

// shortHelp is the one-line description of the bisync command.
var shortHelp = `Perform bidirectional synchronization between two paths.`

// rcHelp documents the parameters accepted by the remote-control API call.
var rcHelp = makeHelp(`This takes the following parameters - path1 - a remote directory string e.g. |drive:path1| - path2 - a remote directory string e.g. |drive:path2| - dryRun - dry-run mode - resync - performs the resync run - checkAccess - abort if {CHECKFILE} files are not found on both filesystems - checkFilename - file name for checkAccess (default: {CHECKFILE}) - maxDelete - abort sync if percentage of deleted files is above this threshold (default: {MAXDELETE}) - force - Bypass maxDelete safety check and run the sync - checkSync - |true| by default, |false| disables comparison of final listings, |only| will skip sync, only compare listings from the last run - createEmptySrcDirs - Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs) - removeEmptyDirs - remove empty directories at the final cleanup step - filtersFile - read filtering patterns from a file - ignoreListingChecksum - Do not use checksums for listings - resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync. - workdir - server directory for history files (default: |~/.cache/rclone/bisync|) - backupdir1 - --backup-dir for Path1. Must be a non-overlapping path on the same remote. - backupdir2 - --backup-dir for Path2. Must be a non-overlapping path on the same remote.
- noCleanup - retain working files See [bisync command help](https://rclone.org/commands/rclone_bisync/) and [full bisync description](https://rclone.org/bisync/) for more information.`)

// longHelp is the full help text shown for the bisync command, built from
// shortHelp plus the expanded template below.
var longHelp = shortHelp + makeHelp(` [Bisync](https://rclone.org/bisync/) provides a bidirectional cloud sync solution in rclone. It retains the Path1 and Path2 filesystem listings from the prior run. On each successive run it will: - list files on Path1 and Path2, and check for changes on each side. Changes include |New|, |Newer|, |Older|, and |Deleted| files. - Propagate changes on Path1 to Path2, and vice-versa. Bisync is considered an **advanced command**, so use with care. Make sure you have read and understood the entire [manual](https://rclone.org/bisync) (especially the [Limitations](https://rclone.org/bisync/#limitations) section) before using, or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/). See [full bisync description](https://rclone.org/bisync/) for details.`)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/bisync_test.go
cmd/bisync/bisync_test.go
// TestBisync is a test engine for bisync test cases. // See https://rclone.org/bisync/#testing for documentation. // Test cases are organized in subdirs beneath ./testdata // Results are compared against golden listings and log file. package bisync_test import ( "bytes" "context" "errors" "flag" "fmt" "os" "path" "path/filepath" "regexp" "runtime" "slices" "sort" "strconv" "strings" "testing" "time" "unicode/utf8" "github.com/rclone/rclone/cmd/bisync" "github.com/rclone/rclone/cmd/bisync/bilib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/sync" "github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/terminal" "golang.org/x/text/unicode/norm" "github.com/pmezard/go-difflib/difflib" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" _ "github.com/rclone/rclone/backend/all" // for integration tests ) const ( touchDateFormat = "2006-01-02" goldenCanonBase = "_testdir_" logFileName = "test.log" dropMe = "*** [DROP THIS LINE] ***" eol = "\n" slash = string(os.PathSeparator) fixSlash = (runtime.GOOS == "windows") ) var initDate = time.Date(2000, time.January, 1, 0, 0, 0, 0, bisync.TZ) /* Useful Command Shortcuts */ // go test ./cmd/bisync -remote local -race // go test ./cmd/bisync -remote local -golden // go test ./cmd/bisync -remote local -case extended_filenames // go run ./fstest/test_all -run '^TestBisync.*$' -timeout 3h -verbose -maxtries 5 // go run ./fstest/test_all -remotes 
local,TestCrypt:,TestDrive:,TestOneDrive:,TestOneDriveBusiness:,TestDropbox:,TestCryptDrive:,TestOpenDrive:,TestChunker:,:memory:,TestCryptNoEncryption:,TestCombine:DirA,TestFTPRclone:,TestWebdavRclone:,TestS3Rclone:,TestSFTPRclone:,TestSFTPRcloneSSH:,TestNextcloud:,TestChunkerNometaLocal:,TestChunkerChunk3bLocal:,TestChunkerLocal:,TestChunkerChunk3bNometaLocal:,TestStorj: -run '^TestBisync.*$' -timeout 3h -verbose -maxtries 5 // go test -timeout 3h -run '^TestBisync.*$' github.com/rclone/rclone/cmd/bisync -remote TestDrive:Bisync -v // go test -timeout 3h -run '^TestBisyncRemoteRemote/basic$' github.com/rclone/rclone/cmd/bisync -remote TestDropbox:Bisync -v // TestFTPProftpd:,TestFTPPureftpd:,TestFTPRclone:,TestFTPVsftpd:,TestHdfs:,TestS3Minio:,TestS3MinioEdge:,TestS3Rclone:,TestSeafile:,TestSeafileEncrypted:,TestSeafileV6:,TestSFTPOpenssh:,TestSFTPRclone:,TestSFTPRcloneSSH:,TestSia:,TestSwiftAIO:,TestWebdavNextcloud:,TestWebdavOwncloud:,TestWebdavRclone: // logReplacements make modern test logs comparable with golden dir. // It is a string slice of even length with this structure: // // {`matching regular expression`, "mangled result string", ...} var logReplacements = []string{ // skip syslog facility markers `^(<[1-9]>)(INFO |ERROR |NOTICE|DEBUG ):(.*)$`, "$2:$3", // skip log prefixes `^\d+/\d\d/\d\d \d\d:\d\d:\d\d(?:\.\d{6})? `, "", // ignore rclone info messages `^INFO : .*?: (Deleted|Copied |Moved |Updated ).*$`, dropMe, `^NOTICE: .*?: Replacing invalid UTF-8 characters in "[^"]*"$`, dropMe, // ignore rclone debug messages `^DEBUG : .*$`, dropMe, // ignore dropbox info messages `^NOTICE: too_many_(requests|write_operations)/\.*: Too many requests or write operations.*$`, dropMe, `^NOTICE: .*?: Forced to upload files to set modification times on this backend.$`, dropMe, `^INFO : .*? 
Committing uploads - please wait...$`, dropMe, `^INFO : .*?: src and dst identical but can't set mod time without deleting and re-uploading$`, dropMe, `^INFO : .*?: src and dst identical but can't set mod time without re-uploading$`, dropMe, // ignore crypt info messages `^INFO : .*?: Crypt detected! Using cryptcheck instead of check. \(Use --size-only or --ignore-checksum to disable\)$`, dropMe, // ignore drive info messages `^NOTICE:.*?Files of unknown size \(such as Google Docs\) do not sync reliably with --checksum or --size-only\. Consider using modtime instead \(the default\) or --drive-skip-gdocs.*?$`, dropMe, // ignore cache backend cache expired messages `^INFO : .*cache expired.*$`, dropMe, // ignore "Implicitly create directory" messages (TestnStorage:) `^INFO : .*Implicitly create directory.*$`, dropMe, // ignore differences in backend features `^.*?"HashType1":.*?$`, dropMe, `^.*?"HashType2":.*?$`, dropMe, `^.*?"SlowHashDetected":.*?$`, dropMe, `^.*? for same-side diffs on .*?$`, dropMe, `^.*?Downloading hashes.*?$`, dropMe, `^.*?Can't compare hashes, so using check --download.*?$`, dropMe, // ignore timestamps in directory time updates `^(INFO : .*?: (Made directory with|Set directory) (metadata|modification time)).*$`, dropMe, // ignore equivalent log for backends lacking dir modtime support `^(INFO : .*?: Making directory).*$`, dropMe, // ignore sizes in directory time updates `^(NOTICE: .*?: Skipped set directory modification time as --dry-run is set).*$`, dropMe, // ignore sizes in directory metadata updates `^(NOTICE: .*?: Skipped update directory metadata as --dry-run is set).*$`, dropMe, } // Some dry-run messages differ depending on the particular remote. 
var dryrunReplacements = []string{ `^(NOTICE: file5.txt: Skipped) (copy|update modification time) (as --dry-run is set [(]size \d+[)])$`, `$1 copy (or update modification time) $3`, } // Some groups of log lines may appear unordered because rclone applies // many operations in parallel to boost performance. var logHoppers = []string{ // Test case `dry-run` produced log mismatches due to non-deterministic // order of captured dry-run info messages. `NOTICE: \S+?: Skipped (?:copy|move|delete|copy \(or [^)]+\)|update modification time) as --dry-run is set \(size \d+\)`, // Test case `extended-filenames` detected difference in order of files // with extended unicode names between Windows and Unix or GDrive, // but the order is in fact not important for success. `(?:INFO |NOTICE): - Path[12] +File (?:was deleted|is new|is newer|is OLDER) +- .*`, // Test case `check-access-filters` detected listing miscompares due // to indeterminate order of rclone operations in presence of multiple // subdirectories. The order inconsistency initially showed up in the // listings and triggered reordering of log messages, but the actual // files will in fact match. `.* +.....Access test failed: Path[12] file not found in Path[12].*`, // Test case `resync` suffered from the order of queued copies. `(?:INFO |NOTICE): - Path2 Resync will copy to Path1 +- .*`, // Test case `normalization` can have random order of fix-case files. `(?:INFO |NOTICE): .*: Fixed case by renaming to: .*`, // order of files re-checked prior to a conflict rename `ERROR : .*: {hashtype} differ.*`, // Directory modification time setting can happen in any order `INFO : .*: (Set directory modification time|Made directory with metadata).*`, } // Some log lines can contain Windows path separator that must be // converted to "/" in every matching token to match golden logs. 
var logLinesWithSlash = []string{ `.*\(\d\d\) :.*(fix-names|touch-glob|touch-copy|copy-file|copy-as|copy-dir|delete-file) `, `INFO : - .*Path[12].* +.*Queue copy to.* Path[12].*`, `INFO : Synching Path1 .*? with Path2 `, `INFO : Validating listings for `, } var regexFixSlash = regexp.MustCompile("^(" + strings.Join(logLinesWithSlash, "|") + ")") // Command line flags for bisync test var ( argTestCase = flag.String("case", "", "Bisync test case to run") argRemote2 = flag.String("remote2", "", "Path2 for bisync tests") argNoCompare = flag.Bool("no-compare", false, "Do not compare test results with golden") argNoCleanup = flag.Bool("no-cleanup", false, "Keep test files") argGolden = flag.Bool("golden", false, "Store results as golden") argDebug = flag.Bool("debug", false, "Print debug messages") argStopAt = flag.Int("stop-at", 0, "Stop after given test step") // Flag -refresh-times helps with Dropbox tests failing with message // "src and dst identical but can't set mod time without deleting and re-uploading" argRefreshTimes = flag.Bool("refresh-times", false, "Force refreshing the target modtime, useful for Dropbox (default: false)") ignoreLogs = flag.Bool("ignore-logs", false, "skip comparing log lines but still compare listings") argPCount = flag.Int("pcount", 2, "number of parallel subtests to run for TestBisyncConcurrent") // go test ./cmd/bisync -race -pcount 10 ) // bisyncTest keeps all test data in a single place type bisyncTest struct { // per-test state t *testing.T step int stopped bool stepStr string testCase string sessionName string // test dirs testDir string dataDir string initDir string goldenDir string workDir string fs1 fs.Fs path1 string canonPath1 string fs2 fs.Fs path2 string canonPath2 string // test log logDir string logPath string logFile *os.File // global state dataRoot string randName string tempDir string parent1 fs.Fs parent2 fs.Fs // global flags argRemote1 string argRemote2 string noCompare bool noCleanup bool golden bool debug bool 
stopAt int TestFn bisync.TestFunc ignoreModtime bool // ignore modtimes when comparing final listings, for backends without support ignoreBlankHash bool // ignore blank hashes for backends where we allow them to be blank } var color = bisync.Color // TestMain drives the tests func TestMain(m *testing.M) { bisync.LogTZ = time.UTC ci := fs.GetConfig(context.TODO()) ciSave := *ci defer func() { *ci = ciSave }() // need to set context.TODO() here as we cannot pass a ctx to fs.LogLevelPrintf ci.LogLevel = fs.LogLevelInfo if *argDebug { ci.LogLevel = fs.LogLevelDebug } fstest.Initialise() fstest.TestMain(m) } // Path1 is remote, Path2 is local func TestBisyncRemoteLocal(t *testing.T) { if *fstest.RemoteName == *argRemote2 { t.Skip("path1 and path2 are the same remote") } _, remote, cleanup, err := fstest.RandomRemote() fs.Logf(nil, "remote: %v", remote) require.NoError(t, err) defer cleanup() ctx, _ := fs.AddConfig(context.TODO()) testBisync(ctx, t, remote, *argRemote2) } // Path1 is local, Path2 is remote func TestBisyncLocalRemote(t *testing.T) { if *fstest.RemoteName == *argRemote2 { t.Skip("path1 and path2 are the same remote") } _, remote, cleanup, err := fstest.RandomRemote() fs.Logf(nil, "remote: %v", remote) require.NoError(t, err) defer cleanup() ctx, _ := fs.AddConfig(context.TODO()) testBisync(ctx, t, *argRemote2, remote) } // Path1 and Path2 are both different directories on remote // (useful for testing server-side copy/move) func TestBisyncRemoteRemote(t *testing.T) { _, remote, cleanup, err := fstest.RandomRemote() fs.Logf(nil, "remote: %v", remote) require.NoError(t, err) defer cleanup() ctx, _ := fs.AddConfig(context.TODO()) testBisync(ctx, t, remote, remote) } // make sure rc can cope with running concurrent jobs func TestBisyncConcurrent(t *testing.T) { if !isLocal(*fstest.RemoteName) { t.Skip("TestBisyncConcurrent is skipped on non-local") } if *argTestCase != "" && *argTestCase != "basic" { t.Skip("TestBisyncConcurrent only tests 'basic'") } if 
*argPCount < 2 { t.Skip("TestBisyncConcurrent is pointless with -pcount < 2") } if *argGolden { t.Skip("skip TestBisyncConcurrent when goldenizing") } oldArgTestCase := argTestCase *argTestCase = "basic" *ignoreLogs = true // not useful to compare logs here because both runs will be logging at once t.Cleanup(func() { argTestCase = oldArgTestCase *ignoreLogs = false }) for i := 0; i < *argPCount; i++ { t.Run(fmt.Sprintf("test%v", i), testParallel) } } func testParallel(t *testing.T) { t.Parallel() TestBisyncRemoteRemote(t) } // TestBisync is a test engine for bisync test cases. func testBisync(ctx context.Context, t *testing.T, path1, path2 string) { ci := fs.GetConfig(ctx) ciSave := *ci defer func() { *ci = ciSave }() if *argRefreshTimes { ci.RefreshTimes = true } bisync.ColorsLock.Lock() bisync.Colors = true bisync.ColorsLock.Unlock() ci.FsCacheExpireDuration = fs.Duration(5 * time.Hour) baseDir, err := os.Getwd() require.NoError(t, err, "get current directory") randName := time.Now().Format("150405") + random.String(2) // some bucket backends don't like dots, keep this short to avoid linux errors tempDir := filepath.Join(os.TempDir(), randName) workDir := filepath.Join(tempDir, "workdir") b := &bisyncTest{ // per-test state t: t, // global state tempDir: tempDir, randName: randName, workDir: workDir, dataRoot: filepath.Join(baseDir, "testdata"), logDir: filepath.Join(tempDir, "logs"), logPath: filepath.Join(workDir, logFileName), // global flags argRemote1: path1, argRemote2: path2, noCompare: *argNoCompare, noCleanup: *argNoCleanup, golden: *argGolden, debug: *argDebug, stopAt: *argStopAt, } b.mkdir(b.tempDir) b.mkdir(b.logDir) fnHandle := atexit.Register(func() { if atexit.Signalled() { b.cleanupAll() } }) defer func() { b.cleanupAll() atexit.Unregister(fnHandle) }() argCase := *argTestCase if argCase == "" { argCase = "all" if testing.Short() { // remote tests can be long, help with "go test -short" argCase = "basic" } } testList := strings.Split(argCase, ",") 
if strings.ToLower(argCase) == "all" { testList = nil for _, testCase := range b.listDir(b.dataRoot) { if strings.HasPrefix(testCase, "test_") { // if dir is empty, skip it (can happen due to gitignored files/dirs when checking out branch) if len(b.listDir(filepath.Join(b.dataRoot, testCase))) == 0 { continue } testList = append(testList, testCase) } } } require.False(t, b.stopAt > 0 && len(testList) > 1, "-stop-at is meaningful only for a single test") deadline, hasDeadline := t.Deadline() var maxRunDuration time.Duration for _, testCase := range testList { testCase = strings.ReplaceAll(testCase, "-", "_") testCase = strings.TrimPrefix(testCase, "test_") t.Run(testCase, func(childTest *testing.T) { startTime := time.Now() remaining := time.Until(deadline) if hasDeadline && (remaining < maxRunDuration || remaining < 10*time.Second) { // avoid starting tests we don't have time to finish childTest.Fatalf("test %v timed out - not enough time to start test (%v remaining, need %v for test)", testCase, remaining, maxRunDuration) } bCopy := *b bCopy.runTestCase(ctx, childTest, testCase) if time.Since(startTime) > maxRunDuration { maxRunDuration = time.Since(startTime) } }) } } func (b *bisyncTest) cleanupAll() { if b.noCleanup { return } ctx := context.Background() if b.parent1 != nil { _ = operations.Purge(ctx, b.parent1, "") } if b.parent2 != nil { _ = operations.Purge(ctx, b.parent2, "") } _ = os.RemoveAll(b.tempDir) } func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase string) { b.t = t b.testCase = testCase var err error b.fs1, b.parent1, b.path1, b.canonPath1 = b.makeTempRemote(ctx, b.argRemote1, "path1") b.fs2, b.parent2, b.path2, b.canonPath2 = b.makeTempRemote(ctx, b.argRemote2, "path2") if strings.Contains(b.replaceHex(b.path1), " ") || strings.Contains(b.replaceHex(b.path2), " ") { b.t.Skip("skipping as tests can't handle spaces config string") } b.sessionName = bilib.SessionName(b.fs1, b.fs2) b.testDir = b.ensureDir(b.dataRoot, 
"test_"+b.testCase, false) b.initDir = b.ensureDir(b.testDir, "initial", false) b.goldenDir = b.ensureDir(b.testDir, "golden", false) b.dataDir = b.ensureDir(b.testDir, "modfiles", true) // optional // normalize unicode so tets are runnable on macOS b.sessionName = norm.NFC.String(b.sessionName) b.goldenDir = norm.NFC.String(b.goldenDir) // For test stability, jam initial dates to a fixed past date. // Test cases that change files will touch specific files to fixed new dates. err = filepath.Walk(b.initDir, func(path string, info os.FileInfo, err error) error { if err == nil && !info.IsDir() { return os.Chtimes(path, initDate, initDate) } return err }) require.NoError(b.t, err, "jamming initial dates") // copy to a new unique initdir and datadir so concurrent tests don't interfere with each other ctxNoDsStore, _ := ctxNoDsStore(ctx, b.t) makeUnique := func(label, oldPath string) (newPath string) { newPath = oldPath info, err := os.Stat(oldPath) if err == nil && info.IsDir() { // datadir is optional oldFs, err := cache.Get(ctx, oldPath) require.NoError(b.t, err) newPath = b.tempDir + "/" + label + "/" + "test_" + b.testCase + "-" + random.String(8) newFs, err := cache.Get(ctx, newPath) require.NoError(b.t, err) require.NoError(b.t, sync.CopyDir(ctxNoDsStore, newFs, oldFs, true), "setting up "+label) } return newPath } b.initDir = makeUnique("initdir", b.initDir) b.dataDir = makeUnique("datadir", b.dataDir) // Prepare initial content b.cleanupCase(ctx) ctx = accounting.WithStatsGroup(ctx, random.String(8)) fstest.CheckListingWithPrecision(b.t, b.fs1, []fstest.Item{}, []string{}, b.fs1.Precision()) // verify starting from empty fstest.CheckListingWithPrecision(b.t, b.fs2, []fstest.Item{}, []string{}, b.fs2.Precision()) initFs, err := cache.Get(ctx, b.initDir) require.NoError(b.t, err) // verify pre-test equality (garbage in, garbage out!) 
srcObjs, srcDirs, err := walk.GetAll(ctxNoDsStore, initFs, "", false, -1) assert.NoError(b.t, err) items := []fstest.Item{} for _, obj := range srcObjs { require.False(b.t, strings.Contains(obj.Remote(), ".partial")) rc, err := operations.Open(ctxNoDsStore, obj) assert.NoError(b.t, err) bytes := make([]byte, obj.Size()) _, err = rc.Read(bytes) assert.NoError(b.t, err) assert.NoError(b.t, rc.Close()) item := fstest.NewItem(norm.NFC.String(obj.Remote()), string(bytes), obj.ModTime(ctxNoDsStore)) items = append(items, item) } dirs := []string{} for _, dir := range srcDirs { dirs = append(dirs, norm.NFC.String(dir.Remote())) } fs.Logf(nil, "checking initFs %s", initFs) fstest.CheckListingWithPrecision(b.t, initFs, items, dirs, initFs.Precision()) checkError(b.t, sync.CopyDir(ctxNoDsStore, b.fs1, initFs, true), "setting up path1") fs.Logf(nil, "checking Path1 %s", b.fs1) fstest.CheckListingWithPrecision(b.t, b.fs1, items, dirs, b.fs1.Precision()) checkError(b.t, sync.CopyDir(ctxNoDsStore, b.fs2, initFs, true), "setting up path2") fs.Logf(nil, "checking path2 %s", b.fs2) fstest.CheckListingWithPrecision(b.t, b.fs2, items, dirs, b.fs2.Precision()) // Create log file b.mkdir(b.workDir) b.logFile, err = os.Create(b.logPath) require.NoError(b.t, err, "creating log file") // Execute test scenario scenFile := filepath.Join(b.testDir, "scenario.txt") scenBuf, err := os.ReadFile(scenFile) scenReplacer := b.newReplacer(false) require.NoError(b.t, err) b.step = 0 b.stopped = false for line := range strings.SplitSeq(string(scenBuf), "\n") { comment := strings.Index(line, "#") if comment != -1 { line = line[:comment] } line = strings.TrimSpace(line) if line == "" { if b.golden { // Keep empty lines in golden logs _, _ = b.logFile.WriteString("\n") } continue } b.step++ b.stepStr = fmt.Sprintf("(%02d) :", b.step) line = scenReplacer.Replace(line) if err = b.runTestStep(ctx, line); err != nil { require.Failf(b.t, "test step failed", "step %d failed: %v", b.step, err) return } if 
b.stopAt > 0 && b.step >= b.stopAt { comment := "" if b.golden { comment = " (ignoring -golden)" } b.logPrintf("Stopping after step %d%s", b.step, comment) b.stopped = true b.noCleanup = true b.noCompare = true break } } // Perform post-run activities require.NoError(b.t, b.logFile.Close(), "flushing test log") b.logFile = nil savedLog := b.testCase + ".log" err = bilib.CopyFile(b.logPath, filepath.Join(b.logDir, savedLog)) require.NoError(b.t, err, "saving log file %s", savedLog) if b.golden && !b.stopped { fs.Logf(nil, "Store results to golden directory") b.storeGolden() return } errorCount := 0 if b.noCompare { fs.Logf(nil, "Skip comparing results with golden directory") errorCount = -2 } else { errorCount = b.compareResults() } if b.noCleanup { fs.Logf(nil, "Skip cleanup") } else { b.cleanupCase(ctx) } var msg string var passed bool switch errorCount { case 0: msg = color(terminal.GreenFg, fmt.Sprintf("TEST %s PASSED", b.testCase)) passed = true case -2: msg = color(terminal.YellowFg, fmt.Sprintf("TEST %s SKIPPED", b.testCase)) passed = true case -1: msg = color(terminal.RedFg, fmt.Sprintf("TEST %s FAILED - WRONG NUMBER OF FILES", b.testCase)) passed = false default: msg = color(terminal.RedFg, fmt.Sprintf("TEST %s FAILED - %d MISCOMPARED FILES", b.testCase, errorCount)) buckets := b.fs1.Features().BucketBased || b.fs2.Features().BucketBased passed = false if b.testCase == "rmdirs" && buckets { msg += " (expected failure on bucket remotes)" passed = true } } b.t.Log(msg) if !passed { b.t.FailNow() } } func isLocal(remote string) bool { return bilib.IsLocalPath(remote) && !strings.HasPrefix(remote, ":") && !strings.Contains(remote, ",") } // makeTempRemote creates temporary folder and makes a filesystem // if a local path is provided, it's ignored (the test will run under system temp) func (b *bisyncTest) makeTempRemote(ctx context.Context, remote, subdir string) (f, parent fs.Fs, path, canon string) { var err error if isLocal(remote) { if remote != "" && 
!strings.HasPrefix(remote, "local") && *fstest.RemoteName != "" { b.t.Fatalf(`Missing ":" in remote %q. Use "local" to test with local filesystem.`, remote) } parent, err = cache.Get(ctx, b.tempDir) checkError(b.t, err, "parsing local tempdir %s", b.tempDir) path = filepath.Join(b.tempDir, b.testCase) path = filepath.Join(path, subdir) } else { last := remote[len(remote)-1] if last != ':' && last != '/' { remote += "/" } remote += b.randName parent, err = cache.Get(ctx, remote) checkError(b.t, err, "parsing remote %s", remote) checkError(b.t, operations.Mkdir(ctx, parent, subdir), "Mkdir "+subdir) // ensure dir exists (storj seems to need this) path = remote + "/" + b.testCase path += "/" + subdir } f, err = cache.Get(ctx, path) checkError(b.t, err, "parsing remote/subdir %s/%s", remote, subdir) path = bilib.FsPath(f) // Make it canonical canon = bilib.StripHexString(bilib.CanonicalPath(strings.TrimSuffix(strings.TrimSuffix(path, `\`+subdir+`\`), "/"+subdir+"/"))) + "_" // account for possible connection string return } func (b *bisyncTest) cleanupCase(ctx context.Context) { _ = operations.Purge(ctx, b.fs1, "") _ = operations.Purge(ctx, b.fs2, "") _ = os.RemoveAll(b.workDir) } func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) { var fsrc, fdst fs.Fs ctx = accounting.WithStatsGroup(ctx, random.String(8)) b.logPrintf("%s %s", color(terminal.CyanFg, b.stepStr), color(terminal.BlueFg, line)) ci := fs.GetConfig(ctx) ciSave := *ci defer func() { *ci = ciSave }() testFunc := func() { src := filepath.Join(b.dataDir, "file7.txt") for i := range 50 { dst := "file" + fmt.Sprint(i) + ".txt" err := b.copyFile(ctx, src, b.replaceHex(b.path2), dst) if err != nil { fs.Errorf(src, "error copying file: %v", err) } dst = "file" + fmt.Sprint(100-i) + ".txt" err = b.copyFile(ctx, src, b.replaceHex(b.path1), dst) if err != nil { fs.Errorf(dst, "error copying file: %v", err) } } } args := splitLine(line) switch args[0] { case "test": b.checkArgs(args, 1, 0) 
return nil case "copy-listings": b.checkArgs(args, 1, 1) return b.saveTestListings(args[1], true) case "move-listings": b.checkArgs(args, 1, 1) return b.saveTestListings(args[1], false) case "purge-children": b.checkArgs(args, 1, 1) dir := "" if strings.HasPrefix(args[1], b.replaceHex(b.path1)) { fsrc = b.fs1 dir = strings.TrimPrefix(args[1], b.replaceHex(b.path1)) } else if strings.HasPrefix(args[1], b.replaceHex(b.path2)) { fsrc = b.fs2 dir = strings.TrimPrefix(args[1], b.replaceHex(b.path2)) } else { return fmt.Errorf("error parsing arg: %q (path1: %q, path2: %q)", args[1], b.path1, b.path2) } return purgeChildren(ctx, fsrc, dir) case "delete-file": b.checkArgs(args, 1, 1) dir, file := filepath.Split(args[1]) if fsrc, err = cache.Get(ctx, dir); err != nil { return err } var obj fs.Object if obj, err = fsrc.NewObject(ctx, file); err != nil { return err } return operations.DeleteFile(ctx, obj) case "delete-glob": b.checkArgs(args, 2, 2) if fsrc, err = cache.Get(ctx, args[1]); err != nil { return err } return deleteFiles(ctx, fsrc, args[2]) case "touch-glob": b.checkArgs(args, 3, 3) date, src, glob := args[1], args[2], args[3] if fsrc, err = cache.Get(ctx, b.replaceHex(src)); err != nil { return err } _, err = touchFiles(ctx, date, fsrc, src, glob) return err case "touch-copy": b.checkArgs(args, 3, 3) date, src, dst := args[1], args[2], args[3] dir, file := filepath.Split(src) if fsrc, err = cache.Get(ctx, dir); err != nil { return err } if _, err = touchFiles(ctx, date, fsrc, dir, file); err != nil { return err } return b.copyFile(ctx, src, dst, "") case "copy-file": b.checkArgs(args, 2, 2) return b.copyFile(ctx, args[1], args[2], "") case "copy-as": b.checkArgs(args, 3, 3) return b.copyFile(ctx, args[1], args[2], args[3]) case "copy-as-NFC": b.checkArgs(args, 3, 3) ci.NoUnicodeNormalization = true ci.FixCase = true return b.copyFile(ctx, args[1], norm.NFC.String(args[2]), norm.NFC.String(args[3])) case "copy-as-NFD": b.checkArgs(args, 3, 3) 
ci.NoUnicodeNormalization = true ci.FixCase = true return b.copyFile(ctx, args[1], norm.NFD.String(args[2]), norm.NFD.String(args[3])) case "copy-dir", "sync-dir": b.checkArgs(args, 2, 2) if fsrc, err = cache.Get(ctx, args[1]); err != nil { return err } if fdst, err = cache.Get(ctx, args[2]); err != nil { return err } switch args[0] { case "copy-dir": ctxNoDsStore, _ := ctxNoDsStore(ctx, b.t) err = sync.CopyDir(ctxNoDsStore, fdst, fsrc, true) case "sync-dir": ctxNoDsStore, _ := ctxNoDsStore(ctx, b.t) err = sync.Sync(ctxNoDsStore, fdst, fsrc, true) } return err case "list-dirs": b.checkArgs(args, 1, 1) return b.listSubdirs(ctx, args[1], true) case "list-files": b.checkArgs(args, 1, 1) return b.listSubdirs(ctx, args[1], false) case "bisync": ci.NoUnicodeNormalization = false ci.IgnoreCaseSync = false // ci.FixCase = true return b.runBisync(ctx, args[1:]) case "test-func": b.TestFn = testFunc return case "concurrent-func": b.TestFn = func() { src := filepath.Join(b.dataDir, "file7.txt") dst := "file1.txt" err := b.copyFile(ctx, src, b.replaceHex(b.path2), dst) if err != nil { fs.Errorf(src, "error copying file: %v", err) } } return case "fix-names": // in case the local os converted any filenames ci.NoUnicodeNormalization = true ci.FixCase = true ci.IgnoreTimes = true reset := func() { ci.NoUnicodeNormalization = false ci.FixCase = false ci.IgnoreTimes = false } defer reset() b.checkArgs(args, 1, 1) var ok bool var remoteName string var remotePath string remoteName, remotePath, err = fspath.SplitFs(args[1]) if err != nil { return err } if remoteName == "" { remoteName = "/" } fsrc, err = cache.Get(ctx, remoteName) if err != nil { return err } // DEBUG fs.Debugf(remotePath, "is NFC: %v", norm.NFC.IsNormalString(remotePath)) fs.Debugf(remotePath, "is NFD: %v", norm.NFD.IsNormalString(remotePath)) fs.Debugf(remotePath, "is valid UTF8: %v", utf8.ValidString(remotePath)) // check if it's a dir, try moving it var leaf string _, leaf, err = fspath.Split(remotePath) if err == 
nil && leaf == "" { remotePath = args[1] fs.Debugf(remotePath, "attempting to fix directory") fixDirname := func(old, new string) { if new != old { oldName, err := cache.Get(ctx, old) if err != nil { fs.Errorf(old, "error getting Fs: %v", err) return } fs.Debugf(nil, "Attempting to move %s to %s", oldName.Root(), new) // Create random name to temporarily move dir to tmpDirName := strings.TrimSuffix(new, slash) + "-rclone-move-" + random.String(8) var tmpDirFs fs.Fs tmpDirFs, err = cache.Get(ctx, tmpDirName) if err != nil { fs.Errorf(tmpDirName, "error creating temp dir for move: %v", err) } if tmpDirFs == nil { return } err = sync.MoveDir(ctx, tmpDirFs, oldName, true, true) if err != nil { fs.Debugf(oldName, "error attempting to move folder: %v", err) } // now move the temp dir to real name fsrc, err = cache.Get(ctx, new) if err != nil { fs.Errorf(new, "error creating fsrc dir for move: %v", err) } if fsrc == nil { return } err = sync.MoveDir(ctx, fsrc, tmpDirFs, true, true) if err != nil { fs.Debugf(tmpDirFs, "error attempting to move folder to %s: %v", fsrc.Root(), err) } } else { fs.Debugf(nil, "old and new are equal. Skipping. 
%s (%s) %s (%s)", old, stringToHash(old), new, stringToHash(new)) } } if norm.NFC.String(remotePath) != remotePath && norm.NFD.String(remotePath) != remotePath { fs.Debugf(remotePath, "This is neither fully NFD or NFC -- can't fix reliably!") } fixDirname(norm.NFC.String(remotePath), remotePath) fixDirname(norm.NFD.String(remotePath), remotePath) return } // if it's a file fs.Debugf(remotePath, "attempting to fix file -- filename hash: %s", stringToHash(leaf)) fixFilename := func(old, new string) { ok, err := fs.FileExists(ctx, fsrc, old) if err != nil { fs.Debugf(remotePath, "error checking if file exists: %v", err) } fs.Debugf(old, "file exists: %v %s", ok, stringToHash(old)) fs.Debugf(nil, "FILE old: %s new: %s equal: %v", old, new, old == new) fs.Debugf(nil, "HASH old: %s new: %s equal: %v", stringToHash(old), stringToHash(new), stringToHash(old) == stringToHash(new)) if ok && new != old { fs.Debugf(new, "attempting to rename %s to %s", old, new) srcObj, err := fsrc.NewObject(ctx, old) if err != nil { fs.Errorf(old, "errorfinding srcObj - %v", err) } _, err = operations.MoveCaseInsensitive(ctx, fsrc, fsrc, new, old, false, srcObj) if err != nil { fs.Errorf(new, "error trying to rename %s to %s - %v", old, new, err) } } } // look for NFC version fixFilename(norm.NFC.String(remotePath), remotePath) // if it's in a subdir we just moved, the file and directory might have different encodings. Check for that. mixed := strings.TrimSuffix(norm.NFD.String(remotePath), norm.NFD.String(leaf)) + norm.NFC.String(leaf) fixFilename(mixed, remotePath) // Try NFD fixFilename(norm.NFD.String(remotePath), remotePath) // Try mixed in reverse mixed = strings.TrimSuffix(norm.NFC.String(remotePath), norm.NFC.String(leaf)) + norm.NFD.String(leaf) fixFilename(mixed, remotePath) // check if it's right now, error if not ok, err = fs.FileExists(ctx, fsrc, remotePath) if !ok || err != nil { fs.Logf(remotePath, "Can't find expected file %s (was it renamed by the os?) 
%v", args[1], err) return } // include hash of filename to make unicode form differences easier to see in logs fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf)) return default: return fmt.Errorf("unknown command: %q", args[0]) } } // splitLine splits scenario line into tokens and performs // substitutions that involve whitespace or control chars. func splitLine(line string) (args []string) { for s := range strings.FieldsSeq(line) { b := []byte(whitespaceReplacer.Replace(s)) b = regexChar.ReplaceAllFunc(b, func(b []byte) []byte { c, _ := strconv.ParseUint(string(b[5:7]), 16, 8) return []byte{byte(c)} }) args = append(args, string(b)) } return }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/queue.go
cmd/bisync/queue.go
package bisync import ( "context" "encoding/json" "fmt" "io" "sort" mutex "sync" // renamed as "sync" already in use "time" "github.com/rclone/rclone/cmd/bisync/bilib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/sync" "github.com/rclone/rclone/lib/terminal" ) // Results represents a pair of synced files, as reported by the LoggerFn // Bisync uses this to determine what happened during the sync, and modify the listings accordingly type Results struct { Src string Dst string Name string AltName string Size int64 Modtime time.Time Hash string Flags string Sigil operations.Sigil Err error Winner operations.Winner IsWinner bool IsSrc bool IsDst bool Origin string } // ResultsSlice is a slice of Results (obviously) type ResultsSlice []Results func (rs *ResultsSlice) has(name string) bool { for _, r := range *rs { if r.Name == name { return true } } return false } type bisyncQueueOpt struct { logger operations.LoggerOpt lock mutex.Mutex once mutex.Once ignoreListingChecksum bool ignoreListingModtime bool hashTypes map[string]hash.Type queueCI *fs.ConfigInfo } // allows us to get the right hashtype during the LoggerFn without knowing whether it's Path1/Path2 func (b *bisyncRun) getHashType(fname string) hash.Type { ht, ok := b.queueOpt.hashTypes[fname] if ok { return ht } return hash.None } // FsPathIfAny handles type assertions and returns a formatted bilib.FsPath if valid, otherwise "" func FsPathIfAny(x fs.DirEntry) string { obj, ok := x.(fs.Object) if x != nil && ok { return bilib.FsPath(obj.Fs()) } return "" } func resultName(result Results, side, src, dst fs.DirEntry) string { if side != nil { return side.Remote() } else if result.IsSrc && dst != nil { return dst.Remote() } else if src != nil { return src.Remote() } return "" } // returns the opposite side's name, only if different func altName(name 
string, src, dst fs.DirEntry) string { if src != nil && dst != nil { if src.Remote() != dst.Remote() { switch name { case src.Remote(): return dst.Remote() case dst.Remote(): return src.Remote() } } } return "" } // WriteResults is Bisync's LoggerFn func (b *bisyncRun) WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) { b.queueOpt.lock.Lock() defer b.queueOpt.lock.Unlock() opt := operations.GetLoggerOpt(ctx) result := Results{ Sigil: sigil, Src: FsPathIfAny(src), Dst: FsPathIfAny(dst), Err: err, Origin: "sync", } result.Winner = operations.WinningSide(ctx, sigil, src, dst, err) fss := []fs.DirEntry{src, dst} for i, side := range fss { result.Name = resultName(result, side, src, dst) result.AltName = altName(result.Name, src, dst) result.IsSrc = i == 0 result.IsDst = i == 1 result.Flags = "-" if side != nil { result.Size = side.Size() if !b.queueOpt.ignoreListingModtime { result.Modtime = side.ModTime(ctx).In(TZ) } if !b.queueOpt.ignoreListingChecksum { sideObj, ok := side.(fs.ObjectInfo) if ok { result.Hash, _ = sideObj.Hash(ctx, b.getHashType(sideObj.Fs().Name())) result.Hash, _ = b.tryDownloadHash(ctx, sideObj, result.Hash) } } } result.IsWinner = result.Winner.Obj == side // used during resync only if err == fs.ErrorIsDir { if src != nil { result.Src = src.Remote() result.Name = src.Remote() } else { result.Dst = dst.Remote() result.Name = dst.Remote() } result.Flags = "d" result.Size = -1 } prettyprint(result, "writing result", fs.LogLevelDebug) if result.Size < 0 && result.Flags != "d" && ((b.queueOpt.queueCI.CheckSum && !b.downloadHashOpt.downloadHash) || b.queueOpt.queueCI.SizeOnly) { b.queueOpt.once.Do(func() { fs.Log(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. 
Consider using modtime instead (the default) or --drive-skip-gdocs")) }) } err := json.NewEncoder(opt.JSON).Encode(result) if err != nil { fs.Errorf(result, "Error encoding JSON: %v", err) } } } // ReadResults decodes the JSON data from WriteResults func ReadResults(results io.Reader) []Results { dec := json.NewDecoder(results) var slice []Results for { var r Results if err := dec.Decode(&r); err == io.EOF { break } prettyprint(r, "result", fs.LogLevelDebug) slice = append(slice, r) } return slice } // for setup code shared by both fastCopy and resyncDir func (b *bisyncRun) preCopy(ctx context.Context) context.Context { b.queueOpt.queueCI = fs.GetConfig(ctx) b.queueOpt.ignoreListingChecksum = b.opt.IgnoreListingChecksum b.queueOpt.ignoreListingModtime = !b.opt.Compare.Modtime b.queueOpt.hashTypes = map[string]hash.Type{ b.fs1.Name(): b.opt.Compare.HashType1, b.fs2.Name(): b.opt.Compare.HashType2, } b.queueOpt.logger.LoggerFn = b.WriteResults overridingEqual := false if (b.opt.Compare.Modtime && b.opt.Compare.Checksum) || b.opt.Compare.DownloadHash { overridingEqual = true fs.Debugf(nil, "overriding equal") // otherwise impossible in Sync, so override Equal ctx = b.EqualFn(ctx) } if b.opt.ResyncMode == PreferOlder || b.opt.ResyncMode == PreferLarger || b.opt.ResyncMode == PreferSmaller { overridingEqual = true fs.Debugf(nil, "overriding equal") ctx = b.EqualFn(ctx) } ctxCopyLogger := operations.WithSyncLogger(ctx, b.queueOpt.logger) if b.opt.Compare.Checksum && (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.opt.Compare.SlowHashDetected { // set here in case !b.opt.Compare.Modtime b.queueOpt.queueCI = fs.GetConfig(ctxCopyLogger) if b.opt.Compare.NoSlowHash { b.queueOpt.queueCI.CheckSum = false } if b.opt.Compare.SlowHashSyncOnly && !overridingEqual { b.queueOpt.queueCI.CheckSum = true } } return ctxCopyLogger } func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.Names, queueName string) ([]Results, error) { if 
b.InGracefulShutdown { return nil, nil } ctx = b.preCopy(ctx) if err := b.saveQueue(files, queueName); err != nil { return nil, err } ctxCopy, filterCopy := filter.AddConfig(b.opt.setDryRun(ctx)) for _, file := range files.ToList() { if err := filterCopy.AddFile(file); err != nil { return nil, err } alias := b.aliases.Alias(file) if alias != file { if err := filterCopy.AddFile(alias); err != nil { return nil, err } } } b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown accounting.Stats(ctxCopy).SetMaxCompletedTransfers(-1) // we need a complete list in the event of graceful shutdown ctxCopy, b.CancelSync = context.WithCancel(ctxCopy) b.testFn() err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs) prettyprint(b.queueOpt.logger, "b.queueOpt.logger", fs.LogLevelDebug) getResults := ReadResults(b.queueOpt.logger.JSON) fs.Debugf(nil, "Got %v results for %v", len(getResults), queueName) lineFormat := "%s %8d %s %s %s %q\n" for _, result := range getResults { fs.Debugf(nil, lineFormat, result.Flags, result.Size, result.Hash, "", result.Modtime, result.Name) } return getResults, err } func (b *bisyncRun) retryFastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.Names, queueName string, results []Results, err error) ([]Results, error) { ci := fs.GetConfig(ctx) if err != nil && b.opt.Resilient && !b.InGracefulShutdown && ci.Retries > 1 { for tries := 1; tries <= ci.Retries; tries++ { fs.Logf(queueName, Color(terminal.YellowFg, "Received error: %v - retrying as --resilient is set. 
Retry %d/%d"), err, tries, ci.Retries) accounting.GlobalStats().ResetErrors() if retryAfter := accounting.GlobalStats().RetryAfter(); !retryAfter.IsZero() { d := time.Until(retryAfter) if d > 0 { fs.Logf(nil, "Received retry after error - sleeping until %s (%v)", retryAfter.Format(time.RFC3339Nano), d) time.Sleep(d) } } if ci.RetriesInterval > 0 { naptime(ci.RetriesInterval) } results, err = b.fastCopy(ctx, fsrc, fdst, files, queueName) if err == nil || b.InGracefulShutdown { return results, err } } } return results, err } func (b *bisyncRun) resyncDir(ctx context.Context, fsrc, fdst fs.Fs) ([]Results, error) { ctx = b.preCopy(ctx) err := sync.CopyDir(ctx, fdst, fsrc, b.opt.CreateEmptySrcDirs) prettyprint(b.queueOpt.logger, "b.queueOpt.logger", fs.LogLevelDebug) getResults := ReadResults(b.queueOpt.logger.JSON) fs.Debugf(nil, "Got %v results for %v", len(getResults), "resync") return getResults, err } // operation should be "make" or "remove" func (b *bisyncRun) syncEmptyDirs(ctx context.Context, dst fs.Fs, candidates bilib.Names, dirsList *fileList, results *[]Results, operation string) { if b.InGracefulShutdown { return } fs.Debugf(nil, "syncing empty dirs") if b.opt.CreateEmptySrcDirs && (!b.opt.Resync || operation == "make") { candidatesList := candidates.ToList() if operation == "remove" { // reverse the sort order to ensure we remove subdirs before parent dirs sort.Sort(sort.Reverse(sort.StringSlice(candidatesList))) } for _, s := range candidatesList { var direrr error if dirsList.has(s) { // make sure it's a dir, not a file r := Results{} r.Name = s r.Size = -1 r.Modtime = dirsList.getTime(s).In(time.UTC) r.Flags = "d" r.Err = nil r.Origin = "syncEmptyDirs" r.Winner = operations.Winner{ // note: Obj not set Side: "src", Err: nil, } rSrc := r rDst := r rSrc.IsSrc = true rSrc.IsDst = false rDst.IsSrc = false rDst.IsDst = true rSrc.IsWinner = true rDst.IsWinner = false if operation == "remove" { // directories made empty by the sync will have already been 
deleted during the sync // this just catches the already-empty ones (excluded from sync by --files-from filter) direrr = operations.TryRmdir(ctx, dst, s) rSrc.Sigil = operations.MissingOnSrc rDst.Sigil = operations.MissingOnSrc rSrc.Dst = s rDst.Dst = s rSrc.Winner.Side = "none" rDst.Winner.Side = "none" } else if operation == "make" { direrr = operations.Mkdir(ctx, dst, s) rSrc.Sigil = operations.MissingOnDst rDst.Sigil = operations.MissingOnDst rSrc.Src = s rDst.Src = s } else { direrr = fmt.Errorf("invalid operation. Expected 'make' or 'remove', received '%q'", operation) } if direrr != nil { fs.Debugf(nil, "Error syncing directory: %v", direrr) } else { *results = append(*results, rSrc, rDst) } } } } } func (b *bisyncRun) saveQueue(files bilib.Names, jobName string) error { if !b.opt.SaveQueues { return nil } queueFile := fmt.Sprintf("%s.%s.que", b.basePath, jobName) return files.Save(queueFile) } func naptime(totalWait fs.Duration) { expireTime := time.Now().Add(time.Duration(totalWait)) fs.Logf(nil, "will retry in %v at %v", totalWait, expireTime.Format("2006-01-02 15:04:05 MST")) for i := 0; time.Until(expireTime) > 0; i++ { if i > 0 && i%10 == 0 { fs.Infof(nil, Color(terminal.Dim, "retrying in %v..."), time.Until(expireTime).Round(1*time.Second)) } else { fs.Debugf(nil, Color(terminal.Dim, "retrying in %v..."), time.Until(expireTime).Round(1*time.Second)) } time.Sleep(1 * time.Second) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/bilib/canonical.go
cmd/bisync/bilib/canonical.go
// Package bilib provides common stuff for bisync and bisync_test package bilib import ( "context" "os" "path/filepath" "regexp" "runtime" "strings" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/operations" ) // FsPath converts Fs to a suitable rclone argument func FsPath(f fs.Info) string { name, path, slash := f.Name(), f.Root(), "/" if name == "local" { slash = string(os.PathSeparator) if runtime.GOOS == "windows" { path = strings.ReplaceAll(path, "/", slash) path = strings.TrimPrefix(path, `\\?\`) } } else { path = name + ":" + path } if !strings.HasSuffix(path, slash) { path += slash } return path } // CanonicalPath converts a remote to a suitable base file name func CanonicalPath(remote string) string { trimmed := strings.Trim(remote, `\/`) return nonCanonicalChars.ReplaceAllString(trimmed, "_") } var nonCanonicalChars = regexp.MustCompile(`[\s\\/:?*]`) // SessionName makes a unique base name for the sync operation func SessionName(fs1, fs2 fs.Fs) string { return StripHexString(CanonicalPath(FsPath(fs1))) + ".." + StripHexString(CanonicalPath(FsPath(fs2))) } // StripHexString strips the (first) canonical {hexstring} suffix func StripHexString(path string) string { open := strings.IndexRune(path, '{') close := strings.IndexRune(path, '}') if open >= 0 && close > open { return path[:open] + path[close+1:] // (trailing underscore) } return path } // HasHexString returns true if path contains at least one canonical {hexstring} suffix func HasHexString(path string) bool { open := strings.IndexRune(path, '{') if open >= 0 && strings.IndexRune(path, '}') > open { return true } return false } // BasePath joins the workDir with the SessionName, stripping {hexstring} suffix if necessary func BasePath(ctx context.Context, workDir string, fs1, fs2 fs.Fs) string { suffixedSession := CanonicalPath(FsPath(fs1)) + ".." 
+ CanonicalPath(FsPath(fs2)) suffixedBasePath := filepath.Join(workDir, suffixedSession) listing1 := suffixedBasePath + ".path1.lst" listing2 := suffixedBasePath + ".path2.lst" sessionName := SessionName(fs1, fs2) basePath := filepath.Join(workDir, sessionName) // Normalize to non-canonical version for overridden configs // to ensure that backend-specific flags don't change the listing filename. // For backward-compatibility, we first check if we found a listing file with the suffixed version. // If so, we rename it (and overwrite non-suffixed version, if any.) // If not, we carry on with the non-suffixed version. // We should only find a suffixed version if bisync v1.66 or older created it. if HasHexString(suffixedSession) && FileExists(listing1) { fs.Infof(listing1, "renaming to: %s", basePath+".path1.lst") if !operations.SkipDestructive(ctx, listing1, "rename to "+basePath+".path1.lst") { _ = os.Rename(listing1, basePath+".path1.lst") } } if HasHexString(suffixedSession) && FileExists(listing2) { fs.Infof(listing2, "renaming to: %s", basePath+".path2.lst") if !operations.SkipDestructive(ctx, listing1, "rename to "+basePath+".path2.lst") { _ = os.Rename(listing2, basePath+".path2.lst") } else { return suffixedBasePath } } return basePath }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/bilib/names.go
cmd/bisync/bilib/names.go
package bilib import ( "bytes" "os" "sort" "strconv" ) // Names comprises a set of file names type Names map[string]any // ToNames converts string slice to a set of names func ToNames(list []string) Names { ns := Names{} for _, f := range list { ns.Add(f) } return ns } // Add adds new file name to the set func (ns Names) Add(name string) { ns[name] = nil } // Has checks whether given name is present in the set func (ns Names) Has(name string) bool { _, ok := ns[name] return ok } // NotEmpty checks whether set is not empty func (ns Names) NotEmpty() bool { return len(ns) > 0 } // ToList converts name set to string slice func (ns Names) ToList() []string { list := []string{} for file := range ns { list = append(list, file) } sort.Strings(list) return list } // Save saves name set in a text file func (ns Names) Save(path string) error { return SaveList(ns.ToList(), path) } // SaveList saves file name list in a text file func SaveList(list []string, path string) error { buf := &bytes.Buffer{} for _, s := range list { _, _ = buf.WriteString(strconv.Quote(s)) _ = buf.WriteByte('\n') } return os.WriteFile(path, buf.Bytes(), PermSecure) } // AliasMap comprises a pair of names that are not equal but treated as equal for comparison purposes // For example, when normalizing unicode and casing // This helps reduce repeated normalization functions, which really slow things down type AliasMap map[string]string // Add adds new pair to the set, in both directions func (am AliasMap) Add(name1, name2 string) { if name1 != name2 { am[name1] = name2 am[name2] = name1 } } // Alias returns the alternate version, if any, else the original. func (am AliasMap) Alias(name1 string) string { // note: we don't need to check normalization settings, because we already did it in March. // the AliasMap will only exist if March paired up two unequal filenames. name2, ok := am[name1] if ok { return name2 } return name1 }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/bilib/files.go
cmd/bisync/bilib/files.go
// Package bilib provides common stuff for bisync and bisync_test
// Here it's got local file/directory helpers (nice to have in lib/file)
package bilib

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
)

// PermSecure is a Unix permission for a file accessible only by its owner
const PermSecure = 0600

var (
	// matches relative paths: ".", "..", "./x", "/x", `\x`
	regexLocalPath = regexp.MustCompile(`^[./\\]`)
	// matches Windows drive-letter prefixes like "c:"
	regexWindowsPath = regexp.MustCompile(`^[a-zA-Z]:`)
	// matches rclone remote names like "remote:" or "my_remote:"
	regexRemotePath = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_-]*:`)
)

// IsLocalPath returns true if its argument is a non-remote path.
// Empty string or a relative path will be considered local.
// Note: `c:dir` will be considered local on Windows but remote on Linux.
func IsLocalPath(path string) bool {
	if path == "" || regexLocalPath.MatchString(path) {
		return true
	}
	if runtime.GOOS == "windows" && regexWindowsPath.MatchString(path) {
		return true
	}
	return !regexRemotePath.MatchString(path)
}

// FileExists returns true if the local file exists
// Note: only a definite "does not exist" returns false; any other Stat
// error (e.g. permission denied) is reported as "exists".
func FileExists(file string) bool {
	_, err := os.Stat(file)
	return !os.IsNotExist(err)
}

// CopyFileIfExists is like CopyFile but does not fail if source does not exist
func CopyFileIfExists(srcFile, dstFile string) error {
	if !FileExists(srcFile) {
		return nil
	}
	return CopyFile(srcFile, dstFile)
}

// CopyFile copies a local file, preserving the source's mode and modtime.
// The first error encountered wins: Copy, then Close, then Chmod, then
// Chtimes -- later errors never mask an earlier one.
func CopyFile(src, dst string) (err error) {
	var (
		rd   io.ReadCloser
		wr   io.WriteCloser
		info os.FileInfo
	)
	if info, err = os.Stat(src); err != nil {
		return
	}
	if rd, err = os.Open(src); err != nil {
		return
	}
	defer func() {
		_ = rd.Close() // read side close error is deliberately ignored
	}()
	if wr, err = os.Create(dst); err != nil {
		return
	}
	_, err = io.Copy(wr, rd)
	if e := wr.Close(); err == nil {
		err = e
	}
	if e := os.Chmod(dst, info.Mode()); err == nil {
		err = e
	}
	if e := os.Chtimes(dst, info.ModTime(), info.ModTime()); err == nil {
		err = e
	}
	return
}

// CopyDir copies a local directory recursively.
// The destination must not already exist; symlinked entries are skipped.
func CopyDir(src string, dst string) (err error) {
	src = filepath.Clean(src)
	dst = filepath.Clean(dst)

	si, err := os.Stat(src)
	if err != nil {
		return err
	}
	if !si.IsDir() {
		return fmt.Errorf("source is not a directory")
	}

	// dst must be absent: any Stat result other than "not exist" is an error
	_, err = os.Stat(dst)
	if err != nil && !os.IsNotExist(err) {
		return
	}
	if err == nil {
		return fmt.Errorf("destination already exists")
	}

	err = os.MkdirAll(dst, si.Mode())
	if err != nil {
		return
	}

	entries, err := os.ReadDir(src)
	if err != nil {
		return
	}

	for _, entry := range entries {
		srcPath := filepath.Join(src, entry.Name())
		dstPath := filepath.Join(dst, entry.Name())

		if entry.IsDir() {
			// recurse into subdirectories
			err = CopyDir(srcPath, dstPath)
			if err != nil {
				return
			}
		} else {
			// Skip symlinks.
			if entry.Type()&os.ModeSymlink != 0 {
				continue
			}

			err = CopyFile(srcPath, dstPath)
			if err != nil {
				return
			}
		}
	}

	return
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/bisync/bilib/output.go
cmd/bisync/bilib/output.go
// Package bilib provides common stuff for bisync and bisync_test package bilib import ( "bytes" "log/slog" "sync" "github.com/rclone/rclone/fs/log" ) // CaptureOutput runs a function capturing its output at log level INFO. func CaptureOutput(fun func()) []byte { var mu sync.Mutex buf := &bytes.Buffer{} oldLevel := log.Handler.SetLevel(slog.LevelInfo) log.Handler.SetOutput(func(level slog.Level, text string) { mu.Lock() defer mu.Unlock() buf.WriteString(text) }) defer func() { log.Handler.ResetOutput() log.Handler.SetLevel(oldLevel) }() fun() mu.Lock() defer mu.Unlock() return buf.Bytes() }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/copyurl/copyurl_test.go
cmd/copyurl/copyurl_test.go
package copyurl

import (
	"context"
	"errors"
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// resetGlobals restores the package-level flag variables and the copyURL
// hook to their defaults so tests don't leak state into each other.
func resetGlobals() {
	autoFilename = false
	headerFilename = false
	printFilename = false
	stdout = false
	noClobber = false
	urls = false
	copyURL = operations.CopyURL
}

// run must reject a single argument when --stdout is not set.
func TestRun_RequiresTwoArgsWhenNotStdout(t *testing.T) {
	t.Cleanup(resetGlobals)
	resetGlobals()

	err := run([]string{"https://example.com/foo"})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "need 2 arguments if not using --stdout")
}

// run with an explicit destination file must call copyURL exactly once,
// passing the leaf name and all flag values as false.
func TestRun_CallsCopyURL_WithExplicitFilename_Success(t *testing.T) {
	t.Cleanup(resetGlobals)
	resetGlobals()

	tmp := t.TempDir()
	dstPath := filepath.Join(tmp, "out.txt")

	var called int32
	// stub copyURL to capture and verify the arguments run passes through
	copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
		atomic.AddInt32(&called, 1)
		assert.Equal(t, "https://example.com/file", url)
		assert.Equal(t, "out.txt", dstFileName)
		assert.False(t, auto)
		assert.False(t, header)
		assert.False(t, noclobber)
		return nil, nil
	}

	err := run([]string{"https://example.com/file", dstPath})
	require.NoError(t, err)
	assert.Equal(t, int32(1), atomic.LoadInt32(&called))
}

// run with --auto-filename must pass an empty filename and auto=true,
// and must return the error from copyURL unchanged.
func TestRun_CallsCopyURL_WithAutoFilename_AndPropagatesError(t *testing.T) {
	t.Cleanup(resetGlobals)
	resetGlobals()

	tmp := t.TempDir()
	autoFilename = true

	want := errors.New("boom")
	var called int32
	copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
		atomic.AddInt32(&called, 1)
		assert.Equal(t, "", dstFileName) // auto filename -> empty
		assert.True(t, auto)
		return nil, want
	}

	err := run([]string{"https://example.com/auto/name", tmp})
	require.Error(t, err)
	assert.Equal(t, want, err)
	assert.Equal(t, int32(1), atomic.LoadInt32(&called))
}

// runURLS must refuse to combine --urls with --stdout or --print-filename.
func TestRunURLS_ErrorsWithStdoutAndWithPrintFilename(t *testing.T) {
	t.Cleanup(resetGlobals)
	resetGlobals()

	stdout = true
	err := runURLS([]string{"dummy.csv", "destDir"})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "can't use --stdout with --urls")

	resetGlobals()
	printFilename = true
	err = runURLS([]string{"dummy.csv", "destDir"})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "can't use --print-filename with --urls")
}

// runURLS must call copyURL once per CSV row (with and without a filename
// column), aggregate any per-URL errors into one summary error, and not
// stop early when a single URL fails.
func TestRunURLS_ProcessesCSV_ParallelCalls_AndAggregatesError(t *testing.T) {
	t.Cleanup(resetGlobals)
	resetGlobals()

	tmp := t.TempDir()
	csvPath := filepath.Join(tmp, "urls.csv")
	csvContent := []byte(
		"https://example.com/a,aaa.txt\n" + // success
			"https://example.com/b\n" + // auto filename
			"https://example.com/c,ccc.txt\n") // error
	require.NoError(t, os.WriteFile(csvPath, csvContent, 0o600))

	// destination dir (local backend)
	dest := t.TempDir()

	// mock copyURL: succeed for /a and /b, fail for /c
	var calls int32
	var mu sync.Mutex
	var seen []string
	// mutex guards seen because runURLS invokes copyURL from parallel goroutines
	copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
		atomic.AddInt32(&calls, 1)
		mu.Lock()
		seen = append(seen, url+"|"+dstFileName)
		mu.Unlock()
		switch {
		case url == "https://example.com/a":
			require.Equal(t, "aaa.txt", dstFileName)
			return nil, nil
		case url == "https://example.com/b":
			require.Equal(t, "", dstFileName) // auto-name path
			return nil, nil
		case url == "https://example.com/c":
			return nil, errors.New("network down")
		default:
			return nil, nil
		}
	}

	err := runURLS([]string{csvPath, dest})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "not all URLs copied successfully")

	// 3 lines => 3 calls
	assert.Equal(t, int32(3), atomic.LoadInt32(&calls))

	// sanity: all expected URLs were seen
	assert.ElementsMatch(t,
		[]string{
			"https://example.com/a|aaa.txt",
			"https://example.com/b|",
			"https://example.com/c|ccc.txt",
		},
		seen,
	)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/copyurl/copyurl.go
cmd/copyurl/copyurl.go
// Package copyurl provides the copyurl command. package copyurl import ( "context" "encoding/csv" "errors" "fmt" "os" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/errcount" "github.com/spf13/cobra" "golang.org/x/sync/errgroup" ) var ( autoFilename = false headerFilename = false printFilename = false stdout = false noClobber = false urls = false ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.BoolVarP(cmdFlags, &autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the URL and use it for destination file path", "") flags.BoolVarP(cmdFlags, &headerFilename, "header-filename", "", headerFilename, "Get the file name from the Content-Disposition header", "") flags.BoolVarP(cmdFlags, &printFilename, "print-filename", "p", printFilename, "Print the resulting name from --auto-filename", "") flags.BoolVarP(cmdFlags, &noClobber, "no-clobber", "", noClobber, "Prevent overwriting file with same name", "") flags.BoolVarP(cmdFlags, &stdout, "stdout", "", stdout, "Write the output to stdout rather than a file", "") flags.BoolVarP(cmdFlags, &urls, "urls", "", stdout, "Use a CSV file of links to process multiple URLs", "") } var commandDefinition = &cobra.Command{ Use: "copyurl https://example.com dest:path", Short: `Copy the contents of the URL supplied content to dest:path.`, Long: strings.ReplaceAll(`Download a URL's content and copy it to the destination without saving it in temporary storage. Setting |--auto-filename| will attempt to automatically determine the filename from the URL (after any redirections) and used in the destination path. With |--header-filename| in addition, if a specific filename is set in HTTP headers, it will be used instead of the name from the URL. With |--print-filename| in addition, the resulting file name will be printed. 
Setting |--no-clobber| will prevent overwriting file on the destination if there is one with the same name. Setting |--stdout| or making the output file name |-| will cause the output to be written to standard output. Setting |--urls| allows you to input a CSV file of URLs in format: URL, FILENAME. If |--urls| is in use then replace the URL in the arguments with the file containing the URLs, e.g.: |||sh rclone copyurl --urls myurls.csv remote:dir ||| Missing filenames will be autogenerated equivalent to using |--auto-filename|. Note that |--stdout| and |--print-filename| are incompatible with |--urls|. This will do |--transfers| copies in parallel. Note that if |--auto-filename| is desired for all URLs then a file with only URLs and no filename can be used. ### Troubleshooting If you can't get |rclone copyurl| to work then here are some things you can try: - |--disable-http2| rclone will use HTTP2 if available - try disabling it - |--bind 0.0.0.0| rclone will use IPv6 if available - try disabling it - |--bind ::0| to disable IPv4 - |--user agent curl| - some sites have whitelists for curl's user-agent - try that - Make sure the site works with |curl| directly`, "|", "`"), Annotations: map[string]string{ "versionIntroduced": "v1.43", "groups": "Important", }, RunE: func(command *cobra.Command, args []string) (err error) { cmd.CheckArgs(1, 2, command, args) cmd.Run(true, true, command, func() error { if !urls { return run(args) } return runURLS(args) }) return nil }, } var copyURL = operations.CopyURL // for testing // runURLS processes a .csv file of urls and filenames func runURLS(args []string) (err error) { if stdout { return errors.New("can't use --stdout with --urls") } if printFilename { return errors.New("can't use --print-filename with --urls") } dstFs := cmd.NewFsDir(args[1:]) f, err := os.Open(args[0]) if err != nil { return fmt.Errorf("failed to open .csv file: %w", err) } defer fs.CheckClose(f, &err) reader := csv.NewReader(f) reader.FieldsPerRecord = -1 
urlList, err := reader.ReadAll() if err != nil { return fmt.Errorf("failed reading .csv file: %w", err) } ec := errcount.New() g, gCtx := errgroup.WithContext(context.Background()) ci := fs.GetConfig(gCtx) g.SetLimit(ci.Transfers) for _, urlEntry := range urlList { if len(urlEntry) == 0 { continue } g.Go(func() error { url := urlEntry[0] var filename string if len(urlEntry) > 1 { filename = urlEntry[1] } _, err := copyURL(gCtx, dstFs, filename, url, filename == "", headerFilename, noClobber) if err != nil { fs.Errorf(filename, "failed to copy URL %q: %v", url, err) ec.Add(err) } return nil }) } ec.Add(g.Wait()) return ec.Err("not all URLs copied successfully") } // run runs the command for a single URL func run(args []string) error { var err error var dstFileName string var fsdst fs.Fs if !stdout { if len(args) < 2 { return errors.New("need 2 arguments if not using --stdout") } if args[1] == "-" { stdout = true } else if autoFilename { fsdst = cmd.NewFsDir(args[1:]) } else { fsdst, dstFileName = cmd.NewFsDstFile(args[1:]) } } var dst fs.Object if stdout { err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout) } else { dst, err = copyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, headerFilename, noClobber) if printFilename && err == nil && dst != nil { fmt.Println(dst.Remote()) } } return err }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/purge/purge.go
cmd/purge/purge.go
// Package purge provides the purge command. package purge import ( "context" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) func init() { cmd.Root.AddCommand(commandDefinition) } var commandDefinition = &cobra.Command{ Use: "purge remote:path", Short: `Remove the path and all of its contents.`, Long: `Remove the path and all of its contents. Note that this does not obey include/exclude filters - everything will be removed. Use the [delete](/commands/rclone_delete/) command if you want to selectively delete files. To delete empty directories only, use command [rmdir](/commands/rclone_rmdir/) or [rmdirs](/commands/rclone_rmdirs/). The concurrency of this operation is controlled by the ` + "`--checkers`" + ` global flag. However, some backends will implement this command directly, in which case ` + "`--checkers`" + ` will be ignored. **Important**: Since this can cause data loss, test first with the ` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.`, Annotations: map[string]string{ "groups": "Important", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) fdst := cmd.NewFsDir(args) cmd.Run(true, false, command, func() error { ctx := context.Background() fi := filter.GetConfig(ctx) if !fi.InActive() { fs.Fatalf(nil, "filters are not supported with purge (purge will delete everything unconditionally)") } return operations.Purge(context.Background(), fdst, "") }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cleanup/cleanup.go
cmd/cleanup/cleanup.go
// Package cleanup provides the cleanup command. package cleanup import ( "context" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) func init() { cmd.Root.AddCommand(commandDefinition) } var commandDefinition = &cobra.Command{ Use: "cleanup remote:path", Short: `Clean up the remote if possible.`, Long: `Clean up the remote if possible. Empty the trash or delete old file versions. Not supported by all remotes.`, Annotations: map[string]string{ "versionIntroduced": "v1.31", "groups": "Important", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) fsrc := cmd.NewFsSrc(args) cmd.Run(true, false, command, func() error { return operations.CleanUp(context.Background(), fsrc) }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/move/move.go
cmd/move/move.go
// Package move provides the move command. package move import ( "context" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations/operationsflags" "github.com/rclone/rclone/fs/sync" "github.com/spf13/cobra" ) // Globals var ( deleteEmptySrcDirs = false createEmptySrcDirs = false loggerOpt = operations.LoggerOpt{} loggerFlagsOpt = operationsflags.AddLoggerFlagsOptions{} ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.BoolVarP(cmdFlags, &deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move", "") flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move", "") operationsflags.AddLoggerFlags(cmdFlags, &loggerOpt, &loggerFlagsOpt) loggerOpt.LoggerFn = operations.NewDefaultLoggerFn(&loggerOpt) } var commandDefinition = &cobra.Command{ Use: "move source:path dest:path", Short: `Move files from source to dest.`, // Warning! "|" will be replaced by backticks below Long: strings.ReplaceAll(`Moves the contents of the source directory to the destination directory. Rclone will error if the source and destination overlap and the remote does not support a server-side directory move operation. To move single files, use the [moveto](/commands/rclone_moveto/) command instead. If no filters are in use and if possible this will server-side move |source:path| into |dest:path|. After this |source:path| will no longer exist. Otherwise for each file in |source:path| selected by the filters (if any) this will move it into |dest:path|. If possible a server-side move will be used, otherwise it will copy it (server-side if possible) into |dest:path| then delete the original (if no errors on copy) in |source:path|. 
If you want to delete empty source directories after move, use the |--delete-empty-src-dirs| flag. See the [--no-traverse](/docs/#no-traverse) option for controlling whether rclone lists the destination directory or not. Supplying this option when moving a small number of files into a large destination can speed transfers up greatly. Rclone will sync the modification times of files and directories if the backend supports it. If metadata syncing is required then use the |--metadata| flag. Note that the modification time and metadata for the root directory will **not** be synced. See <https://github.com/rclone/rclone/issues/7652> for more info. **Important**: Since this can cause data loss, test first with the |--dry-run| or the |--interactive|/|-i| flag. **Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics. `, "|", "`") + operationsflags.Help(), Annotations: map[string]string{ "versionIntroduced": "v1.19", "groups": "Filter,Listing,Important,Copy", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(2, 2, command, args) fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args) cmd.Run(true, true, command, func() error { ctx := context.Background() close, err := operationsflags.ConfigureLoggers(ctx, fdst, command, &loggerOpt, loggerFlagsOpt) if err != nil { return err } defer close() if loggerFlagsOpt.AnySet() { ctx = operations.WithSyncLogger(ctx, loggerOpt) } if srcFileName == "" { return sync.MoveDir(ctx, fdst, fsrc, deleteEmptySrcDirs, createEmptySrcDirs) } return operations.MoveFile(ctx, fdst, fsrc, srcFileName, srcFileName) }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/lsf/lsf.go
cmd/lsf/lsf.go
// Package lsf provides the lsf command.
package lsf

import (
	"context"
	"fmt"
	"io"
	"os"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/ls/lshelp"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)

// Command line flags for lsf, bound to cobra flags in init below and
// read by Lsf. Note these are package-level, so library callers of Lsf
// configure it by setting these variables.
var (
	format     string // output format characters, e.g. "pst"
	timeFormat string // custom time format, or "max" for remote precision
	separator  string // separator between format fields
	dirSlash   bool   // append "/" to directory names
	recurse    bool   // recurse into the listing
	hashType   = hash.MD5
	filesOnly  bool
	dirsOnly   bool
	csv        bool // output CSV-escaped fields
	absolute   bool // prefix paths with a leading "/"
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.StringVarP(cmdFlags, &format, "format", "F", "p", "Output format - see help for details", "")
	flags.StringVarP(cmdFlags, &timeFormat, "time-format", "t", "", "Specify a custom time format - see docs for details (default: 2006-01-02 15:04:05)", "")
	flags.StringVarP(cmdFlags, &separator, "separator", "s", ";", "Separator for the items in the format", "")
	flags.BoolVarP(cmdFlags, &dirSlash, "dir-slash", "d", true, "Append a slash to directory names", "")
	flags.FVarP(cmdFlags, &hashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash", "")
	flags.BoolVarP(cmdFlags, &filesOnly, "files-only", "", false, "Only list files", "")
	flags.BoolVarP(cmdFlags, &dirsOnly, "dirs-only", "", false, "Only list directories", "")
	flags.BoolVarP(cmdFlags, &csv, "csv", "", false, "Output in CSV format", "")
	flags.BoolVarP(cmdFlags, &absolute, "absolute", "", false, "Put a leading / in front of path names", "")
	flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing", "")
}

var commandDefinition = &cobra.Command{
	Use:   "lsf remote:path",
	Short: `List directories and objects in remote:path formatted for parsing.`,
	Long: `List the contents of the source path (directories and objects) to
standard output in a form which is easy to parse by scripts. By
default this will just be the names of the objects and directories,
one per line.

The directories will have a / suffix. E.g.

` + "```console" + `
$ rclone lsf swift:bucket
bevajer5jef
canole
diwogej7
ferejej3gux/
fubuwic
` + "```" + `

Use the ` + "`--format`" + ` option to control what gets listed. By default this
is just the path, but you can use these parameters to control the
output:

` + "```text" + `
p - path
s - size
t - modification time
h - hash
i - ID of object
o - Original ID of underlying object
m - MimeType of object if known
e - encrypted name
T - tier of storage if known, e.g. "Hot" or "Cool"
M - Metadata of object in JSON blob format, eg {"key":"value"}
` + "```" + `

So if you wanted the path, size and modification time, you would use
` + "`--format \"pst\"`, or maybe `--format \"tsp\"`" + ` to put the path last.

E.g.

` + "```console" + `
$ rclone lsf --format "tsp" swift:bucket
2016-06-25 18:55:41;60295;bevajer5jef
2016-06-25 18:55:43;90613;canole
2016-06-25 18:55:43;94467;diwogej7
2018-04-26 08:50:45;0;ferejej3gux/
2016-06-25 18:55:40;37600;fubuwic
` + "```" + `

If you specify "h" in the format you will get the MD5 hash by default,
use the ` + "`--hash`" + ` flag to change which hash you want. Note that this
can be returned as an empty string if it isn't available on the object
(and for directories), "ERROR" if there was an error reading it from
the object and "UNSUPPORTED" if that object does not support that hash
type.

For example, to emulate the md5sum command you can use

` + "```console" + `
rclone lsf -R --hash MD5 --format hp --separator "  " --files-only .
` + "```" + `

E.g.

` + "```console" + `
$ rclone lsf -R --hash MD5 --format hp --separator "  " --files-only swift:bucket
7908e352297f0f530b84a756f188baa3  bevajer5jef
cd65ac234e6fea5925974a51cdd865cc  canole
03b5341b4f234b9d984d03ad076bae91  diwogej7
8fd37c3810dd660778137ac3a66cc06d  fubuwic
99713e14a4c4ff553acaf1930fad985b  gixacuh7ku
` + "```" + `

(Though "rclone md5sum ." is an easier way of typing this.)

By default the separator is ";" this can be changed with the
` + "`--separator`" + ` flag. Note that separators aren't escaped in the path so
putting it last is a good strategy.

E.g.

` + "```console" + `
$ rclone lsf --separator "," --format "tshp" swift:bucket
2016-06-25 18:55:41,60295,7908e352297f0f530b84a756f188baa3,bevajer5jef
2016-06-25 18:55:43,90613,cd65ac234e6fea5925974a51cdd865cc,canole
2016-06-25 18:55:43,94467,03b5341b4f234b9d984d03ad076bae91,diwogej7
2018-04-26 08:52:53,0,,ferejej3gux/
2016-06-25 18:55:40,37600,8fd37c3810dd660778137ac3a66cc06d,fubuwic
` + "```" + `

You can output in CSV standard format. This will escape things in "
if they contain,

E.g.

` + "```console" + `
$ rclone lsf --csv --files-only --format ps remote:path
test.log,22355
test.sh,449
"this file contains a comma, in the file name.txt",6
` + "```" + `

Note that the ` + "`--absolute`" + ` parameter is useful for making lists of files
to pass to an rclone copy with the ` + "`--files-from-raw`" + ` flag.

For example, to find all the files modified within one day and copy
those only (without traversing the whole directory structure):

` + "```console" + `
rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
rclone copy --files-from-raw new_files /path/to/local remote:path
` + "```" + `

The default time format is ` + "`'2006-01-02 15:04:05'`" + `.
[Other formats](https://pkg.go.dev/time#pkg-constants) can be specified
with the ` + "`--time-format`" + ` flag.

Examples:

` + "```console" + `
rclone lsf remote:path --format pt --time-format 'Jan 2, 2006 at 3:04pm (MST)'
rclone lsf remote:path --format pt --time-format '2006-01-02 15:04:05.000000000'
rclone lsf remote:path --format pt --time-format '2006-01-02T15:04:05.999999999Z07:00'
rclone lsf remote:path --format pt --time-format RFC3339
rclone lsf remote:path --format pt --time-format DateOnly
rclone lsf remote:path --format pt --time-format max
rclone lsf remote:path --format pt --time-format unix
rclone lsf remote:path --format pt --time-format unixnano
` + "```" + `

` + "`--time-format max`" + ` will automatically truncate ` + "`2006-01-02 15:04:05.000000000`" + `
to the maximum precision supported by the remote.

` + lshelp.Help,
	Annotations: map[string]string{
		"versionIntroduced": "v1.40",
		"groups":            "Filter,Listing",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		fsrc := cmd.NewFsSrc(args)
		cmd.Run(false, false, command, func() error {
			// Work out if the separatorFlag was supplied or not
			separatorFlag := command.Flags().Lookup("separator")
			separatorFlagSupplied := separatorFlag != nil && separatorFlag.Changed
			// Default the separator to , if using CSV
			if csv && !separatorFlagSupplied {
				separator = ","
			}
			return Lsf(context.Background(), fsrc, os.Stdout)
		})
	},
}
func Lsf(ctx context.Context, fsrc fs.Fs, out io.Writer) error { var list operations.ListFormat list.SetSeparator(separator) list.SetCSV(csv) list.SetDirSlash(dirSlash) list.SetAbsolute(absolute) var opt = operations.ListJSONOpt{ NoModTime: true, NoMimeType: true, DirsOnly: dirsOnly, FilesOnly: filesOnly, Recurse: recurse, } for _, char := range format { switch char { case 'p': list.AddPath() case 't': if timeFormat == "max" { timeFormat = operations.FormatForLSFPrecision(fsrc.Precision()) } list.AddModTime(timeFormat) opt.NoModTime = false case 's': list.AddSize() case 'h': list.AddHash(hashType) opt.ShowHash = true opt.HashTypes = []string{hashType.String()} case 'i': list.AddID() case 'm': list.AddMimeType() opt.NoMimeType = false case 'e': list.AddEncrypted() opt.ShowEncrypted = true case 'o': list.AddOrigID() opt.ShowOrigIDs = true case 'T': list.AddTier() case 'M': list.AddMetadata() opt.Metadata = true default: return fmt.Errorf("unknown format character %q", char) } } return operations.ListJSON(ctx, fsrc, "", &opt, func(item *operations.ListJSONItem) error { // Make size deterministic for tests if item.IsDir { item.Size = -1 } _, _ = fmt.Fprintln(out, list.Format(item)) return nil }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/lsf/lsf_test.go
cmd/lsf/lsf_test.go
// Tests for the lsf command. NOTE: these tests configure Lsf by
// mutating the package-level flag variables and restore them by hand
// at the end of each test, so they are order-sensitive and must not
// run in parallel.
package lsf

import (
	"bytes"
	"context"
	"strings"
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestDefaultLsf checks the default output: paths only, dirs with "/".
func TestDefaultLsf(t *testing.T) {
	fstest.Initialise()
	buf := new(bytes.Buffer)

	f, err := fs.NewFs(context.Background(), "testfiles")
	require.NoError(t, err)

	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)
	assert.Equal(t, `file1
file2
file3
subdir/
`, buf.String())
}

// TestRecurseFlag checks -R lists the contents of subdirectories too.
func TestRecurseFlag(t *testing.T) {
	fstest.Initialise()
	buf := new(bytes.Buffer)

	f, err := fs.NewFs(context.Background(), "testfiles")
	require.NoError(t, err)

	recurse = true
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)
	assert.Equal(t, `file1
file2
file3
subdir/
subdir/file1
subdir/file2
subdir/file3
`, buf.String())
	recurse = false
}

// TestDirSlashFlag checks the "/" suffix on directories toggles with -d.
func TestDirSlashFlag(t *testing.T) {
	fstest.Initialise()
	buf := new(bytes.Buffer)

	f, err := fs.NewFs(context.Background(), "testfiles")
	require.NoError(t, err)

	dirSlash = true
	format = "p"
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)
	assert.Equal(t, `file1
file2
file3
subdir/
`, buf.String())

	buf = new(bytes.Buffer)
	dirSlash = false
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)
	assert.Equal(t, `file1
file2
file3
subdir
`, buf.String())
}

// TestFormat exercises the individual format characters and the
// files-only / dirs-only filters.
func TestFormat(t *testing.T) {
	fstest.Initialise()
	f, err := fs.NewFs(context.Background(), "testfiles")
	require.NoError(t, err)

	buf := new(bytes.Buffer)
	format = "p"
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)
	assert.Equal(t, `file1
file2
file3
subdir
`, buf.String())

	buf = new(bytes.Buffer)
	format = "s"
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)
	assert.Equal(t, `0
321
1234
-1
`, buf.String())

	buf = new(bytes.Buffer)
	format = "hp"
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)
	assert.Equal(t, `d41d8cd98f00b204e9800998ecf8427e;file1
409d6c19451dd39d4a94e42d2ff2c834;file2
9b4c8a5e36d3be7e2c4b1d75ded8c8a1;file3
;subdir
`, buf.String())

	buf = new(bytes.Buffer)
	format = "p"
	filesOnly = true
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)
	assert.Equal(t, `file1
file2
file3
`, buf.String())
	filesOnly = false

	buf = new(bytes.Buffer)
	format = "p"
	dirsOnly = true
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)
	assert.Equal(t, `subdir
`, buf.String())
	dirsOnly = false

	buf = new(bytes.Buffer)
	format = "t"
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)

	// Expected times are read back from the filesystem itself.
	items, _ := list.DirSorted(context.Background(), f, true, "")
	var expectedOutput strings.Builder
	for _, item := range items {
		expectedOutput.WriteString(item.ModTime(context.Background()).Format("2006-01-02 15:04:05") + "\n")
	}

	assert.Equal(t, expectedOutput.String(), buf.String())

	buf = new(bytes.Buffer)
	format = "sp"
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)
	assert.Equal(t, `0;file1
321;file2
1234;file3
-1;subdir
`, buf.String())
	format = ""
}

// TestSeparator checks the default ";" separator and a custom one.
func TestSeparator(t *testing.T) {
	fstest.Initialise()
	f, err := fs.NewFs(context.Background(), "testfiles")
	require.NoError(t, err)
	format = "ps"

	buf := new(bytes.Buffer)
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)
	assert.Equal(t, `file1;0
file2;321
file3;1234
subdir;-1
`, buf.String())

	separator = "__SEP__"
	buf = new(bytes.Buffer)
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)
	assert.Equal(t, `file1__SEP__0
file2__SEP__321
file3__SEP__1234
subdir__SEP__-1
`, buf.String())
	format = ""
	separator = ""
}

// TestWholeLsf combines format, separator, recursion and dir-slash.
func TestWholeLsf(t *testing.T) {
	fstest.Initialise()
	f, err := fs.NewFs(context.Background(), "testfiles")
	require.NoError(t, err)
	format = "pst"
	separator = "_+_"
	recurse = true
	dirSlash = true

	buf := new(bytes.Buffer)
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)

	items, _ := list.DirSorted(context.Background(), f, true, "")
	itemsInSubdir, _ := list.DirSorted(context.Background(), f, true, "subdir")
	var expectedOutput []string
	for _, item := range items {
		expectedOutput = append(expectedOutput, item.ModTime(context.Background()).Format("2006-01-02 15:04:05"))
	}
	for _, item := range itemsInSubdir {
		expectedOutput = append(expectedOutput, item.ModTime(context.Background()).Format("2006-01-02 15:04:05"))
	}

	assert.Equal(t, `file1_+_0_+_`+expectedOutput[0]+`
file2_+_321_+_`+expectedOutput[1]+`
file3_+_1234_+_`+expectedOutput[2]+`
subdir/_+_-1_+_`+expectedOutput[3]+`
subdir/file1_+_0_+_`+expectedOutput[4]+`
subdir/file2_+_1_+_`+expectedOutput[5]+`
subdir/file3_+_111_+_`+expectedOutput[6]+`
`, buf.String())
	format = ""
	separator = ""
	recurse = false
	dirSlash = false
}

// TestTimeFormat checks a custom --time-format string is honoured.
func TestTimeFormat(t *testing.T) {
	fstest.Initialise()
	f, err := fs.NewFs(context.Background(), "testfiles")
	require.NoError(t, err)
	format = "pst"
	separator = "_+_"
	recurse = true
	dirSlash = true
	timeFormat = "Jan 2, 2006 at 3:04pm (MST)"

	buf := new(bytes.Buffer)
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)

	items, _ := list.DirSorted(context.Background(), f, true, "")
	itemsInSubdir, _ := list.DirSorted(context.Background(), f, true, "subdir")
	var expectedOutput []string
	for _, item := range items {
		expectedOutput = append(expectedOutput, item.ModTime(context.Background()).Format(timeFormat))
	}
	for _, item := range itemsInSubdir {
		expectedOutput = append(expectedOutput, item.ModTime(context.Background()).Format(timeFormat))
	}

	assert.Equal(t, `file1_+_0_+_`+expectedOutput[0]+`
file2_+_321_+_`+expectedOutput[1]+`
file3_+_1234_+_`+expectedOutput[2]+`
subdir/_+_-1_+_`+expectedOutput[3]+`
subdir/file1_+_0_+_`+expectedOutput[4]+`
subdir/file2_+_1_+_`+expectedOutput[5]+`
subdir/file3_+_111_+_`+expectedOutput[6]+`
`, buf.String())
	format = ""
	separator = ""
	recurse = false
	dirSlash = false
}

// TestTimeFormatMax checks --time-format max resolves to the remote's
// precision (computed here independently via FormatForLSFPrecision).
func TestTimeFormatMax(t *testing.T) {
	fstest.Initialise()
	f, err := fs.NewFs(context.Background(), "testfiles")
	require.NoError(t, err)
	format = "pst"
	separator = "_+_"
	recurse = true
	dirSlash = true
	timeFormat = "max"
	precision := operations.FormatForLSFPrecision(f.Precision())

	buf := new(bytes.Buffer)
	err = Lsf(context.Background(), f, buf)
	require.NoError(t, err)

	items, _ := list.DirSorted(context.Background(), f, true, "")
	itemsInSubdir, _ := list.DirSorted(context.Background(), f, true, "subdir")
	var expectedOutput []string
	for _, item := range items {
		expectedOutput = append(expectedOutput, item.ModTime(context.Background()).Format(precision))
	}
	for _, item := range itemsInSubdir {
		expectedOutput = append(expectedOutput, item.ModTime(context.Background()).Format(precision))
	}

	assert.Equal(t, `file1_+_0_+_`+expectedOutput[0]+`
file2_+_321_+_`+expectedOutput[1]+`
file3_+_1234_+_`+expectedOutput[2]+`
subdir/_+_-1_+_`+expectedOutput[3]+`
subdir/file1_+_0_+_`+expectedOutput[4]+`
subdir/file2_+_1_+_`+expectedOutput[5]+`
subdir/file3_+_111_+_`+expectedOutput[6]+`
`, buf.String())
	format = ""
	separator = ""
	recurse = false
	dirSlash = false
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/gendocs/gendocs.go
cmd/gendocs/gendocs.go
// Package gendocs provides the gendocs command.
package gendocs

import (
	"bytes"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"regexp"
	"runtime"
	"strings"
	"text/template"
	"time"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/lib/file"
	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
}

// define things which go into the frontmatter
type frontmatter struct {
	Date        string
	Title       string
	Description string
	Source      string
	Aliases     []string
	Annotations map[string]string
}

// frontmatterTemplate renders the hugo frontmatter header prepended to
// every generated command page.
var frontmatterTemplate = template.Must(template.New("frontmatter").Parse(`---
title: "{{ .Title }}"
description: "{{ .Description }}"
{{- if .Aliases }}
aliases:
{{- range $value := .Aliases }}
  - {{ $value }}
{{- end }}
{{- end }}
{{- range $key, $value := .Annotations }}
{{ $key }}: {{ $value }}
{{- end }}
# autogenerated - DO NOT EDIT, instead edit the source code in {{ .Source }} and as part of making a release run "make commanddocs"
---
`))

var commandDefinition = &cobra.Command{
	Use:   "gendocs output_directory",
	Short: `Output markdown docs for rclone to the directory supplied.`,
	Long: `This produces markdown docs for the rclone commands to the directory
supplied. These are in a format suitable for hugo to render into the
rclone.org website.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.33",
	},
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(1, 1, command, args)
		now := time.Now().Format(time.RFC3339)

		// Create the directory structure
		root := args[0]
		out := filepath.Join(root, "commands")
		err := file.MkdirAll(out, 0777)
		if err != nil {
			return err
		}

		// Write the flags page by capturing "rclone help flags" output.
		var buf bytes.Buffer
		cmd.Root.SetOut(&buf)
		cmd.Root.SetArgs([]string{"help", "flags"})
		cmd.GeneratingDocs = true
		err = cmd.Root.Execute()
		if err != nil {
			return err
		}
		err = os.WriteFile(filepath.Join(root, "flags.md"), buf.Bytes(), 0777)
		if err != nil {
			return err
		}

		// Look up name => details for prepender
		type commandDetails struct {
			Short       string
			Aliases     []string
			Annotations map[string]string
		}
		commands := map[string]commandDetails{}
		// Walk the command tree recursively, accumulating alias paths
		// from parent commands so aliased pages redirect correctly.
		var addCommandDetails func(root *cobra.Command, parentAliases []string)
		addCommandDetails = func(root *cobra.Command, parentAliases []string) {
			name := strings.ReplaceAll(root.CommandPath(), " ", "_") + ".md"
			var aliases []string
			for _, p := range parentAliases {
				aliases = append(aliases, p+" "+root.Name())
				for _, v := range root.Aliases {
					aliases = append(aliases, p+" "+v)
				}
			}
			for _, v := range root.Aliases {
				if root.HasParent() {
					aliases = append(aliases, root.Parent().CommandPath()+" "+v)
				} else {
					aliases = append(aliases, v)
				}
			}
			commands[name] = commandDetails{
				Short:       root.Short,
				Aliases:     aliases,
				Annotations: root.Annotations,
			}
			for _, c := range root.Commands() {
				addCommandDetails(c, aliases)
			}
		}
		addCommandDetails(cmd.Root, []string{})

		// markup for the docs files
		prepender := func(filename string) string {
			name := filepath.Base(filename)
			base := strings.TrimSuffix(name, path.Ext(name))
			data := frontmatter{
				Date:        now,
				Title:       strings.ReplaceAll(base, "_", " "),
				Description: commands[name].Short,
				Source:      strings.ReplaceAll(strings.ReplaceAll(base, "rclone", "cmd"), "_", "/") + "/",
				Aliases:     []string{},
				Annotations: map[string]string{},
			}
			for _, v := range commands[name].Aliases {
				data.Aliases = append(data.Aliases, "/commands/"+strings.ReplaceAll(v, " ", "_")+"/")
			}
			// Filter out annotations that confuse hugo from the frontmatter
			for k, v := range commands[name].Annotations {
				if k != "groups" {
					data.Annotations[k] = v
				}
			}
			var buf bytes.Buffer
			err := frontmatterTemplate.Execute(&buf, data)
			if err != nil {
				fs.Fatalf(nil, "Failed to render frontmatter template: %v", err)
			}
			return buf.String()
		}
		linkHandler := func(name string) string {
			base := strings.TrimSuffix(name, path.Ext(name))
			return "/commands/" + strings.ToLower(base) + "/"
		}

		err = doc.GenMarkdownTreeCustom(cmd.Root, out, prepender, linkHandler)
		if err != nil {
			return err
		}

		outdentTitle := regexp.MustCompile(`(?m)^#(#+)`)
		// Munge the files to add a link to the global flags page
		err = filepath.Walk(out, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if !info.IsDir() {
				name := filepath.Base(path)
				cmd, ok := commands[name]
				if !ok {
					// Some commands are only built on certain platforms /
					// build tags, so their pages may legitimately be absent.
					switch name {
					case "rclone_mount.md":
						switch runtime.GOOS {
						case "darwin", "windows":
							fs.Logf(nil, "Skipping docs for command not available without the cmount build tag: %v", name)
							return nil
						}
					case "rclone_nfsmount.md", "rclone_serve_nfs.md":
						switch runtime.GOOS {
						case "windows":
							fs.Logf(nil, "Skipping docs for command not supported on %v: %v", runtime.GOOS, name)
							return nil
						}
					}
					return fmt.Errorf("didn't find command for %q", name)
				}
				b, err := os.ReadFile(path)
				if err != nil {
					return err
				}
				doc := string(b)
				// Cut out the inherited-options section between these two
				// markers and replace it with the per-group flag docs.
				startCut := strings.Index(doc, `### Options inherited from parent commands`)
				endCut := strings.Index(doc, `### SEE ALSO`)
				if startCut < 0 || endCut < 0 {
					if name != "rclone.md" {
						return fmt.Errorf("internal error: failed to find cut points: startCut = %d, endCut = %d", startCut, endCut)
					}
					if endCut >= 0 {
						doc = doc[:endCut] + `### See Also

<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->` + doc[endCut+12:] + `
<!-- markdownlint-restore -->
`
					}
				} else {
					var out strings.Builder
					if groupsString := cmd.Annotations["groups"]; groupsString != "" {
						_, _ = out.WriteString("Options shared with other commands are described next.\n")
						_, _ = out.WriteString("See the [global flags page](/flags/) for global options not listed here.\n\n")
						groups := flags.All.Include(groupsString)
						for _, group := range groups.Groups {
							if group.Flags.HasFlags() {
								_, _ = fmt.Fprintf(&out, "#### %s Options\n\n", group.Name)
								_, _ = fmt.Fprintf(&out, "%s\n\n", group.Help)
								_, _ = out.WriteString("```text\n")
								_, _ = out.WriteString(group.Flags.FlagUsages())
								_, _ = out.WriteString("```\n\n")
							}
						}
					} else {
						_, _ = out.WriteString("See the [global flags page](/flags/) for global options not listed here.\n\n")
					}
					doc = doc[:startCut] + out.String() + `### See Also

<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->` + doc[endCut+12:] + `
<!-- markdownlint-restore -->
`
				}
				// outdent all the titles by one
				doc = outdentTitle.ReplaceAllString(doc, `$1`)
				err = os.WriteFile(path, []byte(doc), 0777)
				if err != nil {
					return err
				}
			}
			return nil
		})
		if err != nil {
			return err
		}
		return nil
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/backend/backend.go
cmd/backend/backend.go
// Package backend provides the backend command.
package backend

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"sort"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/rc"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)

// Command line flags, bound in init below.
var (
	options []string // -o key=value options passed through to the backend
	useJSON bool     // force JSON output regardless of result type
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name", "")
	flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format", "")
}

var commandDefinition = &cobra.Command{
	Use:   "backend <command> remote:path [opts] <args>",
	Short: `Run a backend-specific command.`,
	Long: `This runs a backend-specific command. The commands themselves (except
for "help" and "features") are defined by the backends and you should
see the backend docs for definitions.

You can discover what commands a backend implements by using

` + "```console" + `
rclone backend help remote:
rclone backend help <backendname>
` + "```" + `

You can also discover information about the backend using (see
[operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
for more info).

` + "```console" + `
rclone backend features remote:
` + "```" + `

Pass options to the backend command with -o. This should be key=value or key, e.g.:

` + "```console" + `
rclone backend stats remote:path stats -o format=json -o long
` + "```" + `

Pass arguments to the backend by placing them on the end of the line

` + "```console" + `
rclone backend cleanup remote:path file1 file2 file3
` + "```" + `

Note to run these commands on a running backend then see
[backend/command](/rc/#backend-command) in the rc docs.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.52",
		"groups":            "Important",
	},
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(2, 1e6, command, args)
		name, remote := args[0], args[1]
		cmd.Run(false, false, command, func() error {
			// show help if remote is a backend name
			if name == "help" {
				fsInfo, err := fs.Find(remote)
				if err == nil {
					return showHelp(fsInfo)
				}
			}
			// Create remote
			fsInfo, configName, fsPath, config, err := fs.ConfigFs(remote)
			if err != nil {
				return err
			}
			f, err := fsInfo.NewFs(context.Background(), configName, fsPath, config)
			if err != nil {
				return err
			}
			// Run the command
			var out any
			switch name {
			case "help":
				return showHelp(fsInfo)
			case "features":
				out = operations.GetFsInfo(f)
			default:
				doCommand := f.Features().Command
				if doCommand == nil {
					return fmt.Errorf("%v: doesn't support backend commands", f)
				}
				arg := args[2:]
				opt := rc.ParseOptions(options)
				out, err = doCommand(context.Background(), name, arg, opt)
			}
			if err != nil {
				if err == fs.ErrorCommandNotFound {
					extra := ""
					if f.Features().Overlay {
						extra = " (try the underlying remote)"
					}
					return fmt.Errorf("%q %w%s", name, err, extra)
				}
				return fmt.Errorf("command %q failed: %w", name, err)
			}
			// Output the result: strings and string slices are printed
			// plainly unless --json forces JSON; anything else is JSON.
			writeJSON := false
			if useJSON {
				writeJSON = true
			} else {
				switch x := out.(type) {
				case nil:
				case string:
					fmt.Println(out)
				case []string:
					for _, line := range x {
						fmt.Println(line)
					}
				default:
					writeJSON = true
				}
			}
			if writeJSON {
				// Write indented JSON to the output
				enc := json.NewEncoder(os.Stdout)
				enc.SetIndent("", "\t")
				err = enc.Encode(out)
				if err != nil {
					return fmt.Errorf("failed to write JSON: %w", err)
				}
			}
			return nil
		})
		return nil
	},
}

// show help for a backend
func showHelp(fsInfo *fs.RegInfo) error {
	cmds := fsInfo.CommandHelp
	name := fsInfo.Name
	if len(cmds) == 0 {
		return fmt.Errorf("%s backend has no commands", name)
	}
	fmt.Printf("## Backend commands\n\n")
	fmt.Printf(`Here are the commands specific to the %s backend.

Run them with:

`+"```console"+`
rclone backend COMMAND remote:
`+"```"+`

The help below will explain what arguments each command takes.

See the [backend](/commands/rclone_backend/) command for more
info on how to pass options and arguments.

These can be run on a running backend using the rc command
[backend/command](/rc/#backend-command).

`, name)
	for _, cmd := range cmds {
		fmt.Printf("### %s\n\n", cmd.Name)
		fmt.Printf("%s\n\n", cmd.Short)
		fmt.Printf("```console\nrclone backend %s remote: [options] [<arguments>+]\n```\n\n", cmd.Name)
		if cmd.Long != "" {
			fmt.Printf("%s\n\n", cmd.Long)
		}
		if len(cmd.Opts) != 0 {
			fmt.Printf("Options:\n\n")
			// Sort option names for deterministic output.
			ks := []string{}
			for k := range cmd.Opts {
				ks = append(ks, k)
			}
			sort.Strings(ks)
			for _, k := range ks {
				v := cmd.Opts[k]
				fmt.Printf("- %q: %s\n", k, v)
			}
			fmt.Printf("\n")
		}
	}
	return nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/rcat/rcat.go
cmd/rcat/rcat.go
// Package rcat provides the rcat command.
package rcat

import (
	"context"
	"os"
	"time"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)

var (
	// size is the expected length of the input stream in bytes as set by
	// the --size flag. -1 (the default) means unknown.
	size = int64(-1)
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.Int64VarP(cmdFlags, &size, "size", "", size, "File size hint to preallocate", "")
}

// commandDefinition defines the "rclone rcat" command, which copies
// standard input to a single file on the remote.
var commandDefinition = &cobra.Command{
	Use:   "rcat remote:path",
	Short: `Copies standard input to file on remote.`,
	Long: `Reads from standard input (stdin) and copies it to a single remote file.

` + "```console" + `
echo "hello world" | rclone rcat remote:path/to/file
ffmpeg - | rclone rcat remote:path/to/file
` + "```" + `

If the remote file already exists, it will be overwritten.

rcat will try to upload small files in a single request, which is
usually more efficient than the streaming/chunked upload endpoints,
which use multiple requests. Exact behaviour depends on the remote.
What is considered a small file may be set through
` + "`--streaming-upload-cutoff`" + `. Uploading only starts after
the cutoff is reached or if the file ends before that. The data
must fit into RAM. The cutoff needs to be small enough to adhere
the limits of your remote, please see there. Generally speaking,
setting this cutoff too high will decrease your performance.

Use the ` + "`--size`" + ` flag to preallocate the file in advance
at the remote end and actually stream it, even if remote backend
doesn't support streaming.

` + "`--size`" + ` should be the exact size of the input stream in
bytes. If the size of the stream is different in length to the
` + "`--size`" + ` passed in then the transfer will likely fail.

Note that the upload cannot be retried because the data is not
stored. If the backend supports multipart uploading then individual
chunks can be retried. If you need to transfer a lot of data, you
may be better off caching it locally and then ` + "`rclone move`" + ` it to
the destination which can use retries.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.38",
		"groups":            "Important",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)

		// Refuse to run when stdin is a character device (e.g. an
		// interactive terminal) - there is nothing piped in to copy.
		stat, _ := os.Stdin.Stat()
		if (stat.Mode() & os.ModeCharDevice) != 0 {
			fs.Fatalf(nil, "nothing to read from standard input (stdin).")
		}

		fdst, dstFileName := cmd.NewFsDstFile(args)
		cmd.Run(false, false, command, func() error {
			// RcatSize streams stdin to the destination, using the
			// --size hint (or -1 for unknown) to preallocate.
			_, err := operations.RcatSize(context.Background(), fdst, dstFileName, os.Stdin, size, time.Now(), nil)
			return err
		})
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cryptcheck/cryptcheck.go
cmd/cryptcheck/cryptcheck.go
// Package cryptcheck provides the cryptcheck command.
package cryptcheck

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/backend/crypt"
	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/check"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlag := commandDefinition.Flags()
	// Reuse the report flags (--combined, --differ, etc.) from the
	// check command.
	check.AddFlags(cmdFlag)
}

// commandDefinition defines the "rclone cryptcheck" command.
var commandDefinition = &cobra.Command{
	Use:   "cryptcheck remote:path cryptedremote:path",
	Short: `Cryptcheck checks the integrity of an encrypted remote.`,
	Long: `Checks a remote against an [encrypted](/crypt/) remote. This is the
equivalent of running rclone [check](/commands/rclone_check/), but
able to check the checksums of the encrypted remote.

For it to work the underlying remote of the cryptedremote must
support some kind of checksum.

It works by reading the nonce from each file on the cryptedremote:
and using that to encrypt each file on the remote:. It then checks
the checksum of the underlying file on the cryptedremote: against
the checksum of the file it has just encrypted.

Use it like this

` + "```console" + `
rclone cryptcheck /path/to/files encryptedremote:path
` + "```" + `

You can use it like this also, but that will involve downloading all
the files in ` + "`remote:path`" + `.

` + "```console" + `
rclone cryptcheck remote:path encryptedremote:path
` + "```" + `

After it has run it will log the status of the ` + "`encryptedremote:`" + `.

` + check.FlagsHelp,
	Annotations: map[string]string{
		"versionIntroduced": "v1.36",
		"groups":            "Filter,Listing,Check",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(2, 2, command, args)
		fsrc, fdst := cmd.NewFsSrcDst(args)
		cmd.Run(false, true, command, func() error {
			// Note: fdst is the encrypted remote, fsrc the plain one.
			return cryptCheck(context.Background(), fdst, fsrc)
		})
	},
}

// cryptCheck checks the integrity of an encrypted remote.
//
// fdst must be a crypt remote; fsrc holds the plaintext files to check
// against it. Returns an error if fdst is not a crypt remote, if its
// underlying remote supports no hashes, or if the check itself fails.
func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
	// Check to see fcrypt is a crypt
	fcrypt, ok := fdst.(*crypt.Fs)
	if !ok {
		return fmt.Errorf("%s:%s is not a crypt remote", fdst.Name(), fdst.Root())
	}
	// Find a hash to use - any hash of the remote wrapped by crypt will do.
	funderlying := fcrypt.UnWrap()
	hashType := funderlying.Hashes().GetOne()
	if hashType == hash.None {
		return fmt.Errorf("%s:%s does not support any hashes", funderlying.Name(), funderlying.Root())
	}
	fs.Infof(nil, "Using %v for hash comparisons", hashType)
	opt, close, err := check.GetCheckOpt(fsrc, fcrypt)
	if err != nil {
		return err
	}
	defer close()

	// opt.Check checks to see if dst and src are identical.
	//
	// It returns true if differences were found.
	// It also returns whether it couldn't be hashed.
	opt.Check = func(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
		cryptDst := dst.(*crypt.Object)
		underlyingDst := cryptDst.UnWrap()
		// Hash of the stored (already encrypted) object on the remote.
		underlyingHash, err := underlyingDst.Hash(ctx, hashType)
		if err != nil {
			return true, false, fmt.Errorf("error reading hash from underlying %v: %w", underlyingDst, err)
		}
		if underlyingHash == "" {
			return false, true, nil
		}
		// Hash computed by encrypting src with dst's nonce.
		cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType)
		if err != nil {
			return true, false, fmt.Errorf("error computing hash: %w", err)
		}
		if cryptHash == "" {
			return false, true, nil
		}
		if cryptHash != underlyingHash {
			err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
			fs.Errorf(src, "%s", err.Error())
			// Report the mismatch through the check machinery rather
			// than as a hard error.
			return true, false, nil
		}
		return false, false, nil
	}

	return operations.CheckFn(ctx, opt)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/check/check.go
cmd/check/check.go
// Package check provides the check command. package check import ( "context" "fmt" "io" "os" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" "github.com/spf13/pflag" ) // Globals var ( download = false oneway = false combined = "" missingOnSrc = "" missingOnDst = "" match = "" differ = "" errFile = "" checkFileHashType = "" ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash", "") flags.StringVarP(cmdFlags, &checkFileHashType, "checkfile", "C", checkFileHashType, "Treat source:path as a SUM file with hashes of given type", "") AddFlags(cmdFlags) } // AddFlags adds the check flags to the cmdFlags command func AddFlags(cmdFlags *pflag.FlagSet) { flags.BoolVarP(cmdFlags, &oneway, "one-way", "", oneway, "Check one way only, source files must exist on remote", "") flags.StringVarP(cmdFlags, &combined, "combined", "", combined, "Make a combined report of changes to this file", "") flags.StringVarP(cmdFlags, &missingOnSrc, "missing-on-src", "", missingOnSrc, "Report all files missing from the source to this file", "") flags.StringVarP(cmdFlags, &missingOnDst, "missing-on-dst", "", missingOnDst, "Report all files missing from the destination to this file", "") flags.StringVarP(cmdFlags, &match, "match", "", match, "Report all matching files to this file", "") flags.StringVarP(cmdFlags, &differ, "differ", "", differ, "Report all non-matching files to this file", "") flags.StringVarP(cmdFlags, &errFile, "error", "", errFile, "Report all files with errors (hashing or reading) to this file", "") } // FlagsHelp describes the flags for the help // Warning! 
"|" will be replaced by backticks below var FlagsHelp = strings.ReplaceAll(` If you supply the |--one-way| flag, it will only check that files in the source match the files in the destination, not the other way around. This means that extra files in the destination that are not in the source will not be detected. The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--match| and |--error| flags write paths, one per line, to the file name (or stdout if it is |-|) supplied. What they write is described in the help below. For example |--differ| will write all paths which are present on both the source and destination but different. The |--combined| flag will write a file (or stdout) which contains all file paths with a symbol and then a space and then the path to tell you what happened to it. These are reminiscent of diff files. - |= path| means path was found in source and destination and was identical - |- path| means path was missing on the source, so only in the destination - |+ path| means path was missing on the destination, so only in the source - |* path| means path was present in source and destination but different. - |! path| means there was an error reading or hashing the source or dest. The default number of parallel checks is 8. 
See the [--checkers](/docs/#checkers-int) option for more information.`, "|", "`") // GetCheckOpt gets the options corresponding to the check flags func GetCheckOpt(fsrc, fdst fs.Fs) (opt *operations.CheckOpt, close func(), err error) { closers := []io.Closer{} opt = &operations.CheckOpt{ Fsrc: fsrc, Fdst: fdst, OneWay: oneway, } open := func(name string, pout *io.Writer) error { if name == "" { return nil } if name == "-" { *pout = os.Stdout return nil } out, err := os.Create(name) if err != nil { return err } *pout = out closers = append(closers, out) return nil } if err = open(combined, &opt.Combined); err != nil { return nil, nil, err } if err = open(missingOnSrc, &opt.MissingOnSrc); err != nil { return nil, nil, err } if err = open(missingOnDst, &opt.MissingOnDst); err != nil { return nil, nil, err } if err = open(match, &opt.Match); err != nil { return nil, nil, err } if err = open(differ, &opt.Differ); err != nil { return nil, nil, err } if err = open(errFile, &opt.Error); err != nil { return nil, nil, err } close = func() { for _, closer := range closers { err := closer.Close() if err != nil { fs.Errorf(nil, "Failed to close report output: %v", err) } } } return opt, close, nil } var commandDefinition = &cobra.Command{ Use: "check source:path dest:path", Short: `Checks the files in the source and destination match.`, Long: strings.ReplaceAll(`Checks the files in the source and destination match. It compares sizes and hashes (MD5 or SHA1) and logs a report of files that don't match. It doesn't alter the source or destination. For the [crypt](/crypt/) remote there is a dedicated command, [cryptcheck](/commands/rclone_cryptcheck/), that are able to check the checksums of the encrypted files. If you supply the |--size-only| flag, it will only compare the sizes not the hashes as well. Use this for a quick check. If you supply the |--download| flag, it will download the data from both remotes and check them against each other on the fly. 
This can be useful for remotes that don't support hashes or if you really want to check all the data. If you supply the |--checkfile HASH| flag with a valid hash name, the |source:path| must point to a text file in the SUM format. `, "|", "`") + FlagsHelp, Annotations: map[string]string{ "groups": "Filter,Listing,Check", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(2, 2, command, args) var ( fsrc, fdst fs.Fs hashType hash.Type fsum fs.Fs sumFile string ) if checkFileHashType != "" { if err := hashType.Set(checkFileHashType); err != nil { fmt.Println(hash.HelpString(0)) return err } fsum, sumFile, fsrc = cmd.NewFsSrcFileDst(args) } else { fsrc, fdst = cmd.NewFsSrcDst(args) } cmd.Run(false, true, command, func() error { opt, close, err := GetCheckOpt(fsrc, fdst) if err != nil { return err } defer close() if checkFileHashType != "" { return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, hashType, opt, download) } if download { return operations.CheckDownload(context.Background(), opt) } hashType := fsrc.Hashes().Overlap(fdst.Hashes()).GetOne() if hashType == hash.None { fs.Errorf(nil, "No common hash found - not using a hash for checks") } else { fs.Infof(nil, "Using %v for hash comparisons", hashType) } return operations.Check(context.Background(), opt) }) return nil }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/nfsmount/nfsmount_unsupported.go
cmd/nfsmount/nfsmount_unsupported.go
// Build for nfsmount for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build !unix

// Package nfsmount implements mount command using NFS.
package nfsmount
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/nfsmount/nfsmount.go
cmd/nfsmount/nfsmount.go
//go:build unix // Package nfsmount implements mounting functionality using serve nfs command // // This can potentially work on all unix like systems which can mount NFS. package nfsmount import ( "bytes" "context" "fmt" "net" "os/exec" "runtime" "github.com/rclone/rclone/cmd/mountlib" "github.com/rclone/rclone/cmd/serve/nfs" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/vfs" ) var ( sudo = false ) func init() { name := "nfsmount" cmd := mountlib.NewMountCommand(name, false, mount) cmd.Annotations["versionIntroduced"] = "v1.65" cmd.Annotations["status"] = "Experimental" mountlib.AddRc(name, mount) cmdFlags := cmd.Flags() flags.BoolVarP(cmdFlags, &sudo, "sudo", "", sudo, "Use sudo to run the mount/umount commands as root.", "") nfs.AddFlags(cmdFlags) } func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (asyncerrors <-chan error, unmount func() error, err error) { s, err := nfs.NewServer(context.Background(), VFS, &nfs.Opt) if err != nil { return } errChan := make(chan error, 1) go func() { errChan <- s.Serve() }() // The port is always picked at random after the NFS server has started // we need to query the server for the port number so we can mount it _, port, err := net.SplitHostPort(s.Addr().String()) if err != nil { err = fmt.Errorf("cannot find port number in %s", s.Addr().String()) return } // Options options := []string{ "-o", fmt.Sprintf("port=%s", port), "-o", fmt.Sprintf("mountport=%s", port), "-o", "tcp", } for _, option := range opt.ExtraOptions { options = append(options, "-o", option) } options = append(options, opt.ExtraFlags...) cmd := []string{} if sudo { cmd = append(cmd, "sudo") } cmd = append(cmd, "mount") cmd = append(cmd, options...) 
cmd = append(cmd, "localhost:"+opt.VolumeName, mountpoint) fs.Debugf(nil, "Running mount command: %q", cmd) out, err := exec.Command(cmd[0], cmd[1:]...).CombinedOutput() if err != nil { out = bytes.TrimSpace(out) err = fmt.Errorf("%s: failed to mount NFS volume: %v", out, err) return } asyncerrors = errChan unmount = func() error { if s.UnmountedExternally { return nil } var umountErr error var out []byte if runtime.GOOS == "darwin" { out, umountErr = exec.Command("diskutil", "umount", "force", mountpoint).CombinedOutput() } else { cmd := []string{} if sudo { cmd = append(cmd, "sudo") } cmd = append(cmd, "umount", "-f", mountpoint) out, umountErr = exec.Command(cmd[0], cmd[1:]...).CombinedOutput() } shutdownErr := s.Shutdown() VFS.Shutdown() if umountErr != nil { out = bytes.TrimSpace(out) return fmt.Errorf("%s: failed to umount the NFS volume %e", out, umountErr) } else if shutdownErr != nil { return fmt.Errorf("failed to shutdown NFS server: %e", shutdownErr) } return nil } nfs.OnUnmountFunc = func() { s.UnmountedExternally = true errChan <- nil VFS.Shutdown() } return }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/nfsmount/nfsmount_test.go
cmd/nfsmount/nfsmount_test.go
//go:build unix

package nfsmount

import (
	"context"
	"errors"
	"os"
	"os/exec"
	"runtime"
	"testing"

	"github.com/rclone/rclone/cmd/serve/nfs"
	"github.com/rclone/rclone/fs/object"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/rclone/rclone/vfs/vfstest"
	"github.com/stretchr/testify/require"
)

// Return true if the command ran without error
func commandOK(name string, arg ...string) bool {
	cmd := exec.Command(name, arg...)
	_, err := cmd.CombinedOutput()
	return err == nil
}

// TestMount runs the standard VFS test suite against the nfsmount
// backend for each supported handle cache type, skipping when the
// environment can't mount NFS (no passwordless sudo, or missing
// capability for the symlink cache).
func TestMount(t *testing.T) {
	if runtime.GOOS != "darwin" {
		// Non-darwin platforms need root to mount/umount, so probe for
		// passwordless sudo (-n) and skip if it isn't available.
		if !commandOK("sudo", "-n", "mount", "--help") {
			t.Skip("Can't run sudo mount without a password")
		}
		if !commandOK("sudo", "-n", "umount", "--help") {
			t.Skip("Can't run sudo umount without a password")
		}
		sudo = true
	}
	for _, cacheType := range []string{"memory", "disk", "symlink"} {
		t.Run(cacheType, func(t *testing.T) {
			nfs.Opt.HandleCacheDir = t.TempDir()
			require.NoError(t, nfs.Opt.HandleCache.Set(cacheType))
			// Check we can create a handler
			_, err := nfs.NewHandler(context.Background(), vfs.New(object.MemoryFs, nil), &nfs.Opt)
			if errors.Is(err, nfs.ErrorSymlinkCacheNotSupported) || errors.Is(err, nfs.ErrorSymlinkCacheNoPermission) {
				t.Skip(err.Error() + ": run with: go test -c && sudo setcap cap_dac_read_search+ep ./nfsmount.test && ./nfsmount.test -test.v")
			}
			require.NoError(t, err)
			// Configure rclone via environment var since the mount gets run in a subprocess
			_ = os.Setenv("RCLONE_NFS_CACHE_DIR", nfs.Opt.HandleCacheDir)
			_ = os.Setenv("RCLONE_NFS_CACHE_TYPE", cacheType)
			t.Cleanup(func() {
				_ = os.Unsetenv("RCLONE_NFS_CACHE_DIR")
				_ = os.Unsetenv("RCLONE_NFS_CACHE_TYPE")
			})
			vfstest.RunTests(t, false, vfscommon.CacheModeWrites, false, mount)
		})
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/gitannex/configparse.go
cmd/gitannex/configparse.go
package gitannex

import (
	"fmt"
	"slices"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/fspath"
)

// configID identifies one of the configuration values this command needs.
type configID int

const (
	configRemoteName configID = iota
	configPrefix
	configLayout
)

// configDefinition describes a configuration value required by this command. We
// use "GETCONFIG" messages to query git-annex for these values at runtime.
type configDefinition struct {
	id           configID
	names        []string // accepted config names; names[0] is canonical
	description  string   // human-readable help shown to git-annex
	defaultValue string   // used when git-annex returns no value; "" means required
}

const (
	defaultRclonePrefix = "git-annex-rclone"
	defaultRcloneLayout = "nodir"
)

// requiredConfigs lists every config value queried from git-annex.
var requiredConfigs = []configDefinition{
	{
		id:    configRemoteName,
		names: []string{"rcloneremotename", "target"},
		description: "Name of the rclone remote to use. " +
			"Must match a remote known to rclone. " +
			"(Note that rclone remotes are a distinct concept from git-annex remotes.)",
	},
	{
		id:    configPrefix,
		names: []string{"rcloneprefix", "prefix"},
		description: "Directory where rclone will write git-annex content. " +
			fmt.Sprintf("If not specified, defaults to %q. ", defaultRclonePrefix) +
			"This directory will be created on init if it does not exist.",
		defaultValue: defaultRclonePrefix,
	},
	{
		id:    configLayout,
		names: []string{"rclonelayout", "rclone_layout"},
		description: "Defines where, within the rcloneprefix directory, rclone will write git-annex content. " +
			fmt.Sprintf("Must be one of %v. ", allLayoutModes()) +
			fmt.Sprintf("If empty, defaults to %q.", defaultRcloneLayout),
		defaultValue: defaultRcloneLayout,
	},
}

// getCanonicalName returns the config's primary name (the first entry of
// names). It panics when the definition has no names, which would be a
// programmer error.
func (c *configDefinition) getCanonicalName() string {
	if len(c.names) < 1 {
		panic(fmt.Errorf("configDefinition must have at least one name: %v", c))
	}
	return c.names[0]
}

// fullDescription returns a single-line, human-readable description for this
// config. The returned string begins with a list of synonyms and ends with
// `c.description`.
func (c *configDefinition) fullDescription() string {
	if len(c.names) <= 1 {
		return c.description
	}
	// Exclude the canonical name from the list of synonyms.
	// (Idiom: c.names[1:] rather than c.names[1:len(c.names)].)
	synonyms := c.names[1:]
	commaSeparatedSynonyms := strings.Join(synonyms, ", ")
	return fmt.Sprintf("(synonyms: %s) %s", commaSeparatedSynonyms, c.description)
}

// validateRemoteName validates the "rcloneremotename" config that we receive
// from git-annex. It returns nil iff `value` is valid. Otherwise, it returns a
// descriptive error suitable for sending back to git-annex via stdout.
//
// The value is only valid when:
//  1. It is the exact name of an existing remote.
//  2. It is an fspath string that names an existing remote or a backend. The
//     string may include options, but it must not include a path. (That's what
//     the "rcloneprefix" config is for.)
//
// While backends are not remote names, per se, they are permitted for
// compatibility with [fstest]. We could guard this behavior behind
// [testing.Testing] to prevent users from specifying backend strings, but
// there's no obvious harm in permitting it.
func validateRemoteName(value string) error {
	remoteNames := config.GetRemoteNames()
	// Check whether `value` is an exact match for an existing remote.
	//
	// If we checked whether [cache.Get] returns [fs.ErrorNotFoundInConfigFile],
	// we would incorrectly identify file names as valid remote names. We also
	// avoid [config.FileSections] because it will miss remotes that are defined
	// by environment variables.
	if slices.Contains(remoteNames, value) {
		return nil
	}
	parsed, err := fspath.Parse(value)
	if err != nil {
		return fmt.Errorf("remote could not be parsed: %s", value)
	}
	if parsed.Path != "" {
		return fmt.Errorf("remote does not exist or incorrectly contains a path: %s", value)
	}
	// Now that we've established `value` is an fspath string that does not
	// include a path component, we only need to check whether it names an
	// existing remote or backend.
	if slices.Contains(remoteNames, parsed.Name) {
		return nil
	}
	maybeBackend := strings.HasPrefix(value, ":")
	if !maybeBackend {
		return fmt.Errorf("remote does not exist: %s", value)
	}
	// Strip the leading colon before searching for the backend. For instance,
	// search for "local" instead of ":local". Note that `parsed.Name` already
	// omits any config options baked into the string.
	trimmedBackendName := strings.TrimPrefix(parsed.Name, ":")
	if _, err = fs.Find(trimmedBackendName); err != nil {
		return fmt.Errorf("backend does not exist: %s", trimmedBackendName)
	}
	return nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/gitannex/gitannex.go
cmd/gitannex/gitannex.go
// Package gitannex provides the "gitannex" command, which enables [git-annex]
// to communicate with rclone by implementing the [external special remote
// protocol]. The protocol is line delimited and spoken over stdin and stdout.
//
// # Milestones
//
// (Tracked in [issue #7625].)
//
//  1. ✅ Minimal support for the [external special remote protocol]. Tested on
//     "local", "drive", and "dropbox" backends.
//  2. Add support for the ASYNC protocol extension. This may improve performance.
//  3. Support the [simple export interface]. This will enable `git-annex
//     export` functionality.
//  4. Once the draft is finalized, support import/export interface.
//
// [git-annex]: https://git-annex.branchable.com/
// [external special remote protocol]: https://git-annex.branchable.com/design/external_special_remote_protocol/
// [simple export interface]: https://git-annex.branchable.com/design/external_special_remote_protocol/export_and_import_appendix/
// [issue #7625]: https://github.com/rclone/rclone/issues/7625
package gitannex

import (
	"bufio"
	"context"
	_ "embed"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)

const subcommandName string = "gitannex"
const uniqueCommandName string = "git-annex-remote-rclone-builtin"

//go:embed gitannex.md
var gitannexHelp string

func init() {
	// Rewrite os.Args so invocation via the
	// "git-annex-remote-rclone-builtin" symlink behaves like
	// "rclone gitannex ...".
	os.Args = maybeTransformArgs(os.Args)
	cmd.Root.AddCommand(command)
}

// maybeTransformArgs returns a modified version of `args` with the "gitannex"
// subcommand inserted when `args` indicates that the program was executed as
// "git-annex-remote-rclone-builtin". One way this can happen is when rclone is
// invoked via symlink. Otherwise, returns `args`.
func maybeTransformArgs(args []string) []string {
	if len(args) == 0 || filepath.Base(args[0]) != uniqueCommandName {
		return args
	}
	newArgs := make([]string, 0, len(args)+1)
	newArgs = append(newArgs, args[0])
	newArgs = append(newArgs, subcommandName)
	newArgs = append(newArgs, args[1:]...)
	return newArgs
}

// messageParser helps parse messages we receive from git-annex into a sequence
// of parameters. Messages are not quite trivial to parse because they are
// separated by spaces, but the final parameter may itself contain spaces.
//
// This abstraction is necessary because simply splitting on space doesn't cut
// it. Also, we cannot know how many parameters to parse until we've parsed the
// first parameter.
type messageParser struct {
	line string
}

// nextSpaceDelimitedParameter consumes the next space-delimited parameter.
func (m *messageParser) nextSpaceDelimitedParameter() (string, error) {
	m.line = strings.TrimRight(m.line, "\r\n")
	if len(m.line) == 0 {
		return "", errors.New("nothing remains to parse")
	}
	before, after, found := strings.Cut(m.line, " ")
	if found {
		if len(before) == 0 {
			return "", fmt.Errorf("found an empty space-delimited parameter in line: %q", m.line)
		}
		m.line = after
		return before, nil
	}
	// No separator left: the whole remainder is the last parameter.
	remaining := m.line
	m.line = ""
	return remaining, nil
}

// finalParameter consumes the final parameter, which may contain spaces.
func (m *messageParser) finalParameter() string {
	m.line = strings.TrimRight(m.line, "\r\n")
	if len(m.line) == 0 {
		return ""
	}
	param := m.line
	m.line = ""
	return param
}

// server contains this command's current state.
type server struct {
	reader *bufio.Reader
	writer io.Writer

	// When true, the server prints a transcript of messages sent and received
	// to stderr.
	verbose bool

	extensionInfo                bool
	extensionAsync               bool
	extensionGetGitRemoteName    bool
	extensionUnavailableResponse bool

	// configsDone is set once queryConfigs has populated the config
	// fields below.
	configsDone            bool
	configPrefix           string
	configRcloneRemoteName string
	configRcloneLayout     string
}

// sendMsg writes one protocol line to git-annex, panicking on write
// failure since there is no way to recover the conversation.
func (s *server) sendMsg(msg string) {
	msg += "\n"
	if _, err := io.WriteString(s.writer, msg); err != nil {
		panic(err)
	}
	if s.verbose {
		_, err := os.Stderr.WriteString(fmt.Sprintf("server sent %q\n", msg))
		if err != nil {
			panic(fmt.Errorf("failed to write verbose message to stderr: %w", err))
		}
	}
}

// getMsg reads the next protocol line from git-annex. It returns
// (nil, nil) on clean EOF, which signals the end of the session.
func (s *server) getMsg() (*messageParser, error) {
	msg, err := s.reader.ReadString('\n')
	if err != nil {
		if len(msg) == 0 {
			// Git-annex closes stdin when it is done with us, so failing to
			// read a new line is not an error.
			return nil, nil
		}
		return nil, fmt.Errorf("expected message to end with newline: %q", msg)
	}
	if s.verbose {
		_, err := os.Stderr.WriteString(fmt.Sprintf("server received %q\n", msg))
		if err != nil {
			return nil, fmt.Errorf("failed to write verbose message to stderr: %w", err)
		}
	}
	return &messageParser{msg}, nil
}

// run drives the protocol: announce the version, then dispatch each
// incoming request to its handler until git-annex closes stdin or a
// handler fails.
func (s *server) run() error {
	// The remote sends the first message.
	s.sendMsg("VERSION 1")

	for {
		message, err := s.getMsg()
		if err != nil {
			return fmt.Errorf("error receiving message: %w", err)
		}
		if message == nil {
			break
		}

		command, err := message.nextSpaceDelimitedParameter()
		if err != nil {
			return fmt.Errorf("failed to parse command")
		}

		switch command {
		//
		// Git-annex requires that these requests are supported.
		//
		case "INITREMOTE":
			err = s.handleInitRemote()
		case "PREPARE":
			err = s.handlePrepare()
		case "EXPORTSUPPORTED":
			// Indicate that we do not support exports.
			s.sendMsg("EXPORTSUPPORTED-FAILURE")
		case "TRANSFER":
			err = s.handleTransfer(message)
		case "CHECKPRESENT":
			err = s.handleCheckPresent(message)
		case "REMOVE":
			err = s.handleRemove(message)
		case "ERROR":
			errorMessage := message.finalParameter()
			err = fmt.Errorf("received error message from git-annex: %s", errorMessage)
		//
		// These requests are optional.
		//
		case "EXTENSIONS":
			// Git-annex just told us which protocol extensions it supports.
			// Respond with the list of extensions that we want to use (none).
			err = s.handleExtensions(message)
		case "LISTCONFIGS":
			s.handleListConfigs()
		case "GETCOST":
			// Git-annex wants to know the "cost" of using this remote. It
			// probably depends on the backend we will be using, but let's just
			// consider this an "expensive remote" per git-annex's
			// Config/Cost.hs.
			s.sendMsg("COST 200")
		case "GETAVAILABILITY":
			// Indicate that this is a cloud service.
			s.sendMsg("AVAILABILITY GLOBAL")
		case "CLAIMURL", "CHECKURL", "WHEREIS", "GETINFO":
			s.sendMsg("UNSUPPORTED-REQUEST")
		default:
			err = fmt.Errorf("received unexpected message from git-annex: %s", message.line)
		}

		if err != nil {
			return err
		}
	}
	return nil
}

// Idempotently handle an incoming INITREMOTE message. This should perform
// one-time setup operations for the remote, such as validating or rejecting
// config values. We may receive the INITREMOTE message again in later sessions,
// e.g. when the same git-annex remote is initialized in a different repository.
// However, we are *not* guaranteed to receive the INITREMOTE message once per
// session, so do not mutate state here and expect it to always be available in
// other handler functions.
func (s *server) handleInitRemote() error {
	if err := s.queryConfigs(); err != nil {
		return fmt.Errorf("failed to get configs: %w", err)
	}
	if err := validateRemoteName(s.configRcloneRemoteName); err != nil {
		s.sendMsg(fmt.Sprintf("INITREMOTE-FAILURE %s", err))
		return fmt.Errorf("failed to init remote: %w", err)
	}
	if mode := parseLayoutMode(s.configRcloneLayout); mode == layoutModeUnknown {
		err := fmt.Errorf("unknown layout mode: %s", s.configRcloneLayout)
		s.sendMsg(fmt.Sprintf("INITREMOTE-FAILURE %s", err))
		return fmt.Errorf("failed to init remote: %w", err)
	}
	s.sendMsg("INITREMOTE-SUCCESS")
	return nil
}

// mustSetConfigValue stores value into the server field identified by id,
// panicking on an unknown id (programmer error).
func (s *server) mustSetConfigValue(id configID, value string) {
	switch id {
	case configRemoteName:
		s.configRcloneRemoteName = value
	case configPrefix:
		s.configPrefix = value
	case configLayout:
		s.configRcloneLayout = value
	default:
		panic(fmt.Errorf("unhandled configId: %v", id))
	}
}

// Query git-annex for config values. Caches results, so repeat calls are
// no-ops. Falls back to a config's defaultValue when git-annex returns an
// empty value for every synonym; errors if there is no default.
func (s *server) queryConfigs() error {
	if s.configsDone {
		return nil
	}

	// Send a "GETCONFIG" message for each required config and parse git-annex's
	// "VALUE" response.
queryNextConfig:
	for _, config := range requiredConfigs {
		// Try each of the config's names in sequence, starting with the
		// canonical name.
		for _, configName := range config.names {
			s.sendMsg(fmt.Sprintf("GETCONFIG %s", configName))

			message, err := s.getMsg()
			if err != nil {
				return err
			}

			valueKeyword, err := message.nextSpaceDelimitedParameter()
			if err != nil || valueKeyword != "VALUE" {
				return fmt.Errorf("failed to parse config value: %s %s", valueKeyword, message.line)
			}

			if value := message.finalParameter(); value != "" {
				s.mustSetConfigValue(config.id, value)
				continue queryNextConfig
			}
		}
		if config.defaultValue == "" {
			return fmt.Errorf("did not receive a non-empty config value for %q", config.getCanonicalName())
		}
		s.mustSetConfigValue(config.id, config.defaultValue)
	}
	s.configsDone = true
	return nil
}

func (s *server) handlePrepare() error {
	if err := s.queryConfigs(); err != nil {
		s.sendMsg("PREPARE-FAILURE Error getting configs")
		return fmt.Errorf("error getting configs: %w", err)
	}
	s.sendMsg("PREPARE-SUCCESS")
	return nil
}

// Git-annex is asking us to return the list of settings that we use. Keep this
// in sync with `handlePrepare()`.
func (s *server) handleListConfigs() { for _, config := range requiredConfigs { s.sendMsg(fmt.Sprintf("CONFIG %s %s", config.getCanonicalName(), config.fullDescription())) } s.sendMsg("CONFIGEND") } func (s *server) handleTransfer(message *messageParser) error { argMode, err := message.nextSpaceDelimitedParameter() if err != nil { s.sendMsg("TRANSFER-FAILURE failed to parse direction") return fmt.Errorf("malformed arguments for TRANSFER: %w", err) } argKey, err := message.nextSpaceDelimitedParameter() if err != nil { s.sendMsg("TRANSFER-FAILURE failed to parse key") return fmt.Errorf("malformed arguments for TRANSFER: %w", err) } argFile := message.finalParameter() if argFile == "" { s.sendMsg("TRANSFER-FAILURE failed to parse file path") return errors.New("failed to parse file path") } if err := s.queryConfigs(); err != nil { s.sendMsg(fmt.Sprintf("TRANSFER-FAILURE %s %s failed to get configs", argMode, argKey)) return fmt.Errorf("error getting configs: %w", err) } layout := parseLayoutMode(s.configRcloneLayout) if layout == layoutModeUnknown { s.sendMsg(fmt.Sprintf("TRANSFER-FAILURE %s", argKey)) return fmt.Errorf("error parsing layout mode: %q", s.configRcloneLayout) } remoteFsString, err := buildFsString(s.queryDirhash, layout, argKey, s.configRcloneRemoteName, s.configPrefix) if err != nil { s.sendMsg(fmt.Sprintf("TRANSFER-FAILURE %s", argKey)) return fmt.Errorf("error building fs string: %w", err) } remoteFs, err := cache.Get(context.TODO(), remoteFsString) if err != nil { s.sendMsg(fmt.Sprintf("TRANSFER-FAILURE %s %s failed to get remote fs", argMode, argKey)) return err } localDir := filepath.Dir(argFile) localFs, err := cache.Get(context.TODO(), localDir) if err != nil { s.sendMsg(fmt.Sprintf("TRANSFER-FAILURE %s %s failed to get local fs", argMode, argKey)) return fmt.Errorf("failed to get local fs: %w", err) } remoteFileName := argKey localFileName := filepath.Base(argFile) switch argMode { case "STORE": err = operations.CopyFile(context.TODO(), 
remoteFs, localFs, remoteFileName, localFileName) if err != nil { s.sendMsg(fmt.Sprintf("TRANSFER-FAILURE %s %s failed to copy file: %s", argMode, argKey, err)) return err } case "RETRIEVE": err = operations.CopyFile(context.TODO(), localFs, remoteFs, localFileName, remoteFileName) // It is non-fatal when retrieval fails because the file is missing on // the remote. if err == fs.ErrorObjectNotFound { s.sendMsg(fmt.Sprintf("TRANSFER-FAILURE %s %s not found", argMode, argKey)) return nil } if err != nil { s.sendMsg(fmt.Sprintf("TRANSFER-FAILURE %s %s failed to copy file: %s", argMode, argKey, err)) return err } default: s.sendMsg(fmt.Sprintf("TRANSFER-FAILURE %s %s unrecognized mode", argMode, argKey)) return fmt.Errorf("received malformed TRANSFER mode: %v", argMode) } s.sendMsg(fmt.Sprintf("TRANSFER-SUCCESS %s %s", argMode, argKey)) return nil } func (s *server) handleCheckPresent(message *messageParser) error { argKey := message.finalParameter() if argKey == "" { return errors.New("failed to parse response for CHECKPRESENT") } if err := s.queryConfigs(); err != nil { s.sendMsg(fmt.Sprintf("CHECKPRESENT-FAILURE %s failed to get configs", argKey)) return fmt.Errorf("error getting configs: %s", err) } layout := parseLayoutMode(s.configRcloneLayout) if layout == layoutModeUnknown { s.sendMsg(fmt.Sprintf("CHECKPRESENT-FAILURE %s", argKey)) return fmt.Errorf("error parsing layout mode: %q", s.configRcloneLayout) } remoteFsString, err := buildFsString(s.queryDirhash, layout, argKey, s.configRcloneRemoteName, s.configPrefix) if err != nil { s.sendMsg(fmt.Sprintf("CHECKPRESENT-FAILURE %s", argKey)) return fmt.Errorf("error building fs string: %w", err) } remoteFs, err := cache.Get(context.TODO(), remoteFsString) if err != nil { s.sendMsg(fmt.Sprintf("CHECKPRESENT-UNKNOWN %s failed to get remote fs", argKey)) return err } _, err = remoteFs.NewObject(context.TODO(), argKey) if err == fs.ErrorObjectNotFound { s.sendMsg(fmt.Sprintf("CHECKPRESENT-FAILURE %s", argKey)) return 
nil } if err != nil { s.sendMsg(fmt.Sprintf("CHECKPRESENT-UNKNOWN %s error finding file", argKey)) return err } s.sendMsg(fmt.Sprintf("CHECKPRESENT-SUCCESS %s", argKey)) return nil } func (s *server) queryDirhash(msg string) (string, error) { s.sendMsg(msg) parser, err := s.getMsg() if err != nil { return "", err } keyword, err := parser.nextSpaceDelimitedParameter() if err != nil { return "", err } if keyword != "VALUE" { return "", fmt.Errorf("expected VALUE keyword, but got %q", keyword) } dirhash, err := parser.nextSpaceDelimitedParameter() if err != nil { return "", fmt.Errorf("failed to parse dirhash: %w", err) } return dirhash, nil } func (s *server) handleRemove(message *messageParser) error { argKey := message.finalParameter() if argKey == "" { return errors.New("failed to parse key for REMOVE") } layout := parseLayoutMode(s.configRcloneLayout) if layout == layoutModeUnknown { s.sendMsg(fmt.Sprintf("REMOVE-FAILURE %s", argKey)) return fmt.Errorf("error parsing layout mode: %q", s.configRcloneLayout) } remoteFsString, err := buildFsString(s.queryDirhash, layout, argKey, s.configRcloneRemoteName, s.configPrefix) if err != nil { s.sendMsg(fmt.Sprintf("REMOVE-FAILURE %s", argKey)) return fmt.Errorf("error building fs string: %w", err) } remoteFs, err := cache.Get(context.TODO(), remoteFsString) if err != nil { s.sendMsg(fmt.Sprintf("REMOVE-FAILURE %s", argKey)) return fmt.Errorf("error getting remote fs: %w", err) } fileObj, err := remoteFs.NewObject(context.TODO(), argKey) // It is non-fatal when removal fails because the file is missing on the // remote. 
if errors.Is(err, fs.ErrorObjectNotFound) { s.sendMsg(fmt.Sprintf("REMOVE-SUCCESS %s", argKey)) return nil } if err != nil { s.sendMsg(fmt.Sprintf("REMOVE-FAILURE %s error getting new fs object: %s", argKey, err)) return fmt.Errorf("error getting new fs object: %w", err) } if err := operations.DeleteFile(context.TODO(), fileObj); err != nil { s.sendMsg(fmt.Sprintf("REMOVE-FAILURE %s error deleting file", argKey)) return fmt.Errorf("error deleting file: %q", argKey) } s.sendMsg(fmt.Sprintf("REMOVE-SUCCESS %s", argKey)) return nil } func (s *server) handleExtensions(message *messageParser) error { for { extension, err := message.nextSpaceDelimitedParameter() if err != nil { break } switch extension { case "INFO": s.extensionInfo = true case "ASYNC": s.extensionAsync = true case "GETGITREMOTENAME": s.extensionGetGitRemoteName = true case "UNAVAILABLERESPONSE": s.extensionUnavailableResponse = true } } s.sendMsg("EXTENSIONS") return nil } var command = &cobra.Command{ Aliases: []string{uniqueCommandName}, Use: subcommandName, Short: "Speaks with git-annex over stdin/stdout.", Long: strings.TrimSpace(gitannexHelp), Annotations: map[string]string{ "versionIntroduced": "v1.67.0", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(0, 0, command, args) s := server{ reader: bufio.NewReader(os.Stdin), writer: os.Stdout, } err := s.run() if err != nil { s.sendMsg(fmt.Sprintf("ERROR %s", err.Error())) panic(err) } }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/gitannex/e2e_test.go
cmd/gitannex/e2e_test.go
package gitannex

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/lib/buildinfo"
)

// checkRcloneBinaryVersion runs whichever rclone is on the PATH and checks
// whether it reports a version that matches the test's expectations. Returns
// nil when the version is the expected version, otherwise returns an error.
func checkRcloneBinaryVersion(t *testing.T) error {
	// versionInfo is a subset of information produced by "core/version".
	type versionInfo struct {
		Version string
		IsGit   bool
		GoTags  string
	}

	cmd := exec.Command("rclone", "rc", "--loopback", "core/version")
	stdout, err := cmd.Output()
	require.NoError(t, err)

	var parsed versionInfo
	if err := json.Unmarshal(stdout, &parsed); err != nil {
		return fmt.Errorf("failed to parse rclone version: %w", err)
	}

	if parsed.Version != fs.Version {
		return fmt.Errorf("expected version %q, but got %q", fs.Version, parsed.Version)
	}

	if parsed.IsGit != strings.HasSuffix(fs.Version, "-DEV") {
		return errors.New("expected rclone to be a dev build")
	}

	_, tagString := buildinfo.GetLinkingAndTags()
	if parsed.GoTags != tagString {
		// TODO: Skip the test when tags do not match.
		t.Logf("expected tag string %q, but got %q. Not skipping!", tagString, parsed.GoTags)
	}

	return nil
}

// countFilesRecursively returns the number of files nested underneath `dir`. It
// counts files only and excludes directories.
func countFilesRecursively(t *testing.T, dir string) int {
	remoteFiles, err := os.ReadDir(dir)
	require.NoError(t, err)

	var count int
	for _, f := range remoteFiles {
		if f.IsDir() {
			// Recurse into subdirectories; directories themselves don't count.
			subdir := filepath.Join(dir, f.Name())
			count += countFilesRecursively(t, subdir)
		} else {
			count++
		}
	}
	return count
}

// findFileWithContents reports whether any file under `dir` (searched
// recursively) has contents exactly equal to wantContents.
func findFileWithContents(t *testing.T, dir string, wantContents []byte) bool {
	remoteFiles, err := os.ReadDir(dir)
	require.NoError(t, err)

	for _, f := range remoteFiles {
		fPath := filepath.Join(dir, f.Name())
		if f.IsDir() {
			if findFileWithContents(t, fPath, wantContents) {
				return true
			}
		} else {
			contents, err := os.ReadFile(fPath)
			require.NoError(t, err)
			if bytes.Equal(contents, wantContents) {
				return true
			}
		}
	}
	return false
}

// e2eTestingContext holds the temp-directory layout shared by the end-to-end
// tests: a fake HOME, a bin dir for the remote-helper symlink, and an
// ephemeral git repo.
type e2eTestingContext struct {
	t                *testing.T
	tempDir          string
	binDir           string
	homeDir          string
	configDir        string
	rcloneConfigDir  string
	ephemeralRepoDir string
}

// makeE2eTestingContext sets up a new e2eTestingContext rooted under
// `t.TempDir()`. It creates the skeleton directory structure shown below in the
// temp directory without creating any files.
//
//	.
//	|-- bin
//	|   `-- git-annex-remote-rclone-builtin -> ${PATH_TO_RCLONE_BINARY}
//	|-- ephemeralRepo
//	`-- user
//	    `-- .config
//	        `-- rclone
//	            `-- rclone.conf
func makeE2eTestingContext(t *testing.T) e2eTestingContext {
	tempDir := t.TempDir()

	binDir := filepath.Join(tempDir, "bin")
	homeDir := filepath.Join(tempDir, "user")
	configDir := filepath.Join(homeDir, ".config")
	rcloneConfigDir := filepath.Join(configDir, "rclone")
	ephemeralRepoDir := filepath.Join(tempDir, "ephemeralRepo")

	for _, dir := range []string{binDir, homeDir, configDir, rcloneConfigDir, ephemeralRepoDir} {
		require.NoError(t, os.Mkdir(dir, 0700))
	}

	return e2eTestingContext{t, tempDir, binDir, homeDir, configDir, rcloneConfigDir, ephemeralRepoDir}
}

// Install the symlink that enables git-annex to invoke "rclone gitannex"
// without explicitly specifying the subcommand.
func (e *e2eTestingContext) installRcloneGitannexSymlink(t *testing.T) {
	rcloneBinaryPath, err := exec.LookPath("rclone")
	require.NoError(t, err)
	require.NoError(t, os.Symlink(
		rcloneBinaryPath,
		filepath.Join(e.binDir, "git-annex-remote-rclone-builtin")))
}

// Install a rclone.conf file in an appropriate location in the fake home
// directory. The config defines an rclone remote named "MyRcloneRemote" using
// the local backend.
func (e *e2eTestingContext) installRcloneConfig(t *testing.T) {
	// Install the rclone.conf file that defines the remote.
	rcloneConfigPath := filepath.Join(e.rcloneConfigDir, "rclone.conf")
	rcloneConfigContents := "[MyRcloneRemote]\ntype = local"
	require.NoError(t, os.WriteFile(rcloneConfigPath, []byte(rcloneConfigContents), 0600))
}

// runInRepo runs the given command from within the ephemeral repo directory. To
// prevent accidental changes in the real home directory, it sets the HOME
// variable to a subdirectory of the temp directory. It also ensures that the
// git-annex-remote-rclone-builtin symlink will be found by extending the PATH.
func (e *e2eTestingContext) runInRepo(t *testing.T, command string, args ...string) {
	if testing.Verbose() {
		t.Logf("Running %s %v\n", command, args)
	}
	cmd := exec.Command(command, args...)
	cmd.Dir = e.ephemeralRepoDir
	cmd.Env = []string{
		"HOME=" + e.homeDir,
		"PATH=" + os.Getenv("PATH") + ":" + e.binDir,
	}
	buf, err := cmd.CombinedOutput()
	require.NoError(t, err, fmt.Sprintf("+ %s %v failed:\n%s\n", command, args, buf))
}

// createGitRepo creates an empty git repository in the ephemeral repo
// directory. It makes "global" config changes that are ultimately scoped to the
// calling test thanks to runInRepo() overriding the HOME environment variable.
func (e *e2eTestingContext) createGitRepo(t *testing.T) {
	e.runInRepo(t, "git", "annex", "version")
	e.runInRepo(t, "git", "config", "--global", "user.name", "User Name")
	e.runInRepo(t, "git", "config", "--global", "user.email", "user@example.com")
	e.runInRepo(t, "git", "config", "--global", "init.defaultBranch", "main")
	e.runInRepo(t, "git", "init")
	e.runInRepo(t, "git", "annex", "init")
}

// skipE2eTestIfNecessary skips the calling test unless the environment can
// actually run the end-to-end suite: not short mode, no fstest remote
// override, a supported OS, a matching rclone binary, and git-annex on PATH.
func skipE2eTestIfNecessary(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping due to short mode.")
	}

	// TODO(#7984): Port e2e tests to `fstest` framework.
	if *fstest.RemoteName != "" {
		t.Skip("Skipping because fstest remote was specified.")
	}

	// TODO: Support e2e tests on Windows. Need to evaluate the semantics of the
	// HOME and PATH environment variables.
	switch runtime.GOOS {
	case "darwin", "freebsd", "linux",
		"netbsd", "openbsd", "plan9", "solaris":
	default:
		t.Skipf("GOOS %q is not supported.", runtime.GOOS)
	}

	if err := checkRcloneBinaryVersion(t); err != nil {
		t.Skipf("Skipping due to rclone version: %s", err)
	}

	if _, err := exec.LookPath("git-annex"); err != nil {
		t.Skipf("Skipping because git-annex was not found: %s", err)
	}
}

// This end-to-end test runs `git annex testremote` in a temporary git repo.
// This test will be skipped unless the `rclone` binary on PATH reports the
// expected version.
//
// When run on CI, an rclone binary built from HEAD will be on the PATH. When
// running locally, you will likely need to ensure the current binary is on the
// PATH like so:
//
//	go build && PATH="$(realpath .):$PATH" go test -v ./cmd/gitannex/...
//
// In the future, this test will probably be extended to test a number of
// parameters like repo layouts, and runtime may suffer from a combinatorial
// explosion.
// TestEndToEnd runs `git annex testremote` against a fresh
// rclone-builtin-backed remote, once per layout mode.
func TestEndToEnd(t *testing.T) {
	skipE2eTestIfNecessary(t)

	for _, mode := range allLayoutModes() {
		t.Run(string(mode), func(t *testing.T) {
			t.Parallel()

			testingContext := makeE2eTestingContext(t)
			testingContext.installRcloneGitannexSymlink(t)
			testingContext.installRcloneConfig(t)
			testingContext.createGitRepo(t)

			testingContext.runInRepo(t,
				"git", "annex", "initremote", "MyTestRemote",
				"type=external", "externaltype=rclone-builtin", "encryption=none",
				"rcloneremotename=MyRcloneRemote",
				"rcloneprefix="+testingContext.ephemeralRepoDir,
				"rclonelayout="+string(mode))
			testingContext.runInRepo(t, "git", "annex", "testremote", "MyTestRemote")
		})
	}
}

// For each layout mode, migrate a single remote from git-annex-remote-rclone to
// git-annex-remote-rclone-builtin and run `git annex testremote`.
func TestEndToEndMigration(t *testing.T) {
	skipE2eTestIfNecessary(t)

	if _, err := exec.LookPath("git-annex-remote-rclone"); err != nil {
		t.Skipf("Skipping because git-annex-remote-rclone was not found: %s", err)
	}

	for _, mode := range allLayoutModes() {
		t.Run(string(mode), func(t *testing.T) {
			t.Parallel()

			tc := makeE2eTestingContext(t)
			tc.installRcloneGitannexSymlink(t)
			tc.installRcloneConfig(t)
			tc.createGitRepo(t)

			remoteStorage := filepath.Join(tc.tempDir, "remotePrefix")
			require.NoError(t, os.Mkdir(remoteStorage, 0777))

			// Start with a remote served by the legacy helper.
			tc.runInRepo(t, "git", "annex", "initremote", "MigratedRemote",
				"type=external", "externaltype=rclone", "encryption=none",
				"target=MyRcloneRemote",
				"rclone_layout="+string(mode),
				"prefix="+remoteStorage,
			)

			fooFileContents := []byte{1, 2, 3, 4}
			fooFilePath := filepath.Join(tc.ephemeralRepoDir, "foo")
			require.NoError(t, os.WriteFile(fooFilePath, fooFileContents, 0700))
			tc.runInRepo(t, "git", "annex", "add", "foo")
			tc.runInRepo(t, "git", "commit", "-m", "Add foo file")

			// Git-annex objects are not writable, which prevents `testing` from
			// cleaning up the temp directory. We can work around this by
			// explicitly dropping any files we add to the annex.
			t.Cleanup(func() {
				tc.runInRepo(t, "git", "annex", "drop", "--force", "foo")
			})

			tc.runInRepo(t, "git", "annex", "copy", "--to=MigratedRemote", "foo")
			tc.runInRepo(t, "git", "annex", "fsck", "--from=MigratedRemote", "foo")

			// Switch the same remote over to the builtin helper; the stored
			// object must still be intact afterwards.
			tc.runInRepo(t, "git", "annex", "enableremote", "MigratedRemote",
				"externaltype=rclone-builtin",
				"rcloneremotename=MyRcloneRemote",
				"rclonelayout="+string(mode),
				"rcloneprefix="+remoteStorage,
			)

			tc.runInRepo(t, "git", "annex", "fsck", "--from=MigratedRemote", "foo")
			tc.runInRepo(t, "git", "annex", "testremote", "MigratedRemote")
		})
	}
}

// For each layout mode, create two git-annex remotes with externaltype=rclone
// and externaltype=rclone-builtin respectively. Test that files copied to one
// remote are present on the other. Similarly, test that files deleted from one
// are removed on the other.
func TestEndToEndRepoLayoutCompat(t *testing.T) {
	skipE2eTestIfNecessary(t)

	if _, err := exec.LookPath("git-annex-remote-rclone"); err != nil {
		t.Skipf("Skipping because git-annex-remote-rclone was not found: %s", err)
	}

	for _, mode := range allLayoutModes() {
		t.Run(string(mode), func(t *testing.T) {
			t.Parallel()

			tc := makeE2eTestingContext(t)
			tc.installRcloneGitannexSymlink(t)
			tc.installRcloneConfig(t)
			tc.createGitRepo(t)

			// Both remotes share the same on-disk prefix, so anything written
			// through one must be visible through the other.
			remoteStorage := filepath.Join(tc.tempDir, "remotePrefix")
			require.NoError(t, os.Mkdir(remoteStorage, 0777))

			tc.runInRepo(t, "git", "annex", "initremote", "Control",
				"type=external", "externaltype=rclone", "encryption=none",
				"target=MyRcloneRemote",
				"rclone_layout="+string(mode),
				"prefix="+remoteStorage)

			tc.runInRepo(t, "git", "annex", "initremote", "Experiment",
				"type=external", "externaltype=rclone-builtin", "encryption=none",
				"rcloneremotename=MyRcloneRemote",
				"rclonelayout="+string(mode),
				"rcloneprefix="+remoteStorage)

			fooFileContents := []byte{1, 2, 3, 4}
			fooFilePath := filepath.Join(tc.ephemeralRepoDir, "foo")
			require.NoError(t, os.WriteFile(fooFilePath, fooFileContents, 0700))
			tc.runInRepo(t, "git", "annex", "add", "foo")
			tc.runInRepo(t, "git", "commit", "-m", "Add foo file")

			// Git-annex objects are not writable, which prevents `testing` from
			// cleaning up the temp directory. We can work around this by
			// explicitly dropping any files we add to the annex.
			t.Cleanup(func() {
				tc.runInRepo(t, "git", "annex", "drop", "--force", "foo")
			})

			require.Equal(t, 0, countFilesRecursively(t, remoteStorage))
			require.False(t, findFileWithContents(t, remoteStorage, fooFileContents))

			// Copy the file to Control and verify it's present on Experiment.
			tc.runInRepo(t, "git", "annex", "copy", "--to=Control", "foo")
			require.Equal(t, 1, countFilesRecursively(t, remoteStorage))
			require.True(t, findFileWithContents(t, remoteStorage, fooFileContents))

			tc.runInRepo(t, "git", "annex", "fsck", "--from=Experiment", "foo")
			require.Equal(t, 1, countFilesRecursively(t, remoteStorage))
			require.True(t, findFileWithContents(t, remoteStorage, fooFileContents))

			// Drop the file locally and verify we can copy it back from Experiment.
			tc.runInRepo(t, "git", "annex", "drop", "--force", "foo")
			require.Equal(t, 1, countFilesRecursively(t, remoteStorage))
			require.True(t, findFileWithContents(t, remoteStorage, fooFileContents))

			tc.runInRepo(t, "git", "annex", "copy", "--from=Experiment", "foo")
			require.Equal(t, 1, countFilesRecursively(t, remoteStorage))
			require.True(t, findFileWithContents(t, remoteStorage, fooFileContents))

			// Drop the file from Experiment, copy it back to Experiment, and
			// verify it's still present on Control.
			tc.runInRepo(t, "git", "annex", "drop", "--from=Experiment", "--force", "foo")
			require.Equal(t, 0, countFilesRecursively(t, remoteStorage))
			require.False(t, findFileWithContents(t, remoteStorage, fooFileContents))

			tc.runInRepo(t, "git", "annex", "copy", "--to=Experiment", "foo")
			require.Equal(t, 1, countFilesRecursively(t, remoteStorage))
			require.True(t, findFileWithContents(t, remoteStorage, fooFileContents))

			tc.runInRepo(t, "git", "annex", "fsck", "--from=Control", "foo")
			require.Equal(t, 1, countFilesRecursively(t, remoteStorage))
			require.True(t, findFileWithContents(t, remoteStorage, fooFileContents))

			// Drop the file from Control.
			tc.runInRepo(t, "git", "annex", "drop", "--from=Control", "--force", "foo")
			require.Equal(t, 0, countFilesRecursively(t, remoteStorage))
			require.False(t, findFileWithContents(t, remoteStorage, fooFileContents))
		})
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/gitannex/layout.go
cmd/gitannex/layout.go
package gitannex import ( "fmt" "strings" "github.com/rclone/rclone/fs/fspath" ) type layoutMode string // All layout modes from git-annex-remote-rclone are supported. const ( layoutModeLower layoutMode = "lower" layoutModeDirectory layoutMode = "directory" layoutModeNodir layoutMode = "nodir" layoutModeMixed layoutMode = "mixed" layoutModeFrankencase layoutMode = "frankencase" layoutModeUnknown layoutMode = "" ) func allLayoutModes() []layoutMode { return []layoutMode{ layoutModeLower, layoutModeDirectory, layoutModeNodir, layoutModeMixed, layoutModeFrankencase, } } func parseLayoutMode(mode string) layoutMode { for _, knownMode := range allLayoutModes() { if mode == string(knownMode) { return knownMode } } return layoutModeUnknown } type queryDirhashFunc func(msg string) (string, error) func buildFsString(queryDirhash queryDirhashFunc, mode layoutMode, key, remoteName, prefix string) (string, error) { remoteName = strings.TrimSuffix(remoteName, ":") + ":" remoteString := fspath.JoinRootPath(remoteName, prefix) if mode == layoutModeNodir { return remoteString, nil } var dirhash string var err error switch mode { case layoutModeLower, layoutModeDirectory: dirhash, err = queryDirhash("DIRHASH-LOWER " + key) case layoutModeMixed, layoutModeFrankencase: dirhash, err = queryDirhash("DIRHASH " + key) default: panic("unreachable") } if err != nil { return "", fmt.Errorf("buildFsString failed to query dirhash: %w", err) } switch mode { case layoutModeLower: return fmt.Sprintf("%s/%s", remoteString, dirhash), nil case layoutModeDirectory: return fmt.Sprintf("%s/%s%s", remoteString, dirhash, key), nil case layoutModeMixed: return fmt.Sprintf("%s/%s", remoteString, dirhash), nil case layoutModeFrankencase: return fmt.Sprintf("%s/%s", remoteString, strings.ToLower(dirhash)), nil default: panic("unreachable") } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/gitannex/gitannex_test.go
cmd/gitannex/gitannex_test.go
package gitannex

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"strings"
	"testing"
	"time"

	// Without this import, the various backends would be unavailable. It looks
	// unused, but the act of importing runs the package's `init()` function.
	_ "github.com/rclone/rclone/backend/all"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fstest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// maybeTransformArgs must leave arg lists unchanged when argv[0] is not the
// special symlink name.
func TestFixArgsForSymlinkIdentity(t *testing.T) {
	for _, argList := range [][]string{
		[]string{},
		[]string{"foo"},
		[]string{"foo", "bar"},
		[]string{"foo", "bar", "baz"},
	} {
		assert.Equal(t, maybeTransformArgs(argList), argList)
	}
}

// When argv[0] is the git-annex-remote-rclone-builtin symlink (bare or with a
// path), maybeTransformArgs must inject the "gitannex" subcommand.
func TestFixArgsForSymlinkCorrectName(t *testing.T) {
	assert.Equal(t,
		maybeTransformArgs([]string{"git-annex-remote-rclone-builtin"}),
		[]string{"git-annex-remote-rclone-builtin", "gitannex"})
	assert.Equal(t,
		maybeTransformArgs([]string{"/path/to/git-annex-remote-rclone-builtin"}),
		[]string{"/path/to/git-annex-remote-rclone-builtin", "gitannex"})
}

// messageParserTestCase pairs a subtest label with its body.
type messageParserTestCase struct {
	label    string
	testFunc func(*testing.T)
}

// Table of messageParser behaviors: parameter splitting, exhaustion, line
// endings, and malformed whitespace.
var messageParserTestCases = []messageParserTestCase{
	{
		"OneParam",
		func(t *testing.T) {
			m := messageParser{"foo\n"}

			param, err := m.nextSpaceDelimitedParameter()
			assert.NoError(t, err)
			assert.Equal(t, param, "foo")

			param, err = m.nextSpaceDelimitedParameter()
			assert.Error(t, err)
			assert.Equal(t, param, "")

			param = m.finalParameter()
			assert.Equal(t, param, "")

			param = m.finalParameter()
			assert.Equal(t, param, "")

			param, err = m.nextSpaceDelimitedParameter()
			assert.Error(t, err)
			assert.Equal(t, param, "")
		},
	},
	{
		"TwoParams",
		func(t *testing.T) {
			m := messageParser{"foo bar\n"}

			param, err := m.nextSpaceDelimitedParameter()
			assert.NoError(t, err)
			assert.Equal(t, param, "foo")

			param, err = m.nextSpaceDelimitedParameter()
			assert.NoError(t, err)
			assert.Equal(t, param, "bar")

			param, err = m.nextSpaceDelimitedParameter()
			assert.Error(t, err)
			assert.Equal(t, param, "")

			param = m.finalParameter()
			assert.Equal(t, param, "")
		},
	},
	{
		"TwoParamsNoTrailingNewline",
		func(t *testing.T) {
			m := messageParser{"foo bar"}

			param, err := m.nextSpaceDelimitedParameter()
			assert.NoError(t, err)
			assert.Equal(t, param, "foo")

			param, err = m.nextSpaceDelimitedParameter()
			assert.NoError(t, err)
			assert.Equal(t, param, "bar")

			param, err = m.nextSpaceDelimitedParameter()
			assert.Error(t, err)
			assert.Equal(t, param, "")

			param = m.finalParameter()
			assert.Equal(t, param, "")
		},
	},
	{
		"ThreeParamsWhereFinalParamContainsSpaces",
		func(t *testing.T) {
			m := messageParser{"firstparam secondparam final param with spaces"}

			param, err := m.nextSpaceDelimitedParameter()
			assert.NoError(t, err)
			assert.Equal(t, param, "firstparam")

			param, err = m.nextSpaceDelimitedParameter()
			assert.NoError(t, err)
			assert.Equal(t, param, "secondparam")

			// finalParameter consumes the rest of the line, spaces included.
			param = m.finalParameter()
			assert.Equal(t, param, "final param with spaces")
		},
	},
	{
		"OneLongFinalParameter",
		func(t *testing.T) {
			// finalParameter must strip any combination of trailing CR/LF.
			for _, lineEnding := range []string{"", "\n", "\r", "\r\n", "\n\r"} {
				testName := fmt.Sprintf("lineEnding%x", lineEnding)
				t.Run(testName, func(t *testing.T) {
					m := messageParser{"one long final parameter" + lineEnding}

					param := m.finalParameter()
					assert.Equal(t, param, "one long final parameter")

					param = m.finalParameter()
					assert.Equal(t, param, "")
				})
			}
		},
	},
	{
		"MultipleSpaces",
		func(t *testing.T) {
			m := messageParser{"foo  bar\n\r"}

			param, err := m.nextSpaceDelimitedParameter()
			assert.NoError(t, err)
			assert.Equal(t, param, "foo")

			// A run of spaces is malformed input; the parser errors out.
			param, err = m.nextSpaceDelimitedParameter()
			assert.Error(t, err, "blah")
			assert.Equal(t, param, "")
		},
	},
	{
		"StartsWithSpace",
		func(t *testing.T) {
			m := messageParser{" foo"}

			param, err := m.nextSpaceDelimitedParameter()
			assert.Error(t, err, "blah")
			assert.Equal(t, param, "")
		},
	},
}

// TestMessageParser runs every table case above as a parallel subtest.
func TestMessageParser(t *testing.T) {
	for _, testCase := range messageParserTestCases {
		t.Run(testCase.label, func(t *testing.T) {
			t.Parallel()
			testCase.testFunc(t)
		})
	}
}

// With a single name, the canonical name is that name and the description is
// used verbatim.
func TestConfigDefinitionOneName(t *testing.T) {
	configFoo := configDefinition{
		names:        []string{"foo"},
		description:  "The foo config is utterly useless.",
		defaultValue: "abc",
	}

	assert.Equal(t,
		"foo",
		configFoo.getCanonicalName())

	assert.Equal(t,
		configFoo.description,
		configFoo.fullDescription())
}

// With two names, the extras are listed as synonyms in the full description.
func TestConfigDefinitionTwoNames(t *testing.T) {
	configFoo := configDefinition{
		names:        []string{"foo", "bar"},
		description:  "The foo config is utterly useless.",
		defaultValue: "abc",
	}

	assert.Equal(t,
		"foo",
		configFoo.getCanonicalName())

	assert.Equal(t,
		"(synonyms: bar) The foo config is utterly useless.",
		configFoo.fullDescription())
}

// With three names, the synonyms are comma-separated.
func TestConfigDefinitionThreeNames(t *testing.T) {
	configFoo := configDefinition{
		names:        []string{"foo", "bar", "baz"},
		description:  "The foo config is utterly useless.",
		defaultValue: "abc",
	}

	assert.Equal(t,
		"foo",
		configFoo.getCanonicalName())

	assert.Equal(t,
		`(synonyms: bar, baz) The foo config is utterly useless.`,
		configFoo.fullDescription())
}

// testState wires a server instance to in-memory pipes standing in for stdin
// and stdout, so tests can drive the git-annex protocol directly.
type testState struct {
	t                *testing.T
	server           *server
	mockStdinW       *io.PipeWriter
	mockStdoutReader *bufio.Reader

	// readLineTimeout is the maximum duration of time to wait for [server] to
	// write a line to be written to the mock stdout.
	readLineTimeout time.Duration

	fstestRun    *fstest.Run
	remoteName   string
	remotePrefix string
}

// makeTestState constructs a testState whose server reads from a mock stdin
// pipe and writes to a mock stdout pipe.
func makeTestState(t *testing.T) testState {
	stdinR, stdinW := io.Pipe()
	stdoutR, stdoutW := io.Pipe()

	return testState{
		t: t,
		server: &server{
			reader: bufio.NewReader(stdinR),
			writer: stdoutW,
		},
		mockStdinW:       stdinW,
		mockStdoutReader: bufio.NewReader(stdoutR),

		// The default readLineTimeout must be large enough to accommodate slow
		// operations on real remotes. Without a timeout, attempts to read a
		// line that's never written would block indefinitely.
		readLineTimeout: time.Second * 30,
	}
}

// requireRemoteIsEmpty asserts that the fstest remote holds no items.
func (h *testState) requireRemoteIsEmpty() {
	h.fstestRun.CheckRemoteItems(h.t)
}

// readLineWithTimeout attempts to read a line from the mock stdout. Returns an
// error if the read operation times out or fails for any reason.
func (h *testState) readLineWithTimeout() (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), h.readLineTimeout)
	defer cancel()

	lineChan := make(chan string)
	errChan := make(chan error)
	go func() {
		line, err := h.mockStdoutReader.ReadString('\n')
		if err != nil {
			errChan <- err
		} else {
			lineChan <- line
		}
	}()

	select {
	case line := <-lineChan:
		return line, nil
	case err := <-errChan:
		return "", err
	case <-ctx.Done():
		return "", fmt.Errorf("attempt to read line timed out: %w", ctx.Err())
	}
}

// requireReadLineExact requires that a line matching wantLine can be read from
// the mock stdout.
func (h *testState) requireReadLineExact(wantLine string) {
	receivedLine, err := h.readLineWithTimeout()
	require.NoError(h.t, err)
	require.Equal(h.t, wantLine+"\n", receivedLine)
}

// requireReadLine requires that a line can be read from the mock stdout and
// returns the line.
func (h *testState) requireReadLine() string {
	receivedLine, err := h.readLineWithTimeout()
	require.NoError(h.t, err)
	return receivedLine
}

// requireWriteLine requires that the given line is successfully written to the
// mock stdin.
func (h *testState) requireWriteLine(line string) {
	_, err := h.mockStdinW.Write([]byte(line + "\n"))
	require.NoError(h.t, err)
}

// Preconfigure the handle. This enables the calling test to skip the PREPARE
// handshake.
func (h *testState) preconfigureServer() {
	h.server.configRcloneRemoteName = h.remoteName
	h.server.configPrefix = h.remotePrefix
	h.server.configRcloneLayout = string(layoutModeNodir)
	h.server.configsDone = true
}

// Drop-in replacement for `filepath.Rel()` that works around a Windows-specific
// quirk when one of the paths begins with `\\?\` or `//?/`. It seems that
// fstest gives us paths with this prefix on Windows, which throws a wrench in
// the gitannex tests that need to construct relative paths from absolute paths.
// For a demonstration, see `TestWindowsFilepathRelQuirk` below.
//
// The `\\?\` prefix tells Windows APIs to pass strings unmodified to the
// filesystem without additional parsing [1]. Our workaround is roughly to add
// the prefix to whichever parameter doesn't have it (when the OS is Windows).
// I'm not sure this generalizes, but it works for the kinds of inputs we're
// throwing at it.
//
// [1]: https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
func relativeFilepathWorkaround(basepath, targpath string) (string, error) {
	if runtime.GOOS != "windows" {
		return filepath.Rel(basepath, targpath)
	}
	// Canonicalize paths to use backslashes.
	basepath = filepath.Clean(basepath)
	targpath = filepath.Clean(targpath)

	const winFilePrefixDisableStringParsing = `\\?\`
	baseHasPrefix := strings.HasPrefix(basepath, winFilePrefixDisableStringParsing)
	targHasPrefix := strings.HasPrefix(targpath, winFilePrefixDisableStringParsing)

	// Make the prefix consistent across both paths so filepath.Rel accepts them.
	if baseHasPrefix && !targHasPrefix {
		targpath = winFilePrefixDisableStringParsing + targpath
	}
	if !baseHasPrefix && targHasPrefix {
		basepath = winFilePrefixDisableStringParsing + basepath
	}

	return filepath.Rel(basepath, targpath)
}

// TestWindowsFilepathRelQuirk demonstrates the `\\?\` prefix quirk that
// relativeFilepathWorkaround exists to paper over, then checks the workaround.
func TestWindowsFilepathRelQuirk(t *testing.T) {
	if runtime.GOOS != "windows" {
		t.Skip()
	}

	t.Run("filepathRelQuirk", func(t *testing.T) {
		var err error

		// filepath.Rel fails when exactly one path carries the prefix...
		_, err = filepath.Rel(`C:\foo`, `\\?\C:\foo\bar`)
		require.Error(t, err)

		_, err = filepath.Rel(`C:/foo`, `//?/C:/foo/bar`)
		require.Error(t, err)

		_, err = filepath.Rel(`\\?\C:\foo`, `C:\foo\bar`)
		require.Error(t, err)

		_, err = filepath.Rel(`//?/C:/foo`, `C:/foo/bar`)
		require.Error(t, err)

		// ...but succeeds when both do.
		path, err := filepath.Rel(`\\?\C:\foo`, `\\?\C:\foo\bar`)
		require.NoError(t, err)
		require.Equal(t, path, `bar`)

		path, err = filepath.Rel(`//?/C:/foo`, `//?/C:/foo/bar`)
		require.NoError(t, err)
		require.Equal(t, path, `bar`)
	})

	t.Run("fstestAndTempDirHaveDifferentPrefixes", func(t *testing.T) {
		r := fstest.NewRun(t)
		p := r.Flocal.Root()
		require.True(t, strings.HasPrefix(p, `//?/`))

		tempDir := t.TempDir()
		require.False(t, strings.HasPrefix(tempDir, `//?/`))
		require.False(t, strings.HasPrefix(tempDir, `\\?\`))
	})

	t.Run("workaroundWorks", func(t *testing.T) {
		path, err := relativeFilepathWorkaround(`C:\foo`, `\\?\C:\foo\bar`)
		require.NoError(t, err)
		require.Equal(t, path, "bar")

		path, err = relativeFilepathWorkaround(`C:/foo`, `//?/C:/foo/bar`)
		require.NoError(t, err)
		require.Equal(t, path, "bar")

		path, err = relativeFilepathWorkaround(`\\?\C:\foo`, `C:\foo\bar`)
		require.NoError(t, err)
		require.Equal(t, path, `bar`)

		path, err = relativeFilepathWorkaround(`//?/C:/foo`, `C:/foo/bar`)
		require.NoError(t, err)
		require.Equal(t, path, `bar`)

		path, err = relativeFilepathWorkaround(`\\?\C:\foo`, `\\?\C:\foo\bar`)
		require.NoError(t, err)
		require.Equal(t, path, `bar`)
	})
}

// testCase pairs a protocol-driving test body with the error (if any) that
// server.run() is expected to return.
type testCase struct {
	label            string
	testProtocolFunc func(*testing.T, *testState)
	expectedError    string
}

// These test cases run against a backend selected by the `-remote` flag.
var fstestTestCases = []testCase{ { label: "HandlesInit", testProtocolFunc: func(t *testing.T, h *testState) { h.preconfigureServer() h.requireReadLineExact("VERSION 1") h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-SUCCESS") require.NoError(t, h.mockStdinW.Close()) }, }, { label: "HandlesListConfigs", testProtocolFunc: func(t *testing.T, h *testState) { h.preconfigureServer() h.requireReadLineExact("VERSION 1") h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-SUCCESS") h.requireWriteLine("LISTCONFIGS") require.Regexp(t, regexp.MustCompile(`^CONFIG rcloneremotename \(synonyms: target\) (.|\n)*$`), h.requireReadLine(), ) require.Regexp(t, regexp.MustCompile(`^CONFIG rcloneprefix \(synonyms: prefix\) (.|\n)*$`), h.requireReadLine(), ) require.Regexp(t, regexp.MustCompile(`^CONFIG rclonelayout \(synonyms: rclone_layout\) (.|\n)*$`), h.requireReadLine(), ) h.requireReadLineExact("CONFIGEND") require.NoError(t, h.mockStdinW.Close()) }, }, { label: "HandlesPrepare", testProtocolFunc: func(t *testing.T, h *testState) { h.requireReadLineExact("VERSION 1") h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) h.requireWriteLine("PREPARE") h.requireReadLineExact("GETCONFIG rcloneremotename") h.requireWriteLine("VALUE " + h.remoteName) h.requireReadLineExact("GETCONFIG rcloneprefix") h.requireWriteLine("VALUE " + h.remotePrefix) h.requireReadLineExact("GETCONFIG rclonelayout") h.requireWriteLine("VALUE frankencase") h.requireReadLineExact("PREPARE-SUCCESS") require.Equal(t, h.server.configRcloneRemoteName, h.remoteName) require.Equal(t, h.server.configPrefix, h.remotePrefix) require.True(t, h.server.configsDone) require.NoError(t, h.mockStdinW.Close()) }, }, { label: "HandlesPrepareWithUnknownLayout", testProtocolFunc: func(t *testing.T, h *testState) { h.requireReadLineExact("VERSION 1") h.requireWriteLine("EXTENSIONS INFO") // 
Advertise that we support the INFO extension h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) h.requireWriteLine("PREPARE") h.requireReadLineExact("GETCONFIG rcloneremotename") h.requireWriteLine("VALUE " + h.remoteName) h.requireReadLineExact("GETCONFIG rcloneprefix") h.requireWriteLine("VALUE " + h.remotePrefix) h.requireReadLineExact("GETCONFIG rclonelayout") h.requireWriteLine("VALUE nonexistentLayoutMode") h.requireReadLineExact("PREPARE-SUCCESS") require.Equal(t, h.server.configRcloneRemoteName, h.remoteName) require.Equal(t, h.server.configPrefix, h.remotePrefix) require.True(t, h.server.configsDone) h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-FAILURE unknown layout mode: nonexistentLayoutMode") require.NoError(t, h.mockStdinW.Close()) }, expectedError: "unknown layout mode: nonexistentLayoutMode", }, { label: "HandlesPrepareWithNonexistentRemote", testProtocolFunc: func(t *testing.T, h *testState) { h.requireReadLineExact("VERSION 1") h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) h.requireWriteLine("PREPARE") h.requireReadLineExact("GETCONFIG rcloneremotename") h.requireWriteLine("VALUE thisRemoteDoesNotExist") h.requireReadLineExact("GETCONFIG rcloneprefix") h.requireWriteLine("VALUE " + h.remotePrefix) h.requireReadLineExact("GETCONFIG rclonelayout") h.requireWriteLine("VALUE frankencase") h.requireReadLineExact("PREPARE-SUCCESS") require.Equal(t, h.server.configRcloneRemoteName, "thisRemoteDoesNotExist") require.Equal(t, h.server.configPrefix, h.remotePrefix) require.True(t, h.server.configsDone) h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: thisRemoteDoesNotExist") require.NoError(t, h.mockStdinW.Close()) }, expectedError: "remote does not exist or incorrectly contains a path: thisRemoteDoesNotExist", }, 
{ label: "HandlesPrepareWithPathAsRemote", testProtocolFunc: func(t *testing.T, h *testState) { h.requireReadLineExact("VERSION 1") h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) h.requireWriteLine("PREPARE") h.requireReadLineExact("GETCONFIG rcloneremotename") h.requireWriteLine("VALUE " + h.remotePrefix) h.requireReadLineExact("GETCONFIG rcloneprefix") h.requireWriteLine("VALUE /foo") h.requireReadLineExact("GETCONFIG rclonelayout") h.requireWriteLine("VALUE frankencase") h.requireReadLineExact("PREPARE-SUCCESS") require.Equal(t, h.server.configRcloneRemoteName, h.remotePrefix) require.Equal(t, h.server.configPrefix, "/foo") require.True(t, h.server.configsDone) h.requireWriteLine("INITREMOTE") require.Regexp(t, regexp.MustCompile("^INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: "), h.requireReadLine(), ) require.NoError(t, h.mockStdinW.Close()) }, expectedError: "remote does not exist or incorrectly contains a path:", }, { label: "HandlesPrepareWithNonexistentBackendAsRemote", testProtocolFunc: func(t *testing.T, h *testState) { h.requireReadLineExact("VERSION 1") h.requireWriteLine("PREPARE") h.requireReadLineExact("GETCONFIG rcloneremotename") h.requireWriteLine("VALUE :nonexistentBackend:") h.requireReadLineExact("GETCONFIG rcloneprefix") h.requireWriteLine("VALUE /foo") h.requireReadLineExact("GETCONFIG rclonelayout") h.requireWriteLine("VALUE frankencase") h.requireReadLineExact("PREPARE-SUCCESS") require.Equal(t, ":nonexistentBackend:", h.server.configRcloneRemoteName) require.Equal(t, "/foo", h.server.configPrefix) require.True(t, h.server.configsDone) h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-FAILURE backend does not exist: nonexistentBackend") require.NoError(t, h.mockStdinW.Close()) }, expectedError: "backend does not exist:", }, { label: "HandlesPrepareWithBackendAsRemote", 
testProtocolFunc: func(t *testing.T, h *testState) { h.requireReadLineExact("VERSION 1") h.requireWriteLine("PREPARE") h.requireReadLineExact("GETCONFIG rcloneremotename") h.requireWriteLine("VALUE :local:") h.requireReadLineExact("GETCONFIG rcloneprefix") h.requireWriteLine("VALUE /foo") h.requireReadLineExact("GETCONFIG rclonelayout") h.requireWriteLine("VALUE frankencase") h.requireReadLineExact("PREPARE-SUCCESS") require.Equal(t, ":local:", h.server.configRcloneRemoteName) require.Equal(t, "/foo", h.server.configPrefix) require.True(t, h.server.configsDone) h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-SUCCESS") require.NoError(t, h.mockStdinW.Close()) }, }, { label: "HandlesPrepareWithBackendMissingTrailingColonAsRemote", testProtocolFunc: func(t *testing.T, h *testState) { h.requireReadLineExact("VERSION 1") h.requireWriteLine("PREPARE") h.requireReadLineExact("GETCONFIG rcloneremotename") h.requireWriteLine("VALUE :local") h.requireReadLineExact("GETCONFIG rcloneprefix") h.requireWriteLine("VALUE /foo") h.requireReadLineExact("GETCONFIG rclonelayout") h.requireWriteLine("VALUE frankencase") h.requireReadLineExact("PREPARE-SUCCESS") require.Equal(t, ":local", h.server.configRcloneRemoteName) require.Equal(t, "/foo", h.server.configPrefix) require.True(t, h.server.configsDone) h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-FAILURE remote could not be parsed: :local") require.NoError(t, h.mockStdinW.Close()) }, expectedError: "remote could not be parsed:", }, { label: "HandlesPrepareWithBackendContainingOptionsAsRemote", testProtocolFunc: func(t *testing.T, h *testState) { h.requireReadLineExact("VERSION 1") h.requireWriteLine("PREPARE") h.requireReadLineExact("GETCONFIG rcloneremotename") h.requireWriteLine("VALUE :local,description=banana:") h.requireReadLineExact("GETCONFIG rcloneprefix") h.requireWriteLine("VALUE /foo") h.requireReadLineExact("GETCONFIG rclonelayout") h.requireWriteLine("VALUE frankencase") 
h.requireReadLineExact("PREPARE-SUCCESS") require.Equal(t, ":local,description=banana:", h.server.configRcloneRemoteName) require.Equal(t, "/foo", h.server.configPrefix) require.True(t, h.server.configsDone) h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-SUCCESS") require.NoError(t, h.mockStdinW.Close()) }, }, { label: "HandlesPrepareWithBackendContainingOptionsAndIllegalPathAsRemote", testProtocolFunc: func(t *testing.T, h *testState) { h.requireReadLineExact("VERSION 1") h.requireWriteLine("PREPARE") h.requireReadLineExact("GETCONFIG rcloneremotename") h.requireWriteLine("VALUE :local,description=banana:/bad/path") h.requireReadLineExact("GETCONFIG rcloneprefix") h.requireWriteLine("VALUE /foo") h.requireReadLineExact("GETCONFIG rclonelayout") h.requireWriteLine("VALUE frankencase") h.requireReadLineExact("PREPARE-SUCCESS") require.Equal(t, ":local,description=banana:/bad/path", h.server.configRcloneRemoteName) require.Equal(t, "/foo", h.server.configPrefix) require.True(t, h.server.configsDone) h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: :local,description=banana:/bad/path") require.NoError(t, h.mockStdinW.Close()) }, expectedError: "remote does not exist or incorrectly contains a path:", }, { label: "HandlesPrepareWithRemoteContainingOptions", testProtocolFunc: func(t *testing.T, h *testState) { const envVar = "RCLONE_CONFIG_fake_remote_TYPE" require.NoError(t, os.Setenv(envVar, "memory")) t.Cleanup(func() { require.NoError(t, os.Unsetenv(envVar)) }) h.requireReadLineExact("VERSION 1") h.requireWriteLine("PREPARE") h.requireReadLineExact("GETCONFIG rcloneremotename") h.requireWriteLine("VALUE fake_remote,banana=yes:") h.requireReadLineExact("GETCONFIG rcloneprefix") h.requireWriteLine("VALUE /foo") h.requireReadLineExact("GETCONFIG rclonelayout") h.requireWriteLine("VALUE frankencase") h.requireReadLineExact("PREPARE-SUCCESS") require.Equal(t, 
"fake_remote,banana=yes:", h.server.configRcloneRemoteName) require.Equal(t, "/foo", h.server.configPrefix) require.True(t, h.server.configsDone) h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-SUCCESS") require.NoError(t, h.mockStdinW.Close()) }, }, { label: "HandlesPrepareWithSynonyms", testProtocolFunc: func(t *testing.T, h *testState) { h.requireReadLineExact("VERSION 1") h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) h.requireWriteLine("PREPARE") h.requireReadLineExact("GETCONFIG rcloneremotename") // TODO check what git-annex does when asked for a config value it does not have. h.requireWriteLine("VALUE") h.requireReadLineExact("GETCONFIG target") h.requireWriteLine("VALUE " + h.remoteName) h.requireReadLineExact("GETCONFIG rcloneprefix") h.requireWriteLine("VALUE " + h.remotePrefix) h.requireReadLineExact("GETCONFIG rclonelayout") h.requireWriteLine("VALUE frankencase") h.requireReadLineExact("PREPARE-SUCCESS") require.Equal(t, h.server.configRcloneRemoteName, h.remoteName) require.Equal(t, h.server.configPrefix, h.remotePrefix) require.True(t, h.server.configsDone) require.NoError(t, h.mockStdinW.Close()) }, }, { label: "HandlesPrepareAndDoesNotTrimWhitespaceFromValue", testProtocolFunc: func(t *testing.T, h *testState) { h.requireReadLineExact("VERSION 1") h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) h.requireWriteLine("PREPARE") h.requireReadLineExact("GETCONFIG rcloneremotename") remoteNameWithSpaces := fmt.Sprintf(" %s ", h.remoteName) prefixWithWhitespace := fmt.Sprintf(" %s\t", h.remotePrefix) h.requireWriteLine(fmt.Sprintf("VALUE %s", remoteNameWithSpaces)) h.requireReadLineExact("GETCONFIG rcloneprefix") h.requireWriteLine(fmt.Sprintf("VALUE %s", prefixWithWhitespace)) 
h.requireReadLineExact("GETCONFIG rclonelayout") h.requireWriteLine("VALUE") h.requireReadLineExact("GETCONFIG rclone_layout") h.requireWriteLine("VALUE") h.requireReadLineExact("PREPARE-SUCCESS") require.Equal(t, h.server.configRcloneRemoteName, remoteNameWithSpaces) require.Equal(t, h.server.configPrefix, prefixWithWhitespace) require.True(t, h.server.configsDone) require.NoError(t, h.mockStdinW.Close()) }, }, { label: "HandlesEarlyError", testProtocolFunc: func(t *testing.T, h *testState) { h.preconfigureServer() h.requireReadLineExact("VERSION 1") h.requireWriteLine("ERROR foo") require.NoError(t, h.mockStdinW.Close()) }, expectedError: "received error message from git-annex: foo", }, // Test what happens when the git-annex client sends "GETCONFIG", but // doesn't understand git-annex's response. { label: "ConfigFail", testProtocolFunc: func(t *testing.T, h *testState) { h.requireReadLineExact("VERSION 1") h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) h.requireWriteLine("PREPARE") h.requireReadLineExact("GETCONFIG rcloneremotename") h.requireWriteLine("ERROR ineffable error") h.requireReadLineExact("PREPARE-FAILURE Error getting configs") require.NoError(t, h.mockStdinW.Close()) }, expectedError: "failed to parse config value: ERROR ineffable error", }, { label: "TransferStoreEmptyPath", testProtocolFunc: func(t *testing.T, h *testState) { h.preconfigureServer() h.requireReadLineExact("VERSION 1") h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-SUCCESS") // Note the whitespace following the key. h.requireWriteLine("TRANSFER STORE Key ") h.requireReadLineExact("TRANSFER-FAILURE failed to parse file path") require.NoError(t, h.mockStdinW.Close()) }, expectedError: "failed to parse file", }, // Repeated EXTENSIONS messages add to each other rather than overriding // prior advertised extensions. 
This behavior is not mandated by the // protocol design. { label: "ExtensionsCompound", testProtocolFunc: func(t *testing.T, h *testState) { h.preconfigureServer() h.requireReadLineExact("VERSION 1") h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-SUCCESS") h.requireWriteLine("EXTENSIONS") h.requireReadLineExact("EXTENSIONS") require.False(t, h.server.extensionInfo) require.False(t, h.server.extensionAsync) require.False(t, h.server.extensionGetGitRemoteName) require.False(t, h.server.extensionUnavailableResponse) h.requireWriteLine("EXTENSIONS INFO") h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) require.False(t, h.server.extensionAsync) require.False(t, h.server.extensionGetGitRemoteName) require.False(t, h.server.extensionUnavailableResponse) h.requireWriteLine("EXTENSIONS ASYNC") h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) require.True(t, h.server.extensionAsync) require.False(t, h.server.extensionGetGitRemoteName) require.False(t, h.server.extensionUnavailableResponse) h.requireWriteLine("EXTENSIONS GETGITREMOTENAME") h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) require.True(t, h.server.extensionAsync) require.True(t, h.server.extensionGetGitRemoteName) require.False(t, h.server.extensionUnavailableResponse) h.requireWriteLine("EXTENSIONS UNAVAILABLERESPONSE") h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) require.True(t, h.server.extensionAsync) require.True(t, h.server.extensionGetGitRemoteName) require.True(t, h.server.extensionUnavailableResponse) require.NoError(t, h.mockStdinW.Close()) }, }, { label: "ExtensionsIdempotent", testProtocolFunc: func(t *testing.T, h *testState) { h.preconfigureServer() h.requireReadLineExact("VERSION 1") h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-SUCCESS") h.requireWriteLine("EXTENSIONS") h.requireReadLineExact("EXTENSIONS") require.False(t, 
h.server.extensionInfo) require.False(t, h.server.extensionAsync) require.False(t, h.server.extensionGetGitRemoteName) require.False(t, h.server.extensionUnavailableResponse) h.requireWriteLine("EXTENSIONS") h.requireReadLineExact("EXTENSIONS") require.False(t, h.server.extensionInfo) require.False(t, h.server.extensionAsync) require.False(t, h.server.extensionGetGitRemoteName) require.False(t, h.server.extensionUnavailableResponse) h.requireWriteLine("EXTENSIONS INFO") h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) require.False(t, h.server.extensionAsync) require.False(t, h.server.extensionGetGitRemoteName) require.False(t, h.server.extensionUnavailableResponse) h.requireWriteLine("EXTENSIONS INFO") h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) require.False(t, h.server.extensionAsync) require.False(t, h.server.extensionGetGitRemoteName) require.False(t, h.server.extensionUnavailableResponse) h.requireWriteLine("EXTENSIONS ASYNC ASYNC") h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) require.True(t, h.server.extensionAsync) require.False(t, h.server.extensionGetGitRemoteName) require.False(t, h.server.extensionUnavailableResponse) require.NoError(t, h.mockStdinW.Close()) }, }, { label: "ExtensionsSupportsMultiple", testProtocolFunc: func(t *testing.T, h *testState) { h.preconfigureServer() h.requireReadLineExact("VERSION 1") h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-SUCCESS") h.requireWriteLine("EXTENSIONS") h.requireReadLineExact("EXTENSIONS") require.False(t, h.server.extensionInfo) require.False(t, h.server.extensionAsync) require.False(t, h.server.extensionGetGitRemoteName) require.False(t, h.server.extensionUnavailableResponse) h.requireWriteLine("EXTENSIONS INFO ASYNC") h.requireReadLineExact("EXTENSIONS") require.True(t, h.server.extensionInfo) require.True(t, h.server.extensionAsync) require.False(t, h.server.extensionGetGitRemoteName) 
require.False(t, h.server.extensionUnavailableResponse) require.NoError(t, h.mockStdinW.Close()) }, }, { label: "TransferStoreAbsolute", testProtocolFunc: func(t *testing.T, h *testState) { h.preconfigureServer() h.requireReadLineExact("VERSION 1") h.requireWriteLine("INITREMOTE") h.requireReadLineExact("INITREMOTE-SUCCESS") // Create temp file for transfer with an absolute path. item := h.fstestRun.WriteFile("file.txt", "HELLO", time.Now()) absPath := filepath.Join(h.fstestRun.Flocal.Root(), item.Path) require.True(t, filepath.IsAbs(absPath)) // Specify an absolute path to transfer. h.requireWriteLine("TRANSFER STORE KeyAbsolute " + absPath) h.requireReadLineExact("TRANSFER-SUCCESS STORE KeyAbsolute") // Check that the file was transferred. remoteItem := fstest.NewItem("KeyAbsolute", "HELLO", item.ModTime) h.fstestRun.CheckRemoteItems(t, remoteItem) // Transfer the same absolute path a second time, but with a different key. h.requireWriteLine("TRANSFER STORE KeyAbsolute2 " + absPath) h.requireReadLineExact("TRANSFER-SUCCESS STORE KeyAbsolute2") // Check that the same file was transferred to a new name. remoteItem2 := fstest.NewItem("KeyAbsolute2", "HELLO", item.ModTime) h.fstestRun.CheckRemoteItems(t, remoteItem, remoteItem2) h.requireWriteLine("CHECKPRESENT KeyAbsolute2") h.requireReadLineExact("CHECKPRESENT-SUCCESS KeyAbsolute2") h.requireWriteLine("CHECKPRESENT KeyThatDoesNotExist") h.requireReadLineExact("CHECKPRESENT-FAILURE KeyThatDoesNotExist") require.NoError(t, h.mockStdinW.Close()) }, }, // Test that the TRANSFER command understands simple relative paths // consisting only of a file name. { label: "TransferStoreRelative", testProtocolFunc: func(t *testing.T, h *testState) { // Save the current working directory so we can restore it when this // test ends. cwd, err := os.Getwd() require.NoError(t, err)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mountlib/rc.go
cmd/mountlib/rc.go
package mountlib

import (
	"context"
	"errors"
	"sort"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/rc"
	"github.com/rclone/rclone/vfs/vfscommon"
)

var (
	// mutex to protect all the variables in this block
	mountMu sync.Mutex
	// Mount functions available, keyed by mount type name
	mountFns = map[string]MountFn{}
	// Map of mounted path => MountPoint for mounts made over the rc
	liveMounts = map[string]*MountPoint{}
	// Supported mount types in priority order
	supportedMountTypes = []string{"mount", "cmount", "mount2"}
)

// ResolveMountMethod returns the mount function by name.
//
// If mountType is empty, the first registered implementation in
// supportedMountTypes priority order is used. The returned MountFn is
// nil if no matching implementation is registered.
//
// Callers must serialise access (eg by holding mountMu) since mountFns
// is shared state.
func ResolveMountMethod(mountType string) (string, MountFn) {
	if mountType != "" {
		return mountType, mountFns[mountType]
	}
	for _, mountType := range supportedMountTypes {
		if mountFns[mountType] != nil {
			return mountType, mountFns[mountType]
		}
	}
	return "", nil
}

// AddRc adds mount and unmount functionality to rc, registering
// mountFunction under the name mountUtilName (eg "mount", "cmount").
func AddRc(mountUtilName string, mountFunction MountFn) {
	mountMu.Lock()
	defer mountMu.Unlock()
	// rcMount allows the mount command to be run from rc
	mountFns[mountUtilName] = mountFunction
}

func init() {
	rc.Add(rc.Call{
		Path:         "mount/mount",
		AuthRequired: true,
		Fn:           mountRc,
		Title:        "Create a new mount point",
		Help: `rclone allows Linux, FreeBSD, macOS and Windows to mount any of
Rclone's cloud storage systems as a file system with FUSE.

If no mountType is provided, the priority is given as follows:

1. mount
2. cmount
3. mount2

This takes the following parameters:

- fs - a remote path to be mounted (required)
- mountPoint: valid path on the local machine (required)
- mountType: one of the values (mount, cmount, mount2) specifies the mount implementation to use
- mountOpt: a JSON object with Mount options in.
- vfsOpt: a JSON object with VFS options in.

Example:

` + "```console" + `
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=mount
rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
` + "```" + `

The vfsOpt are as described in options/get and can be seen in the
"vfs" section when running and the mountOpt can be seen in the "mount" section:

` + "```console" + `
rclone rc options/get
` + "```" + `
`,
	})
}

// mountRc implements the mount/mount rc call - see the Help text in
// the registration above for the parameters it takes.
func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
	mountPoint, err := in.GetString("mountPoint")
	if err != nil {
		return nil, err
	}

	vfsOpt := vfscommon.Opt
	err = in.GetStructMissingOK("vfsOpt", &vfsOpt)
	if err != nil {
		return nil, err
	}

	mountOpt := Opt
	err = in.GetStructMissingOK("mountOpt", &mountOpt)
	if err != nil {
		return nil, err
	}

	// Daemonizing would detach the mount from the process serving the API
	if mountOpt.Daemon {
		return nil, errors.New("daemon option not supported over the API")
	}

	mountType, err := in.GetString("mountType")

	mountMu.Lock()
	defer mountMu.Unlock()

	if err != nil {
		// mountType is optional - fall back to the default resolution order
		mountType = ""
	}
	mountType, mountFn := ResolveMountMethod(mountType)
	if mountFn == nil {
		return nil, errors.New("mount option specified is not registered, or is invalid")
	}

	// Get Fs.fs to be mounted from fs parameter in the params
	fdst, err := rc.GetFs(ctx, in)
	if err != nil {
		return nil, err
	}

	mnt := NewMountPoint(mountFn, mountPoint, fdst, &mountOpt, &vfsOpt)
	_, err = mnt.Mount()
	if err != nil {
		fs.Logf(nil, "mount FAILED: %v", err)
		return nil, err
	}
	go func() {
		// Use a goroutine-local err: writing to mountRc's named return
		// value from here would be a data race once mountRc has returned.
		if err := mnt.Wait(); err != nil {
			fs.Logf(nil, "unmount FAILED: %v", err)
			return
		}
		mountMu.Lock()
		defer mountMu.Unlock()
		delete(liveMounts, mountPoint)
	}()
	// Add mount to list if mount point was successfully created
	liveMounts[mountPoint] = mnt

	fs.Debugf(nil, "Mount for %s created at %s using %s", fdst.String(), mountPoint, mountType)
	return nil, nil
}

func init() {
	rc.Add(rc.Call{
		Path:         "mount/unmount",
		AuthRequired: true,
		Fn:           unMountRc,
		Title:        "Unmount selected active mount",
		Help: `
rclone allows Linux, FreeBSD, macOS and Windows to mount any of
Rclone's cloud storage systems as a file system with FUSE.

This takes the following parameters:

- mountPoint: valid path on the local machine where the mount was created (required)

Example:

    rclone rc mount/unmount mountPoint=/home/<user>/mountPoint
`,
	})
}

// unMountRc implements the mount/unmount rc call, unmounting and
// forgetting the mount created on mountPoint.
func unMountRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
	mountPoint, err := in.GetString("mountPoint")
	if err != nil {
		return nil, err
	}
	mountMu.Lock()
	defer mountMu.Unlock()
	mountInfo, found := liveMounts[mountPoint]
	if !found {
		return nil, errors.New("mount not found")
	}
	if err = mountInfo.Unmount(); err != nil {
		return nil, err
	}
	delete(liveMounts, mountPoint)
	return nil, nil
}

func init() {
	rc.Add(rc.Call{
		Path:         "mount/types",
		AuthRequired: true,
		Fn:           mountTypesRc,
		Title:        "Show all possible mount types",
		Help: `This shows all possible mount types and returns them as a list.

This takes no parameters and returns

- mountTypes: list of mount types

The mount types are strings like "mount", "mount2", "cmount" and can
be passed to mount/mount as the mountType parameter.

Eg

    rclone rc mount/types
`,
	})
}

// mountTypesRc returns a sorted list of available mount types.
func mountTypesRc(_ context.Context, _ rc.Params) (out rc.Params, err error) {
	mountMu.Lock()
	defer mountMu.Unlock()
	// Non-nil, pre-sized slice so an empty result marshals as [] not null
	mountTypes := make([]string, 0, len(mountFns))
	for mountType := range mountFns {
		mountTypes = append(mountTypes, mountType)
	}
	sort.Strings(mountTypes)
	return rc.Params{
		"mountTypes": mountTypes,
	}, nil
}

func init() {
	rc.Add(rc.Call{
		Path:         "mount/listmounts",
		AuthRequired: true,
		Fn:           listMountsRc,
		Title:        "Show current mount points",
		Help: `This shows currently mounted points, which can be used for performing an unmount.

This takes no parameters and returns

- mountPoints: list of current mount points

Eg

    rclone rc mount/listmounts
`,
	})
}

// MountInfo is a transitional structure for json marshaling
type MountInfo struct {
	Fs         string    `json:"Fs"`
	MountPoint string    `json:"MountPoint"`
	MountedOn  time.Time `json:"MountedOn"`
}

// listMountsRc returns a list of current mounts sorted by mount path
func listMountsRc(_ context.Context, _ rc.Params) (out rc.Params, err error) {
	mountMu.Lock()
	defer mountMu.Unlock()
	// Sort the mount paths for deterministic output
	keys := make([]string, 0, len(liveMounts))
	for key := range liveMounts {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	// Non-nil slice so an empty result marshals as [] not null
	mountPoints := make([]MountInfo, 0, len(keys))
	for _, k := range keys {
		m := liveMounts[k]
		mountPoints = append(mountPoints, MountInfo{
			Fs:         fs.ConfigString(m.Fs),
			MountPoint: m.MountPoint,
			MountedOn:  m.MountedOn,
		})
	}
	return rc.Params{
		"mountPoints": mountPoints,
	}, nil
}

func init() {
	rc.Add(rc.Call{
		Path:         "mount/unmountall",
		AuthRequired: true,
		Fn:           unmountAll,
		Title:        "Unmount all active mounts",
		Help: `
rclone allows Linux, FreeBSD, macOS and Windows to mount any of
Rclone's cloud storage systems as a file system with FUSE.

This takes no parameters and returns error if unmount does not succeed.

Eg

    rclone rc mount/unmountall
`,
	})
}

// unmountAll unmounts all the created mounts, stopping at and
// returning the first error encountered.
func unmountAll(_ context.Context, _ rc.Params) (out rc.Params, err error) {
	mountMu.Lock()
	defer mountMu.Unlock()
	for mountPoint, mountInfo := range liveMounts {
		if err = mountInfo.Unmount(); err != nil {
			fs.Debugf(nil, "Couldn't unmount : %s", mountPoint)
			return nil, err
		}
		delete(liveMounts, mountPoint)
	}
	return nil, nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mountlib/utils.go
cmd/mountlib/utils.go
package mountlib

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/rclone/rclone/fs"
)

// ClipBlocks clips the block count pointed to by b to the OS maximum.
//
// On operating systems with no limit it leaves *b unchanged.
func ClipBlocks(b *uint64) {
	// Named maxBlocks to avoid shadowing the max builtin (Go 1.21+)
	var maxBlocks uint64
	switch runtime.GOOS {
	case "windows":
		if runtime.GOARCH == "386" {
			maxBlocks = (1 << 32) - 1
		} else {
			maxBlocks = (1 << 43) - 1
		}
	case "darwin":
		// OSX FUSE only supports 32 bit number of blocks
		// https://github.com/osxfuse/osxfuse/issues/396
		maxBlocks = (1 << 32) - 1
	default:
		// no clipping
		return
	}
	if *b > maxBlocks {
		*b = maxBlocks
	}
}

// CheckOverlap checks that root doesn't overlap with a mountpoint.
//
// The check only applies to the local backend (or an unnamed Fs) -
// remote backends cannot overlap with a local mount point.
func CheckOverlap(f fs.Fs, mountpoint string) error {
	name := f.Name()
	if name != "" && name != "local" {
		return nil
	}
	rootAbs := absPath(f.Root())
	mountpointAbs := absPath(mountpoint)
	// absPath guarantees a trailing "/" so a prefix match means one
	// directory contains the other.
	if strings.HasPrefix(rootAbs, mountpointAbs) || strings.HasPrefix(mountpointAbs, rootAbs) {
		const msg = "mount point %q (%q) and directory to be mounted %q (%q) mustn't overlap"
		return fmt.Errorf(msg, mountpoint, mountpointAbs, f.Root(), rootAbs)
	}
	return nil
}

// absPath is a helper function for CheckOverlap. It resolves symlinks,
// makes path absolute, normalises it to forward slashes and appends a
// trailing "/" so prefix comparisons imply directory containment.
func absPath(path string) string {
	if abs, err := filepath.EvalSymlinks(path); err == nil {
		path = abs
	}
	if abs, err := filepath.Abs(path); err == nil {
		path = abs
	}
	path = filepath.ToSlash(path)
	if runtime.GOOS == "windows" {
		// Removes any UNC long path prefix to make sure a simple HasPrefix test
		// in CheckOverlap works when one is UNC (root) and one is not (mountpoint).
		path = strings.TrimPrefix(path, `//?/`)
	}
	if !strings.HasSuffix(path, "/") {
		path += "/"
	}
	return path
}

// CheckAllowNonEmpty checks --allow-non-empty flag, and if not used verifies that mountpoint is empty.
func CheckAllowNonEmpty(mountpoint string, opt *Options) error {
	if !opt.AllowNonEmpty {
		return CheckMountEmpty(mountpoint)
	}
	return nil
}

// checkMountEmpty checks if mountpoint folder is empty by listing it.
func checkMountEmpty(mountpoint string) error { fp, err := os.Open(mountpoint) if err != nil { return fmt.Errorf("cannot open: %s: %w", mountpoint, err) } defer fs.CheckClose(fp, &err) _, err = fp.Readdirnames(1) if err == io.EOF { return nil } const msg = "%q is not empty, use --allow-non-empty to mount anyway" if err == nil { return fmt.Errorf(msg, mountpoint) } return fmt.Errorf(msg+": %w", mountpoint, err) } // SetVolumeName with sensible default func (m *MountPoint) SetVolumeName(vol string) { if vol == "" { vol = fs.ConfigString(m.Fs) } m.MountOpt.SetVolumeName(vol) } // SetVolumeName removes special characters from volume name if necessary func (o *Options) SetVolumeName(vol string) { vol = strings.ReplaceAll(vol, ":", " ") vol = strings.ReplaceAll(vol, "/", " ") vol = strings.TrimSpace(vol) if runtime.GOOS == "windows" && len(vol) > 32 { vol = vol[:32] } o.VolumeName = vol } // SetDeviceName with sensible default func (m *MountPoint) SetDeviceName(dev string) { if dev == "" { dev = fs.ConfigString(m.Fs) } m.MountOpt.DeviceName = dev }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mountlib/mount.go
cmd/mountlib/mount.go
// Package mountlib provides the mount command.
package mountlib

import (
	"context"
	_ "embed"
	"fmt"
	"os"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/daemonize"
	"github.com/rclone/rclone/lib/systemd"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/rclone/rclone/vfs/vfsflags"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

//go:embed mount.md
var mountHelp string

// help returns the help string cleaned up to simplify appending
//
// Every "@" in mount.md is replaced with the concrete command name
// (mount, cmount, mount2, ...) so one help file serves all variants.
func help(commandName string) string {
	return strings.TrimSpace(strings.ReplaceAll(mountHelp, "@", commandName)) + "\n\n"
}

// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
	Name:    "debug_fuse",
	Default: false,
	Help:    "Debug the FUSE internals - needs -v",
	Groups:  "Mount",
}, {
	Name:    "attr_timeout",
	Default: fs.Duration(1 * time.Second),
	Help:    "Time for which file/directory attributes are cached",
	Groups:  "Mount",
}, {
	Name:     "option",
	Default:  []string{},
	Help:     "Option for libfuse/WinFsp (repeat if required)",
	Groups:   "Mount",
	ShortOpt: "o",
}, {
	Name:    "fuse_flag",
	Default: []string{},
	Help:    "Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required)",
	Groups:  "Mount",
}, {
	Name:    "daemon",
	Default: false,
	Help:    "Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... to monitor) (not supported on Windows)",
	Groups:  "Mount",
}, {
	Name: "daemon_timeout",
	Default: func() fs.Duration {
		if runtime.GOOS == "darwin" {
			// DaemonTimeout defaults to non-zero for macOS
			// (this is a macOS specific kernel option unrelated to DaemonWait)
			return fs.Duration(10 * time.Minute)
		}
		return 0
	}(),
	Help:   "Time limit for rclone to respond to kernel (not supported on Windows)",
	Groups: "Mount",
}, {
	Name:    "default_permissions",
	Default: false,
	Help:    "Makes kernel enforce access control based on the file mode (not supported on Windows)",
	Groups:  "Mount",
}, {
	Name:    "allow_non_empty",
	Default: false,
	Help:    "Allow mounting over a non-empty directory (not supported on Windows)",
	Groups:  "Mount",
}, {
	Name:    "allow_root",
	Default: false,
	Help:    "Allow access to root user (not supported on Windows)",
	Groups:  "Mount",
}, {
	Name:    "allow_other",
	Default: false,
	Help:    "Allow access to other users (not supported on Windows)",
	Groups:  "Mount",
}, {
	Name:    "async_read",
	Default: true,
	Help:    "Use asynchronous reads (not supported on Windows)",
	Groups:  "Mount",
}, {
	Name:    "max_read_ahead",
	Default: fs.SizeSuffix(128 * 1024),
	Help:    "The number of bytes that can be prefetched for sequential reads (not supported on Windows)",
	Groups:  "Mount",
}, {
	Name:    "write_back_cache",
	Default: false,
	Help:    "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)",
	Groups:  "Mount",
}, {
	Name:    "devname",
	Default: "",
	Help:    "Set the device name - default is remote:path",
	Groups:  "Mount",
}, {
	Name:    "mount_case_insensitive",
	Default: fs.Tristate{},
	Help:    "Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto)",
	Groups:  "Mount",
}, {
	Name:    "direct_io",
	Default: false,
	Help:    "Use Direct IO, disables caching of data",
	Groups:  "Mount",
}, {
	Name:    "volname",
	Default: "",
	Help:    "Set the volume name (supported on Windows and OSX only)",
	Groups:  "Mount",
}, {
	Name:    "noappledouble",
	Default: true,
	Help:    "Ignore Apple Double (._) and .DS_Store files (supported on OSX only)",
	Groups:  "Mount",
}, {
	Name:    "noapplexattr",
	Default: false,
	Help:    "Ignore all \"com.apple.*\" extended attributes (supported on OSX only)",
	Groups:  "Mount",
}, {
	Name:    "network_mode",
	Default: false,
	Help:    "Mount as remote network drive, instead of fixed disk drive (supported on Windows only)",
	Groups:  "Mount",
}, {
	Name: "daemon_wait",
	Default: func() fs.Duration {
		switch runtime.GOOS {
		case "linux":
			// Linux provides /proc/mounts to check mount status
			// so --daemon-wait means *maximum* time to wait
			return fs.Duration(60 * time.Second)
		case "darwin", "openbsd", "freebsd", "netbsd":
			// On BSD we can't check mount status yet
			// so --daemon-wait is just a *constant* delay
			return fs.Duration(5 * time.Second)
		}
		return 0
	}(),
	Help:   "Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows)",
	Groups: "Mount",
}}

func init() {
	// Registers the option block above under the "mount" group so the
	// values are filled in from flags/config/environment into Opt.
	fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "mount", Opt: &Opt, Options: OptionsInfo})
}

// Options for creating the mount
type Options struct {
	DebugFUSE          bool          `config:"debug_fuse"`
	AllowNonEmpty      bool          `config:"allow_non_empty"`
	AllowRoot          bool          `config:"allow_root"`
	AllowOther         bool          `config:"allow_other"`
	DefaultPermissions bool          `config:"default_permissions"`
	WritebackCache     bool          `config:"write_back_cache"`
	Daemon             bool          `config:"daemon"`
	DaemonWait         fs.Duration   `config:"daemon_wait"` // time to wait for ready mount from daemon, maximum on Linux or constant on macOS/BSD
	MaxReadAhead       fs.SizeSuffix `config:"max_read_ahead"`
	ExtraOptions       []string      `config:"option"`
	ExtraFlags         []string      `config:"fuse_flag"`
	AttrTimeout        fs.Duration   `config:"attr_timeout"` // how long the kernel caches attribute for
	DeviceName         string        `config:"devname"`
	VolumeName         string        `config:"volname"`
	NoAppleDouble      bool          `config:"noappledouble"`
	NoAppleXattr       bool          `config:"noapplexattr"`
	DaemonTimeout      fs.Duration   `config:"daemon_timeout"` // OSXFUSE only
	AsyncRead          bool          `config:"async_read"`
	NetworkMode        bool          `config:"network_mode"` // Windows only
	DirectIO           bool          `config:"direct_io"`    // use Direct IO for file access
	CaseInsensitive    fs.Tristate   `config:"mount_case_insensitive"`
}

type (
	// UnmountFn is called to unmount the file system
	UnmountFn func() error
	// MountFn is called to mount the file system
	MountFn func(VFS *vfs.VFS, mountpoint string, opt *Options) (<-chan error, func() error, error)
)

// MountPoint represents a mount with options and runtime state
type MountPoint struct {
	MountPoint string            // path the remote is mounted on
	MountedOn  time.Time         // set by Mount on success
	MountOpt   Options           // copy of the mount options
	VFSOpt     vfscommon.Options // copy of the VFS options
	Fs         fs.Fs             // the remote being mounted
	VFS        *vfs.VFS          // created by Mount
	MountFn    MountFn           // backend-specific mount implementation
	UnmountFn  UnmountFn         // set by MountFn on success
	ErrChan    <-chan error      // delivers the final mount error, see Wait
}

// NewMountPoint makes a new mounting structure
func NewMountPoint(mount MountFn, mountPoint string, f fs.Fs, mountOpt *Options, vfsOpt *vfscommon.Options) *MountPoint {
	return &MountPoint{
		MountFn:    mount,
		MountPoint: mountPoint,
		Fs:         f,
		MountOpt:   *mountOpt,
		VFSOpt:     *vfsOpt,
	}
}

// Global constants
const (
	MaxLeafSize = 1024 // don't pass file names longer than this
)

// Opt contains options set by command line flags
var Opt Options

// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
	flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
}

const (
	pollInterval = 100 * time.Millisecond
)

// WaitMountReady waits until mountpoint is mounted by rclone.
//
// If the mount daemon dies prematurely it will notice too.
func WaitMountReady(mountpoint string, timeout time.Duration, daemon *os.Process) (err error) {
	endTime := time.Now().Add(timeout)
	for {
		// On platforms where mount status can be read (Linux) succeed as
		// soon as the mountpoint reports ready.
		if CanCheckMountReady {
			err = CheckMountReady(mountpoint)
			if err == nil {
				break
			}
		}
		// Bail out immediately if the daemon process has died.
		err = daemonize.Check(daemon)
		if err != nil {
			return err
		}
		// Sleep for pollInterval, clipped to the time remaining.
		delay := time.Until(endTime)
		if delay <= 0 {
			break
		}
		if delay > pollInterval {
			delay = pollInterval
		}
		time.Sleep(delay)
	}
	return
}

// NewMountCommand makes a mount command with the given name and Mount function
func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Command {
	var commandDefinition = &cobra.Command{
		Use:    commandName + " remote:path /path/to/mountpoint",
		Hidden: hidden,
		Short:  `Mount the remote as file system on a mountpoint.`,
		Long:   help(commandName) + strings.TrimSpace(vfs.Help()),
		Annotations: map[string]string{
			"versionIntroduced": "v1.33",
			"groups":            "Filter",
		},
		Run: func(command *cobra.Command, args []string) {
			cmd.CheckArgs(2, 2, command, args)
			if fs.GetConfig(context.Background()).UseListR {
				fs.Logf(nil, "--fast-list does nothing on a mount")
			}
			if Opt.Daemon {
				config.PassConfigKeyForDaemonization = true
			}
			if os.Getenv("PATH") == "" && runtime.GOOS != "windows" {
				// PATH can be unset when running under Autofs or Systemd mount
				fs.Debugf(nil, "Using fallback PATH to run fusermount")
				_ = os.Setenv("PATH", "/bin:/usr/bin")
			}
			// Show stats if the user has specifically requested them
			if cmd.ShowStats() {
				defer cmd.StartStats()()
			}
			mnt := NewMountPoint(mount, args[1], cmd.NewFsDir(args), &Opt, &vfscommon.Opt)
			mountDaemon, err := mnt.Mount()

			// Wait for foreground mount, if any...
			if mountDaemon == nil {
				if err == nil {
					defer systemd.Notify()()
					err = mnt.Wait()
				}
				if err != nil {
					fs.Fatalf(nil, "Fatal error: %v", err)
				}
				return
			}

			// Wait for mountDaemon, if any...
			// killOnce ensures the daemon is signalled at most once even if
			// both the atexit handler and the timeout path fire.
			killOnce := sync.Once{}
			killDaemon := func(reason string) {
				killOnce.Do(func() {
					if err := mountDaemon.Signal(os.Interrupt); err != nil {
						fs.Errorf(nil, "%s. Failed to terminate daemon pid %d: %v", reason, mountDaemon.Pid, err)
						return
					}
					fs.Debugf(nil, "%s. Terminating daemon pid %d", reason, mountDaemon.Pid)
				})
			}

			if err == nil && Opt.DaemonWait > 0 {
				handle := atexit.Register(func() {
					killDaemon("Got interrupt")
				})
				err = WaitMountReady(mnt.MountPoint, time.Duration(Opt.DaemonWait), mountDaemon)
				if err != nil {
					killDaemon("Daemon timed out")
				}
				atexit.Unregister(handle)
			}
			if err != nil {
				fs.Fatalf(nil, "Fatal error: %v", err)
			}
		},
	}

	// Register the command
	cmd.Root.AddCommand(commandDefinition)

	// Add flags
	cmdFlags := commandDefinition.Flags()
	AddFlags(cmdFlags)
	vfsflags.AddFlags(cmdFlags)

	return commandDefinition
}

// Mount the remote at mountpoint
//
// Returns a non-nil mountDaemon when --daemon forked a background process
// (in which case this process did not itself mount anything).
func (m *MountPoint) Mount() (mountDaemon *os.Process, err error) {
	// Ensure sensible defaults
	m.SetVolumeName(m.MountOpt.VolumeName)
	m.SetDeviceName(m.MountOpt.DeviceName)

	// Start background task if --daemon is specified
	if m.MountOpt.Daemon {
		mountDaemon, err = daemonize.StartDaemon(os.Args)
		// In the parent (mountDaemon != nil) or on failure return now;
		// only the forked child falls through to do the actual mount.
		if mountDaemon != nil || err != nil {
			return mountDaemon, err
		}
	}

	m.VFS = vfs.New(m.Fs, &m.VFSOpt)

	m.ErrChan, m.UnmountFn, err = m.MountFn(m.VFS, m.MountPoint, &m.MountOpt)
	if err != nil {
		if len(os.Args) > 0 && strings.HasPrefix(os.Args[0], "/snap/") {
			return nil, fmt.Errorf("mounting is not supported when running from snap")
		}
		return nil, fmt.Errorf("failed to mount FUSE fs: %w", err)
	}
	m.MountedOn = time.Now()
	return nil, nil
}

// Wait for mount end
func (m *MountPoint) Wait() error {
	// Unmount on exit
	//
	// finaliseOnce makes finalise safe to call from both the atexit
	// handler and the normal path below.
	var finaliseOnce sync.Once
	finalise := func() {
		finaliseOnce.Do(func() {
			// Unmount only if directory was mounted by rclone, e.g. don't unmount autofs hooks.
			if err := CheckMountReady(m.MountPoint); err != nil {
				fs.Debugf(m.MountPoint, "Unmounted externally. Just exit now.")
				return
			}
			if err := m.Unmount(); err != nil {
				fs.Errorf(m.MountPoint, "Failed to unmount: %v", err)
			} else {
				fs.Logf(m.MountPoint, "Unmounted rclone mount")
			}
		})
	}
	fnHandle := atexit.Register(finalise)
	defer atexit.Unregister(fnHandle)

	// Block until the mount implementation reports the mount has ended.
	err := <-m.ErrChan
	finalise()
	if err != nil {
		return fmt.Errorf("failed to umount FUSE fs: %w", err)
	}
	return nil
}

// Unmount the specified mountpoint
func (m *MountPoint) Unmount() (err error) {
	return m.UnmountFn()
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mountlib/rc_test.go
cmd/mountlib/rc_test.go
package mountlib_test

import (
	"context"
	"os"
	"path/filepath"
	"runtime"
	"testing"
	"time"

	_ "github.com/rclone/rclone/backend/local"
	_ "github.com/rclone/rclone/cmd/cmount"
	_ "github.com/rclone/rclone/cmd/mount"
	_ "github.com/rclone/rclone/cmd/mount2"
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs/config/configfile"
	"github.com/rclone/rclone/fs/rc"
	"github.com/rclone/rclone/fstest/testy"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestRc exercises the mount remote-control API end to end:
// mount/types, mount/mount, mount/listmounts and mount/unmount,
// using a local temp directory as the remote.
func TestRc(t *testing.T) {
	// Disable tests under macOS and the CI since they are locking up
	if runtime.GOOS == "darwin" {
		testy.SkipUnreliable(t)
	}
	ctx := context.Background()
	configfile.Install()
	// Look up the rc calls registered by the mount packages imported above.
	mount := rc.Calls.Get("mount/mount")
	assert.NotNil(t, mount)
	unmount := rc.Calls.Get("mount/unmount")
	assert.NotNil(t, unmount)
	getMountTypes := rc.Calls.Get("mount/types")
	assert.NotNil(t, getMountTypes)

	// Source directory with a single known file to look for after mounting.
	localDir := t.TempDir()
	err := os.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
	require.NoError(t, err)

	mountPoint := t.TempDir()
	if runtime.GOOS == "windows" {
		// Windows requires the mount point not to exist
		require.NoError(t, os.RemoveAll(mountPoint))
	}

	out, err := getMountTypes.Fn(ctx, nil)
	require.NoError(t, err)
	var mountTypes []string
	err = out.GetStruct("mountTypes", &mountTypes)
	require.NoError(t, err)
	t.Logf("Mount types %v", mountTypes)

	// Missing/partial parameters must be rejected.
	t.Run("Errors", func(t *testing.T) {
		_, err := mount.Fn(ctx, rc.Params{})
		assert.Error(t, err)
		_, err = mount.Fn(ctx, rc.Params{"fs": "/tmp"})
		assert.Error(t, err)
		_, err = mount.Fn(ctx, rc.Params{"mountPoint": "/tmp"})
		assert.Error(t, err)
	})

	t.Run("Mount", func(t *testing.T) {
		if len(mountTypes) == 0 {
			t.Skip("Can't mount")
		}
		in := rc.Params{
			"fs":         localDir,
			"mountPoint": mountPoint,
			"vfsOpt": rc.Params{
				"FilePerms": 0400,
			},
		}

		// check file.txt is not there
		filePath := filepath.Join(mountPoint, "file.txt")
		_, err := os.Stat(filePath)
		require.Error(t, err)
		require.True(t, os.IsNotExist(err))

		// mount
		_, err = mount.Fn(ctx, in)
		if err != nil {
			t.Skipf("Mount failed - skipping test: %v", err)
		}

		// check file.txt is there now
		fi, err := os.Stat(filePath)
		require.NoError(t, err)
		assert.Equal(t, int64(5), fi.Size())
		if runtime.GOOS == "linux" {
			// FilePerms from vfsOpt above should be reflected in the mount
			assert.Equal(t, os.FileMode(0400), fi.Mode())
		}

		// check mount point list
		checkMountList := func() []mountlib.MountInfo {
			listCall := rc.Calls.Get("mount/listmounts")
			require.NotNil(t, listCall)
			listReply, err := listCall.Fn(ctx, rc.Params{})
			require.NoError(t, err)
			mountPointsReply, err := listReply.Get("mountPoints")
			require.NoError(t, err)
			mountPoints, ok := mountPointsReply.([]mountlib.MountInfo)
			require.True(t, ok)
			return mountPoints
		}
		mountPoints := checkMountList()
		require.Equal(t, 1, len(mountPoints))
		require.Equal(t, mountPoint, mountPoints[0].MountPoint)

		// FIXME the OS sometimes appears to be using the mount
		// immediately after it appears so wait a moment
		time.Sleep(100 * time.Millisecond)

		t.Run("Unmount", func(t *testing.T) {
			_, err := unmount.Fn(ctx, in)
			require.NoError(t, err)
			assert.Equal(t, 0, len(checkMountList()))
		})
	})
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mountlib/check_other.go
cmd/mountlib/check_other.go
//go:build !linux package mountlib // CheckMountEmpty checks if mountpoint folder is empty. // On non-Linux unixes we list directory to ensure that. func CheckMountEmpty(mountpoint string) error { return checkMountEmpty(mountpoint) } // CheckMountReady should check if mountpoint is mounted by rclone. // The check is implemented only for Linux so this does nothing. func CheckMountReady(mountpoint string) error { return nil } // CanCheckMountReady is set if CheckMountReady is functional var CanCheckMountReady = false
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mountlib/check_linux.go
cmd/mountlib/check_linux.go
//go:build linux

package mountlib

import (
	"fmt"
	"path/filepath"
	"strings"

	"github.com/moby/sys/mountinfo"
)

// CheckMountEmpty checks if folder is not already a mountpoint.
// On Linux we use the OS-specific /proc/self/mountinfo API so the check won't access the path.
// Directories marked as "mounted" by autofs are considered not mounted.
func CheckMountEmpty(mountpoint string) error {
	const msg = "directory already mounted, use --allow-non-empty to mount anyway: %s"

	mountpointAbs, err := filepath.Abs(mountpoint)
	if err != nil {
		return fmt.Errorf("cannot get absolute path: %s: %w", mountpoint, err)
	}

	infos, err := mountinfo.GetMounts(mountinfo.SingleEntryFilter(mountpointAbs))
	if err != nil {
		return fmt.Errorf("cannot get mounts: %w", err)
	}

	// Any non-autofs entry for this path means something real is mounted there.
	foundAutofs := false
	for _, info := range infos {
		if info.FSType != "autofs" {
			return fmt.Errorf(msg, mountpointAbs)
		}
		foundAutofs = true
	}
	// It isn't safe to list an autofs in the middle of mounting
	if foundAutofs {
		return nil
	}

	// No mount entries at all - fall back to listing the directory.
	return checkMountEmpty(mountpoint)
}

// singleEntryFilter looks for a specific entry.
//
// It may appear more than once and we return all of them if so.
// (Unlike mountinfo.SingleEntryFilter it never sets stop, so scanning
// continues past the first match.)
func singleEntryFilter(mp string) mountinfo.FilterFunc {
	return func(m *mountinfo.Info) (skip, stop bool) {
		return m.Mountpoint != mp, false
	}
}

// CheckMountReady checks whether mountpoint is mounted by rclone.
// Only mounts with type "rclone" or "fuse.rclone" count.
func CheckMountReady(mountpoint string) error {
	const msg = "mount not ready: %s"

	mountpointAbs, err := filepath.Abs(mountpoint)
	if err != nil {
		return fmt.Errorf("cannot get absolute path: %s: %w", mountpoint, err)
	}

	infos, err := mountinfo.GetMounts(singleEntryFilter(mountpointAbs))
	if err != nil {
		return fmt.Errorf("cannot get mounts: %w", err)
	}

	for _, info := range infos {
		// Matches both "rclone" and "fuse.rclone" filesystem types.
		if strings.Contains(info.FSType, "rclone") {
			return nil
		}
	}
	return fmt.Errorf(msg, mountpointAbs)
}

// CanCheckMountReady is set if CheckMountReady is functional
var CanCheckMountReady = true
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/authorize/authorize.go
cmd/authorize/authorize.go
// Package authorize provides the authorize command.
package authorize

import (
	"context"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/spf13/cobra"
)

var (
	noAutoBrowser bool   // --auth-no-open-browser: don't open the auth link in the default browser
	template      string // --template: path to a custom Go template for HTML responses
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser", "")
	flags.StringVarP(cmdFlags, &template, "template", "", "", "The path to a custom Go template for generating HTML responses", "")
}

// commandDefinition implements `rclone authorize` which runs the OAuth
// flow for a backend on a machine with a browser, on behalf of a
// headless rclone instance.
var commandDefinition = &cobra.Command{
	Use:   "authorize <backendname> [base64_json_blob | client_id client_secret]",
	Short: `Remote authorization.`,
	Long: `Remote authorization. Used to authorize a remote or headless rclone
from a machine with a browser. Use as instructed by rclone config.

See also the [remote setup documentation](/remote_setup).

The command requires 1-3 arguments:

- Name of a backend (e.g. "drive", "s3")
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service

Use --auth-no-open-browser to prevent rclone to open auth
link in default browser automatically.

Use --template to generate HTML output via a custom Go template. If a blank
string is provided as an argument to this flag, the default template is used.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.27",
	},
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(1, 3, command, args)
		return config.Authorize(context.Background(), args, noAutoBrowser, template)
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/authorize/authorize_test.go
cmd/authorize/authorize_test.go
package authorize

import (
	"bytes"
	"strings"
	"testing"

	"github.com/spf13/cobra"
)

// TestAuthorizeCommand checks the authorize command's usage string and
// verifies that the generated --help output reflects it.
func TestAuthorizeCommand(t *testing.T) {
	const wantUse = "authorize <backendname> [base64_json_blob | client_id client_secret]"

	// The Use string must document the expected arguments.
	if got := commandDefinition.Use; got != wantUse {
		t.Errorf("Command Use string doesn't match expected format: %s", got)
	}

	// Run "authorize --help" under a scratch root command and capture the output.
	var out bytes.Buffer
	root := &cobra.Command{}
	root.AddCommand(commandDefinition)
	root.SetOut(&out)
	root.SetArgs([]string{"authorize", "--help"})
	if err := root.Execute(); err != nil {
		t.Fatalf("Failed to execute help command: %v", err)
	}

	// The help text should echo the usage line back to the user.
	if !strings.Contains(out.String(), "authorize <backendname>") {
		t.Errorf("Help output doesn't contain correct usage information")
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cat/cat.go
cmd/cat/cat.go
// Package cat provides the cat command. package cat import ( "context" "io" "os" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) // Globals var ( head = int64(0) tail = int64(0) offset = int64(0) count = int64(-1) discard = false separator = string("") ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.Int64VarP(cmdFlags, &head, "head", "", head, "Only print the first N characters", "") flags.Int64VarP(cmdFlags, &tail, "tail", "", tail, "Only print the last N characters", "") flags.Int64VarP(cmdFlags, &offset, "offset", "", offset, "Start printing at offset N (or from end if -ve)", "") flags.Int64VarP(cmdFlags, &count, "count", "", count, "Only print N characters", "") flags.BoolVarP(cmdFlags, &discard, "discard", "", discard, "Discard the output instead of printing", "") flags.StringVarP(cmdFlags, &separator, "separator", "", separator, "Separator to use between objects when printing multiple files", "") } var commandDefinition = &cobra.Command{ Use: "cat remote:path", Short: `Concatenates any files and sends them to stdout.`, // Warning! "|" will be replaced by backticks below Long: strings.ReplaceAll(`Sends any files to standard output. You can use it like this to output a single file |||sh rclone cat remote:path/to/file ||| Or like this to output any file in dir or its subdirectories. |||sh rclone cat remote:path/to/dir ||| Or like this to output any .txt files in dir or its subdirectories. |||sh rclone --include "*.txt" cat remote:path/to/dir ||| Use the |--head| flag to print characters only at the start, |--tail| for the end and |--offset| and |--count| to print a section in the middle. Note that if offset is negative it will count from the end, so |--offset -1 --count 1| is equivalent to |--tail 1|. Use the |--separator| flag to print a separator value between files. 
Be sure to shell-escape special characters. For example, to print a newline between files, use: - bash: |||sh rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir ||| - powershell: |||powershell rclone --include "*.txt" --separator "|n" cat remote:path/to/dir |||`, "|", "`"), Annotations: map[string]string{ "versionIntroduced": "v1.33", "groups": "Filter,Listing", }, Run: func(command *cobra.Command, args []string) { usedOffset := offset != 0 || count >= 0 usedHead := head > 0 usedTail := tail > 0 if usedHead && usedTail || usedHead && usedOffset || usedTail && usedOffset { fs.Fatalf(nil, "Can only use one of --head, --tail or --offset with --count") } if head > 0 { offset = 0 count = head } if tail > 0 { offset = -tail count = -1 } cmd.CheckArgs(1, 1, command, args) fsrc := cmd.NewFsSrc(args) var w io.Writer = os.Stdout if discard { w = io.Discard } cmd.Run(false, false, command, func() error { return operations.Cat(context.Background(), fsrc, w, offset, count, []byte(separator)) }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/rmdirs/rmdirs.go
cmd/rmdirs/rmdirs.go
// Package rmdir provides the rmdirs command.
package rmdir

import (
	"context"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)

var (
	leaveRoot = false // --leave-root: don't remove the root directory even if it is empty
)

func init() {
	cmd.Root.AddCommand(rmdirsCmd)
	rmdirsCmd.Flags().BoolVarP(&leaveRoot, "leave-root", "", leaveRoot, "Do not remove root directory if empty")
}

// rmdirsCmd implements `rclone rmdirs` which recursively removes any
// empty directories found under the given path.
var rmdirsCmd = &cobra.Command{
	Use:   "rmdirs remote:path",
	Short: `Remove empty directories under the path.`,
	Long: `This recursively removes any empty directories (including directories
that only contain empty directories), that it finds under the path.
The root path itself will also be removed if it is empty, unless
you supply the ` + "`--leave-root`" + ` flag.

Use command [rmdir](/commands/rclone_rmdir/) to delete just the empty
directory given by path, not recurse.

This is useful for tidying up remotes that rclone has left a lot of
empty directories in. For example the [delete](/commands/rclone_delete/)
command will delete files but leave the directory structure (unless
used with option ` + "`--rmdirs`" + `).

This will delete ` + "`--checkers`" + ` directories concurrently so
if you have thousands of empty directories consider increasing this number.

To delete a path and any objects in it, use the
[purge](/commands/rclone_purge/) command.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.35",
		"groups":            "Important",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		fdst := cmd.NewFsDir(args)
		cmd.Run(true, false, command, func() error {
			// leaveRoot controls whether the root itself may be removed.
			return operations.Rmdirs(context.Background(), fdst, "", leaveRoot)
		})
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/rc/rc.go
cmd/rc/rc.go
// Package rc provides the rc command. package rc import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "net/http" "os" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fs/rc/jobs" "github.com/spf13/cobra" "github.com/spf13/pflag" ) var ( noOutput = false url = "http://localhost:5572/" unixSocket = "" jsonInput = "" authUser = "" authPass = "" loopback = false options []string arguments []string ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.BoolVarP(cmdFlags, &noOutput, "no-output", "", noOutput, "If set, don't output the JSON result", "") flags.StringVarP(cmdFlags, &url, "url", "", url, "URL to connect to rclone remote control", "") flags.StringVarP(cmdFlags, &unixSocket, "unix-socket", "", unixSocket, "Path to a unix domain socket to dial to, instead of opening a TCP connection directly", "") flags.StringVarP(cmdFlags, &jsonInput, "json", "", jsonInput, "Input JSON - use instead of key=value args", "") flags.StringVarP(cmdFlags, &authUser, "user", "", "", "Username to use to rclone remote control", "") flags.StringVarP(cmdFlags, &authPass, "pass", "", "", "Password to use to connect to rclone remote control", "") flags.BoolVarP(cmdFlags, &loopback, "loopback", "", false, "If set connect to this rclone instance not via HTTP", "") flags.StringArrayVarP(cmdFlags, &options, "opt", "o", options, "Option in the form name=value or name placed in the \"opt\" array", "") flags.StringArrayVarP(cmdFlags, &arguments, "arg", "a", arguments, "Argument placed in the \"arg\" array", "") } var commandDefinition = &cobra.Command{ Use: "rc commands parameter", Short: `Run a command against a running rclone.`, Long: strings.ReplaceAll(`This runs a command against a running rclone. Use the |--url| flag to specify an non default URL to connect on. 
This can be either a ":port" which is taken to mean <http://localhost:port> or a "host:port" which is taken to mean <http://host:port>. A username and password can be passed in with |--user| and |--pass|. Note that |--rc-addr|, |--rc-user|, |--rc-pass| will be read also for |--url|, |--user|, |--pass|. The |--unix-socket| flag can be used to connect over a unix socket like this |||sh # start server on /tmp/my.socket rclone rcd --rc-addr unix:///tmp/my.socket # Connect to it rclone rc --unix-socket /tmp/my.socket core/stats ||| Arguments should be passed in as parameter=value. The result will be returned as a JSON object by default. The |--json| parameter can be used to pass in a JSON blob as an input instead of key=value arguments. This is the only way of passing in more complicated values. The |-o|/|--opt| option can be used to set a key "opt" with key, value options in the form |-o key=value| or |-o key|. It can be repeated as many times as required. This is useful for rc commands which take the "opt" parameter which by convention is a dictionary of strings. |||text -o key=value -o key2 ||| Will place this in the "opt" value |||json {"key":"value", "key2","") ||| The |-a|/|--arg| option can be used to set strings in the "arg" value. It can be repeated as many times as required. This is useful for rc commands which take the "arg" parameter which by convention is a list of strings. |||text -a value -a value2 ||| Will place this in the "arg" value |||json ["value", "value2"] ||| Use |--loopback| to connect to the rclone instance running |rclone rc|. 
This is very useful for testing commands without having to run an rclone rc server, e.g.: |||sh rclone rc --loopback operations/about fs=/ ||| Use |rclone rc| to see a list of all possible commands.`, "|", "`"), Annotations: map[string]string{ "versionIntroduced": "v1.40", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(0, 1e9, command, args) cmd.Run(false, false, command, func() error { ctx := context.Background() parseFlags() if len(args) == 0 { return list(ctx) } return run(ctx, args) }) }, } // Parse the flags func parseFlags() { // set alternates from alternate flags setAlternateFlag("rc-addr", &url) setAlternateFlag("rc-user", &authUser) setAlternateFlag("rc-pass", &authPass) // If url is just :port then fix it up if strings.HasPrefix(url, ":") { url = "localhost" + url } // if url is just host:port add http:// if !strings.HasPrefix(url, "http:") && !strings.HasPrefix(url, "https:") { url = "http://" + url } // if url doesn't end with / add it if !strings.HasSuffix(url, "/") { url += "/" } } // ParseOptions parses a slice of options in the form key=value or key // into a map func ParseOptions(options []string) (opt map[string]string) { opt = make(map[string]string, len(options)) for _, option := range options { equals := strings.IndexRune(option, '=') key := option value := "" if equals >= 0 { key = option[:equals] value = option[equals+1:] } opt[key] = value } return opt } // If the user set flagName set the output to its value func setAlternateFlag(flagName string, output *string) { if rcFlag := pflag.Lookup(flagName); rcFlag != nil && rcFlag.Changed { *output = rcFlag.Value.String() if sliceValue, ok := rcFlag.Value.(pflag.SliceValue); ok { stringSlice := sliceValue.GetSlice() for _, value := range stringSlice { if value != "" { *output = value break } } } } } // Format an error and create a synthetic server return from it func errorf(status int, path string, format string, arg ...any) (out rc.Params, err error) { err = 
fmt.Errorf(format, arg...) out = make(rc.Params) out["error"] = err.Error() out["path"] = path out["status"] = status return out, err } // do a call from (path, in) to (out, err). // // if err is set, out may be a valid error return or it may be nil func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err error) { // If loopback set, short circuit HTTP request if loopback { call := rc.Calls.Get(path) if call == nil { return errorf(http.StatusBadRequest, path, "loopback: method %q not found", path) } _, out, err := jobs.NewJob(ctx, call.Fn, in) if err != nil { return errorf(http.StatusInternalServerError, path, "loopback: call failed: %w", err) } // Reshape (serialize then deserialize) the data so it is in the form expected err = rc.Reshape(&out, out) if err != nil { return errorf(http.StatusInternalServerError, path, "loopback: reshape failed: %w", err) } return out, nil } // Do HTTP request var client *http.Client if unixSocket == "" { client = fshttp.NewClient(ctx) } else { client = fshttp.NewClientWithUnixSocket(ctx, unixSocket) } url += path data, err := json.Marshal(in) if err != nil { return errorf(http.StatusBadRequest, path, "failed to encode request: %w", err) } req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(data)) if err != nil { return errorf(http.StatusInternalServerError, path, "failed to make request: %w", err) } req.Header.Set("Content-Type", "application/json") if authUser != "" || authPass != "" { req.SetBasicAuth(authUser, authPass) } resp, err := client.Do(req) if err != nil { return errorf(http.StatusServiceUnavailable, path, "connection failed: %w", err) } defer fs.CheckClose(resp.Body, &err) // Read response var body []byte var bodyString string body, err = io.ReadAll(resp.Body) bodyString = strings.TrimSpace(string(body)) if err != nil { return errorf(resp.StatusCode, "failed to read rc response: %s: %s", resp.Status, bodyString) } // Parse output out = make(rc.Params) err = 
json.NewDecoder(strings.NewReader(bodyString)).Decode(&out) if err != nil { return errorf(resp.StatusCode, path, "failed to decode response: %w: %s", err, bodyString) } // Check we got 200 OK if resp.StatusCode != http.StatusOK { err = fmt.Errorf("operation %q failed: %v", path, out["error"]) } return out, err } // Run the remote control command passed in func run(ctx context.Context, args []string) (err error) { path := strings.Trim(args[0], "/") // parse input in := make(rc.Params) params := args[1:] if jsonInput == "" { for _, param := range params { equals := strings.IndexRune(param, '=') if equals < 0 { return fmt.Errorf("no '=' found in parameter %q", param) } key, value := param[:equals], param[equals+1:] in[key] = value } } else { if len(params) > 0 { return errors.New("can't use --json and parameters together") } err = json.Unmarshal([]byte(jsonInput), &in) if err != nil { return fmt.Errorf("bad --json input: %w", err) } } if len(options) > 0 { in["opt"] = ParseOptions(options) } if len(arguments) > 0 { in["arg"] = arguments } // Do the call out, callErr := doCall(ctx, path, in) // Write the JSON blob to stdout if required if out != nil && !noOutput { err := rc.WriteJSON(os.Stdout, out) if err != nil { return fmt.Errorf("failed to output JSON: %w", err) } } return callErr } // List the available commands to stdout func list(ctx context.Context) error { list, err := doCall(ctx, "rc/list", nil) if err != nil { return fmt.Errorf("failed to list: %w", err) } commands, ok := list["commands"].([]any) if !ok { return errors.New("bad JSON") } for _, command := range commands { info, ok := command.(map[string]any) if !ok { return errors.New("bad JSON") } fmt.Printf("### %s: %s {#%s}\n\n", info["Path"], info["Title"], strings.ReplaceAll(info["Path"].(string), "/", "-")) fmt.Printf("%s\n\n", info["Help"]) if authRequired := info["AuthRequired"]; authRequired != nil { if authRequired.(bool) { fmt.Printf("**Authentication is required for this call.**\n\n") } } } return 
nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cachestats/cachestats_unsupported.go
cmd/cachestats/cachestats_unsupported.go
// Build for cache for unsupported platforms to stop go complaining // about "no buildable Go source files " //go:build plan9 || js // Package cachestats provides the cachestats command. package cachestats
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cachestats/cachestats.go
cmd/cachestats/cachestats.go
//go:build !plan9 && !js // Package cachestats provides the cachestats command. package cachestats import ( "encoding/json" "fmt" "github.com/rclone/rclone/backend/cache" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/spf13/cobra" ) func init() { cmd.Root.AddCommand(commandDefinition) } var commandDefinition = &cobra.Command{ Use: "cachestats source:", Short: `Print cache stats for a remote`, Long: `Print cache stats for a remote in JSON format `, Hidden: true, Annotations: map[string]string{ "versionIntroduced": "v1.39", "status": "Deprecated", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) fs.Logf(nil, `"rclone cachestats" is deprecated, use "rclone backend stats %s" instead`, args[0]) fsrc := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { var fsCache *cache.Fs fsCache, ok := fsrc.(*cache.Fs) if !ok { unwrap := fsrc.Features().UnWrap if unwrap != nil { fsCache, ok = unwrap().(*cache.Fs) } if !ok { return fmt.Errorf("%s: is not a cache remote", fsrc.Name()) } } m, err := fsCache.Stats() if err != nil { return err } raw, err := json.MarshalIndent(m, "", " ") if err != nil { return err } fmt.Printf("%s\n", string(raw)) return nil }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mkdir/mkdir.go
cmd/mkdir/mkdir.go
// Package mkdir provides the mkdir command. package mkdir import ( "context" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) func init() { cmd.Root.AddCommand(commandDefinition) } var commandDefinition = &cobra.Command{ Use: "mkdir remote:path", Short: `Make the path if it doesn't already exist.`, Annotations: map[string]string{ "groups": "Important", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) fdst := cmd.NewFsDir(args) if !fdst.Features().CanHaveEmptyDirectories && strings.Contains(fdst.Root(), "/") { fs.Logf(fdst, "Warning: running mkdir on a remote which can't have empty directories does nothing") } cmd.Run(true, false, command, func() error { return operations.Mkdir(context.Background(), fdst, "") }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/settier/settier.go
cmd/settier/settier.go
// Package settier provides the settier command. package settier import ( "context" "fmt" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) func init() { cmd.Root.AddCommand(commandDefinition) } var commandDefinition = &cobra.Command{ Use: "settier tier remote:path", Short: `Changes storage class/tier of objects in remote.`, Long: `Changes storage tier or class at remote if supported. Few cloud storage services provides different storage classes on objects, for example AWS S3 and Glacier, Azure Blob storage - Hot, Cool and Archive, Google Cloud Storage, Regional Storage, Nearline, Coldline etc. Note that, certain tier changes make objects not available to access immediately. For example tiering to archive in azure blob storage makes objects in frozen state, user can restore by setting tier to Hot/Cool, similarly S3 to Glacier makes object inaccessible.true You can use it to tier single object ` + "```console" + ` rclone settier Cool remote:path/file ` + "```" + ` Or use rclone filters to set tier on only specific files ` + "```console" + ` rclone --include "*.txt" settier Hot remote:path/dir ` + "```" + ` Or just provide remote directory and all files in directory will be tiered ` + "```console" + ` rclone settier tier remote:path/dir ` + "```", Annotations: map[string]string{ "versionIntroduced": "v1.44", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(2, 2, command, args) tier := args[0] input := args[1:] fsrc := cmd.NewFsSrc(input) cmd.Run(false, false, command, func() error { isSupported := fsrc.Features().SetTier if !isSupported { return fmt.Errorf("remote %s does not support settier", fsrc.Name()) } return operations.SetTier(context.Background(), fsrc, tier) }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/all/all.go
cmd/all/all.go
// Package all imports all the commands package all import ( // Active commands _ "github.com/rclone/rclone/cmd" _ "github.com/rclone/rclone/cmd/about" _ "github.com/rclone/rclone/cmd/archive" _ "github.com/rclone/rclone/cmd/archive/create" _ "github.com/rclone/rclone/cmd/archive/extract" _ "github.com/rclone/rclone/cmd/archive/list" _ "github.com/rclone/rclone/cmd/authorize" _ "github.com/rclone/rclone/cmd/backend" _ "github.com/rclone/rclone/cmd/bisync" _ "github.com/rclone/rclone/cmd/cachestats" _ "github.com/rclone/rclone/cmd/cat" _ "github.com/rclone/rclone/cmd/check" _ "github.com/rclone/rclone/cmd/checksum" _ "github.com/rclone/rclone/cmd/cleanup" _ "github.com/rclone/rclone/cmd/cmount" _ "github.com/rclone/rclone/cmd/config" _ "github.com/rclone/rclone/cmd/convmv" _ "github.com/rclone/rclone/cmd/copy" _ "github.com/rclone/rclone/cmd/copyto" _ "github.com/rclone/rclone/cmd/copyurl" _ "github.com/rclone/rclone/cmd/cryptcheck" _ "github.com/rclone/rclone/cmd/cryptdecode" _ "github.com/rclone/rclone/cmd/dedupe" _ "github.com/rclone/rclone/cmd/delete" _ "github.com/rclone/rclone/cmd/deletefile" _ "github.com/rclone/rclone/cmd/genautocomplete" _ "github.com/rclone/rclone/cmd/gendocs" _ "github.com/rclone/rclone/cmd/gitannex" _ "github.com/rclone/rclone/cmd/hashsum" _ "github.com/rclone/rclone/cmd/link" _ "github.com/rclone/rclone/cmd/listremotes" _ "github.com/rclone/rclone/cmd/ls" _ "github.com/rclone/rclone/cmd/lsd" _ "github.com/rclone/rclone/cmd/lsf" _ "github.com/rclone/rclone/cmd/lsjson" _ "github.com/rclone/rclone/cmd/lsl" _ "github.com/rclone/rclone/cmd/md5sum" _ "github.com/rclone/rclone/cmd/mkdir" _ "github.com/rclone/rclone/cmd/mount" _ "github.com/rclone/rclone/cmd/mount2" _ "github.com/rclone/rclone/cmd/move" _ "github.com/rclone/rclone/cmd/moveto" _ "github.com/rclone/rclone/cmd/ncdu" _ "github.com/rclone/rclone/cmd/nfsmount" _ "github.com/rclone/rclone/cmd/obscure" _ "github.com/rclone/rclone/cmd/purge" _ "github.com/rclone/rclone/cmd/rc" _ 
"github.com/rclone/rclone/cmd/rcat" _ "github.com/rclone/rclone/cmd/rcd" _ "github.com/rclone/rclone/cmd/reveal" _ "github.com/rclone/rclone/cmd/rmdir" _ "github.com/rclone/rclone/cmd/rmdirs" _ "github.com/rclone/rclone/cmd/selfupdate" _ "github.com/rclone/rclone/cmd/serve" _ "github.com/rclone/rclone/cmd/serve/dlna" _ "github.com/rclone/rclone/cmd/serve/docker" _ "github.com/rclone/rclone/cmd/serve/ftp" _ "github.com/rclone/rclone/cmd/serve/http" _ "github.com/rclone/rclone/cmd/serve/nfs" _ "github.com/rclone/rclone/cmd/serve/restic" _ "github.com/rclone/rclone/cmd/serve/s3" _ "github.com/rclone/rclone/cmd/serve/sftp" _ "github.com/rclone/rclone/cmd/serve/webdav" _ "github.com/rclone/rclone/cmd/settier" _ "github.com/rclone/rclone/cmd/sha1sum" _ "github.com/rclone/rclone/cmd/size" _ "github.com/rclone/rclone/cmd/sync" _ "github.com/rclone/rclone/cmd/test" _ "github.com/rclone/rclone/cmd/test/changenotify" _ "github.com/rclone/rclone/cmd/test/histogram" _ "github.com/rclone/rclone/cmd/test/info" _ "github.com/rclone/rclone/cmd/test/makefiles" _ "github.com/rclone/rclone/cmd/test/memory" _ "github.com/rclone/rclone/cmd/touch" _ "github.com/rclone/rclone/cmd/tree" _ "github.com/rclone/rclone/cmd/version" )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/sha1sum/sha1sum.go
cmd/sha1sum/sha1sum.go
// Package sha1sum provides the sha1sum command. package sha1sum import ( "context" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/hashsum" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() hashsum.AddHashsumFlags(cmdFlags) } var commandDefinition = &cobra.Command{ Use: "sha1sum remote:path", Short: `Produces an sha1sum file for all the objects in the path.`, Long: `Produces an sha1sum file for all the objects in the path. This is in the same format as the standard sha1sum tool produces. By default, the hash is requested from the remote. If SHA-1 is not supported by the remote, no hash will be returned. With the download flag, the file will be downloaded from the remote and hashed locally enabling SHA-1 for any remote. For other algorithms, see the [hashsum](/commands/rclone_hashsum/) command. Running ` + "`rclone sha1sum remote:path`" + ` is equivalent to running ` + "`rclone hashsum SHA1 remote:path`" + `. This command can also hash data received on standard input (stdin), by not passing a remote:path, or by passing a hyphen as remote:path when there is data to read (if not, the hyphen will be treated literally, as a relative path). 
This command can also hash data received on STDIN, if not passing a remote:path.`, Annotations: map[string]string{ "versionIntroduced": "v1.27", "groups": "Filter,Listing", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(0, 1, command, args) if found, err := hashsum.CreateFromStdinArg(hash.SHA1, args, 0); found { return err } fsrc := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { if hashsum.ChecksumFile != "" { fsum, sumFile := cmd.NewFsFile(hashsum.ChecksumFile) return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, hash.SHA1, nil, hashsum.DownloadFlag) } if hashsum.HashsumOutfile == "" { return operations.HashLister(context.Background(), hash.SHA1, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, nil) } output, close, err := hashsum.GetHashsumOutput(hashsum.HashsumOutfile) if err != nil { return err } defer close() return operations.HashLister(context.Background(), hash.SHA1, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, output) }) return nil }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/moveto/moveto.go
cmd/moveto/moveto.go
// Package moveto provides the moveto command. package moveto import ( "context" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations/operationsflags" "github.com/rclone/rclone/fs/sync" "github.com/spf13/cobra" ) var ( loggerOpt = operations.LoggerOpt{} loggerFlagsOpt = operationsflags.AddLoggerFlagsOptions{} ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() operationsflags.AddLoggerFlags(cmdFlags, &loggerOpt, &loggerFlagsOpt) loggerOpt.LoggerFn = operations.NewDefaultLoggerFn(&loggerOpt) } var commandDefinition = &cobra.Command{ Use: "moveto source:path dest:path", Short: `Move file or directory from source to dest.`, Long: `If source:path is a file or directory then it moves it to a file or directory named dest:path. This can be used to rename files or upload single files to other than their existing name. If the source is a directory then it acts exactly like the [move](/commands/rclone_move/) command. So ` + "```console" + ` rclone moveto src dst ` + "```" + ` where src and dst are rclone paths, either remote:path or /path/to/local or C:\windows\path\if\on\windows. This will: ` + "```text" + ` if src is file move it to dst, overwriting an existing file if it exists if src is directory move it to dst, overwriting existing files if they exist see move command for full details ` + "```" + ` This doesn't transfer files that are identical on src and dst, testing by size and modification time or MD5SUM. src will be deleted on successful transfer. **Important**: Since this can cause data loss, test first with the ` + "`--dry-run` or the `--interactive`/`-i`" + ` flag. **Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics. 
` + operationsflags.Help(), Annotations: map[string]string{ "versionIntroduced": "v1.35", "groups": "Filter,Listing,Important,Copy", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(2, 2, command, args) fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args) cmd.Run(true, true, command, func() error { ctx := context.Background() close, err := operationsflags.ConfigureLoggers(ctx, fdst, command, &loggerOpt, loggerFlagsOpt) if err != nil { return err } defer close() if loggerFlagsOpt.AnySet() { ctx = operations.WithSyncLogger(ctx, loggerOpt) } if srcFileName == "" { return sync.MoveDir(ctx, fdst, fsrc, false, false) } return operations.MoveFile(ctx, fdst, fsrc, dstFileName, srcFileName) }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/config/config.go
cmd/config/config.go
// Package config provides the config command. package config import ( "context" "encoding/json" "errors" "fmt" "os" "sort" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/rc" "github.com/spf13/cobra" "github.com/spf13/pflag" ) func init() { cmd.Root.AddCommand(configCommand) configCommand.AddCommand(configEditCommand) configCommand.AddCommand(configFileCommand) configCommand.AddCommand(configTouchCommand) configCommand.AddCommand(configPathsCommand) configCommand.AddCommand(configShowCommand) configCommand.AddCommand(configRedactedCommand) configCommand.AddCommand(configDumpCommand) configCommand.AddCommand(configProvidersCommand) configCommand.AddCommand(configCreateCommand) configCommand.AddCommand(configUpdateCommand) configCommand.AddCommand(configDeleteCommand) configCommand.AddCommand(configPasswordCommand) configCommand.AddCommand(configReconnectCommand) configCommand.AddCommand(configDisconnectCommand) configCommand.AddCommand(configUserInfoCommand) configCommand.AddCommand(configEncryptionCommand) configCommand.AddCommand(configStringCommand) } var configCommand = &cobra.Command{ Use: "config", Short: `Enter an interactive configuration session.`, Long: `Enter an interactive configuration session where you can setup new remotes and manage existing ones. 
You may also set or remove a password to protect your configuration.`, Annotations: map[string]string{ "versionIntroduced": "v1.39", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(0, 0, command, args) return config.EditConfig(context.Background()) }, } var configEditCommand = &cobra.Command{ Use: "edit", Short: configCommand.Short, Long: configCommand.Long, Annotations: map[string]string{ "versionIntroduced": "v1.39", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(0, 0, command, args) return config.EditConfig(context.Background()) }, } var configFileCommand = &cobra.Command{ Use: "file", Short: `Show path of configuration file in use.`, Annotations: map[string]string{ "versionIntroduced": "v1.38", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(0, 0, command, args) config.ShowConfigLocation() }, } var configTouchCommand = &cobra.Command{ Use: "touch", Short: `Ensure configuration file exists.`, Annotations: map[string]string{ "versionIntroduced": "v1.56", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(0, 0, command, args) config.SaveConfig() }, } var configPathsCommand = &cobra.Command{ Use: "paths", Short: `Show paths used for configuration, cache, temp etc.`, Annotations: map[string]string{ "versionIntroduced": "v1.57", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(0, 0, command, args) fmt.Printf("Config file: %s\n", config.GetConfigPath()) fmt.Printf("Cache dir: %s\n", config.GetCacheDir()) fmt.Printf("Temp dir: %s\n", os.TempDir()) }, } var configShowCommand = &cobra.Command{ Use: "show [<remote>]", Short: `Print (decrypted) config file, or the config for a single remote.`, Annotations: map[string]string{ "versionIntroduced": "v1.38", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(0, 1, command, args) if len(args) == 0 { config.ShowConfig() } else { name := strings.TrimRight(args[0], ":") config.ShowRemote(name) } }, } var 
configRedactedCommand = &cobra.Command{ Use: "redacted [<remote>]", Short: `Print redacted (decrypted) config file, or the redacted config for a single remote.`, Long: `This prints a redacted copy of the config file, either the whole config file or for a given remote. The config file will be redacted by replacing all passwords and other sensitive info with XXX. This makes the config file suitable for posting online for support. It should be double checked before posting as the redaction may not be perfect.`, Annotations: map[string]string{ "versionIntroduced": "v1.64", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(0, 1, command, args) if len(args) == 0 { config.ShowRedactedConfig() } else { name := strings.TrimRight(args[0], ":") config.ShowRedactedRemote(name) } fmt.Println("### Double check the config for sensitive info before posting publicly") }, } var configDumpCommand = &cobra.Command{ Use: "dump", Short: `Dump the config file as JSON.`, Annotations: map[string]string{ "versionIntroduced": "v1.39", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(0, 0, command, args) return config.Dump() }, } var configProvidersCommand = &cobra.Command{ Use: "providers", Short: `List in JSON format all the providers and options.`, Annotations: map[string]string{ "versionIntroduced": "v1.39", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(0, 0, command, args) return config.JSONListProviders() }, } var updateRemoteOpt config.UpdateRemoteOpt var configPasswordHelp = strings.ReplaceAll( `Note that if the config process would normally ask a question the default is taken (unless |--non-interactive| is used). Each time that happens rclone will print or DEBUG a message saying how to affect the value taken. If any of the parameters passed is a password field, then rclone will automatically obscure them if they aren't already obscured before putting them in the config file. 
**NB** If the password parameter is 22 characters or longer and consists only of base64 characters then rclone can get confused about whether the password is already obscured or not and put unobscured passwords into the config file. If you want to be 100% certain that the passwords get obscured then use the |--obscure| flag, or if you are 100% certain you are already passing obscured passwords then use |--no-obscure|. You can also set obscured passwords using the |rclone config password| command. The flag |--non-interactive| is for use by applications that wish to configure rclone themselves, rather than using rclone's text based configuration questions. If this flag is set, and rclone needs to ask the user a question, a JSON blob will be returned with the question in it. This will look something like (some irrelevant detail removed): |||json { "State": "*oauth-islocal,teamdrive,,", "Option": { "Name": "config_is_local", "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n", "Default": true, "Examples": [ { "Value": "true", "Help": "Yes" }, { "Value": "false", "Help": "No" } ], "Required": false, "IsPassword": false, "Type": "bool", "Exclusive": true, }, "Error": "", } ||| The format of |Option| is the same as returned by |rclone config providers|. The question should be asked to the user and returned to rclone as the |--result| option along with the |--state| parameter. The keys of |Option| are used as follows: - |Name| - name of variable - show to user - |Help| - help text. Hard wrapped at 80 chars. Any URLs should be clicky. - |Default| - default value - return this if the user just wants the default. 
- |Examples| - the user should be able to choose one of these - |Required| - the value should be non-empty - |IsPassword| - the value is a password and should be edited as such - |Type| - type of value, eg |bool|, |string|, |int| and others - |Exclusive| - if set no free-form entry allowed only the |Examples| - Irrelevant keys |Provider|, |ShortOpt|, |Hide|, |NoPrefix|, |Advanced| If |Error| is set then it should be shown to the user at the same time as the question. |||sh rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true" ||| Note that when using |--continue| all passwords should be passed in the clear (not obscured). Any default config values should be passed in with each invocation of |--continue|. At the end of the non interactive process, rclone will return a result with |State| as empty string. If |--all| is passed then rclone will ask all the config questions, not just the post config questions. Any parameters are used as defaults for questions as usual. Note that |bin/config.py| in the rclone source implements this protocol as a readable demonstration.`, "|", "`") var configCreateCommand = &cobra.Command{ Use: "create name type [key value]*", Short: `Create a new remote with name, type and options.`, Long: strings.ReplaceAll(`Create a new remote of |name| with |type| and options. The options should be passed in pairs of |key| |value| or as |key=value|. 
For example, to make a swift remote of name myremote using auto config you would do: |||sh rclone config create myremote swift env_auth true rclone config create myremote swift env_auth=true ||| So for example if you wanted to configure a Google Drive remote but using remote authorization you would do this: |||sh rclone config create mydrive drive config_is_local=false ||| `, "|", "`") + configPasswordHelp, Annotations: map[string]string{ "versionIntroduced": "v1.39", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(2, 256, command, args) in, err := argsToMap(args[2:]) if err != nil { return err } return doConfig(args[0], in, func(opts config.UpdateRemoteOpt) (*fs.ConfigOut, error) { return config.CreateRemote(context.Background(), args[0], args[1], in, opts) }) }, } func doConfig(name string, in rc.Params, do func(config.UpdateRemoteOpt) (*fs.ConfigOut, error)) error { out, err := do(updateRemoteOpt) if err != nil { return err } if updateRemoteOpt.NoOutput { return nil } if !(updateRemoteOpt.NonInteractive || updateRemoteOpt.Continue) { config.ShowRemote(name) } else { if out == nil { out = &fs.ConfigOut{} } outBytes, err := json.MarshalIndent(out, "", "\t") if err != nil { return err } _, _ = os.Stdout.Write(outBytes) _, _ = os.Stdout.WriteString("\n") } return nil } func init() { for _, cmdFlags := range []*pflag.FlagSet{configCreateCommand.Flags(), configUpdateCommand.Flags()} { flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured", "Config") flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured", "Config") flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoOutput, "no-output", "", false, "Don't provide any output", "Config") flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions", "Config") flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, 
"continue", "", false, "Continue the configuration process with an answer", "Config") flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions", "Config") flags.StringVarP(cmdFlags, &updateRemoteOpt.State, "state", "", "", "State - use with --continue", "Config") flags.StringVarP(cmdFlags, &updateRemoteOpt.Result, "result", "", "", "Result - use with --continue", "Config") } } var configUpdateCommand = &cobra.Command{ Use: "update name [key value]+", Short: `Update options in an existing remote.`, Long: strings.ReplaceAll(`Update an existing remote's options. The options should be passed in pairs of |key| |value| or as |key=value|. For example, to update the env_auth field of a remote of name myremote you would do: |||sh rclone config update myremote env_auth true rclone config update myremote env_auth=true ||| If the remote uses OAuth the token will be updated, if you don't require this add an extra parameter thus: |||sh rclone config update myremote env_auth=true config_refresh_token=false ||| `, "|", "`") + configPasswordHelp, Annotations: map[string]string{ "versionIntroduced": "v1.39", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(1, 256, command, args) in, err := argsToMap(args[1:]) if err != nil { return err } return doConfig(args[0], in, func(opts config.UpdateRemoteOpt) (*fs.ConfigOut, error) { return config.UpdateRemote(context.Background(), args[0], in, opts) }) }, } var configDeleteCommand = &cobra.Command{ Use: "delete name", Short: "Delete an existing remote.", Annotations: map[string]string{ "versionIntroduced": "v1.39", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) config.DeleteRemote(args[0]) }, } var configPasswordCommand = &cobra.Command{ Use: "password name [key value]+", Short: `Update password in an existing remote.`, Long: strings.ReplaceAll(`Update an existing remote's password. 
The password should be passed in pairs of |key| |password| or as |key=password|. The |password| should be passed in in clear (unobscured). For example, to set password of a remote of name myremote you would do: |||sh rclone config password myremote fieldname mypassword rclone config password myremote fieldname=mypassword ||| This command is obsolete now that "config update" and "config create" both support obscuring passwords directly.`, "|", "`"), Annotations: map[string]string{ "versionIntroduced": "v1.39", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(1, 256, command, args) in, err := argsToMap(args[1:]) if err != nil { return err } err = config.PasswordRemote(context.Background(), args[0], in) if err != nil { return err } config.ShowRemote(args[0]) return nil }, } // This takes a list of arguments in key value key value form, or // key=value key=value form and converts it into a map func argsToMap(args []string) (out rc.Params, err error) { out = rc.Params{} for i := 0; i < len(args); i++ { key := args[i] equals := strings.IndexRune(key, '=') var value string if equals >= 0 { key, value = key[:equals], key[equals+1:] } else { i++ if i >= len(args) { return nil, errors.New("found key without value") } value = args[i] } out[key] = value } return out, nil } var configReconnectCommand = &cobra.Command{ Use: "reconnect remote:", Short: `Re-authenticates user with remote.`, Long: `This reconnects remote: passed in to the cloud storage system. To disconnect the remote use "rclone config disconnect". 
This normally means going through the interactive oauth flow again.`, RunE: func(command *cobra.Command, args []string) error { ctx := context.Background() cmd.CheckArgs(1, 1, command, args) fsInfo, configName, _, m, err := fs.ConfigFs(args[0]) if err != nil { return err } return config.PostConfig(ctx, configName, m, fsInfo) }, } var configDisconnectCommand = &cobra.Command{ Use: "disconnect remote:", Short: `Disconnects user from remote`, Long: `This disconnects the remote: passed in to the cloud storage system. This normally means revoking the oauth token. To reconnect use "rclone config reconnect".`, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(1, 1, command, args) f := cmd.NewFsSrc(args) doDisconnect := f.Features().Disconnect if doDisconnect == nil { return fmt.Errorf("%v doesn't support Disconnect", f) } err := doDisconnect(context.Background()) if err != nil { return fmt.Errorf("disconnect call failed: %w", err) } return nil }, } var ( jsonOutput bool ) func init() { flags.BoolVarP(configUserInfoCommand.Flags(), &jsonOutput, "json", "", false, "Format output as JSON", "") } var configUserInfoCommand = &cobra.Command{ Use: "userinfo remote:", Short: `Prints info about logged in user of remote.`, Long: `This prints the details of the person logged in to the cloud storage system.`, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(1, 1, command, args) f := cmd.NewFsSrc(args) doUserInfo := f.Features().UserInfo if doUserInfo == nil { return fmt.Errorf("%v doesn't support UserInfo", f) } u, err := doUserInfo(context.Background()) if err != nil { return fmt.Errorf("UserInfo call failed: %w", err) } if jsonOutput { out := json.NewEncoder(os.Stdout) out.SetIndent("", "\t") return out.Encode(u) } var keys []string var maxKeyLen int for key := range u { keys = append(keys, key) if len(key) > maxKeyLen { maxKeyLen = len(key) } } sort.Strings(keys) for _, key := range keys { fmt.Printf("%*s: %s\n", maxKeyLen, key, u[key]) 
} return nil }, } func init() { configEncryptionCommand.AddCommand(configEncryptionSetCommand) configEncryptionCommand.AddCommand(configEncryptionRemoveCommand) configEncryptionCommand.AddCommand(configEncryptionCheckCommand) } var configEncryptionCommand = &cobra.Command{ Use: "encryption", Short: `set, remove and check the encryption for the config file`, Long: `This command sets, clears and checks the encryption for the config file using the subcommands below.`, } var configEncryptionSetCommand = &cobra.Command{ Use: "set", Short: `Set or change the config file encryption password`, Long: strings.ReplaceAll(`This command sets or changes the config file encryption password. If there was no config password set then it sets a new one, otherwise it changes the existing config password. Note that if you are changing an encryption password using |--password-command| then this will be called once to decrypt the config using the old password and then again to read the new password to re-encrypt the config. When |--password-command| is called to change the password then the environment variable |RCLONE_PASSWORD_CHANGE=1| will be set. So if changing passwords programmatically you can use the environment variable to distinguish which password you must supply. Alternatively you can remove the password first (with |rclone config encryption remove|), then set it again with this command which may be easier if you don't mind the unencrypted config file being on the disk briefly.`, "|", "`"), RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(0, 0, command, args) config.LoadedData() config.ChangeConfigPasswordAndSave() return nil }, } var configEncryptionRemoveCommand = &cobra.Command{ Use: "remove", Short: `Remove the config file encryption password`, Long: strings.ReplaceAll(`Remove the config file encryption password This removes the config file encryption, returning it to un-encrypted. 
If |--password-command| is in use, this will be called to supply the old config password. If the config was not encrypted then no error will be returned and this command will do nothing.`, "|", "`"), RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(0, 0, command, args) config.LoadedData() config.RemoveConfigPasswordAndSave() return nil }, } var configEncryptionCheckCommand = &cobra.Command{ Use: "check", Short: `Check that the config file is encrypted`, Long: strings.ReplaceAll(`This checks the config file is encrypted and that you can decrypt it. It will attempt to decrypt the config using the password you supply. If decryption fails it will return a non-zero exit code if using |--password-command|, otherwise it will prompt again for the password. If the config file is not encrypted it will return a non zero exit code.`, "|", "`"), RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(0, 0, command, args) config.LoadedData() if !config.IsEncrypted() { return errors.New("config file is NOT encrypted") } return nil }, } var configStringCommand = &cobra.Command{ Use: "string <remote>", Short: `Print connection string for a single remote.`, Long: strings.ReplaceAll(`Print a connection string for a single remote. The [connection strings](/docs/#connection-strings) can be used wherever a remote is needed and can be more convenient than using the config file, especially if using the RC API. Backend parameters may be provided to the command also. Example: |||sh $ rclone config string s3:rclone --s3-no-check-bucket :s3,access_key_id=XXX,no_check_bucket,provider=AWS,region=eu-west-2,secret_access_key=YYY:rclone ||| **NB** the strings are not quoted for use in shells (eg bash, powershell, windows cmd). Most will work if enclosed in "double quotes", however connection strings that contain double quotes will require further quoting which is very shell dependent. 
`, "|", "`"), Annotations: map[string]string{ "versionIntroduced": "v1.72", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(1, 1, command, args) remote := args[0] fsInfo, _, fsPath, m, err := fs.ConfigFs(remote) if err != nil { return err } // Find the overridden options and construct the string overridden := fsInfo.Options.NonDefault(m) var out strings.Builder out.WriteRune(':') out.WriteString(fsInfo.Name) config := overridden.Human() if config != "" { out.WriteRune(',') out.WriteString(config) } out.WriteRune(':') out.WriteString(fsPath) fmt.Println(out.String()) return nil }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/config/config_test.go
cmd/config/config_test.go
package config import ( "fmt" "testing" "github.com/rclone/rclone/fs/rc" "github.com/stretchr/testify/assert" ) func TestArgsToMap(t *testing.T) { for _, test := range []struct { args []string want rc.Params wantErr bool }{ { args: []string{}, want: rc.Params{}, }, { args: []string{"hello", "42"}, want: rc.Params{"hello": "42"}, }, { args: []string{"hello", "42", "bye", "43"}, want: rc.Params{"hello": "42", "bye": "43"}, }, { args: []string{"hello=42", "bye", "43"}, want: rc.Params{"hello": "42", "bye": "43"}, }, { args: []string{"hello", "42", "bye=43"}, want: rc.Params{"hello": "42", "bye": "43"}, }, { args: []string{"hello=42", "bye=43"}, want: rc.Params{"hello": "42", "bye": "43"}, }, { args: []string{"hello", "42", "bye", "43", "unused"}, wantErr: true, }, { args: []string{"hello=42", "bye=43", "unused"}, wantErr: true, }, } { what := fmt.Sprintf("args = %#v", test.args) got, err := argsToMap(test.args) if test.wantErr { assert.Error(t, err, what) } else { assert.NoError(t, err, what) assert.Equal(t, test.want, got, what) } } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cmount/mountpoint_windows.go
cmd/cmount/mountpoint_windows.go
//go:build cmount && windows package cmount import ( "errors" "fmt" "os" "path/filepath" "regexp" "github.com/rclone/rclone/cmd/mountlib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/lib/file" ) var isDriveRegex = regexp.MustCompile(`^[a-zA-Z]\:$`) var isDriveRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\$`) var isDriveOrRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\?$`) var isNetworkSharePathRegex = regexp.MustCompile(`^\\\\[^\\\?]+\\[^\\]`) // isNetworkSharePath returns true if the given string is a valid network share path, // in the basic UNC format "\\Server\Share\Path", where the first two path components // are required ("\\Server\Share", which represents the volume). // Extended-length UNC format "\\?\UNC\Server\Share\Path" is not considered, as it is // not supported by cgofuse/winfsp, so returns false for any paths with prefix "\\?\". // Note: There is a UNCPath function in lib/file, but it refers to any extended-length // paths using prefix "\\?\", and not necessarily network resource UNC paths. func isNetworkSharePath(l string) bool { return isNetworkSharePathRegex.MatchString(l) } // isDrive returns true if given string is a drive letter followed by the volume separator, e.g. "X:". // This is the format supported by cgofuse/winfsp for mounting as drive. // Extended-length format "\\?\X:" is not considered, as it is not supported by cgofuse/winfsp. func isDrive(l string) bool { return isDriveRegex.MatchString(l) } // isDriveRootPath returns true if given string is a drive letter followed by the volume separator, // as well as a path separator, e.g. "X:\". This is a format often used instead of the format without the // trailing path separator to denote a drive or volume, in addition to representing the drive's root directory. // This format is not accepted by cgofuse/winfsp for mounting as drive, but can easily be by trimming off // the path separator. Extended-length format "\\?\X:\" is not considered. 
func isDriveRootPath(l string) bool { return isDriveRootPathRegex.MatchString(l) } // isDriveOrRootPath returns true if given string is a drive letter followed by the volume separator, // and optionally a path separator. See isDrive and isDriveRootPath functions. func isDriveOrRootPath(l string) bool { return isDriveOrRootPathRegex.MatchString(l) } // isDefaultPath returns true if given string is a special keyword used to trigger default mount. func isDefaultPath(l string) bool { return l == "" || l == "*" } // getUnusedDrive find unused drive letter and returns string with drive letter followed by volume separator. func getUnusedDrive() (string, error) { driveLetter := file.FindUnusedDriveLetter() if driveLetter == 0 { return "", errors.New("could not find unused drive letter") } mountpoint := string(driveLetter) + ":" // Drive letter with volume separator only, no trailing backslash, which is what cgofuse/winfsp expects fs.Logf(nil, "Assigning drive letter %q", mountpoint) return mountpoint, nil } // handleDefaultMountpath handles the case where mount path is not set, or set to a special keyword. // This will automatically pick an unused drive letter to use as mountpoint. func handleDefaultMountpath() (string, error) { return getUnusedDrive() } // handleNetworkShareMountpath handles the case where mount path is a network share path. // Sets volume name option and returns a mountpoint string. func handleNetworkShareMountpath(mountpath string, opt *mountlib.Options) (string, error) { // Assuming mount path is a valid network share path (UNC format, "\\Server\Share"). // Always mount as network drive, regardless of the NetworkMode option. // Find an unused drive letter to use as mountpoint, the supplied path can // be used as volume prefix (network share path) instead of mountpoint. 
if !opt.NetworkMode { fs.Debugf(nil, "Forcing --network-mode because mountpoint path is network share UNC format") opt.NetworkMode = true } mountpoint, err := getUnusedDrive() if err != nil { return "", err } return mountpoint, nil } // handleLocalMountpath handles the case where mount path is a local file system path. func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (string, error) { // Assuming path is drive letter or directory path, not network share (UNC) path. // If drive letter: Must be given as a single character followed by ":" and nothing else. // Else, assume directory path: Directory must not exist, but its parent must. if _, err := os.Stat(mountpath); err == nil { return "", errors.New("mountpoint path already exists: " + mountpath) } else if !os.IsNotExist(err) { return "", fmt.Errorf("failed to retrieve mountpoint path information: %w", err) } if isDriveRootPath(mountpath) { // Assume intention with "X:\" was "X:" mountpath = mountpath[:len(mountpath)-1] // WinFsp needs drive mountpoints without trailing path separator } if !isDrive(mountpath) { // Assuming directory path, since it is not a pure drive letter string such as "X:". // Drive letter string can be used as is, since we have already checked it does not exist, // but directory path needs more checks. 
if opt.NetworkMode { fs.Errorf(nil, "Ignoring --network-mode as it is not supported with directory mountpoint") opt.NetworkMode = false } var err error if mountpath, err = filepath.Abs(mountpath); err != nil { // Ensures parent is found but also more informative log messages return "", fmt.Errorf("mountpoint path is not valid: %s: %w", mountpath, err) } parent := filepath.Join(mountpath, "..") if _, err = os.Stat(parent); err != nil { if os.IsNotExist(err) { return "", errors.New("parent of mountpoint directory does not exist: " + parent) } return "", fmt.Errorf("failed to retrieve mountpoint directory parent information: %w", err) } if err = mountlib.CheckOverlap(f, mountpath); err != nil { return "", err } } return mountpath, nil } // handleVolumeName handles the volume name option. func handleVolumeName(opt *mountlib.Options, volumeName string) { // If volumeName parameter is set, then just set that into options replacing any existing value. // Else, ensure the volume name option is a valid network share UNC path if network mode, // and ensure network mode if configured volume name is already UNC path. 
if volumeName != "" { opt.VolumeName = volumeName } else if opt.VolumeName != "" { // Should always be true due to code in mountlib caller // Use value of given volume name option, but check if it is disk volume name or network volume prefix if isNetworkSharePath(opt.VolumeName) { // Specified volume name is network share UNC path, assume network mode and use it as volume prefix opt.VolumeName = opt.VolumeName[1:] // WinFsp requires volume prefix as UNC-like path but with only a single backslash if !opt.NetworkMode { // Specified volume name is network share UNC path, force network mode and use it as volume prefix fs.Debugf(nil, "Forcing network mode due to network share (UNC) volume name") opt.NetworkMode = true } } else if opt.NetworkMode { // Plain volume name treated as share name in network mode, append to hard coded "\\server" prefix to get full volume prefix. opt.VolumeName = "\\server\\" + opt.VolumeName } } else if opt.NetworkMode { // Hard coded default opt.VolumeName = "\\server\\share" } } // getMountpoint handles mounting details on Windows, // where disk and network based file systems are treated different. func getMountpoint(f fs.Fs, mountpath string, opt *mountlib.Options) (mountpoint string, err error) { // Inform about some options not relevant in this mode if opt.AllowNonEmpty { fs.Logf(nil, "--allow-non-empty flag does nothing on Windows") } if opt.AllowRoot { fs.Logf(nil, "--allow-root flag does nothing on Windows") } if opt.AllowOther { fs.Logf(nil, "--allow-other flag does nothing on Windows") } // Handle mountpath var volumeName string if isDefaultPath(mountpath) { // Mount path indicates defaults, which will automatically pick an unused drive letter. mountpoint, err = handleDefaultMountpath() } else if isNetworkSharePath(mountpath) { // Mount path is a valid network share path (UNC format, "\\Server\Share" prefix). 
mountpoint, err = handleNetworkShareMountpath(mountpath, opt) // In this case the volume name is taken from the mount path, will replace any existing volume name option. volumeName = mountpath[1:] // WinFsp requires volume prefix as UNC-like path but with only a single backslash } else { // Mount path is drive letter or directory path. mountpoint, err = handleLocalMountpath(f, mountpath, opt) } // Handle volume name handleVolumeName(opt, volumeName) // Done, return mountpoint to be used, together with updated mount options. if opt.NetworkMode { fs.Debugf(nil, "Network mode mounting is enabled") } else { fs.Debugf(nil, "Network mode mounting is disabled") } return }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cmount/mount_brew.go
cmd/cmount/mount_brew.go
//go:build brew && darwin // Package cmount implements a FUSE mounting system for rclone remotes. // // Build for macos with the brew tag to handle the absence // of fuse and print an appropriate error message package cmount import ( "errors" "github.com/rclone/rclone/cmd/mountlib" "github.com/rclone/rclone/vfs" ) func init() { name := "mount" cmd := mountlib.NewMountCommand(name, false, mount) cmd.Aliases = append(cmd.Aliases, "cmount") mountlib.AddRc("cmount", mount) } // mount the file system // // The mount point will be ready when this returns. // // returns an error, and an error channel for the serve process to // report an error when fusermount is called. func mount(_ *vfs.VFS, _ string, _ *mountlib.Options) (<-chan error, func() error, error) { return nil, nil, errors.New("rclone mount is not supported on MacOS when rclone is installed via Homebrew. " + "Please install the rclone binaries available at https://rclone.org/downloads/ " + "instead if you want to use the rclone mount command") }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cmount/arch.go
cmd/cmount/arch.go
package cmount // ProvidedBy returns true if the rclone build for the given OS // provides support for lib/cgo-fuse func ProvidedBy(osName string) bool { return osName == "windows" || osName == "darwin" }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cmount/mount_test.go
cmd/cmount/mount_test.go
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows) && (!race || !windows) // Package cmount implements a FUSE mounting system for rclone remotes. // // FIXME this doesn't work with the race detector under Windows either // hanging or producing lots of differences. package cmount import ( "runtime" "testing" "github.com/rclone/rclone/fstest/testy" "github.com/rclone/rclone/vfs/vfscommon" "github.com/rclone/rclone/vfs/vfstest" ) func TestMount(t *testing.T) { // Disable tests under macOS and the CI since they are locking up if runtime.GOOS == "darwin" { testy.SkipUnreliable(t) } vfstest.RunTests(t, false, vfscommon.CacheModeOff, true, mount) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cmount/mount.go
cmd/cmount/mount.go
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows) // Package cmount implements a FUSE mounting system for rclone remotes. // // This uses the cgo based cgofuse library package cmount import ( "errors" "fmt" "strings" "os" "runtime" "time" "github.com/rclone/rclone/cmd/mountlib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/buildinfo" "github.com/rclone/rclone/vfs" "github.com/winfsp/cgofuse/fuse" ) func init() { name := "cmount" cmountOnly := runtime.GOOS != "linux" // rclone mount only works for linux if cmountOnly { name = "mount" } cmd := mountlib.NewMountCommand(name, false, mount) if cmountOnly { cmd.Aliases = append(cmd.Aliases, "cmount") } mountlib.AddRc("cmount", mount) buildinfo.Tags = append(buildinfo.Tags, "cmount") } // mountOptions configures the options from the command line flags func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.Options) (options []string) { // Options options = []string{ "-o", fmt.Sprintf("attr_timeout=%g", time.Duration(opt.AttrTimeout).Seconds()), } if opt.DebugFUSE { options = append(options, "-o", "debug") } if runtime.GOOS == "windows" { options = append(options, "-o", "uid=-1") options = append(options, "-o", "gid=-1") options = append(options, "--FileSystemName=rclone") if opt.VolumeName != "" { if opt.NetworkMode { options = append(options, "--VolumePrefix="+opt.VolumeName) } else { options = append(options, "-o", "volname="+opt.VolumeName) } } } else { options = append(options, "-o", "fsname="+device) options = append(options, "-o", "subtype=rclone") options = append(options, "-o", fmt.Sprintf("max_readahead=%d", opt.MaxReadAhead)) // This causes FUSE to supply O_TRUNC with the Open // call which is more efficient for cmount. However // it does not work with cgofuse on Windows with // WinFSP so cmount must work with or without it. 
options = append(options, "-o", "atomic_o_trunc") if opt.DaemonTimeout != 0 { options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(time.Duration(opt.DaemonTimeout).Seconds()))) } if opt.AllowOther { options = append(options, "-o", "allow_other") } if opt.AllowRoot { options = append(options, "-o", "allow_root") } if opt.DefaultPermissions { options = append(options, "-o", "default_permissions") } if VFS.Opt.ReadOnly { options = append(options, "-o", "ro") } //if opt.WritebackCache { // FIXME? options = append(options, "-o", WritebackCache()) //} if runtime.GOOS == "darwin" { if opt.VolumeName != "" { options = append(options, "-o", "volname="+opt.VolumeName) } if opt.NoAppleDouble { options = append(options, "-o", "noappledouble") } if opt.NoAppleXattr { options = append(options, "-o", "noapplexattr") } } } for _, option := range opt.ExtraOptions { options = append(options, "-o", option) } options = append(options, opt.ExtraFlags...) return options } // waitFor runs fn() until it returns true or the timeout expires func waitFor(fn func() bool) (ok bool) { const totalWait = 10 * time.Second const individualWait = 10 * time.Millisecond for i := 0; i < int(totalWait/individualWait); i++ { ok = fn() if ok { return ok } time.Sleep(individualWait) } return false } // mount the file system // // The mount point will be ready when this returns. // // returns an error, and an error channel for the serve process to // report an error when fusermount is called. 
func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error, func() error, error) { // Get mountpoint using OS specific logic f := VFS.Fs() mountpoint, err := getMountpoint(f, mountPath, opt) if err != nil { return nil, nil, err } fs.Debugf(nil, "Mounting on %q (%q)", mountpoint, opt.VolumeName) // Create underlying FS fsys := NewFS(VFS, opt) host := fuse.NewFileSystemHost(fsys) host.SetCapReaddirPlus(true) // only works on Windows if opt.CaseInsensitive.Valid { host.SetCapCaseInsensitive(opt.CaseInsensitive.Value) } else { host.SetCapCaseInsensitive(f.Features().CaseInsensitive) } // Create options options := mountOptions(VFS, opt.DeviceName, mountpoint, opt) fs.Debugf(f, "Mounting with options: %q", options) // Serve the mount point in the background returning error to errChan errChan := make(chan error, 1) go func() { defer func() { if r := recover(); r != nil { err := fmt.Errorf("mount failed: %v", r) if strings.Contains(strings.ToLower(err.Error()), "cannot find winfsp") { err = fmt.Errorf("%w\nHint: Install WinFsp from https://winfsp.dev/rel/", err) } errChan <- err } }() var err error ok := host.Mount(mountpoint, options) if !ok { err = errors.New("mount failed") fs.Errorf(f, "Mount failed") } errChan <- err }() // unmount unmount := func() error { // Shutdown the VFS fsys.VFS.Shutdown() var umountOK bool if fsys.destroyed.Load() != 0 { fs.Debugf(nil, "Not calling host.Unmount as mount already Destroyed") umountOK = true } else if atexit.Signalled() { // If we have received a signal then FUSE will be shutting down already fs.Debugf(nil, "Not calling host.Unmount as signal received") umountOK = true } else { fs.Debugf(nil, "Calling host.Unmount") umountOK = host.Unmount() } if umountOK { fs.Debugf(nil, "Unmounted successfully") if runtime.GOOS == "windows" { if !waitFor(func() bool { _, err := os.Stat(mountpoint) return err != nil }) { fs.Errorf(nil, "mountpoint %q didn't disappear after unmount - continuing anyway", mountpoint) } } return 
nil } fs.Debugf(nil, "host.Unmount failed") return errors.New("host unmount failed") } // Wait for the filesystem to become ready, checking the file // system didn't blow up before starting select { case err := <-errChan: err = fmt.Errorf("mount stopped before calling Init: %w", err) return nil, nil, err case <-fsys.ready: } // Wait for the mount point to be available on Windows // On Windows the Init signal comes slightly before the mount is ready if runtime.GOOS == "windows" { if !waitFor(func() bool { _, err := os.Stat(mountpoint) return err == nil }) { fs.Errorf(nil, "mountpoint %q didn't became available on mount - continuing anyway", mountpoint) } } return errChan, unmount, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cmount/mount_unsupported.go
cmd/cmount/mount_unsupported.go
//go:build !((linux && cgo && cmount) || (darwin && cgo && cmount) || (freebsd && cgo && cmount) || (windows && cmount)) // Package cmount implements a FUSE mounting system for rclone remotes. // // Build for cmount for unsupported platforms to stop go complaining // about "no buildable Go source files". package cmount
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cmount/mountpoint_other.go
cmd/cmount/mountpoint_other.go
//go:build cmount && cgo && !windows package cmount import ( "errors" "fmt" "os" "github.com/rclone/rclone/cmd/mountlib" "github.com/rclone/rclone/fs" ) func getMountpoint(f fs.Fs, mountPath string, opt *mountlib.Options) (string, error) { fi, err := os.Stat(mountPath) if err != nil { return "", fmt.Errorf("failed to retrieve mount path information: %w", err) } if !fi.IsDir() { return "", errors.New("mount path is not a directory") } if err = mountlib.CheckOverlap(f, mountPath); err != nil { return "", err } if err = mountlib.CheckAllowNonEmpty(mountPath, opt); err != nil { return "", err } return mountPath, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cmount/fs.go
cmd/cmount/fs.go
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows) package cmount import ( "io" "os" "path" "strings" "sync" "sync/atomic" "time" "github.com/rclone/rclone/cmd/mountlib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/vfs" "github.com/winfsp/cgofuse/fuse" ) const fhUnset = ^uint64(0) // FS represents the top level filing system type FS struct { VFS *vfs.VFS f fs.Fs opt *mountlib.Options ready chan (struct{}) mu sync.Mutex // to protect the below handles []vfs.Handle destroyed atomic.Int32 } // NewFS makes a new FS func NewFS(VFS *vfs.VFS, opt *mountlib.Options) *FS { fsys := &FS{ VFS: VFS, f: VFS.Fs(), opt: opt, ready: make(chan (struct{})), } return fsys } // Open a handle returning an integer file handle func (fsys *FS) openHandle(handle vfs.Handle) (fh uint64) { fsys.mu.Lock() defer fsys.mu.Unlock() var i int var oldHandle vfs.Handle for i, oldHandle = range fsys.handles { if oldHandle == nil { fsys.handles[i] = handle goto found } } fsys.handles = append(fsys.handles, handle) i = len(fsys.handles) - 1 found: return uint64(i) } // get the handle for fh, call with the lock held func (fsys *FS) _getHandle(fh uint64) (i int, handle vfs.Handle, errc int) { if fh > uint64(len(fsys.handles)) { fs.Debugf(nil, "Bad file handle: too big: 0x%X", fh) return i, nil, -fuse.EBADF } i = int(fh) handle = fsys.handles[i] if handle == nil { fs.Debugf(nil, "Bad file handle: nil handle: 0x%X", fh) return i, nil, -fuse.EBADF } return i, handle, 0 } // Get the handle for the file handle func (fsys *FS) getHandle(fh uint64) (handle vfs.Handle, errc int) { fsys.mu.Lock() _, handle, errc = fsys._getHandle(fh) fsys.mu.Unlock() return } // Close the handle func (fsys *FS) closeHandle(fh uint64) (errc int) { fsys.mu.Lock() i, _, errc := fsys._getHandle(fh) if errc == 0 { fsys.handles[i] = nil } fsys.mu.Unlock() return } // lookup a Node given a path func (fsys *FS) 
lookupNode(path string) (node vfs.Node, errc int) { node, err := fsys.VFS.Stat(path) return node, translateError(err) } // lookup a Dir given a path func (fsys *FS) lookupDir(path string) (dir *vfs.Dir, errc int) { node, errc := fsys.lookupNode(path) if errc != 0 { return nil, errc } dir, ok := node.(*vfs.Dir) if !ok { return nil, -fuse.ENOTDIR } return dir, 0 } // lookup a parent Dir given a path returning the dir and the leaf func (fsys *FS) lookupParentDir(filePath string) (leaf string, dir *vfs.Dir, errc int) { parentDir, leaf := path.Split(filePath) dir, errc = fsys.lookupDir(parentDir) return leaf, dir, errc } // get a node and handle from the path or from the fh if not fhUnset // // handle may be nil func (fsys *FS) getNode(path string, fh uint64) (node vfs.Node, handle vfs.Handle, errc int) { if fh == fhUnset { node, errc = fsys.lookupNode(path) } else { handle, errc = fsys.getHandle(fh) if errc == 0 { node = handle.Node() } } return } // stat fills up the stat block for Node func (fsys *FS) stat(node vfs.Node, stat *fuse.Stat_t) (errc int) { Size := uint64(node.Size()) Blocks := (Size + 511) / 512 modTime := node.ModTime() //stat.Dev = 1 stat.Ino = node.Inode() // FIXME do we need to set the inode number? stat.Mode = getMode(node) stat.Nlink = 1 stat.Uid = fsys.VFS.Opt.UID stat.Gid = fsys.VFS.Opt.GID //stat.Rdev stat.Size = int64(Size) t := fuse.NewTimespec(modTime) stat.Atim = t stat.Mtim = t stat.Ctim = t stat.Blksize = 512 stat.Blocks = int64(Blocks) stat.Birthtim = t // fs.Debugf(nil, "stat = %+v", *stat) return 0 } // Init is called after the filesystem is ready func (fsys *FS) Init() { defer log.Trace(fsys.f, "")("") close(fsys.ready) } // Destroy is called when it is unmounted (note that depending on how // the file system is terminated the file system may not receive the // Destroy call). 
func (fsys *FS) Destroy() { defer log.Trace(fsys.f, "")("") fsys.destroyed.Store(1) } // Getattr reads the attributes for path func (fsys *FS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) { defer log.Trace(path, "fh=0x%X", fh)("errc=%v", &errc) node, _, errc := fsys.getNode(path, fh) if errc == 0 { errc = fsys.stat(node, stat) } return } // Opendir opens path as a directory func (fsys *FS) Opendir(path string) (errc int, fh uint64) { defer log.Trace(path, "")("errc=%d, fh=0x%X", &errc, &fh) handle, err := fsys.VFS.OpenFile(path, os.O_RDONLY, 0777) if err != nil { return translateError(err), fhUnset } return 0, fsys.openHandle(handle) } // Readdir reads the directory at dirPath func (fsys *FS) Readdir(dirPath string, fill func(name string, stat *fuse.Stat_t, ofst int64) bool, ofst int64, fh uint64) (errc int) { itemsRead := -1 defer log.Trace(dirPath, "ofst=%d, fh=0x%X", ofst, fh)("items=%d, errc=%d", &itemsRead, &errc) dir, errc := fsys.lookupDir(dirPath) if errc != 0 { return errc } // We can't seek in directories and FUSE should know that so // return an error if ofst is ever set. if ofst > 0 { return -fuse.ESPIPE } nodes, err := dir.ReadDirAll() if err != nil { return translateError(err) } // Optionally, create a struct stat that describes the file as // for getattr (but FUSE only looks at st_ino and the // file-type bits of st_mode). // // We have called host.SetCapReaddirPlus() so WinFsp will // use the full stat information - a Useful optimization on // Windows. // // NB we are using the first mode for readdir: The readdir // implementation ignores the offset parameter, and passes // zero to the filler function's offset. The filler function // will not return '1' (unless an error happens), so the whole // directory is read in a single readdir operation. 
fill(".", nil, 0) fill("..", nil, 0) for _, node := range nodes { name := node.Name() if len(name) > mountlib.MaxLeafSize { fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name) continue } // We have called host.SetCapReaddirPlus() so supply the stat information // It is very cheap at this point so supply it regardless of OS capabilities var stat fuse.Stat_t _ = fsys.stat(node, &stat) // not capable of returning an error fill(name, &stat, 0) } itemsRead = len(nodes) return 0 } // Releasedir finished reading the directory func (fsys *FS) Releasedir(path string, fh uint64) (errc int) { defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc) return fsys.closeHandle(fh) } // Statfs reads overall stats on the filesystem func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) { defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc) const blockSize = 4096 total, _, free := fsys.VFS.Statfs() stat.Blocks = uint64(total) / blockSize // Total data blocks in file system. stat.Bfree = uint64(free) / blockSize // Free blocks in file system. stat.Bavail = stat.Bfree // Free blocks in file system if you're not root. stat.Files = 1e9 // Total files in file system. stat.Ffree = 1e9 // Free files in file system. stat.Bsize = blockSize // Block size stat.Namemax = 255 // Maximum file name length? stat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system. 
mountlib.ClipBlocks(&stat.Blocks) mountlib.ClipBlocks(&stat.Bfree) mountlib.ClipBlocks(&stat.Bavail) return 0 } // OpenEx opens a file func (fsys *FS) OpenEx(path string, fi *fuse.FileInfo_t) (errc int) { defer log.Trace(path, "flags=0x%X", fi.Flags)("errc=%d, fh=0x%X", &errc, &fi.Fh) fi.Fh = fhUnset // translate the fuse flags to os flags flags := translateOpenFlags(fi.Flags) handle, err := fsys.VFS.OpenFile(path, flags, 0777) if err != nil { return translateError(err) } // If size unknown then use direct io to read if entry := handle.Node().DirEntry(); entry != nil && entry.Size() < 0 { fi.DirectIo = true } if fsys.opt.DirectIO { fi.DirectIo = true } fi.Fh = fsys.openHandle(handle) return 0 } // Open opens a file func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) { var fi = fuse.FileInfo_t{ Flags: flags, } errc = fsys.OpenEx(path, &fi) return errc, fi.Fh } // CreateEx creates and opens a file. func (fsys *FS) CreateEx(filePath string, mode uint32, fi *fuse.FileInfo_t) (errc int) { defer log.Trace(filePath, "flags=0x%X, mode=0%o", fi.Flags, mode)("errc=%d, fh=0x%X", &errc, &fi.Fh) fi.Fh = fhUnset leaf, parentDir, errc := fsys.lookupParentDir(filePath) if errc != 0 { return errc } file, err := parentDir.Create(leaf, fi.Flags) if err != nil { return translateError(err) } // translate the fuse flags to os flags flags := translateOpenFlags(fi.Flags) | os.O_CREATE handle, err := file.Open(flags) if err != nil { return translateError(err) } fi.Fh = fsys.openHandle(handle) return 0 } // Create creates and opens a file. 
func (fsys *FS) Create(filePath string, flags int, mode uint32) (errc int, fh uint64) { var fi = fuse.FileInfo_t{ Flags: flags, } errc = fsys.CreateEx(filePath, mode, &fi) return errc, fi.Fh } // Truncate truncates a file to size func (fsys *FS) Truncate(path string, size int64, fh uint64) (errc int) { defer log.Trace(path, "size=%d, fh=0x%X", size, fh)("errc=%d", &errc) node, handle, errc := fsys.getNode(path, fh) if errc != 0 { return errc } var err error if handle != nil { err = handle.Truncate(size) } else { err = node.Truncate(size) } if err != nil { return translateError(err) } return 0 } // Read data from file handle func (fsys *FS) Read(path string, buff []byte, ofst int64, fh uint64) (n int) { defer log.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n) handle, errc := fsys.getHandle(fh) if errc != 0 { return errc } n, err := handle.ReadAt(buff, ofst) if err == io.EOF { } else if err != nil { return translateError(err) } return n } // Write data to file handle func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) { defer log.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n) handle, errc := fsys.getHandle(fh) if errc != 0 { return errc } n, err := handle.WriteAt(buff, ofst) if err != nil { return translateError(err) } return n } // Flush flushes an open file descriptor or path func (fsys *FS) Flush(path string, fh uint64) (errc int) { defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc) handle, errc := fsys.getHandle(fh) if errc != 0 { return errc } return translateError(handle.Flush()) } // Release closes the file if still open func (fsys *FS) Release(path string, fh uint64) (errc int) { defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc) handle, errc := fsys.getHandle(fh) if errc != 0 { return errc } _ = fsys.closeHandle(fh) return translateError(handle.Release()) } // Unlink removes a file. 
func (fsys *FS) Unlink(filePath string) (errc int) { defer log.Trace(filePath, "")("errc=%d", &errc) leaf, parentDir, errc := fsys.lookupParentDir(filePath) if errc != 0 { return errc } return translateError(parentDir.RemoveName(leaf)) } // Mkdir creates a directory. func (fsys *FS) Mkdir(dirPath string, mode uint32) (errc int) { defer log.Trace(dirPath, "mode=0%o", mode)("errc=%d", &errc) leaf, parentDir, errc := fsys.lookupParentDir(dirPath) if errc != 0 { return errc } _, err := parentDir.Mkdir(leaf) return translateError(err) } // Rmdir removes a directory func (fsys *FS) Rmdir(dirPath string) (errc int) { defer log.Trace(dirPath, "")("errc=%d", &errc) leaf, parentDir, errc := fsys.lookupParentDir(dirPath) if errc != 0 { return errc } return translateError(parentDir.RemoveName(leaf)) } // Rename renames a file. func (fsys *FS) Rename(oldPath string, newPath string) (errc int) { defer log.Trace(oldPath, "newPath=%q", newPath)("errc=%d", &errc) return translateError(fsys.VFS.Rename(oldPath, newPath)) } // Windows sometimes seems to send times that are the epoch which is // 1601-01-01 +/- timezone so filter out times that are earlier than // this. var invalidDateCutoff = time.Date(1601, 1, 2, 0, 0, 0, 0, time.UTC) // Utimens changes the access and modification times of a file. func (fsys *FS) Utimens(path string, tmsp []fuse.Timespec) (errc int) { defer log.Trace(path, "tmsp=%+v", tmsp)("errc=%d", &errc) node, errc := fsys.lookupNode(path) if errc != 0 { return errc } if tmsp == nil || len(tmsp) < 2 { fs.Debugf(path, "Utimens: Not setting time as timespec isn't complete: %v", tmsp) return 0 } t := tmsp[1].Time() if t.Before(invalidDateCutoff) { fs.Debugf(path, "Utimens: Not setting out of range time: %v", t) return 0 } fs.Debugf(path, "Utimens: SetModTime: %v", t) return translateError(node.SetModTime(t)) } // Mknod creates a file node. 
func (fsys *FS) Mknod(path string, mode uint32, dev uint64) (errc int) { defer log.Trace(path, "mode=0x%X, dev=0x%X", mode, dev)("errc=%d", &errc) return -fuse.ENOSYS } // Fsync synchronizes file contents. func (fsys *FS) Fsync(path string, datasync bool, fh uint64) (errc int) { defer log.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc) // This is a no-op for rclone return 0 } // Link creates a hard link to a file. func (fsys *FS) Link(oldpath string, newpath string) (errc int) { defer log.Trace(oldpath, "newpath=%q", newpath)("errc=%d", &errc) return -fuse.ENOSYS } // Symlink creates a symbolic link. func (fsys *FS) Symlink(target string, newpath string) (errc int) { defer log.Trace(target, "newpath=%q, target=%q", newpath, target)("errc=%d", &errc) return translateError(fsys.VFS.Symlink(target, newpath)) } // Readlink reads the target of a symbolic link. func (fsys *FS) Readlink(path string) (errc int, linkPath string) { defer log.Trace(path, "")("errc=%v, linkPath=%q", &errc, linkPath) linkPath, err := fsys.VFS.Readlink(path) return translateError(err), linkPath } // Chmod changes the permission bits of a file. func (fsys *FS) Chmod(path string, mode uint32) (errc int) { defer log.Trace(path, "mode=0%o", mode)("errc=%d", &errc) // This is a no-op for rclone return 0 } // Chown changes the owner and group of a file. func (fsys *FS) Chown(path string, uid uint32, gid uint32) (errc int) { defer log.Trace(path, "uid=%d, gid=%d", uid, gid)("errc=%d", &errc) // This is a no-op for rclone return 0 } // Access checks file access permissions. func (fsys *FS) Access(path string, mask uint32) (errc int) { defer log.Trace(path, "mask=0%o", mask)("errc=%d", &errc) // This is a no-op for rclone return 0 } // Fsyncdir synchronizes directory contents. 
func (fsys *FS) Fsyncdir(path string, datasync bool, fh uint64) (errc int) { defer log.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc) // This is a no-op for rclone return 0 } // Setxattr sets extended attributes. func (fsys *FS) Setxattr(path string, name string, value []byte, flags int) (errc int) { defer log.Trace(path, "name=%q, value=%q, flags=%d", name, value, flags)("errc=%d", &errc) return -fuse.ENOSYS } // Getxattr gets extended attributes. func (fsys *FS) Getxattr(path string, name string) (errc int, value []byte) { defer log.Trace(path, "name=%q", name)("errc=%d, value=%q", &errc, &value) return -fuse.ENOSYS, nil } // Removexattr removes extended attributes. func (fsys *FS) Removexattr(path string, name string) (errc int) { defer log.Trace(path, "name=%q", name)("errc=%d", &errc) return -fuse.ENOSYS } // Listxattr lists extended attributes. func (fsys *FS) Listxattr(path string, fill func(name string) bool) (errc int) { defer log.Trace(path, "fill=%p", fill)("errc=%d", &errc) return -fuse.ENOSYS } // Getpath allows a case-insensitive file system to report the correct case of // a file path. 
func (fsys *FS) Getpath(path string, fh uint64) (errc int, normalisedPath string) { defer log.Trace(path, "Getpath fh=%d", fh)("errc=%d, normalisedPath=%q", &errc, &normalisedPath) node, _, errc := fsys.getNode(path, fh) if errc != 0 { return errc, "" } normalisedPath = node.Path() if !strings.HasPrefix(normalisedPath, "/") { normalisedPath = "/" + normalisedPath } return 0, normalisedPath } // Translate errors from mountlib func translateError(err error) (errc int) { if err == nil { return 0 } _, uErr := fserrors.Cause(err) switch uErr { case vfs.OK: return 0 case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound: return -fuse.ENOENT case vfs.EEXIST, fs.ErrorDirExists: return -fuse.EEXIST case vfs.EPERM, fs.ErrorPermissionDenied: return -fuse.EPERM case vfs.ECLOSED: return -fuse.EBADF case vfs.ENOTEMPTY: return -fuse.ENOTEMPTY case vfs.ESPIPE: return -fuse.ESPIPE case vfs.EBADF: return -fuse.EBADF case vfs.EROFS: return -fuse.EROFS case vfs.ENOSYS, fs.ErrorNotImplemented: return -fuse.ENOSYS case vfs.EINVAL: return -fuse.EINVAL case vfs.ELOOP: return -fuse.ELOOP } fs.Errorf(nil, "IO error: %v", err) return -fuse.EIO } // Translate Open Flags from FUSE to os (as used in the vfs layer) func translateOpenFlags(inFlags int) (outFlags int) { switch inFlags & fuse.O_ACCMODE { case fuse.O_RDONLY: outFlags = os.O_RDONLY case fuse.O_WRONLY: outFlags = os.O_WRONLY case fuse.O_RDWR: outFlags = os.O_RDWR } if inFlags&fuse.O_APPEND != 0 { outFlags |= os.O_APPEND } if inFlags&fuse.O_CREAT != 0 { outFlags |= os.O_CREATE } if inFlags&fuse.O_EXCL != 0 { outFlags |= os.O_EXCL } if inFlags&fuse.O_TRUNC != 0 { outFlags |= os.O_TRUNC } // NB O_SYNC isn't defined by fuse return outFlags } // get the Mode from a vfs Node func getMode(node os.FileInfo) uint32 { vfsMode := node.Mode() Mode := vfsMode.Perm() if vfsMode&os.ModeDir != 0 { Mode |= fuse.S_IFDIR } else if vfsMode&os.ModeSymlink != 0 { Mode |= fuse.S_IFLNK } else if vfsMode&os.ModeNamedPipe != 0 { Mode |= fuse.S_IFIFO } 
else { Mode |= fuse.S_IFREG } return uint32(Mode) } // Make sure interfaces are satisfied var ( _ fuse.FileSystemInterface = (*FS)(nil) _ fuse.FileSystemOpenEx = (*FS)(nil) _ fuse.FileSystemGetpath = (*FS)(nil) //_ fuse.FileSystemChflags = (*FS)(nil) //_ fuse.FileSystemSetcrtime = (*FS)(nil) //_ fuse.FileSystemSetchgtime = (*FS)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/touch/touch.go
cmd/touch/touch.go
// Package touch provides the touch command. package touch import ( "bytes" "context" "errors" "fmt" "time" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) var ( notCreateNewFile bool timeAsArgument string localTime bool recursive bool ) const ( defaultLayout string = "060102" layoutDateWithTime string = "2006-01-02T15:04:05" layoutDateWithTimeNano string = "2006-01-02T15:04:05.999999999" ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.BoolVarP(cmdFlags, &notCreateNewFile, "no-create", "C", false, "Do not create the file if it does not exist (implied with --recursive)", "") flags.StringVarP(cmdFlags, &timeAsArgument, "timestamp", "t", "", "Use specified time instead of the current time of day", "") flags.BoolVarP(cmdFlags, &localTime, "localtime", "", false, "Use localtime for timestamp, not UTC", "") flags.BoolVarP(cmdFlags, &recursive, "recursive", "R", false, "Recursively touch all files", "") } var commandDefinition = &cobra.Command{ Use: "touch remote:path", Short: `Create new file or change file modification time.`, Long: `Set the modification time on file(s) as specified by remote:path to have the current time. If remote:path does not exist then a zero sized file will be created, unless ` + "`--no-create`" + ` or ` + "`--recursive`" + ` is provided. If ` + "`--recursive`" + ` is used then recursively sets the modification time on all existing files that is found under the path. Filters are supported, and you can test with the ` + "`--dry-run`" + ` or the ` + "`--interactive`/`-i`" + ` flag. This will touch ` + "`--transfers`" + ` files concurrently. If ` + "`--timestamp`" + ` is used then sets the modification time to that time instead of the current time. Times may be specified as one of: - 'YYMMDD' - e.g. 
17.10.30 - 'YYYY-MM-DDTHH:MM:SS' - e.g. 2006-01-02T15:04:05 - 'YYYY-MM-DDTHH:MM:SS.SSS' - e.g. 2006-01-02T15:04:05.123456789 Note that value of ` + "`--timestamp`" + ` is in UTC. If you want local time then add the ` + "`--localtime`" + ` flag.`, Annotations: map[string]string{ "versionIntroduced": "v1.39", "groups": "Filter,Listing,Important", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) f, remote := newFsDst(args) cmd.Run(true, false, command, func() error { return Touch(context.Background(), f, remote) }) }, } // newFsDst creates a new dst fs from the arguments. // // The returned fs will never point to a file. It will point to the // parent directory of specified path, and is returned together with // the basename of file or directory, except if argument is only a // remote name. Similar to cmd.NewFsDstFile, but without raising fatal // when name of file or directory is empty (e.g. "remote:" or "remote:path/"). func newFsDst(args []string) (f fs.Fs, remote string) { root, remote, err := fspath.Split(args[0]) if err != nil { fs.Fatalf(nil, "Parsing %q failed: %v", args[0], err) } if root == "" { root = "." 
} f = cmd.NewFsDir([]string{root}) return f, remote } // parseTimeArgument parses a timestamp string according to specific layouts func parseTimeArgument(timeString string) (time.Time, error) { layout := defaultLayout if len(timeString) == len(layoutDateWithTime) { layout = layoutDateWithTime } else if len(timeString) > len(layoutDateWithTime) { layout = layoutDateWithTimeNano } if localTime { return time.ParseInLocation(layout, timeString, time.Local) } return time.Parse(layout, timeString) } // timeOfTouch returns the time value set on files func timeOfTouch() (time.Time, error) { var t time.Time if timeAsArgument != "" { var err error if t, err = parseTimeArgument(timeAsArgument); err != nil { return t, fmt.Errorf("failed to parse timestamp argument: %w", err) } } else { t = time.Now() } return t, nil } // createEmptyObject creates an empty object (file) with specified timestamp func createEmptyObject(ctx context.Context, remote string, modTime time.Time, f fs.Fs) error { var buffer []byte src := object.NewStaticObjectInfo(remote, modTime, int64(len(buffer)), true, nil, f) _, err := f.Put(ctx, bytes.NewBuffer(buffer), src) return err } // Touch create new file or change file modification time. func Touch(ctx context.Context, f fs.Fs, remote string) error { t, err := timeOfTouch() if err != nil { return err } fs.Debugf(nil, "Touch time %v", t) var file fs.Object if remote == "" { err = fs.ErrorIsDir } else { file, err = f.NewObject(ctx, remote) } if err != nil { if errors.Is(err, fs.ErrorObjectNotFound) { // Touching non-existent path, possibly creating it as new file if remote == "" { fs.Logf(f, "Not touching empty directory") return nil } if notCreateNewFile { fs.Logf(f, "Not touching nonexistent file due to --no-create") return nil } if recursive { // For consistency, --recursive never creates new files. 
fs.Logf(f, "Not touching nonexistent file due to --recursive") return nil } if operations.SkipDestructive(ctx, f, "touch (create)") { return nil } fs.Debugf(f, "Touching (creating) %q", remote) if err = createEmptyObject(ctx, remote, t, f); err != nil { return fmt.Errorf("failed to touch (create): %w", err) } } if errors.Is(err, fs.ErrorIsDir) { // Touching existing directory if recursive { fs.Debugf(f, "Touching recursively files in directory %q", remote) return operations.TouchDir(ctx, f, remote, t, true) } fs.Debugf(f, "Touching non-recursively files in directory %q", remote) return operations.TouchDir(ctx, f, remote, t, false) } return err } // Touch single existing file if !operations.SkipDestructive(ctx, remote, "touch") { fs.Debugf(f, "Touching %q", remote) err = file.SetModTime(ctx, t) if err != nil { return fmt.Errorf("failed to touch: %w", err) } } return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/touch/touch_test.go
cmd/touch/touch_test.go
package touch import ( "context" "testing" _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest" "github.com/stretchr/testify/require" ) var ( t1 = fstest.Time("2017-02-03T04:05:06.499999999Z") ) func checkFile(t *testing.T, r fs.Fs, path string, content string) { timeAtrFromFlags, err := timeOfTouch() require.NoError(t, err) file1 := fstest.NewItem(path, content, timeAtrFromFlags) fstest.CheckItems(t, r, file1) } // TestMain drives the tests func TestMain(m *testing.M) { fstest.TestMain(m) } func TestTouchOneFile(t *testing.T) { r := fstest.NewRun(t) err := Touch(context.Background(), r.Fremote, "newFile") require.NoError(t, err) _, err = r.Fremote.NewObject(context.Background(), "newFile") require.NoError(t, err) } func TestTouchWithNoCreateFlag(t *testing.T) { r := fstest.NewRun(t) notCreateNewFile = true err := Touch(context.Background(), r.Fremote, "newFile") require.NoError(t, err) _, err = r.Fremote.NewObject(context.Background(), "newFile") require.Error(t, err) notCreateNewFile = false } func TestTouchWithTimestamp(t *testing.T) { r := fstest.NewRun(t) timeAsArgument = "060102" srcFileName := "oldFile" err := Touch(context.Background(), r.Fremote, srcFileName) require.NoError(t, err) checkFile(t, r.Fremote, srcFileName, "") } func TestTouchWithLongerTimestamp(t *testing.T) { r := fstest.NewRun(t) timeAsArgument = "2006-01-02T15:04:05" srcFileName := "oldFile" err := Touch(context.Background(), r.Fremote, srcFileName) require.NoError(t, err) checkFile(t, r.Fremote, srcFileName, "") } func TestTouchUpdateTimestamp(t *testing.T) { r := fstest.NewRun(t) srcFileName := "a" content := "aaa" file1 := r.WriteObject(context.Background(), srcFileName, content, t1) r.CheckRemoteItems(t, file1) timeAsArgument = "121212" err := Touch(context.Background(), r.Fremote, "a") require.NoError(t, err) checkFile(t, r.Fremote, srcFileName, content) } func TestTouchUpdateTimestampWithCFlag(t *testing.T) { r := fstest.NewRun(t) 
srcFileName := "a" content := "aaa" file1 := r.WriteObject(context.Background(), srcFileName, content, t1) r.CheckRemoteItems(t, file1) notCreateNewFile = true timeAsArgument = "121212" err := Touch(context.Background(), r.Fremote, "a") require.NoError(t, err) checkFile(t, r.Fremote, srcFileName, content) notCreateNewFile = false } func TestTouchCreateMultipleDirAndFile(t *testing.T) { r := fstest.NewRun(t) longPath := "a/b/c.txt" err := Touch(context.Background(), r.Fremote, longPath) require.NoError(t, err) file1 := fstest.NewItem("a/b/c.txt", "", t1) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{"a", "a/b"}, fs.ModTimeNotSupported) } func TestTouchEmptyName(t *testing.T) { r := fstest.NewRun(t) err := Touch(context.Background(), r.Fremote, "") require.NoError(t, err) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.ModTimeNotSupported) } func TestTouchEmptyDir(t *testing.T) { r := fstest.NewRun(t) err := r.Fremote.Mkdir(context.Background(), "a") require.NoError(t, err) err = Touch(context.Background(), r.Fremote, "a") require.NoError(t, err) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{"a"}, fs.ModTimeNotSupported) } func TestTouchDirWithFiles(t *testing.T) { r := fstest.NewRun(t) err := r.Fremote.Mkdir(context.Background(), "a") require.NoError(t, err) file1 := r.WriteObject(context.Background(), "a/f1", "111", t1) file2 := r.WriteObject(context.Background(), "a/f2", "222", t1) err = Touch(context.Background(), r.Fremote, "a") require.NoError(t, err) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1, file2}, []string{"a"}, fs.ModTimeNotSupported) } func TestRecursiveTouchDirWithFiles(t *testing.T) { r := fstest.NewRun(t) err := r.Fremote.Mkdir(context.Background(), "a/b/c") require.NoError(t, err) file1 := r.WriteObject(context.Background(), "a/f1", "111", t1) file2 := r.WriteObject(context.Background(), "a/b/f2", "222", t1) file3 := 
r.WriteObject(context.Background(), "a/b/c/f3", "333", t1) recursive = true err = Touch(context.Background(), r.Fremote, "a") recursive = false require.NoError(t, err) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1, file2, file3}, []string{"a", "a/b", "a/b/c"}, fs.ModTimeNotSupported) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/lsjson/lsjson.go
cmd/lsjson/lsjson.go
// Package lsjson provides the lsjson command. package lsjson import ( "context" "encoding/json" "fmt" "os" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/ls/lshelp" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) var ( opt operations.ListJSONOpt statOnly bool ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.BoolVarP(cmdFlags, &opt.Recurse, "recursive", "R", false, "Recurse into the listing", "") flags.BoolVarP(cmdFlags, &opt.ShowHash, "hash", "", false, "Include hashes in the output (may take longer)", "") flags.BoolVarP(cmdFlags, &opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up)", "") flags.BoolVarP(cmdFlags, &opt.NoMimeType, "no-mimetype", "", false, "Don't read the mime type (can speed things up)", "") flags.BoolVarP(cmdFlags, &opt.ShowEncrypted, "encrypted", "", false, "Show the encrypted names", "") flags.BoolVarP(cmdFlags, &opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object", "") flags.BoolVarP(cmdFlags, &opt.FilesOnly, "files-only", "", false, "Show only files in the listing", "") flags.BoolVarP(cmdFlags, &opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing", "") flags.BoolVarP(cmdFlags, &opt.Metadata, "metadata", "M", false, "Add metadata to the listing", "") flags.StringArrayVarP(cmdFlags, &opt.HashTypes, "hash-type", "", nil, "Show only this hash type (may be repeated)", "") flags.BoolVarP(cmdFlags, &statOnly, "stat", "", false, "Just return the info for the pointed to file", "") } var commandDefinition = &cobra.Command{ Use: "lsjson remote:path", Short: `List directories and objects in the path in JSON format.`, Long: `List directories and objects in the path in JSON format. 
The output is an array of Items, where each Item looks like this: ` + "```json" + ` { "Hashes" : { "SHA-1" : "f572d396fae9206628714fb2ce00f72e94f2258f", "MD5" : "b1946ac92492d2347c6235b4d2611184", "DropboxHash" : "ecb65bb98f9d905b70458986c39fcbad7715e5f2fcc3b1f07767d7c83e2438cc" }, "ID": "y2djkhiujf83u33", "OrigID": "UYOJVTUW00Q1RzTDA", "IsBucket" : false, "IsDir" : false, "MimeType" : "application/octet-stream", "ModTime" : "2017-05-31T16:15:57.034468261+01:00", "Name" : "file.txt", "Encrypted" : "v0qpsdq8anpci8n929v3uu9338", "EncryptedPath" : "kja9098349023498/v0qpsdq8anpci8n929v3uu9338", "Path" : "full/path/goes/here/file.txt", "Size" : 6, "Tier" : "hot", } ` + "```" + ` The exact set of properties included depends on the backend: - The property IsBucket will only be included for bucket-based remotes, and only for directories that are buckets. It will always be omitted when value is not true. - Properties Encrypted and EncryptedPath will only be included for encrypted remotes, and (as mentioned below) only if the ` + "`--encrypted`" + ` option is set. Different options may also affect which properties are included: - If ` + "`--hash`" + ` is not specified, the Hashes property will be omitted. The types of hash can be specified with the ` + "`--hash-type`" + ` parameter (which may be repeated). If ` + "`--hash-type`" + ` is set then it implies ` + "`--hash`" + `. - If ` + "`--no-modtime`" + ` is specified then ModTime will be blank. This can speed things up on remotes where reading the ModTime takes an extra request (e.g. s3, swift). - If ` + "`--no-mimetype`" + ` is specified then MimeType will be blank. This can speed things up on remotes where reading the MimeType takes an extra request (e.g. s3, swift). - If ` + "`--encrypted`" + ` is not specified the Encrypted and EncryptedPath properties will be omitted - even for encrypted remotes. - If ` + "`--metadata`" + ` is set then an additional Metadata property will be returned. 
This will have [metadata](/docs/#metadata) in rclone standard format as a JSON object. The default is to list directories and files/objects, but this can be changed with the following options: - If ` + "`--dirs-only`" + ` is specified then directories will be returned only, no files/objects. - If ` + "`--files-only`" + ` is specified then files will be returned only, no directories. If ` + "`--stat`" + ` is set then the the output is not an array of items, but instead a single JSON blob will be returned about the item pointed to. This will return an error if the item isn't found, however on bucket based backends (like s3, gcs, b2, azureblob etc) if the item isn't found it will return an empty directory, as it isn't possible to tell empty directories from missing directories there. The Path field will only show folders below the remote path being listed. If "remote:path" contains the file "subfolder/file.txt", the Path for "file.txt" will be "subfolder/file.txt", not "remote:path/subfolder/file.txt". When used without ` + "`--recursive`" + ` the Path will always be the same as Name. The time is in RFC3339 format with up to nanosecond precision. The number of decimal digits in the seconds will depend on the precision that the remote can hold the times, so if times are accurate to the nearest millisecond (e.g. Google Drive) then 3 digits will always be shown ("2017-05-31T16:15:57.034+01:00") whereas if the times are accurate to the nearest second (Dropbox, Box, WebDav, etc.) no digits will be shown ("2017-05-31T16:15:57+01:00"). The whole output can be processed as a JSON blob, or alternatively it can be processed line by line as each item is written on individual lines (except with ` + "`--stat`" + `). ` + lshelp.Help, Annotations: map[string]string{ "versionIntroduced": "v1.37", "groups": "Filter,Listing", }, RunE: func(command *cobra.Command, args []string) error { // Make sure we set the global Metadata flag too as it // isn't parsed by cobra. 
We need to do this first // before any backends are created. ci := fs.GetConfig(context.Background()) ci.Metadata = opt.Metadata cmd.CheckArgs(1, 1, command, args) var fsrc fs.Fs var remote string if statOnly { fsrc, remote = cmd.NewFsFile(args[0]) } else { fsrc = cmd.NewFsSrc(args) } cmd.Run(false, false, command, func() error { if statOnly { item, err := operations.StatJSON(context.Background(), fsrc, remote, &opt) if err != nil { return err } out, err := json.MarshalIndent(item, "", "\t") if err != nil { return fmt.Errorf("failed to marshal list object: %w", err) } _, err = os.Stdout.Write(out) if err != nil { return fmt.Errorf("failed to write to output: %w", err) } fmt.Println() } else { fmt.Println("[") first := true err := operations.ListJSON(context.Background(), fsrc, remote, &opt, func(item *operations.ListJSONItem) error { out, err := json.Marshal(item) if err != nil { return fmt.Errorf("failed to marshal list object: %w", err) } if first { first = false } else { fmt.Print(",\n") } _, err = os.Stdout.Write(out) if err != nil { return fmt.Errorf("failed to write to output: %w", err) } return nil }) if err != nil { return err } if !first { fmt.Println() } fmt.Println("]") } return nil }) return nil }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/link/link.go
cmd/link/link.go
// Package link provides the link command. package link import ( "context" "fmt" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) var ( expire = fs.DurationOff unlink = false ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.FVarP(cmdFlags, &expire, "expire", "", "The amount of time that the link will be valid", "") flags.BoolVarP(cmdFlags, &unlink, "unlink", "", unlink, "Remove existing public link to file/folder", "") } var commandDefinition = &cobra.Command{ Use: "link remote:path", Short: `Generate public link to file/folder.`, Long: `Create, retrieve or remove a public link to the given file or folder. ` + "```console" + ` rclone link remote:path/to/file rclone link remote:path/to/folder/ rclone link --unlink remote:path/to/folder/ rclone link --expire 1d remote:path/to/file ` + "```" + ` If you supply the --expire flag, it will set the expiration time otherwise it will use the default (100 years). **Note** not all backends support the --expire flag - if the backend doesn't support it then the link returned won't expire. Use the --unlink flag to remove existing public links to the file or folder. **Note** not all backends support "--unlink" flag - those that don't will just ignore it. If successful, the last line of the output will contain the link. Exact capabilities depend on the remote, but the link will always by default be created with the least constraints - e.g. 
no expiry, no password protection, accessible without account.`, Annotations: map[string]string{ "versionIntroduced": "v1.41", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) fsrc, remote := cmd.NewFsFile(args[0]) cmd.Run(false, false, command, func() error { link, err := operations.PublicLink(context.Background(), fsrc, remote, expire, unlink) if err != nil { return err } if link != "" { fmt.Println(link) } return nil }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/rc.go
cmd/serve/rc.go
package serve import ( "cmp" "context" "errors" "fmt" "math/rand/v2" "net" "slices" "sort" "strings" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/lib/errcount" ) // Handle describes what a server can do type Handle interface { // Addr returns the listening address of the server Addr() net.Addr // Shutdown stops the server Shutdown() error // Serve starts the server - doesn't return until Shutdown is called. Serve() (err error) } // Describes a running server type server struct { ID string `json:"id"` // id of the server Addr string `json:"addr"` // address of the server Params rc.Params `json:"params"` // Parameters used to start the server h Handle `json:"-"` // control the server errChan chan error `json:"-"` // receive errors from the server process } // Fn starts an rclone serve command type Fn func(ctx context.Context, f fs.Fs, in rc.Params) (Handle, error) // Globals var ( // mutex to protect all the variables in this block serveMu sync.Mutex // Serve functions available serveFns = map[string]Fn{} // Running servers servers = map[string]*server{} ) // AddRc adds the named serve function to the rc func AddRc(name string, serveFunction Fn) { serveMu.Lock() defer serveMu.Unlock() serveFns[name] = serveFunction } // unquote ` func q(s string) string { return strings.ReplaceAll(s, "|", "`") } func init() { rc.Add(rc.Call{ Path: "serve/start", AuthRequired: true, Fn: startRc, Title: "Create a new server", Help: q(`Create a new server with the specified parameters. This takes the following parameters: - |type| - type of server: |http|, |webdav|, |ftp|, |sftp|, |nfs|, etc. - |fs| - remote storage path to serve - |addr| - the ip:port to run the server on, eg ":1234" or "localhost:1234" Other parameters are as described in the documentation for the relevant [rclone serve](/commands/rclone_serve/) command line options. 
To translate a command line option to an rc parameter, remove the leading |--| and replace |-| with |_|, so |--vfs-cache-mode| becomes |vfs_cache_mode|. Note that global parameters must be set with |_config| and |_filter| as described above. Examples: rclone rc serve/start type=nfs fs=remote: addr=:4321 vfs_cache_mode=full rclone rc serve/start --json '{"type":"nfs","fs":"remote:","addr":":1234","vfs_cache_mode":"full"}' This will give the reply |||json { "addr": "[::]:4321", // Address the server was started on "id": "nfs-ecfc6852" // Unique identifier for the server instance } ||| Or an error if it failed to start. Stop the server with |serve/stop| and list the running servers with |serve/list|. `), }) } // startRc allows the serve command to be run from rc func startRc(ctx context.Context, in rc.Params) (out rc.Params, err error) { serveType, err := in.GetString("type") serveMu.Lock() defer serveMu.Unlock() serveFn := serveFns[serveType] if serveFn == nil { return nil, fmt.Errorf("could not find serve type=%q", serveType) } // Get Fs.fs to be served from fs parameter in the params f, err := rc.GetFs(ctx, in) if err != nil { return nil, err } // Make a background context and copy the config back. 
newCtx := context.Background() newCtx = fs.CopyConfig(newCtx, ctx) newCtx = filter.CopyConfig(newCtx, ctx) // Start the server h, err := serveFn(newCtx, f, in) if err != nil { return nil, fmt.Errorf("could not start serve %q: %w", serveType, err) } // Start the server running in the background errChan := make(chan error, 1) go func() { errChan <- h.Serve() close(errChan) }() // Wait for a short length of time to see if an error occurred select { case err = <-errChan: if err == nil { err = errors.New("server stopped immediately") } case <-time.After(100 * time.Millisecond): err = nil } if err != nil { return nil, fmt.Errorf("error when starting serve %q: %w", serveType, err) } // Store it for later runningServer := server{ ID: fmt.Sprintf("%s-%08x", serveType, rand.Uint32()), Params: in, Addr: h.Addr().String(), h: h, errChan: errChan, } servers[runningServer.ID] = &runningServer out = rc.Params{ "id": runningServer.ID, "addr": runningServer.Addr, } fs.Debugf(f, "Started serve %s on %s", serveType, runningServer.Addr) return out, nil } func init() { rc.Add(rc.Call{ Path: "serve/stop", AuthRequired: true, Fn: stopRc, Title: "Unserve selected active serve", Help: q(`Stops a running |serve| instance by ID. This takes the following parameters: - id: as returned by serve/start This will give an empty response if successful or an error if not. 
Example: rclone rc serve/stop id=12345 `), }) } // stopRc stops the server process func stopRc(_ context.Context, in rc.Params) (out rc.Params, err error) { id, err := in.GetString("id") if err != nil { return nil, err } serveMu.Lock() defer serveMu.Unlock() s := servers[id] if s == nil { return nil, fmt.Errorf("server with id=%q not found", id) } err = s.h.Shutdown() <-s.errChan // ignore server return error - likely is "use of closed network connection" delete(servers, id) return nil, err } func init() { rc.Add(rc.Call{ Path: "serve/types", AuthRequired: true, Fn: serveTypesRc, Title: "Show all possible serve types", Help: q(`This shows all possible serve types and returns them as a list. This takes no parameters and returns - types: list of serve types, eg "nfs", "sftp", etc The serve types are strings like "serve", "serve2", "cserve" and can be passed to serve/start as the serveType parameter. Eg rclone rc serve/types Returns |||json { "types": [ "http", "sftp", "nfs" ] } ||| `), }) } // serveTypesRc returns a list of available serve types. func serveTypesRc(_ context.Context, in rc.Params) (out rc.Params, err error) { var serveTypes = []string{} serveMu.Lock() defer serveMu.Unlock() for serveType := range serveFns { serveTypes = append(serveTypes, serveType) } sort.Strings(serveTypes) return rc.Params{ "types": serveTypes, }, nil } func init() { rc.Add(rc.Call{ Path: "serve/list", AuthRequired: true, Fn: listRc, Title: "Show running servers", Help: q(`Show running servers with IDs. 
This takes no parameters and returns - list: list of running serve commands Each list element will have - id: ID of the server - addr: address the server is running on - params: parameters used to start the server Eg rclone rc serve/list Returns |||json { "list": [ { "addr": "[::]:4321", "id": "nfs-ffc2a4e5", "params": { "fs": "remote:", "opt": { "ListenAddr": ":4321" }, "type": "nfs", "vfsOpt": { "CacheMode": "full" } } } ] } ||| `), }) } // listRc returns a list of current serves sorted by serve path func listRc(_ context.Context, in rc.Params) (out rc.Params, err error) { serveMu.Lock() defer serveMu.Unlock() list := []*server{} for _, item := range servers { list = append(list, item) } slices.SortFunc(list, func(a, b *server) int { return cmp.Compare(a.ID, b.ID) }) return rc.Params{ "list": list, }, nil } func init() { rc.Add(rc.Call{ Path: "serve/stopall", AuthRequired: true, Fn: stopAll, Title: "Stop all active servers", Help: q(`Stop all active servers. This will stop all active servers. rclone rc serve/stopall `), }) } // stopAll shuts all the servers down func stopAll(_ context.Context, in rc.Params) (out rc.Params, err error) { serveMu.Lock() defer serveMu.Unlock() ec := errcount.New() for id, s := range servers { ec.Add(s.h.Shutdown()) <-s.errChan // ignore server return error - likely is "use of closed network connection" delete(servers, id) } return nil, ec.Err("error when stopping server") }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/serve.go
cmd/serve/serve.go
// Package serve provides the serve command. package serve import ( "errors" "github.com/rclone/rclone/cmd" "github.com/spf13/cobra" ) func init() { cmd.Root.AddCommand(Command) } // Command definition for cobra var Command = &cobra.Command{ Use: "serve <protocol> [opts] <remote>", Short: `Serve a remote over a protocol.`, Long: `Serve a remote over a given protocol. Requires the use of a subcommand to specify the protocol, e.g. ` + "```console" + ` rclone serve http remote: ` + "```" + ` When the "--metadata" flag is enabled, the following metadata fields will be provided as headers: - "content-disposition" - "cache-control" - "content-language" - "content-encoding" Note: The availability of these fields depends on whether the remote supports metadata. Each subcommand has its own options which you can see in their help. `, Annotations: map[string]string{ "versionIntroduced": "v1.39", }, RunE: func(command *cobra.Command, args []string) error { if len(args) == 0 { return errors.New("serve requires a protocol, e.g. 'rclone serve http remote:'") } return errors.New("unknown protocol") }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/rc_test.go
cmd/serve/rc_test.go
package serve import ( "context" "errors" "net" "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fstest/mockfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type dummyServer struct { addr *net.TCPAddr shutdownCh chan struct{} shutdownCalled bool } func (d *dummyServer) Addr() net.Addr { return d.addr } func (d *dummyServer) Shutdown() error { d.shutdownCalled = true close(d.shutdownCh) return nil } func (d *dummyServer) Serve() error { <-d.shutdownCh return nil } func newServer(ctx context.Context, f fs.Fs, in rc.Params) (Handle, error) { return &dummyServer{ addr: &net.TCPAddr{ IP: net.IPv4(127, 0, 0, 1), Port: 8080, }, shutdownCh: make(chan struct{}), }, nil } func newServerError(ctx context.Context, f fs.Fs, in rc.Params) (Handle, error) { return nil, errors.New("serve error") } func newServerImmediateStop(ctx context.Context, f fs.Fs, in rc.Params) (Handle, error) { h, _ := newServer(ctx, f, in) close(h.(*dummyServer).shutdownCh) return h, nil } func resetGlobals() { serveMu.Lock() defer serveMu.Unlock() serveFns = make(map[string]Fn) servers = make(map[string]*server) } func newTest(t *testing.T) { _, err := fs.Find("mockfs") if err != nil { mockfs.Register() } resetGlobals() t.Cleanup(resetGlobals) } func TestRcStartServeType(t *testing.T) { newTest(t) serveStart := rc.Calls.Get("serve/start") in := rc.Params{"fs": ":mockfs:", "type": "nonexistent"} _, err := serveStart.Fn(context.Background(), in) assert.ErrorContains(t, err, "could not find serve type") } func TestRcStartServeFnError(t *testing.T) { newTest(t) serveStart := rc.Calls.Get("serve/start") AddRc("error", newServerError) in := rc.Params{"fs": ":mockfs:", "type": "error"} _, err := serveStart.Fn(context.Background(), in) assert.ErrorContains(t, err, "could not start serve") } func TestRcStartImmediateStop(t *testing.T) { newTest(t) serveStart := rc.Calls.Get("serve/start") AddRc("immediate", newServerImmediateStop) 
in := rc.Params{"fs": ":mockfs:", "type": "immediate"} _, err := serveStart.Fn(context.Background(), in) assert.ErrorContains(t, err, "server stopped immediately") } func TestRcStartAndStop(t *testing.T) { newTest(t) serveStart := rc.Calls.Get("serve/start") serveStop := rc.Calls.Get("serve/stop") AddRc("dummy", newServer) in := rc.Params{"fs": ":mockfs:", "type": "dummy"} out, err := serveStart.Fn(context.Background(), in) require.NoError(t, err) id := out["id"].(string) assert.Contains(t, id, "dummy") assert.Equal(t, 1, len(servers)) _, err = serveStop.Fn(context.Background(), rc.Params{"id": id}) require.NoError(t, err) assert.Equal(t, 0, len(servers)) } func TestRcStopNonexistent(t *testing.T) { newTest(t) serveStop := rc.Calls.Get("serve/stop") _, err := serveStop.Fn(context.Background(), rc.Params{"id": "nonexistent"}) assert.ErrorContains(t, err, "not found") } func TestRcServeTypes(t *testing.T) { newTest(t) serveTypes := rc.Calls.Get("serve/types") AddRc("a", newServer) AddRc("c", newServer) AddRc("b", newServer) out, err := serveTypes.Fn(context.Background(), nil) require.NoError(t, err) types := out["types"].([]string) assert.Equal(t, types, []string{"a", "b", "c"}) } func TestRcList(t *testing.T) { newTest(t) serveStart := rc.Calls.Get("serve/start") serveList := rc.Calls.Get("serve/list") AddRc("dummy", newServer) // Start two servers. 
_, err := serveStart.Fn(context.Background(), rc.Params{"fs": ":mockfs:", "type": "dummy"}) require.NoError(t, err) _, err = serveStart.Fn(context.Background(), rc.Params{"fs": ":mockfs:", "type": "dummy"}) require.NoError(t, err) // Check list out, err := serveList.Fn(context.Background(), nil) require.NoError(t, err) list := out["list"].([]*server) assert.Equal(t, 2, len(list)) } func TestRcStopAll(t *testing.T) { newTest(t) serveStart := rc.Calls.Get("serve/start") serveStopAll := rc.Calls.Get("serve/stopall") AddRc("dummy", newServer) _, err := serveStart.Fn(context.Background(), rc.Params{"fs": ":mockfs:", "type": "dummy"}) require.NoError(t, err) _, err = serveStart.Fn(context.Background(), rc.Params{"fs": ":mockfs:", "type": "dummy"}) require.NoError(t, err) assert.Equal(t, 2, len(servers)) _, err = serveStopAll.Fn(context.Background(), nil) require.NoError(t, err) assert.Equal(t, 0, len(servers)) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/nfs/nfs.go
cmd/serve/nfs/nfs.go
//go:build unix // Package nfs implements a server to serve a VFS remote over the NFSv3 protocol // // There is no authentication available on this server and it is // served on the loopback interface by default. // // This is primarily used for mounting a VFS remote in macOS, where // FUSE-mounting mechanisms are usually not available. package nfs import ( "context" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/serve" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" "github.com/rclone/rclone/vfs/vfsflags" "github.com/spf13/cobra" "github.com/spf13/pflag" ) // OptionsInfo descripts the Options in use var OptionsInfo = fs.Options{{ Name: "addr", Default: "", Help: "IPaddress:Port or :Port to bind server to", }, { Name: "nfs_cache_handle_limit", Default: 1000000, Help: "max file handles cached simultaneously (min 5)", }, { Name: "nfs_cache_type", Default: cacheMemory, Help: "Type of NFS handle cache to use", }, { Name: "nfs_cache_dir", Default: "", Help: "The directory the NFS handle cache will use if set", }} func init() { fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "nfs", Opt: &Opt, Options: OptionsInfo}) } type handleCache = fs.Enum[handleCacheChoices] const ( cacheMemory handleCache = iota cacheDisk cacheSymlink ) type handleCacheChoices struct{} func (handleCacheChoices) Choices() []string { return []string{ cacheMemory: "memory", cacheDisk: "disk", cacheSymlink: "symlink", } } // Options contains options for the NFS Server type Options struct { ListenAddr string `config:"addr"` // Port to listen on HandleLimit int `config:"nfs_cache_handle_limit"` // max file handles cached by go-nfs CachingHandler HandleCache handleCache `config:"nfs_cache_type"` // what kind of handle cache to use HandleCacheDir string `config:"nfs_cache_dir"` // where the handle cache 
should be stored } // Opt is the default set of serve nfs options var Opt Options // AddFlags adds flags for serve nfs (and nfsmount) func AddFlags(flagSet *pflag.FlagSet) { flags.AddFlagsFromOptions(flagSet, "", OptionsInfo) } func init() { vfsflags.AddFlags(Command.Flags()) AddFlags(Command.Flags()) serve.Command.AddCommand(Command) serve.AddRc("nfs", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) { // Create VFS var vfsOpt = vfscommon.Opt // set default opts err := configstruct.SetAny(in, &vfsOpt) if err != nil { return nil, err } VFS := vfs.New(f, &vfsOpt) // Read opts var opt = Opt // set default opts err = configstruct.SetAny(in, &opt) if err != nil { return nil, err } // Create server return NewServer(ctx, VFS, &opt) }) } // Run the command func Run(command *cobra.Command, args []string) { var f fs.Fs cmd.CheckArgs(1, 1, command, args) f = cmd.NewFsSrc(args) cmd.Run(false, true, command, func() error { s, err := NewServer(context.Background(), vfs.New(f, &vfscommon.Opt), &Opt) if err != nil { return err } return s.Serve() }) } // Command is the definition of the command var Command = &cobra.Command{ Use: "nfs remote:path", Short: `Serve the remote as an NFS mount`, Long: strings.ReplaceAll(`Create an NFS server that serves the given remote over the network. This implements an NFSv3 server to serve any rclone remote via NFS. The primary purpose for this command is to enable the [mount command](/commands/rclone_mount/) on recent macOS versions where installing FUSE is very cumbersome. This server does not implement any authentication so any client will be able to access the data. To limit access, you can use |serve nfs| on the loopback address or rely on secure tunnels (such as SSH) or use firewalling. For this reason, by default, a random TCP port is chosen and the loopback interface is used for the listening address by default; meaning that it is only available to the local machine. 
If you want other machines to access the NFS mount over local network, you need to specify the listening address and port using the |--addr| flag. Modifying files through the NFS protocol requires VFS caching. Usually you will need to specify |--vfs-cache-mode| in order to be able to write to the mountpoint (|full| is recommended). If you don't specify VFS cache mode, the mount will be read-only. |--nfs-cache-type| controls the type of the NFS handle cache. By default this is |memory| where new handles will be randomly allocated when needed. These are stored in memory. If the server is restarted the handle cache will be lost and connected NFS clients will get stale handle errors. |--nfs-cache-type disk| uses an on disk NFS handle cache. Rclone hashes the path of the object and stores it in a file named after the hash. These hashes are stored on disk the directory controlled by |--cache-dir| or the exact directory may be specified with |--nfs-cache-dir|. Using this means that the NFS server can be restarted at will without affecting the connected clients. |--nfs-cache-type symlink| is similar to |--nfs-cache-type disk| in that it uses an on disk cache, but the cache entries are held as symlinks. Rclone will use the handle of the underlying file as the NFS handle which improves performance. This sort of cache can't be backed up and restored as the underlying handles will change. This is Linux only. It requires running rclone as root or with |CAP_DAC_READ_SEARCH|. You can run rclone with this extra permission by doing this to the rclone binary |sudo setcap cap_dac_read_search+ep /path/to/rclone|. |--nfs-cache-handle-limit| controls the maximum number of cached NFS handles stored by the caching handler. This should not be set too low or you may experience errors when trying to access files. The default is |1000000|, but consider lowering this limit if the server's system resource usage causes problems. This is only used by the |memory| type cache. 
To serve NFS over the network use following command: |||sh rclone serve nfs remote: --addr 0.0.0.0:$PORT --vfs-cache-mode=full ||| This specifies a port that can be used in the mount command. To mount the server under Linux/macOS, use the following command: |||sh mount -t nfs -o port=$PORT,mountport=$PORT,tcp $HOSTNAME:/ path/to/mountpoint ||| Where |$PORT| is the same port number used in the |serve nfs| command and |$HOSTNAME| is the network address of the machine that |serve nfs| was run on. If |--vfs-metadata-extension| is in use then for the |--nfs-cache-type disk| and |--nfs-cache-type cache| the metadata files will have the file handle of their parent file suffixed with |0x00, 0x00, 0x00, 0x01|. This means they can be looked up directly from the parent file handle is desired. This command is only available on Unix platforms. `, "|", "`") + strings.TrimSpace(vfs.Help()), Annotations: map[string]string{ "versionIntroduced": "v1.65", "groups": "Filter", "status": "Experimental", }, Run: Run, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/nfs/symlink_cache_linux.go
cmd/serve/nfs/symlink_cache_linux.go
//go:build unix && linux /* This implements an efficient disk cache for the NFS file handles for Linux only. 1. The destination paths are stored as symlink destinations. These can be stored in the directory for maximum efficiency. 2. The on disk handle of the cache file is returned to NFS with name_to_handle_at(). This means that if the cache is deleted and restored, the file handle mapping will be lost. 3. These handles are looked up with open_by_handle_at() so no searching through directory trees is needed. Note that open_by_handle_at requires CAP_DAC_READ_SEARCH so rclone will need to be run as root or with elevated permissions. Test with go test -c && sudo setcap cap_dac_read_search+ep ./nfs.test && ./nfs.test -test.v -test.run TestCache/symlink */ package nfs import ( "bytes" "encoding/binary" "errors" "fmt" "os" "path/filepath" "syscall" "github.com/rclone/rclone/fs" "golang.org/x/sys/unix" ) // emptyPath is written instead of "" as symlinks can't be empty var ( emptyPath = "\x01" emptyPathBytes = []byte(emptyPath) ) // Turn the diskHandler into a symlink cache // // This also tests the cache works as it may not have enough // permissions or have be the correct Linux version. 
func (dh *diskHandler) makeSymlinkCache() error { path := filepath.Join(dh.cacheDir, "test") fullPath := "testpath" fh := []byte{1, 2, 3, 4, 5} // Create a symlink newFh, err := dh.symlinkCacheWrite(fh, path, fullPath) fs.Debugf(nil, "newFh = %q", newFh) if err != nil { return fmt.Errorf("symlink cache write test failed: %w", err) } defer func() { _ = os.Remove(path) }() // Read it back newFullPath, err := dh.symlinkCacheRead(newFh, path) fs.Debugf(nil, "newFullPath = %q", newFullPath) if err != nil { if errors.Is(err, syscall.EPERM) { return ErrorSymlinkCacheNoPermission } return fmt.Errorf("symlink cache read test failed: %w", err) } // Check result all OK if string(newFullPath) != fullPath { return fmt.Errorf("symlink cache read test failed: expecting %q read %q", string(newFullPath), fullPath) } // If OK install symlink cache dh.read = dh.symlinkCacheRead dh.write = dh.symlinkCacheWrite dh.remove = dh.symlinkCacheRemove dh.suffix = dh.symlinkCacheSuffix return nil } // Prefixes a []byte with its length as a 4-byte big-endian integer. func addLengthPrefix(data []byte) []byte { length := uint32(len(data)) buf := new(bytes.Buffer) err := binary.Write(buf, binary.BigEndian, length) if err != nil { // This should never fail panic(err) } buf.Write(data) return buf.Bytes() } // Removes the 4-byte big-endian length prefix from a []byte. func removeLengthPrefix(data []byte) ([]byte, error) { if len(data) < 4 { return nil, errors.New("file handle too short") } length := binary.BigEndian.Uint32(data[:4]) if int(length) != len(data)-4 { return nil, errors.New("file handle invalid length") } return data[4 : 4+length], nil } // Write the fullPath into cachePath returning the possibly updated fh // // This writes the fullPath into the file with the cachePath given and // returns the handle for that file so we can look it up later. 
func (dh *diskHandler) symlinkCacheWrite(fh []byte, cachePath string, fullPath string) (newFh []byte, err error) { //defer log.Trace(nil, "fh=%x, cachePath=%q, fullPath=%q", fh, cachePath)("newFh=%x, err=%v", &newFh, &err) // Can't write an empty symlink so write a substitution if fullPath == "" { fullPath = emptyPath } // Write the symlink err = os.Symlink(fullPath, cachePath) if err != nil && !errors.Is(err, syscall.EEXIST) { return nil, fmt.Errorf("symlink cache create symlink: %w", err) } // Read the newly created symlinks handle handle, _, err := unix.NameToHandleAt(unix.AT_FDCWD, cachePath, 0) if err != nil { return nil, fmt.Errorf("symlink cache name to handle at: %w", err) } // Store the handle type if it hasn't changed // This should run once only when called by makeSymlinkCache if dh.handleType != handle.Type() { dh.handleType = handle.Type() } // Adjust the raw handle so it has a length prefix return addLengthPrefix(handle.Bytes()), nil } // Read the contents of (fh, cachePath) // // This reads the symlink with the corresponding file handle and // returns the contents. It ignores the cachePath which will be // pointing in the wrong place. // // Note that the caller needs CAP_DAC_READ_SEARCH to use this. 
func (dh *diskHandler) symlinkCacheRead(fh []byte, cachePath string) (fullPath []byte, err error) { //defer log.Trace(nil, "fh=%x, cachePath=%q", fh, cachePath)("fullPath=%q, err=%v", &fullPath, &err) // First check and remove the file handle prefix length fh, err = removeLengthPrefix(fh) if err != nil { return nil, fmt.Errorf("symlink cache open by handle at: %w", err) } // Find the file with the handle passed in handle := unix.NewFileHandle(dh.handleType, fh) fd, err := unix.OpenByHandleAt(unix.AT_FDCWD, handle, unix.O_RDONLY|unix.O_PATH|unix.O_NOFOLLOW) // needs O_PATH for symlinks if err != nil { return nil, fmt.Errorf("symlink cache open by handle at: %w", err) } // Close it on exit defer func() { newErr := unix.Close(fd) if err != nil { err = newErr } }() // Read the symlink which is the path required buf := make([]byte, 1024) // Max path length n, err := unix.Readlinkat(fd, "", buf) // It will (silently) truncate the contents, in case the buffer is too small to hold all of the contents. if err != nil { return nil, fmt.Errorf("symlink cache read: %w", err) } fullPath = buf[:n:n] // Undo empty symlink substitution if bytes.Equal(fullPath, emptyPathBytes) { fullPath = buf[:0:0] } return fullPath, nil } // Remove the (fh, cachePath) file func (dh *diskHandler) symlinkCacheRemove(fh []byte, cachePath string) error { // First read the path fullPath, err := dh.symlinkCacheRead(fh, cachePath) if err != nil { return err } // fh for the actual cache file fh = hashPath(string(fullPath)) // cachePath for the actual cache file cachePath = dh.handleToPath(fh) return os.Remove(cachePath) } // Return a suffix for the file handle or nil func (dh *diskHandler) symlinkCacheSuffix(fh []byte) []byte { if len(fh) < 4 { return nil } length := int(binary.BigEndian.Uint32(fh[:4])) + 4 if len(fh) <= length { return nil } return fh[length:] }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/nfs/cache.go
cmd/serve/nfs/cache.go
//go:build unix package nfs import ( "bytes" "crypto/md5" "encoding/hex" "errors" "fmt" "math" "os" "path" "path/filepath" "runtime" "strings" "sync" billy "github.com/go-git/go-billy/v5" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/file" "github.com/willscott/go-nfs" nfshelper "github.com/willscott/go-nfs/helpers" ) // Errors on cache initialisation var ( ErrorSymlinkCacheNotSupported = errors.New("symlink cache not supported on " + runtime.GOOS) ErrorSymlinkCacheNoPermission = errors.New("symlink cache must be run as root or with CAP_DAC_READ_SEARCH") ) // Metadata files have the file handle of their source file with this // suffixed so we can look them up directly from the file handle. // // Note that this is 4 bytes - using a non multiple of 4 will cause // the Linux NFS client not to be able to read any files. // // The value is big endian 0x00000001 var metadataSuffix = []byte{0x00, 0x00, 0x00, 0x01} // Cache controls the file handle cache implementation type Cache interface { // ToHandle takes a file and represents it with an opaque handle to reference it. // In stateless nfs (when it's serving a unix fs) this can be the device + inode // but we can generalize with a stateful local cache of handed out IDs. ToHandle(f billy.Filesystem, path []string) []byte // FromHandle converts from an opaque handle to the file it represents FromHandle(fh []byte) (billy.Filesystem, []string, error) // Invalidate the handle passed - used on rename and delete InvalidateHandle(fs billy.Filesystem, handle []byte) error // HandleLimit exports how many file handles can be safely stored by this cache. 
HandleLimit() int } // Set the cache of the handler to the type required by the user func (h *Handler) getCache() (c Cache, err error) { fs.Debugf("nfs", "Starting %v handle cache", h.opt.HandleCache) switch h.opt.HandleCache { case cacheMemory: return nfshelper.NewCachingHandler(h, h.opt.HandleLimit), nil case cacheDisk: return newDiskHandler(h) case cacheSymlink: dh, err := newDiskHandler(h) if err != nil { return nil, err } err = dh.makeSymlinkCache() if err != nil { return nil, err } return dh, nil } return nil, errors.New("unknown handle cache type") } // diskHandler implements an on disk NFS file handle cache type diskHandler struct { mu sync.RWMutex cacheDir string billyFS billy.Filesystem write func(fh []byte, cachePath string, fullPath string) ([]byte, error) read func(fh []byte, cachePath string) ([]byte, error) remove func(fh []byte, cachePath string) error suffix func(fh []byte) []byte // returns nil for no suffix or the suffix handleType int32 //nolint:unused // used by the symlink cache metadata string // extension for metadata } // Create a new disk handler func newDiskHandler(h *Handler) (dh *diskHandler, err error) { cacheDir := h.opt.HandleCacheDir // If cacheDir isn't set then make one from the config if cacheDir == "" { // How the VFS was configured configString := fs.ConfigString(h.vfs.Fs()) // Turn it into a valid OS directory name dirName := encoder.OS.ToStandardName(configString) cacheDir = filepath.Join(config.GetCacheDir(), "serve-nfs-handle-cache-"+h.opt.HandleCache.String(), dirName) } // Create the cache dir err = file.MkdirAll(cacheDir, 0700) if err != nil { return nil, fmt.Errorf("disk handler mkdir failed: %v", err) } dh = &diskHandler{ cacheDir: cacheDir, billyFS: h.billyFS, write: dh.diskCacheWrite, read: dh.diskCacheRead, remove: dh.diskCacheRemove, suffix: dh.diskCacheSuffix, metadata: h.vfs.Opt.MetadataExtension, } fs.Infof("nfs", "Storing handle cache in %q", dh.cacheDir) return dh, nil } // Convert a path to a hash func 
hashPath(fullPath string) []byte { hash := md5.Sum([]byte(fullPath)) return hash[:] } // Convert a handle to a path on disk for the handle func (dh *diskHandler) handleToPath(fh []byte) (cachePath string) { fhString := hex.EncodeToString(fh) if len(fhString) <= 4 { cachePath = filepath.Join(dh.cacheDir, fhString) } else { cachePath = filepath.Join(dh.cacheDir, fhString[0:2], fhString[2:4], fhString) } return cachePath } // Return true if name represents a metadata file // // It returns the underlying path func (dh *diskHandler) isMetadataFile(name string) (rawName string, found bool) { if dh.metadata == "" { return name, false } rawName, found = strings.CutSuffix(name, dh.metadata) return rawName, found } // ToHandle takes a file and represents it with an opaque handle to reference it. // In stateless nfs (when it's serving a unix fs) this can be the device + inode // but we can generalize with a stateful local cache of handed out IDs. func (dh *diskHandler) ToHandle(f billy.Filesystem, splitPath []string) (fh []byte) { dh.mu.Lock() defer dh.mu.Unlock() fullPath := path.Join(splitPath...) // metadata file has file handle of original file fullPath, isMetadataFile := dh.isMetadataFile(fullPath) fh = hashPath(fullPath) cachePath := dh.handleToPath(fh) cacheDir := filepath.Dir(cachePath) err := os.MkdirAll(cacheDir, 0700) if err != nil { fs.Errorf("nfs", "Couldn't create cache file handle directory: %v", err) return fh } fh, err = dh.write(fh, cachePath, fullPath) if err != nil { fs.Errorf("nfs", "Couldn't create cache file handle: %v", err) return fh } // metadata file handle is suffixed with metadataSuffix if isMetadataFile { fh = append(fh, metadataSuffix...) 
} return fh } // Write the fullPath into cachePath returning the possibly updated fh func (dh *diskHandler) diskCacheWrite(fh []byte, cachePath string, fullPath string) ([]byte, error) { return fh, os.WriteFile(cachePath, []byte(fullPath), 0600) } var ( errStaleHandle = &nfs.NFSStatusError{NFSStatus: nfs.NFSStatusStale} ) // Test to see if a fh is a metadata handle and if so return the underlying handle func (dh *diskHandler) isMetadataHandle(fh []byte) (isMetadata bool, newFh []byte, err error) { if dh.metadata == "" { return false, fh, nil } suffix := dh.suffix(fh) if len(suffix) == 0 { // OK return false, fh, nil } else if bytes.Equal(suffix, metadataSuffix) { return true, fh[:len(fh)-len(suffix)], nil } fs.Errorf("nfs", "Bad file handle suffix %X", suffix) return false, nil, errStaleHandle } // FromHandle converts from an opaque handle to the file it represents func (dh *diskHandler) FromHandle(fh []byte) (f billy.Filesystem, splitPath []string, err error) { dh.mu.RLock() defer dh.mu.RUnlock() isMetadata, fh, err := dh.isMetadataHandle(fh) if err != nil { return nil, nil, err } cachePath := dh.handleToPath(fh) fullPathBytes, err := dh.read(fh, cachePath) if err != nil { fs.Errorf("nfs", "Stale handle %q: %v", cachePath, err) return nil, nil, errStaleHandle } if isMetadata { fullPathBytes = append(fullPathBytes, []byte(dh.metadata)...) 
} splitPath = strings.Split(string(fullPathBytes), "/") return dh.billyFS, splitPath, nil } // Read the contents of (fh, cachePath) func (dh *diskHandler) diskCacheRead(fh []byte, cachePath string) ([]byte, error) { return os.ReadFile(cachePath) } // Invalidate the handle passed - used on rename and delete func (dh *diskHandler) InvalidateHandle(f billy.Filesystem, fh []byte) error { dh.mu.Lock() defer dh.mu.Unlock() isMetadata, fh, err := dh.isMetadataHandle(fh) if err != nil { return err } if isMetadata { // Can't invalidate a metadata handle as it is synthetic return nil } cachePath := dh.handleToPath(fh) err = dh.remove(fh, cachePath) if err != nil { fs.Errorf("nfs", "Failed to remove handle %q: %v", cachePath, err) } return nil } // Remove the (fh, cachePath) file func (dh *diskHandler) diskCacheRemove(fh []byte, cachePath string) error { return os.Remove(cachePath) } // Return a suffix for the file handle or nil func (dh *diskHandler) diskCacheSuffix(fh []byte) []byte { if len(fh) <= md5.Size { return nil } return fh[md5.Size:] } // HandleLimit exports how many file handles can be safely stored by this cache. func (dh *diskHandler) HandleLimit() int { return math.MaxInt }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/nfs/cache_test.go
cmd/serve/nfs/cache_test.go
//go:build unix package nfs import ( "context" "fmt" "strings" "sync" "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // NB to test the symlink cache, running with elevated permissions is needed const testSymlinkCache = "go test -c && sudo setcap cap_dac_read_search+ep ./nfs.test && ./nfs.test -test.v -test.run TestCache/symlink" // Check basic CRUD operations func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) { isMetadata := strings.HasSuffix(fileName, ".metadata") // Check reading a non existent handle returns an error _, _, err := c.FromHandle([]byte{10}) assert.Error(t, err) // Write a handle splitPath := []string{"dir", fileName} fh := c.ToHandle(h.billyFS, splitPath) assert.True(t, len(fh) > 0) if isMetadata { assert.Equal(t, metadataSuffix, fh[len(fh)-len(metadataSuffix):]) } else { assert.NotEqual(t, metadataSuffix, fh[len(fh)-len(metadataSuffix):]) } // Read the handle back newFs, newSplitPath, err := c.FromHandle(fh) require.NoError(t, err) assert.Equal(t, h.billyFS, newFs) assert.Equal(t, splitPath, newSplitPath) // Invalidate the handle err = c.InvalidateHandle(h.billyFS, fh) require.NoError(t, err) // Invalidate the handle twice err = c.InvalidateHandle(h.billyFS, fh) require.NoError(t, err) // Check the handle is gone and returning stale handle error _, _, err = c.FromHandle(fh) if !isMetadata { require.Error(t, err) assert.Equal(t, errStaleHandle, err) } else { // Can't invalidate metadata handles require.NoError(t, err) } } // Thrash the cache operations in parallel on different files func testCacheThrashDifferent(t *testing.T, h *Handler, c Cache) { var wg sync.WaitGroup for i := range 100 { wg.Add(1) go func() { defer wg.Done() testCacheCRUD(t, h, c, fmt.Sprintf("file-%d", i)) }() } wg.Wait() } // Thrash the cache operations in parallel on the same file func testCacheThrashSame(t *testing.T, h 
*Handler, c Cache) { var wg sync.WaitGroup for range 100 { wg.Add(1) go func() { defer wg.Done() // Write a handle splitPath := []string{"file"} fh := c.ToHandle(h.billyFS, splitPath) assert.True(t, len(fh) > 0) // Read the handle back newFs, newSplitPath, err := c.FromHandle(fh) if err != nil { assert.Equal(t, errStaleHandle, err) } else { require.NoError(t, err) assert.Equal(t, h.billyFS, newFs) assert.Equal(t, splitPath, newSplitPath) } // Invalidate the handle err = c.InvalidateHandle(h.billyFS, fh) require.NoError(t, err) // Check the handle is gone and returning stale handle error _, _, err = c.FromHandle(fh) if err != nil { require.Error(t, err) assert.Equal(t, errStaleHandle, err) } }() } wg.Wait() } func TestCache(t *testing.T) { // Quieten the flood of ERROR messages! ci := fs.GetConfig(context.Background()) oldLogLevel := ci.LogLevel ci.LogLevel = fs.LogLevelEmergency //ci.LogLevel = fs.LogLevelDebug defer func() { ci.LogLevel = oldLogLevel }() billyFS := &FS{nil} // place holder billyFS for _, cacheType := range []handleCache{cacheMemory, cacheDisk, cacheSymlink} { t.Run(cacheType.String(), func(t *testing.T) { h := &Handler{ vfs: vfs.New(object.MemoryFs, nil), billyFS: billyFS, } h.vfs.Opt.MetadataExtension = ".metadata" h.opt.HandleLimit = 1000 h.opt.HandleCache = cacheType h.opt.HandleCacheDir = t.TempDir() c, err := h.getCache() if err == ErrorSymlinkCacheNotSupported { t.Skip(err.Error()) } if err == ErrorSymlinkCacheNoPermission { t.Skip("Need more permissions to run symlink cache tests: " + testSymlinkCache) } require.NoError(t, err) t.Run("Empty", func(t *testing.T) { // Write a handle splitPath := []string{""} fh := c.ToHandle(h.billyFS, splitPath) assert.True(t, len(fh) > 0) // Read the handle back newFs, newSplitPath, err := c.FromHandle(fh) require.NoError(t, err) assert.Equal(t, h.billyFS, newFs) assert.Equal(t, splitPath, newSplitPath) testCacheCRUD(t, h, c, "file") }) t.Run("CRUD", func(t *testing.T) { testCacheCRUD(t, h, c, "file") }) // 
NB the default caching handler is not thread safe! if cacheType != cacheMemory { t.Run("ThrashDifferent", func(t *testing.T) { testCacheThrashDifferent(t, h, c) }) t.Run("ThrashSame", func(t *testing.T) { testCacheThrashSame(t, h, c) }) // Metadata file handles only supported on non memory t.Run("CRUDMetadata", func(t *testing.T) { testCacheCRUD(t, h, c, "file.metadata") }) } }) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/nfs/symlink_cache_other.go
cmd/serve/nfs/symlink_cache_other.go
//go:build unix && !linux package nfs // Turn the diskHandler into a symlink cache func (dh *diskHandler) makeSymlinkCache() error { return ErrorSymlinkCacheNotSupported }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/nfs/filesystem.go
cmd/serve/nfs/filesystem.go
//go:build unix

package nfs

import (
	"os"
	"path"
	"strings"
	"time"

	billy "github.com/go-git/go-billy/v5"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/willscott/go-nfs/file"
)

// setSys sets the Sys() call up for the vfs.Node passed in
//
// The billy abstraction layer does not extend to exposing `uid` and `gid`
// ownership of files. If ownership is important to your file system, you
// will need to ensure that the `os.FileInfo` meets additional constraints.
// In particular, the `Sys()` escape hatch is queried by this library, and
// if your file system populates a [`syscall.Stat_t`](https://golang.org/pkg/syscall/#Stat_t)
// concrete struct, the ownership specified in that object will be used.
// It can also return a file.FileInfo which is easier to manage cross platform
func setSys(fi os.FileInfo) {
	node, ok := fi.(vfs.Node)
	if !ok {
		// Should not happen: the VFS only hands out vfs.Node implementations
		fs.Errorf(fi, "internal error: %T is not a vfs.Node", fi)
		return
	}
	vfs := node.VFS()
	// Set the UID and GID for the node passed in from the VFS defaults.
	stat := file.FileInfo{
		Nlink:  1,
		UID:    vfs.Opt.UID,
		GID:    vfs.Opt.GID,
		Fileid: node.Inode(), // without this mounting doesn't work on Linux
	}
	node.SetSys(&stat)
}

// FS is our wrapper around the VFS to properly support billy.Filesystem interface
type FS struct {
	vfs *vfs.VFS // the VFS being exposed over NFS
}

// ReadDir implements read dir
func (f *FS) ReadDir(path string) (dir []os.FileInfo, err error) {
	defer log.Trace(path, "")("items=%d, err=%v", &dir, &err)
	dir, err = f.vfs.ReadDir(path)
	if err != nil {
		return nil, err
	}
	// Attach ownership/inode info to every entry for the NFS layer
	for _, fi := range dir {
		setSys(fi)
	}
	return dir, nil
}

// Create implements creating new files
func (f *FS) Create(filename string) (node billy.File, err error) {
	defer log.Trace(filename, "")("%v, err=%v", &node, &err)
	return f.vfs.Create(filename)
}

// Open opens a file
func (f *FS) Open(filename string) (node billy.File, err error) {
	defer log.Trace(filename, "")("%v, err=%v", &node, &err)
	return f.vfs.Open(filename)
}

// OpenFile opens a file
func (f *FS) OpenFile(filename string, flag int, perm os.FileMode) (node billy.File, err error) {
	defer log.Trace(filename, "flag=0x%X, perm=%v", flag, perm)("%v, err=%v", &node, &err)
	return f.vfs.OpenFile(filename, flag, perm)
}

// Stat gets the file stat
func (f *FS) Stat(filename string) (fi os.FileInfo, err error) {
	defer log.Trace(filename, "")("fi=%v, err=%v", &fi, &err)
	fi, err = f.vfs.Stat(filename)
	if err != nil {
		return nil, err
	}
	setSys(fi)
	return fi, nil
}

// Rename renames a file
func (f *FS) Rename(oldpath, newpath string) (err error) {
	defer log.Trace(oldpath, "newpath=%q", newpath)("err=%v", &err)
	return f.vfs.Rename(oldpath, newpath)
}

// Remove deletes a file
func (f *FS) Remove(filename string) (err error) {
	defer log.Trace(filename, "")("err=%v", &err)
	return f.vfs.Remove(filename)
}

// Join joins path elements
func (f *FS) Join(elem ...string) string {
	return path.Join(elem...)
}

// TempFile is not implemented
func (f *FS) TempFile(dir, prefix string) (node billy.File, err error) {
	defer log.Trace(dir, "prefix=%q", prefix)("node=%v, err=%v", &node, &err)
	return nil, os.ErrInvalid
}

// MkdirAll creates a directory and all the ones above it
// it does not redirect to VFS.MkDirAll because that one doesn't
// honor the permissions
func (f *FS) MkdirAll(filename string, perm os.FileMode) (err error) {
	defer log.Trace(filename, "perm=%v", perm)("err=%v", &err)
	parts := strings.Split(filename, "/")
	for i := range parts {
		current := strings.Join(parts[:i+1], "/")
		_, err := f.Stat(current)
		// NOTE(review): Stat errors other than ENOENT are deliberately
		// ignored here - the next Mkdir will surface any real problem
		if err == vfs.ENOENT {
			err = f.vfs.Mkdir(current, perm)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// Lstat gets the stats for symlink
//
// NOTE(review): this delegates to vfs.Stat, the same as Stat - presumably
// the VFS handles links itself; confirm symlinks are not followed here.
func (f *FS) Lstat(filename string) (fi os.FileInfo, err error) {
	defer log.Trace(filename, "")("fi=%v, err=%v", &fi, &err)
	fi, err = f.vfs.Stat(filename)
	if err != nil {
		return nil, err
	}
	setSys(fi)
	return fi, nil
}

// Symlink creates a link pointing to target
func (f *FS) Symlink(target, link string) (err error) {
	defer log.Trace(target, "link=%q", link)("err=%v", &err)
	return f.vfs.Symlink(target, link)
}

// Readlink reads the contents of link
func (f *FS) Readlink(link string) (result string, err error) {
	defer log.Trace(link, "")("result=%q, err=%v", &result, &err)
	return f.vfs.Readlink(link)
}

// Chmod changes the file modes
func (f *FS) Chmod(name string, mode os.FileMode) (err error) {
	defer log.Trace(name, "mode=%v", mode)("err=%v", &err)
	file, err := f.vfs.Open(name)
	if err != nil {
		return err
	}
	defer func() {
		if err := file.Close(); err != nil {
			// use %v: %e is a floating-point verb and mangles errors
			fs.Logf(f, "Error while closing file: %v", err)
		}
	}()
	err = file.Chmod(mode)
	// Mask Chmod not implemented
	if err == vfs.ENOSYS {
		err = nil
	}
	return err
}

// Lchown changes the owner of symlink
func (f *FS) Lchown(name string, uid, gid int) (err error) {
	defer log.Trace(name, "uid=%d, gid=%d", uid, gid)("err=%v", &err)
	return f.Chown(name, uid, gid)
}

// Chown changes owner of the file
func (f *FS) Chown(name string, uid, gid int) (err error) {
	defer log.Trace(name, "uid=%d, gid=%d", uid, gid)("err=%v", &err)
	file, err := f.vfs.Open(name)
	if err != nil {
		return err
	}
	defer func() {
		if err := file.Close(); err != nil {
			// use %v: %e is a floating-point verb and mangles errors
			fs.Logf(f, "Error while closing file: %v", err)
		}
	}()
	return file.Chown(uid, gid)
}

// Chtimes changes the access time and modified time
func (f *FS) Chtimes(name string, atime time.Time, mtime time.Time) (err error) {
	defer log.Trace(name, "atime=%v, mtime=%v", atime, mtime)("err=%v", &err)
	return f.vfs.Chtimes(name, atime, mtime)
}

// Chroot is not supported in VFS
func (f *FS) Chroot(path string) (FS billy.Filesystem, err error) {
	defer log.Trace(path, "")("FS=%v, err=%v", &FS, &err)
	return nil, os.ErrInvalid
}

// Root returns the root of a VFS
func (f *FS) Root() (root string) {
	defer log.Trace(nil, "")("root=%q", &root)
	return f.vfs.Fs().Root()
}

// Capabilities exports the filesystem capabilities
//
// With the cache off the filesystem is read-only, so the write and
// truncate capabilities are withheld.
func (f *FS) Capabilities() (caps billy.Capability) {
	defer log.Trace(nil, "")("caps=%v", &caps)
	if f.vfs.Opt.CacheMode == vfscommon.CacheModeOff {
		return billy.ReadCapability | billy.SeekCapability
	}
	return billy.WriteCapability | billy.ReadCapability |
		billy.ReadAndWriteCapability | billy.SeekCapability | billy.TruncateCapability
}

// Interface check
var (
	_ billy.Filesystem = (*FS)(nil)
	_ billy.Change     = (*FS)(nil)
)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/nfs/server.go
cmd/serve/nfs/server.go
//go:build unix package nfs import ( "context" "fmt" "net" nfs "github.com/willscott/go-nfs" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" ) // Server contains everything to run the Server type Server struct { opt Options handler nfs.Handler ctx context.Context // for global config listener net.Listener UnmountedExternally bool } // NewServer creates a new server func NewServer(ctx context.Context, vfs *vfs.VFS, opt *Options) (s *Server, err error) { if vfs.Opt.CacheMode == vfscommon.CacheModeOff { fs.LogPrintf(fs.LogLevelWarning, ctx, "NFS writes don't work without a cache, the filesystem will be served read-only") } // Our NFS server doesn't have any authentication, we run it on localhost and random port by default if opt.ListenAddr == "" { opt.ListenAddr = "localhost:" } s = &Server{ ctx: ctx, opt: *opt, } s.handler, err = NewHandler(ctx, vfs, opt) if err != nil { return nil, fmt.Errorf("failed to make NFS handler: %w", err) } s.listener, err = net.Listen("tcp", s.opt.ListenAddr) if err != nil { return nil, fmt.Errorf("failed to open listening socket: %w", err) } return s, nil } // Addr returns the listening address of the server func (s *Server) Addr() net.Addr { return s.listener.Addr() } // Shutdown stops the server func (s *Server) Shutdown() error { return s.listener.Close() } // Serve starts the server func (s *Server) Serve() (err error) { fs.Logf(nil, "NFS Server running at %s\n", s.listener.Addr()) return nfs.Serve(s.listener, s.handler) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/nfs/nfs_unsupported.go
cmd/serve/nfs/nfs_unsupported.go
// For unsupported architectures //go:build !unix // Package nfs is not supported on non-Unix platforms package nfs import ( "github.com/spf13/cobra" ) // Command is just nil for unsupported platforms var Command *cobra.Command
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/nfs/nfs_test.go
cmd/serve/nfs/nfs_test.go
//go:build unix // The serving is tested in cmd/nfsmount - here we test anything else package nfs import ( "testing" _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/cmd/serve/servetest" "github.com/rclone/rclone/fs/rc" ) func TestRc(t *testing.T) { servetest.TestRc(t, rc.Params{ "type": "nfs", "vfs_cache_mode": "off", }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/nfs/handler.go
cmd/serve/nfs/handler.go
//go:build unix

package nfs

import (
	"context"
	"fmt"
	"net"
	"strings"

	"github.com/go-git/go-billy/v5"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/vfs"
	"github.com/willscott/go-nfs"
)

// Handler returns a NFS backing that exposes a given file system in response to all mount requests.
type Handler struct {
	vfs     *vfs.VFS // the VFS being served
	opt     Options  // a copy of the options, with HandleLimit normalised
	billyFS *FS      // billy wrapper around vfs, returned from Mount
	Cache            // embedded handle cache providing To/FromHandle etc.
}

// NewHandler creates a handler for the provided filesystem
func NewHandler(ctx context.Context, vfs *vfs.VFS, opt *Options) (handler nfs.Handler, err error) {
	ci := fs.GetConfig(ctx)
	h := &Handler{
		vfs:     vfs,
		opt:     *opt,
		billyFS: &FS{vfs: vfs},
	}
	// Clamp the handle limit into its valid range before building the cache
	h.opt.HandleLimit = h.opt.Limit()
	h.Cache, err = h.getCache()
	if err != nil {
		return nil, fmt.Errorf("failed to make cache: %w", err)
	}
	// Map rclone's log level onto go-nfs's logger level
	var level nfs.LogLevel
	switch {
	case ci.LogLevel >= fs.LogLevelDebug: // Debug level, needs -vv
		level = nfs.TraceLevel
	case ci.LogLevel >= fs.LogLevelInfo: // Transfers, needs -v
		level = nfs.InfoLevel
	case ci.LogLevel >= fs.LogLevelNotice: // Normal logging, -q suppresses
		level = nfs.WarnLevel
	case ci.LogLevel >= fs.LogLevelError: // Error - can't be suppressed
		level = nfs.ErrorLevel
	default:
		level = nfs.WarnLevel
	}
	nfs.SetLogger(&logger{level: level})
	return h, nil
}

// Mount backs Mount RPC Requests, allowing for access control policies.
//
// All mount requests are accepted with null auth and served the same billyFS.
func (h *Handler) Mount(ctx context.Context, conn net.Conn, req nfs.MountRequest) (status nfs.MountStatus, hndl billy.Filesystem, auths []nfs.AuthFlavor) {
	auths = []nfs.AuthFlavor{nfs.AuthFlavorNull}
	return nfs.MountStatusOk, h.billyFS, auths
}

// Change provides an interface for updating file attributes.
func (h *Handler) Change(fs billy.Filesystem) billy.Change {
	if c, ok := fs.(billy.Change); ok {
		return c
	}
	return nil
}

// FSStat provides information about a filesystem.
func (h *Handler) FSStat(ctx context.Context, f billy.Filesystem, s *nfs.FSStat) error {
	total, _, free := h.vfs.Statfs()
	s.TotalSize = uint64(total)
	s.FreeSize = uint64(free)
	s.AvailableSize = uint64(free)
	return nil
}

// ToHandle takes a file and represents it with an opaque handle to reference it.
// In stateless nfs (when it's serving a unix fs) this can be the device + inode
// but we can generalize with a stateful local cache of handed out IDs.
func (h *Handler) ToHandle(f billy.Filesystem, s []string) (b []byte) {
	defer log.Trace("nfs", "path=%q", s)("handle=%X", &b)
	return h.Cache.ToHandle(f, s)
}

// FromHandle converts from an opaque handle to the file it represents
func (h *Handler) FromHandle(b []byte) (f billy.Filesystem, s []string, err error) {
	defer log.Trace("nfs", "handle=%X", b)("path=%q, err=%v", &s, &err)
	return h.Cache.FromHandle(b)
}

// HandleLimit exports how many file handles can be safely stored by this cache.
func (h *Handler) HandleLimit() int {
	return h.Cache.HandleLimit()
}

// InvalidateHandle invalidates the handle passed - used on rename and delete
func (h *Handler) InvalidateHandle(f billy.Filesystem, b []byte) (err error) {
	defer log.Trace("nfs", "handle=%X", b)("err=%v", &err)
	return h.Cache.InvalidateHandle(f, b)
}

// Limit overrides the --nfs-cache-handle-limit value if out-of-range
//
// Negative means effectively unlimited (1,000,000); values below the
// minimum of 5 are raised to 5.
func (o *Options) Limit() int {
	if o.HandleLimit < 0 {
		return 1000000
	}
	if o.HandleLimit <= 5 {
		return 5
	}
	return o.HandleLimit
}

// OnUnmountFunc registers a function to call when externally unmounted
var OnUnmountFunc func()

// onUnmount is called when an unmount request is detected (see Tracef below)
func onUnmount() {
	fs.Infof(nil, "unmount detected")
	if OnUnmountFunc != nil {
		OnUnmountFunc()
	}
}

// logger handles go-nfs logs and reroutes them to rclone's logging system
type logger struct {
	level nfs.LogLevel // messages below this level are dropped
}

// logPrint intercepts go-nfs logs and calls rclone's log system instead
func (l *logger) logPrint(level fs.LogLevel, args ...any) {
	fs.LogPrintf(level, "nfs", "%s", fmt.Sprint(args...))
}

// logPrintf intercepts go-nfs logs and calls rclone's log system instead
func (l *logger) logPrintf(level fs.LogLevel, format string, args ...any) {
	fs.LogPrintf(level, "nfs", format, args...)
}

// Debug reroutes go-nfs Debug messages to Intercept
func (l *logger) Debug(args ...any) {
	if l.level < nfs.DebugLevel {
		return
	}
	l.logPrint(fs.LogLevelDebug, args...)
}

// Debugf reroutes go-nfs Debugf messages to logPrintf
func (l *logger) Debugf(format string, args ...any) {
	if l.level < nfs.DebugLevel {
		return
	}
	l.logPrintf(fs.LogLevelDebug, format, args...)
}

// Error reroutes go-nfs Error messages to Intercept
func (l *logger) Error(args ...any) {
	if l.level < nfs.ErrorLevel {
		return
	}
	l.logPrint(fs.LogLevelError, args...)
}

// Errorf reroutes go-nfs Errorf messages to logPrintf
func (l *logger) Errorf(format string, args ...any) {
	if l.level < nfs.ErrorLevel {
		return
	}
	l.logPrintf(fs.LogLevelError, format, args...)
}

// Fatal reroutes go-nfs Fatal messages to Intercept
//
// Note this logs at rclone Error level - it does not exit the process.
func (l *logger) Fatal(args ...any) {
	if l.level < nfs.FatalLevel {
		return
	}
	l.logPrint(fs.LogLevelError, args...)
}

// Fatalf reroutes go-nfs Fatalf messages to logPrintf
//
// Note this logs at rclone Error level - it does not exit the process.
func (l *logger) Fatalf(format string, args ...any) {
	if l.level < nfs.FatalLevel {
		return
	}
	l.logPrintf(fs.LogLevelError, format, args...)
}

// GetLevel returns the nfs.LogLevel
func (l *logger) GetLevel() nfs.LogLevel {
	return l.level
}

// Info reroutes go-nfs Info messages to Intercept
func (l *logger) Info(args ...any) {
	if l.level < nfs.InfoLevel {
		return
	}
	l.logPrint(fs.LogLevelInfo, args...)
}

// Infof reroutes go-nfs Infof messages to logPrintf
func (l *logger) Infof(format string, args ...any) {
	if l.level < nfs.InfoLevel {
		return
	}
	l.logPrintf(fs.LogLevelInfo, format, args...)
}

// Panic reroutes go-nfs Panic messages to Intercept
//
// Note this logs at rclone Error level - it does not panic.
func (l *logger) Panic(args ...any) {
	if l.level < nfs.PanicLevel {
		return
	}
	l.logPrint(fs.LogLevelError, args...)
}

// Panicf reroutes go-nfs Panicf messages to logPrintf
//
// Note this logs at rclone Error level - it does not panic.
func (l *logger) Panicf(format string, args ...any) {
	if l.level < nfs.PanicLevel {
		return
	}
	l.logPrintf(fs.LogLevelError, format, args...)
}

// ParseLevel parses the nfs.LogLevel
func (l *logger) ParseLevel(level string) (nfs.LogLevel, error) {
	return nfs.Log.ParseLevel(level)
}

// Print reroutes go-nfs Print messages to Intercept
func (l *logger) Print(args ...any) {
	if l.level < nfs.InfoLevel {
		return
	}
	l.logPrint(fs.LogLevelInfo, args...)
}

// Printf reroutes go-nfs Printf messages to Intercept
func (l *logger) Printf(format string, args ...any) {
	if l.level < nfs.InfoLevel {
		return
	}
	l.logPrintf(fs.LogLevelInfo, format, args...)
}

// SetLevel sets the nfs.LogLevel
func (l *logger) SetLevel(level nfs.LogLevel) {
	l.level = level
}

// Trace reroutes go-nfs Trace messages to Intercept
func (l *logger) Trace(args ...any) {
	if l.level < nfs.DebugLevel {
		return
	}
	l.logPrint(fs.LogLevelDebug, args...)
}

// Tracef reroutes go-nfs Tracef messages to logPrintf
//
// It also sniffs the message text for unmount requests - see below.
func (l *logger) Tracef(format string, args ...any) {
	// FIXME BODGE ... the real fix is probably https://github.com/willscott/go-nfs/pull/28
	// This comes from `Log.Tracef("request: %v", w.req)` in conn.go
	// DEBUG : nfs: request: RPC #3285799202 (mount.Umnt)
	argsS := fmt.Sprint(args...)
	if strings.Contains(argsS, "mount.Umnt") {
		onUnmount()
	}
	if l.level < nfs.DebugLevel {
		return
	}
	l.logPrintf(fs.LogLevelDebug, format, args...)
}

// Warn reroutes go-nfs Warn messages to Intercept
func (l *logger) Warn(args ...any) {
	if l.level < nfs.WarnLevel {
		return
	}
	l.logPrint(fs.LogLevelNotice, args...)
}

// Warnf reroutes go-nfs Warnf messages to logPrintf
func (l *logger) Warnf(format string, args ...any) {
	if l.level < nfs.WarnLevel {
		return
	}
	l.logPrintf(fs.LogLevelNotice, format, args...)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/proxy/proxy.go
cmd/serve/proxy/proxy.go
// Package proxy implements a programmable proxy for rclone serve package proxy import ( "bytes" "context" "crypto/sha256" "crypto/subtle" "encoding/json" "errors" "fmt" "os/exec" "strings" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/obscure" libcache "github.com/rclone/rclone/lib/cache" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" ) // Help contains text describing how to use the proxy var Help = strings.ReplaceAll(`### Auth Proxy If you supply the parameter |--auth-proxy /path/to/program| then rclone will use that program to generate backends on the fly which then are used to authenticate incoming requests. This uses a simple JSON based protocol with input on STDIN and output on STDOUT. **PLEASE NOTE:** |--auth-proxy| and |--authorized-keys| cannot be used together, if |--auth-proxy| is set the authorized keys option will be ignored. There is an example program [bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/bin/test_proxy.py) in the rclone source code. The program's job is to take a |user| and |pass| on the input and turn those into the config for a backend on STDOUT in JSON format. This config will have any default parameters for the backend added, but it won't use configuration from environment variables or command line options - it is the job of the proxy program to make a complete config. 
This config generated must have this extra parameter - |_root| - root to use for the backend And it may have this parameter - |_obscure| - comma separated strings for parameters to obscure If password authentication was used by the client, input to the proxy process (on STDIN) would look similar to this: |||json { "user": "me", "pass": "mypassword" } ||| If public-key authentication was used by the client, input to the proxy process (on STDIN) would look similar to this: |||json { "user": "me", "public_key": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDuwESFdAe14hVS6omeyX7edc...JQdf" } ||| And as an example return this on STDOUT |||json { "type": "sftp", "_root": "", "_obscure": "pass", "user": "me", "pass": "mypassword", "host": "sftp.example.com" } ||| This would mean that an SFTP backend would be created on the fly for the |user| and |pass|/|public_key| returned in the output to the host given. Note that since |_obscure| is set to |pass|, rclone will obscure the |pass| parameter before creating the backend (which is required for sftp backends). The program can manipulate the supplied |user| in any way, for example to make proxy to many different sftp backends, you could make the |user| be |user@example.com| and then set the |host| to |example.com| in the output and the user to |user|. For security you'd probably want to restrict the |host| to a limited list. Note that an internal cache is keyed on |user| so only use that for configuration, don't use |pass| or |public_key|. This also means that if a user's password or public-key is changed the cache will need to expire (which takes 5 mins) before it takes effect. This can be used to build general purpose proxies to any kind of backend that rclone supports. 
`, "|", "`")

// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
	Name:    "auth_proxy",
	Default: "",
	Help:    "A program to use to create the backend from the auth",
}}

// Options is options for creating the proxy
type Options struct {
	AuthProxy string `config:"auth_proxy"`
}

// Opt is the default options
var Opt Options

func init() {
	fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "proxy", Opt: &Opt, Options: OptionsInfo})
}

// Proxy represents a proxy to turn auth requests into a VFS
type Proxy struct {
	cmdLine  []string // broken down command line
	vfsCache *libcache.Cache
	ctx      context.Context // for global config
	Opt      Options
	vfsOpt   vfscommon.Options
}

// cacheEntry is what is stored in the vfsCache
type cacheEntry struct {
	vfs    *vfs.VFS          // stored VFS
	pwHash [sha256.Size]byte // sha256 hash of the password/publicKey
}

// New creates a new proxy with the Options passed in
//
// Any VFS are created with the vfsOpt passed in.
func New(ctx context.Context, opt *Options, vfsOpt *vfscommon.Options) *Proxy {
	return &Proxy{
		ctx:      ctx,
		Opt:      *opt,
		cmdLine:  strings.Fields(opt.AuthProxy),
		vfsCache: libcache.New(),
		vfsOpt:   *vfsOpt,
	}
}

// run the proxy command returning a config map
//
// The input map is passed to the program as JSON on stdin; the program's
// stdout is parsed as JSON into the returned config, with any fields named
// in "_obscure" obscured before returning.
func (p *Proxy) run(in map[string]string) (config configmap.Simple, err error) {
	// Guard against an unset/blank --auth-proxy which would otherwise panic below
	if len(p.cmdLine) == 0 {
		return nil, errors.New("proxy: auth proxy command not configured")
	}
	cmd := exec.Command(p.cmdLine[0], p.cmdLine[1:]...)
	inBytes, err := json.MarshalIndent(in, "", "\t")
	if err != nil {
		return nil, fmt.Errorf("proxy: failed to marshal input: %w", err)
	}
	var stdout, stderr bytes.Buffer
	cmd.Stdin = bytes.NewBuffer(inBytes)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	// Log before running so "Calling proxy" appears before the call, not after
	fs.Debugf(nil, "Calling proxy %v", p.cmdLine)
	start := time.Now()
	err = cmd.Run()
	duration := time.Since(start)
	if err != nil {
		return nil, fmt.Errorf("proxy: failed on %v: %q: %w", p.cmdLine, strings.TrimSpace(stderr.String()), err)
	}
	err = json.Unmarshal(stdout.Bytes(), &config)
	if err != nil {
		return nil, fmt.Errorf("proxy: failed to read output: %q: %w", stdout.String(), err)
	}
	fs.Debugf(nil, "Proxy returned in %v", duration)

	// Obscure any values in the config map that need it
	obscureFields, ok := config.Get("_obscure")
	if ok {
		for key := range strings.SplitSeq(obscureFields, ",") {
			value, ok := config.Get(key)
			if ok {
				obscuredValue, err := obscure.Obscure(value)
				if err != nil {
					return nil, fmt.Errorf("proxy: %w", err)
				}
				config.Set(key, obscuredValue)
			}
		}
	}
	return config, nil
}

// call runs the auth proxy and returns a cacheEntry and an error
func (p *Proxy) call(user, auth string, isPublicKey bool) (value any, err error) {
	var config configmap.Simple
	// Contact the proxy
	if isPublicKey {
		config, err = p.run(map[string]string{
			"user":       user,
			"public_key": auth,
		})
	} else {
		config, err = p.run(map[string]string{
			"user": user,
			"pass": auth,
		})
	}
	if err != nil {
		return nil, err
	}

	// Look for required fields in the answer
	fsName, ok := config.Get("type")
	if !ok {
		return nil, errors.New("proxy: type not set in result")
	}
	root, ok := config.Get("_root")
	if !ok {
		return nil, errors.New("proxy: _root not set in result")
	}

	// Find the backend
	fsInfo, err := fs.Find(fsName)
	if err != nil {
		return nil, fmt.Errorf("proxy: couldn't find backend for %q: %w", fsName, err)
	}

	// base name of config on user name.  This may appear in logs
	name := "proxy-" + user
	fsString := name + ":" + root

	// Look for fs in the VFS cache
	value, err = p.vfsCache.Get(user, func(key string) (value any, ok bool, err error) {
		// Create the Fs from the cache
		f, err := cache.GetFn(p.ctx, fsString, func(ctx context.Context, fsString string) (fs.Fs, error) {
			// Update the config with the default values
			for i := range fsInfo.Options {
				o := &fsInfo.Options[i]
				if _, found := config.Get(o.Name); !found && o.Default != nil && o.String() != "" {
					config.Set(o.Name, o.String())
				}
			}
			return fsInfo.NewFs(ctx, name, root, config)
		})
		if err != nil {
			return nil, false, err
		}
		// We hash the auth here so we don't copy the auth more than we
		// need to in memory. An attacker would find it easier to go
		// after the unencrypted password in memory most likely.
		entry := cacheEntry{
			vfs:    vfs.New(f, &p.vfsOpt),
			pwHash: sha256.Sum256([]byte(auth)),
		}
		return entry, true, nil
	})
	if err != nil {
		return nil, fmt.Errorf("proxy: failed to create backend: %w", err)
	}
	return value, nil
}

// Call runs the auth proxy with the username and password/public key provided
// returning a *vfs.VFS and the key used in the VFS cache.
func (p *Proxy) Call(user, auth string, isPublicKey bool) (VFS *vfs.VFS, vfsKey string, err error) {
	// Look in the cache first
	value, ok := p.vfsCache.GetMaybe(user)

	// If not found then call the proxy for a fresh answer
	if !ok {
		value, err = p.call(user, auth, isPublicKey)
		if err != nil {
			return nil, "", err
		}
	}

	// check we got what we were expecting
	entry, ok := value.(cacheEntry)
	if !ok {
		return nil, "", fmt.Errorf("proxy: value is not cache entry: %#v", value)
	}

	// Check the password / public key is correct in the cached entry. This
	// prevents an attack where subsequent requests for the same
	// user don't have their auth checked. It does mean that if
	// the password is changed, the user will have to wait for
	// cache expiry (5m) before trying again.
	authHash := sha256.Sum256([]byte(auth))
	if subtle.ConstantTimeCompare(authHash[:], entry.pwHash[:]) != 1 {
		if isPublicKey {
			return nil, "", errors.New("proxy: incorrect public key")
		}
		return nil, "", errors.New("proxy: incorrect password")
	}

	return entry.vfs, user, nil
}

// Get VFS from the cache using key - returns nil if not found
func (p *Proxy) Get(key string) *vfs.VFS {
	value, ok := p.vfsCache.GetMaybe(key)
	if !ok {
		return nil
	}
	entry := value.(cacheEntry)
	return entry.vfs
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/proxy/proxy_test.go
cmd/serve/proxy/proxy_test.go
package proxy

import (
	"context"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"encoding/base64"
	"strings"
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/crypto/ssh"
)

// TestRun exercises the auth proxy against the proxy_code.go helper
// program: the raw command runner (p.run), the cache-filling internal
// method (p.call) and the exported entry point (p.Call), with both
// password and public key authentication.
//
// Subtest naming convention: lowercase "call ..." tests the internal
// p.call, capitalised "Call ..." tests the exported p.Call.
func TestRun(t *testing.T) {
	opt := Opt
	cmd := "go run proxy_code.go"
	opt.AuthProxy = cmd
	p := New(context.Background(), &opt, &vfscommon.Opt)

	t.Run("Normal", func(t *testing.T) {
		config, err := p.run(map[string]string{
			"type": "ftp",
			"user": "me",
			"pass": "pass",
			"host": "127.0.0.1",
		})
		require.NoError(t, err)
		assert.Equal(t, configmap.Simple{
			"type":  "ftp",
			"user":  "me-test",
			"pass":  "pass",
			"host":  "127.0.0.1",
			"_root": "",
		}, config)
	})

	t.Run("Error", func(t *testing.T) {
		config, err := p.run(map[string]string{
			"error": "potato",
		})
		assert.Nil(t, config)
		require.Error(t, err)
		require.Contains(t, err.Error(), "potato")
	})

	t.Run("Obscure", func(t *testing.T) {
		config, err := p.run(map[string]string{
			"type":     "ftp",
			"user":     "me",
			"pass":     "pass",
			"host":     "127.0.0.1",
			"_obscure": "pass,user",
		})
		require.NoError(t, err)
		config["user"] = obscure.MustReveal(config["user"])
		config["pass"] = obscure.MustReveal(config["pass"])
		assert.Equal(t, configmap.Simple{
			"type":     "ftp",
			"user":     "me-test",
			"pass":     "pass",
			"host":     "127.0.0.1",
			"_obscure": "pass,user",
			"_root":    "",
		}, config)
	})

	const testUser = "testUser"
	const testPass = "testPass"

	t.Run("call w/Password", func(t *testing.T) {
		// check cache empty
		assert.Equal(t, 0, p.vfsCache.Entries())
		defer p.vfsCache.Clear()

		passwordBytes := []byte(testPass)
		value, err := p.call(testUser, testPass, false)
		require.NoError(t, err)
		entry, ok := value.(cacheEntry)
		require.True(t, ok)

		// check hash is correct in entry
		assert.Equal(t, entry.pwHash, sha256.Sum256(passwordBytes))
		require.NotNil(t, entry.vfs)
		f := entry.vfs.Fs()
		require.NotNil(t, f)
		assert.Equal(t, "proxy-"+testUser, f.Name())
		assert.True(t, strings.HasPrefix(f.String(), "Local file system"))

		// check it is in the cache
		assert.Equal(t, 1, p.vfsCache.Entries())
		cacheValue, ok := p.vfsCache.GetMaybe(testUser)
		assert.True(t, ok)
		assert.Equal(t, value, cacheValue)
	})

	t.Run("Call w/Password", func(t *testing.T) {
		// check cache empty
		assert.Equal(t, 0, p.vfsCache.Entries())
		defer p.vfsCache.Clear()

		vfs, vfsKey, err := p.Call(testUser, testPass, false)
		require.NoError(t, err)
		require.NotNil(t, vfs)
		assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
		assert.Equal(t, testUser, vfsKey)

		// check it is in the cache
		assert.Equal(t, 1, p.vfsCache.Entries())
		cacheValue, ok := p.vfsCache.GetMaybe(testUser)
		assert.True(t, ok)
		cacheEntry, ok := cacheValue.(cacheEntry)
		assert.True(t, ok)
		assert.Equal(t, vfs, cacheEntry.vfs)

		// Test Get works while we have something in the cache
		t.Run("Get", func(t *testing.T) {
			assert.Equal(t, vfs, p.Get(testUser))
			assert.Nil(t, p.Get("unknown"))
		})

		// now try again from the cache
		vfs, vfsKey, err = p.Call(testUser, testPass, false)
		require.NoError(t, err)
		require.NotNil(t, vfs)
		assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
		assert.Equal(t, testUser, vfsKey)

		// check cache is at the same level
		assert.Equal(t, 1, p.vfsCache.Entries())

		// now try again from the cache but with wrong password
		vfs, vfsKey, err = p.Call(testUser, testPass+"wrong", false)
		require.Error(t, err)
		require.Contains(t, err.Error(), "incorrect password")
		require.Nil(t, vfs)
		require.Equal(t, "", vfsKey)

		// check cache is at the same level
		assert.Equal(t, 1, p.vfsCache.Entries())
	})

	// Generate a throwaway RSA key pair used as the "public key"
	// credential for the public key subtests below.
	privateKey, privateKeyErr := rsa.GenerateKey(rand.Reader, 2048)
	if privateKeyErr != nil {
		fs.Fatal(nil, "error generating test private key "+privateKeyErr.Error())
	}
	publicKey, publicKeyError := ssh.NewPublicKey(&privateKey.PublicKey)
	if publicKeyError != nil {
		fs.Fatal(nil, "error generating test public key "+publicKeyError.Error())
	}
	publicKeyString := base64.StdEncoding.EncodeToString(publicKey.Marshal())

	// FIX: this subtest exercises the internal p.call so it is named
	// "call w/PublicKey" (the original had the names of the two public
	// key subtests swapped relative to the password subtests).
	t.Run("call w/PublicKey", func(t *testing.T) {
		// check cache empty
		assert.Equal(t, 0, p.vfsCache.Entries())
		defer p.vfsCache.Clear()

		value, err := p.call(testUser, publicKeyString, true)
		require.NoError(t, err)
		entry, ok := value.(cacheEntry)
		require.True(t, ok)

		// check publicKey hash is correct in entry
		// (FIX: the original had a duplicated require.NoError here
		// instead of the hash check its comment promised)
		assert.Equal(t, entry.pwHash, sha256.Sum256([]byte(publicKeyString)))
		require.NotNil(t, entry.vfs)
		f := entry.vfs.Fs()
		require.NotNil(t, f)
		assert.Equal(t, "proxy-"+testUser, f.Name())
		assert.True(t, strings.HasPrefix(f.String(), "Local file system"))

		// check it is in the cache
		assert.Equal(t, 1, p.vfsCache.Entries())
		cacheValue, ok := p.vfsCache.GetMaybe(testUser)
		assert.True(t, ok)
		assert.Equal(t, value, cacheValue)
	})

	// FIX: this subtest exercises the exported p.Call so it is named
	// "Call w/PublicKey" (see note above the previous subtest).
	t.Run("Call w/PublicKey", func(t *testing.T) {
		// check cache empty
		assert.Equal(t, 0, p.vfsCache.Entries())
		defer p.vfsCache.Clear()

		vfs, vfsKey, err := p.Call(
			testUser,
			publicKeyString,
			true,
		)
		require.NoError(t, err)
		require.NotNil(t, vfs)
		assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
		assert.Equal(t, testUser, vfsKey)

		// check it is in the cache
		assert.Equal(t, 1, p.vfsCache.Entries())
		cacheValue, ok := p.vfsCache.GetMaybe(testUser)
		assert.True(t, ok)
		cacheEntry, ok := cacheValue.(cacheEntry)
		assert.True(t, ok)
		assert.Equal(t, vfs, cacheEntry.vfs)

		// Test Get works while we have something in the cache
		t.Run("Get", func(t *testing.T) {
			assert.Equal(t, vfs, p.Get(testUser))
			assert.Nil(t, p.Get("unknown"))
		})

		// now try again from the cache
		vfs, vfsKey, err = p.Call(testUser, publicKeyString, true)
		require.NoError(t, err)
		require.NotNil(t, vfs)
		assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
		assert.Equal(t, testUser, vfsKey)

		// check cache is at the same level
		assert.Equal(t, 1, p.vfsCache.Entries())

		// now try again from the cache but with wrong public key
		vfs, vfsKey, err = p.Call(testUser, publicKeyString+"wrong", true)
		require.Error(t, err)
		require.Contains(t, err.Error(), "incorrect public key")
		require.Nil(t, vfs)
		require.Equal(t, "", vfsKey)

		// check cache is at the same level
		assert.Equal(t, 1, p.vfsCache.Entries())
	})
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/proxy/proxy_code.go
cmd/serve/proxy/proxy_code.go
//go:build ignore // A simple auth proxy for testing purposes package main import ( "encoding/json" "log" "os" ) func main() { // Read the input var in map[string]string err := json.NewDecoder(os.Stdin).Decode(&in) if err != nil { log.Fatal(err) } // Write the output var out = map[string]string{} for k, v := range in { switch k { case "user": v += "-test" case "error": log.Fatal(v) } out[k] = v } if out["type"] == "" { out["type"] = "local" } if out["_root"] == "" { out["_root"] = "" } json.NewEncoder(os.Stdout).Encode(&out) if err != nil { log.Fatal(err) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/proxy/proxyflags/proxyflags.go
cmd/serve/proxy/proxyflags/proxyflags.go
// Package proxyflags implements command line flags to set up a proxy package proxyflags import ( "github.com/rclone/rclone/cmd/serve/proxy" "github.com/rclone/rclone/fs/config/flags" "github.com/spf13/pflag" ) // AddFlags adds the non filing system specific flags to the command func AddFlags(flagSet *pflag.FlagSet) { flags.AddFlagsFromOptions(flagSet, "", proxy.OptionsInfo) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false