repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/sftp/sftp_test.go
cmd/serve/sftp/sftp_test.go
// Serve sftp tests set up a server and run the integration tests // for the sftp remote against it. // // We skip tests on platforms with troublesome character mappings //go:build !windows && !darwin && !plan9 package sftp import ( "context" "strings" "testing" "github.com/pkg/sftp" _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/cmd/serve/proxy" "github.com/rclone/rclone/cmd/serve/servetest" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/vfs/vfscommon" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( testBindAddress = "localhost:0" testUser = "testuser" testPass = "testpass" ) // check interfaces var ( _ sftp.FileReader = vfsHandler{} _ sftp.FileWriter = vfsHandler{} _ sftp.FileCmder = vfsHandler{} _ sftp.FileLister = vfsHandler{} ) // TestSftp runs the sftp server then runs the unit tests for the // sftp remote against it. func TestSftp(t *testing.T) { // Configure and start the server start := func(f fs.Fs) (configmap.Simple, func()) { opt := Opt opt.ListenAddr = testBindAddress opt.User = testUser opt.Pass = testPass w, err := newServer(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt) require.NoError(t, err) go func() { require.NoError(t, w.Serve()) }() // Read the host and port we started on addr := w.Addr().String() colon := strings.LastIndex(addr, ":") // Config for the backend we'll use to connect to the server config := configmap.Simple{ "type": "sftp", "user": testUser, "pass": obscure.MustObscure(testPass), "host": addr[:colon], "port": addr[colon+1:], } // return a stop function return config, func() { assert.NoError(t, w.Shutdown()) } } servetest.Run(t, "sftp", start) } func TestRc(t *testing.T) { servetest.TestRc(t, rc.Params{ "type": "sftp", "user": "test", "pass": obscure.MustObscure("test"), "vfs_cache_mode": "off", }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/sftp/sftp_unsupported.go
cmd/serve/sftp/sftp_unsupported.go
// Build for sftp for unsupported platforms to stop go complaining // about "no buildable Go source files " //go:build plan9 // Package sftp implements an SFTP server to serve an rclone VFS package sftp import "github.com/spf13/cobra" // Command definition is nil to show not implemented var Command *cobra.Command
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/sftp/sftp.go
cmd/serve/sftp/sftp.go
//go:build !plan9 // Package sftp implements an SFTP server to serve an rclone VFS package sftp import ( "context" "fmt" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/serve" "github.com/rclone/rclone/cmd/serve/proxy" "github.com/rclone/rclone/cmd/serve/proxy/proxyflags" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/lib/systemd" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" "github.com/rclone/rclone/vfs/vfsflags" "github.com/spf13/cobra" "github.com/spf13/pflag" ) // OptionsInfo descripts the Options in use var OptionsInfo = fs.Options{{ Name: "addr", Default: "localhost:2022", Help: "IPaddress:Port or :Port to bind server to", }, { Name: "key", Default: []string{}, Help: "SSH private host key file (Can be multi-valued, leave blank to auto generate)", }, { Name: "authorized_keys", Default: "~/.ssh/authorized_keys", Help: "Authorized keys file", }, { Name: "user", Default: "", Help: "User name for authentication", }, { Name: "pass", Default: "", Help: "Password for authentication", }, { Name: "no_auth", Default: false, Help: "Allow connections with no authentication if set", }, { Name: "stdio", Default: false, Help: "Run an sftp server on stdin/stdout", }} // Options contains options for the http Server type Options struct { ListenAddr string `config:"addr"` // Port to listen on HostKeys []string `config:"key"` // Paths to private host keys AuthorizedKeys string `config:"authorized_keys"` // Path to authorized keys file User string `config:"user"` // single username Pass string `config:"pass"` // password for user NoAuth bool `config:"no_auth"` // allow no authentication on connections Stdio bool `config:"stdio"` // serve on stdio } func init() { fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "sftp", Opt: &Opt, Options: OptionsInfo}) } // Opt is options set by command line flags var 
Opt Options // AddFlags adds flags for the sftp func AddFlags(flagSet *pflag.FlagSet, Opt *Options) { flags.AddFlagsFromOptions(flagSet, "", OptionsInfo) } func init() { vfsflags.AddFlags(Command.Flags()) proxyflags.AddFlags(Command.Flags()) AddFlags(Command.Flags(), &Opt) serve.Command.AddCommand(Command) serve.AddRc("sftp", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) { // Read VFS Opts var vfsOpt = vfscommon.Opt // set default opts err := configstruct.SetAny(in, &vfsOpt) if err != nil { return nil, err } // Read Proxy Opts var proxyOpt = proxy.Opt // set default opts err = configstruct.SetAny(in, &proxyOpt) if err != nil { return nil, err } // Read opts var opt = Opt // set default opts err = configstruct.SetAny(in, &opt) if err != nil { return nil, err } // Create server return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt) }) } // Command definition for cobra var Command = &cobra.Command{ Use: "sftp remote:path", Short: `Serve the remote over SFTP.`, Long: `Run an SFTP server to serve a remote over SFTP. This can be used with an SFTP client or you can make a remote of type [sftp](/sftp) to use with it. You can use the [filter](/filtering) flags (e.g. ` + "`--include`, `--exclude`" + `) to control what is served. The server will respond to a small number of shell commands, mainly md5sum, sha1sum and df, which enable it to provide support for checksums and the about feature when accessed from an sftp remote. Note that this server uses standard 32 KiB packet payload size, which means you must not configure the client to expect anything else, e.g. with the [chunk_size](/sftp/#sftp-chunk-size) option on an sftp remote. The server will log errors. Use ` + "`-v`" + ` to see access logs. ` + "`--bwlimit`" + ` will be respected for file transfers. Use ` + "`--stats`" + ` to control the stats printing. 
You must provide some means of authentication, either with ` + "`--user`/`--pass`" + `, an authorized keys file (specify location with ` + "`--authorized-keys`" + ` - the default is the same as ssh), an ` + "`--auth-proxy`" + `, or set the ` + "`--no-auth`" + ` flag for no authentication when logging in. If you don't supply a host ` + "`--key`" + ` then rclone will generate rsa, ecdsa and ed25519 variants, and cache them for later use in rclone's cache directory (see ` + "`rclone help flags cache-dir`" + `) in the "serve-sftp" directory. By default the server binds to localhost:2022 - if you want it to be reachable externally then supply ` + "`--addr :2022`" + ` for example. This also supports being run with socket activation, in which case it will listen on the first passed FD. It can be configured with .socket and .service unit files as described in <https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html>. Socket activation can be tested ad-hoc with the ` + "`systemd-socket-activate`" + `command: ` + "```console" + ` systemd-socket-activate -l 2222 -- rclone serve sftp :local:vfs/ ` + "```" + ` This will socket-activate rclone on the first connection to port 2222 over TCP. Note that the default of ` + "`--vfs-cache-mode off`" + ` is fine for the rclone sftp backend, but it may not be with other SFTP clients. If ` + "`--stdio`" + ` is specified, rclone will serve SFTP over stdio, which can be used with sshd via ~/.ssh/authorized_keys, for example: ` + "```text" + ` restrict,command="rclone serve sftp --stdio ./photos" ssh-rsa ... ` + "```" + ` On the client you need to set ` + "`--transfers 1`" + ` when using ` + "`--stdio`" + `. Otherwise multiple instances of the rclone server are started by OpenSSH which can lead to "corrupted on transfer" errors. This is the case because the client chooses indiscriminately which server to send commands to while the servers all have different views of the state of the filing system. 
The "restrict" in authorized_keys prevents SHA1SUMs and MD5SUMs from being used. Omitting "restrict" and using ` + "`--sftp-path-override`" + ` to enable checksumming is possible but less secure and you could use the SFTP server provided by OpenSSH in this case. ` + strings.TrimSpace(vfs.Help()+proxy.Help), Annotations: map[string]string{ "versionIntroduced": "v1.48", "groups": "Filter", }, Run: func(command *cobra.Command, args []string) { var f fs.Fs if proxy.Opt.AuthProxy == "" { cmd.CheckArgs(1, 1, command, args) f = cmd.NewFsSrc(args) } else { cmd.CheckArgs(0, 0, command, args) } cmd.Run(false, true, command, func() error { if Opt.Stdio { return serveStdio(f) } s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt) if err != nil { fs.Fatal(nil, fmt.Sprint(err)) } defer systemd.Notify()() return s.Serve() }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/sftp/connection.go
cmd/serve/sftp/connection.go
//go:build !plan9 package sftp import ( "context" "errors" "fmt" "io" "net" "os" "regexp" "strings" "github.com/pkg/sftp" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/terminal" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" "golang.org/x/crypto/ssh" ) func describeConn(c interface { RemoteAddr() net.Addr LocalAddr() net.Addr }) string { return fmt.Sprintf("serve sftp %s->%s", c.RemoteAddr(), c.LocalAddr()) } // Return the exit status of the command type exitStatus struct { RC uint32 } // The incoming exec command type execCommand struct { Command string } var shellUnEscapeRegex = regexp.MustCompile(`\\(.)`) // Unescape a string that was escaped by rclone func shellUnEscape(str string) string { str = strings.ReplaceAll(str, "'\n'", "\n") str = shellUnEscapeRegex.ReplaceAllString(str, `$1`) return str } // Info about the current connection type conn struct { vfs *vfs.VFS handlers sftp.Handlers what string } // execCommand implements an extremely limited number of commands to // interoperate with the rclone sftp backend func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (err error) { binary, args := command, "" before, after, ok := strings.Cut(command, " ") if ok { binary = before args = strings.TrimLeft(after, " ") } args = shellUnEscape(args) fs.Debugf(c.what, "exec command: binary = %q, args = %q", binary, args) switch binary { case "df": about := c.vfs.Fs().Features().About if about == nil { return errors.New("df not supported") } usage, err := about(ctx) if err != nil { return fmt.Errorf("about failed: %w", err) } total, used, free := int64(-1), int64(-1), int64(-1) if usage.Total != nil { total = *usage.Total / 1024 } if usage.Used != nil { used = *usage.Used / 1024 } if usage.Free != nil { free = *usage.Free / 1024 } perc := int64(0) if total > 0 && used >= 0 { perc = (100 * used) / total } _, err = fmt.Fprintf(out, ` Filesystem 1K-blocks Used Available Use%% 
Mounted on /dev/root %d %d %d %d%% / `, total, used, free, perc) if err != nil { return fmt.Errorf("send output failed: %w", err) } case "md5sum": return c.handleHashsumCommand(ctx, out, hash.MD5, args) case "sha1sum": return c.handleHashsumCommand(ctx, out, hash.SHA1, args) case "crc32": return c.handleHashsumCommand(ctx, out, hash.CRC32, args) case "sha256sum": return c.handleHashsumCommand(ctx, out, hash.SHA256, args) case "b3sum": return c.handleHashsumCommand(ctx, out, hash.BLAKE3, args) case "xxh128sum": return c.handleHashsumCommand(ctx, out, hash.XXH128, args) case "xxhsum": argv := strings.SplitN(args, " ", 2) if len(argv) == 0 || argv[0] != "-H2" { return fmt.Errorf("%q not implemented", command) } if len(argv) > 1 { args = argv[1] } else { args = "" } return c.handleHashsumCommand(ctx, out, hash.XXH128, args) case "rclone": argv := strings.SplitN(args, " ", 3) if len(argv) > 1 && argv[0] == "hashsum" { var ht hash.Type if err := ht.Set(argv[1]); err != nil { return err } if len(argv) > 2 { args = argv[2] } else { args = "" } return c.handleHashsumCommand(ctx, out, ht, args) } return fmt.Errorf("%q not implemented", command) case "echo": // Special cases for legacy rclone command detection. // Before rclone v1.49.0 the sftp backend used "echo 'abc' | md5sum" when // detecting hash support, but was then changed to instead just execute // md5sum/sha1sum (without arguments), which is handled above. The following // code is therefore only necessary to support rclone versions older than // v1.49.0 using a sftp remote connected to a rclone serve sftp instance // running a newer version of rclone (e.g. latest). 
switch args { case "'abc' | md5sum": if c.vfs.Fs().Hashes().Contains(hash.MD5) { _, err = fmt.Fprintf(out, "0bee89b07a248e27c83fc3d5951213c1 -\n") if err != nil { return fmt.Errorf("send output failed: %w", err) } } else { return errors.New("md5 hash not supported") } case "'abc' | sha1sum": if c.vfs.Fs().Hashes().Contains(hash.SHA1) { _, err = fmt.Fprintf(out, "03cfd743661f07975fa2f1220c5194cbaff48451 -\n") if err != nil { return fmt.Errorf("send output failed: %w", err) } } else { return errors.New("sha1 hash not supported") } default: _, err = fmt.Fprintf(out, "%s\n", args) if err != nil { return fmt.Errorf("send output failed: %w", err) } } default: return fmt.Errorf("%q not implemented", command) } return nil } // handleHashsumCommand is a helper to execCommand for common functionality of hashsum related commands func (c *conn) handleHashsumCommand(ctx context.Context, out io.Writer, ht hash.Type, args string) (err error) { if !c.vfs.Fs().Hashes().Contains(ht) { return fmt.Errorf("%v hash not supported", ht) } var hashSum string if args == "" { // empty hash for no input switch ht { case hash.MD5: hashSum = "d41d8cd98f00b204e9800998ecf8427e" case hash.SHA1: hashSum = "da39a3ee5e6b4b0d3255bfef95601890afd80709" case hash.CRC32: hashSum = "00000000" case hash.SHA256: hashSum = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" case hash.BLAKE3: hashSum = "af1349b9f5f9a1a6a0404dea36dcc9499bcb25c9adc112b7cc9a93cae41f3262" case hash.XXH3: hashSum = "2d06800538d394c2" case hash.XXH128: hashSum = "99aa06d3014798d86001c324468d497f" default: return fmt.Errorf("%v hash not implemented", ht) } args = "-" } else { node, err := c.vfs.Stat(args) if err != nil { return fmt.Errorf("hash failed finding file %q: %w", args, err) } if node.IsDir() { return errors.New("can't hash directory") } o, ok := node.DirEntry().(fs.ObjectInfo) if !ok { fs.Debugf(args, "File uploading - reading hash from VFS cache") in, err := node.Open(os.O_RDONLY) if err != nil { return 
fmt.Errorf("hash vfs open failed: %w", err) } defer func() { _ = in.Close() }() h, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht)) if err != nil { return fmt.Errorf("hash vfs create multi-hasher failed: %w", err) } _, err = io.Copy(h, in) if err != nil { return fmt.Errorf("hash vfs copy failed: %w", err) } hashSum = h.Sums()[ht] } else { hashSum, err = o.Hash(ctx, ht) if err != nil { return fmt.Errorf("hash failed: %w", err) } } } _, err = fmt.Fprintf(out, "%s %s\n", hashSum, args) if err != nil { return fmt.Errorf("send output failed: %w", err) } return nil } // handle a new incoming channel request func (c *conn) handleChannel(newChannel ssh.NewChannel) { fs.Debugf(c.what, "Incoming channel: %s\n", newChannel.ChannelType()) if newChannel.ChannelType() != "session" { err := newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") fs.Debugf(c.what, "Unknown channel type: %s\n", newChannel.ChannelType()) if err != nil { fs.Errorf(c.what, "Failed to reject unknown channel: %v", err) } return } channel, requests, err := newChannel.Accept() if err != nil { fs.Errorf(c.what, "could not accept channel: %v", err) return } defer func() { err := channel.Close() if err != nil && err != io.EOF { fs.Debugf(c.what, "Failed to close channel: %v", err) } }() fs.Debugf(c.what, "Channel accepted\n") isSFTP := make(chan bool, 1) var command execCommand // Handle out-of-band requests go func(in <-chan *ssh.Request) { for req := range in { fs.Debugf(c.what, "Request: %v\n", req.Type) ok := false var subSystemIsSFTP bool var reply []byte switch req.Type { case "subsystem": fs.Debugf(c.what, "Subsystem: %s\n", req.Payload[4:]) if string(req.Payload[4:]) == "sftp" { ok = true subSystemIsSFTP = true } case "exec": err := ssh.Unmarshal(req.Payload, &command) if err != nil { fs.Errorf(c.what, "ignoring bad exec command: %v", err) } else { ok = true subSystemIsSFTP = false } } fs.Debugf(c.what, " - accepted: %v\n", ok) err = req.Reply(ok, reply) if err != nil { 
fs.Errorf(c.what, "Failed to Reply to request: %v", err) return } if ok { // Wake up main routine after we have responded isSFTP <- subSystemIsSFTP } } }(requests) // Wait for either subsystem "sftp" or "exec" request if <-isSFTP { if err := serveChannel(channel, c.handlers, c.what); err != nil { fs.Errorf(c.what, "Failed to serve SFTP: %v", err) } } else { var rc = uint32(0) err := c.execCommand(context.TODO(), channel, command.Command) if err != nil { rc = 1 _, errPrint := fmt.Fprintf(channel.Stderr(), "%v\n", err) if errPrint != nil { fs.Errorf(c.what, "Failed to write to stderr: %v", errPrint) } fs.Debugf(c.what, "command %q failed with error: %v", command.Command, err) } _, err = channel.SendRequest("exit-status", false, ssh.Marshal(exitStatus{RC: rc})) if err != nil { fs.Errorf(c.what, "Failed to send exit status: %v", err) } } } // Service the incoming Channel channel in go routine func (c *conn) handleChannels(chans <-chan ssh.NewChannel) { for newChannel := range chans { go c.handleChannel(newChannel) } } func serveChannel(rwc io.ReadWriteCloser, h sftp.Handlers, what string) error { fs.Debugf(what, "Starting SFTP server") server := sftp.NewRequestServer(rwc, h) defer func() { err := server.Close() if err != nil && err != io.EOF { fs.Debugf(what, "Failed to close server: %v", err) } }() err := server.Serve() if err != nil && err != io.EOF { return fmt.Errorf("completed with error: %w", err) } fs.Debugf(what, "exited session") return nil } func serveStdio(f fs.Fs) error { if terminal.IsTerminal(int(os.Stdout.Fd())) { return errors.New("refusing to run SFTP server directly on a terminal. 
Please let sshd start rclone, by connecting with sftp or sshfs") } sshChannel := &stdioChannel{ stdin: os.Stdin, stdout: os.Stdout, } handlers := newVFSHandler(vfs.New(f, &vfscommon.Opt)) return serveChannel(sshChannel, handlers, "stdio") } type stdioChannel struct { stdin *os.File stdout *os.File } func (c *stdioChannel) Read(data []byte) (int, error) { return c.stdin.Read(data) } func (c *stdioChannel) Write(data []byte) (int, error) { return c.stdout.Write(data) } func (c *stdioChannel) Close() error { err1 := c.stdin.Close() err2 := c.stdout.Close() if err1 != nil { return err1 } return err2 }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/sftp/connection_test.go
cmd/serve/sftp/connection_test.go
//go:build !plan9 package sftp import ( "fmt" "testing" "github.com/stretchr/testify/assert" ) func TestShellEscape(t *testing.T) { for i, test := range []struct { unescaped, escaped string }{ {"", ""}, {"/this/is/harmless", "/this/is/harmless"}, {"$(rm -rf /)", "\\$\\(rm\\ -rf\\ /\\)"}, {"/test/\n", "/test/'\n'"}, {":\"'", ":\\\"\\'"}, } { got := shellUnEscape(test.escaped) assert.Equal(t, test.unescaped, got, fmt.Sprintf("Test %d unescaped = %q", i, test.unescaped)) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/sftp/server.go
cmd/serve/sftp/server.go
//go:build !plan9 package sftp import ( "bytes" "context" "crypto/ecdsa" "crypto/ed25519" "crypto/elliptic" "crypto/rand" "crypto/rsa" "crypto/subtle" "crypto/x509" "encoding/base64" "encoding/pem" "errors" "fmt" "io" "net" "os" "path/filepath" "strings" "github.com/rclone/rclone/cmd/serve/proxy" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/file" sdActivation "github.com/rclone/rclone/lib/sdactivation" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" "golang.org/x/crypto/ssh" ) // server contains everything to run the server type server struct { f fs.Fs opt Options vfs *vfs.VFS ctx context.Context // for global config config *ssh.ServerConfig listener net.Listener stopped chan struct{} // for waiting on the listener to stop proxy *proxy.Proxy } func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (*server, error) { s := &server{ f: f, ctx: ctx, opt: *opt, stopped: make(chan struct{}), } if proxy.Opt.AuthProxy != "" { s.proxy = proxy.New(ctx, proxyOpt, vfsOpt) } else { s.vfs = vfs.New(f, vfsOpt) } err := s.configure() if err != nil { return nil, fmt.Errorf("sftp configuration failed: %w", err) } return s, nil } // getVFS gets the vfs from s or the proxy func (s *server) getVFS(what string, sshConn *ssh.ServerConn) (VFS *vfs.VFS) { if s.proxy == nil { return s.vfs } if sshConn.Permissions == nil || sshConn.Permissions.Extensions == nil { fs.Infof(what, "SSH Permissions Extensions not found") return nil } key := sshConn.Permissions.Extensions["_vfsKey"] if key == "" { fs.Infof(what, "VFS key not found") return nil } VFS = s.proxy.Get(key) if VFS == nil { fs.Infof(what, "failed to read VFS from cache") return nil } return VFS } // Accept a single connection - run in a go routine as the ssh // authentication can block func (s *server) acceptConnection(nConn net.Conn) { what := describeConn(nConn) // 
Before use, a handshake must be performed on the incoming net.Conn. sshConn, chans, reqs, err := ssh.NewServerConn(nConn, s.config) if err != nil { fs.Errorf(what, "SSH login failed: %v", err) return } fs.Infof(what, "SSH login from %s using %s", sshConn.User(), sshConn.ClientVersion()) // Discard all global out-of-band Requests go ssh.DiscardRequests(reqs) c := &conn{ what: what, vfs: s.getVFS(what, sshConn), } if c.vfs == nil { fs.Infof(what, "Closing unauthenticated connection (couldn't find VFS)") _ = nConn.Close() return } c.handlers = newVFSHandler(c.vfs) // Accept all channels go c.handleChannels(chans) } // Accept connections and call them in a go routine func (s *server) acceptConnections() { for { nConn, err := s.listener.Accept() if err != nil { if strings.Contains(err.Error(), "use of closed network connection") { return } fs.Errorf(nil, "Failed to accept incoming connection: %v", err) continue } go s.acceptConnection(nConn) } } // configure the server // // Based on example server code from golang.org/x/crypto/ssh and server_standalone func (s *server) configure() (err error) { var authorizedKeysMap map[string]struct{} // ensure the user isn't trying to use conflicting flags if proxy.Opt.AuthProxy != "" && s.opt.AuthorizedKeys != "" && s.opt.AuthorizedKeys != Opt.AuthorizedKeys { return errors.New("--auth-proxy and --authorized-keys cannot be used at the same time") } // Load the authorized keys if s.opt.AuthorizedKeys != "" && proxy.Opt.AuthProxy == "" { authKeysFile := env.ShellExpand(s.opt.AuthorizedKeys) authorizedKeysMap, err = loadAuthorizedKeys(authKeysFile) // If user set the flag away from the default then report an error if s.opt.AuthorizedKeys != Opt.AuthorizedKeys { if err != nil { return err } if len(authorizedKeysMap) == 0 { return fmt.Errorf("failed to parse authorized keys") } } fs.Logf(nil, "Loaded %d authorized keys from %q", len(authorizedKeysMap), authKeysFile) } if !s.opt.NoAuth && len(authorizedKeysMap) == 0 && s.opt.User == "" && 
s.opt.Pass == "" && s.proxy == nil { return errors.New("no authorization found, use --user/--pass or --authorized-keys or --no-auth or --auth-proxy") } // An SSH server is represented by a ServerConfig, which holds // certificate details and handles authentication of ServerConns. s.config = &ssh.ServerConfig{ ServerVersion: "SSH-2.0-" + fs.GetConfig(s.ctx).UserAgent, PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { fs.Debugf(describeConn(c), "Password login attempt for %s", c.User()) if s.proxy != nil { // query the proxy for the config _, vfsKey, err := s.proxy.Call(c.User(), string(pass), false) if err != nil { return nil, err } // just return the Key so we can get it back from the cache return &ssh.Permissions{ Extensions: map[string]string{ "_vfsKey": vfsKey, }, }, nil } else if s.opt.User != "" && s.opt.Pass != "" { userOK := subtle.ConstantTimeCompare([]byte(c.User()), []byte(s.opt.User)) passOK := subtle.ConstantTimeCompare(pass, []byte(s.opt.Pass)) if (userOK & passOK) == 1 { return nil, nil } } return nil, fmt.Errorf("password rejected for %q", c.User()) }, PublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) { fs.Debugf(describeConn(c), "Public key login attempt for %s", c.User()) if s.proxy != nil { //query the proxy for the config _, vfsKey, err := s.proxy.Call( c.User(), base64.StdEncoding.EncodeToString(pubKey.Marshal()), true, ) if err != nil { return nil, err } // just return the Key so we can get it back from the cache return &ssh.Permissions{ Extensions: map[string]string{ "_vfsKey": vfsKey, }, }, nil } if _, ok := authorizedKeysMap[string(pubKey.Marshal())]; ok { return &ssh.Permissions{ // Record the public key used for authentication. 
Extensions: map[string]string{ "pubkey-fp": ssh.FingerprintSHA256(pubKey), }, }, nil } return nil, fmt.Errorf("unknown public key for %q", c.User()) }, AuthLogCallback: func(conn ssh.ConnMetadata, method string, err error) { status := "OK" if err != nil { status = err.Error() } fs.Debugf(describeConn(conn), "ssh auth %q from %q: %s", method, conn.ClientVersion(), status) }, NoClientAuth: s.opt.NoAuth, } // Load the private key, from the cache if not explicitly configured keyPaths := s.opt.HostKeys cachePath := filepath.Join(config.GetCacheDir(), "serve-sftp") if len(keyPaths) == 0 { keyPaths = []string{ filepath.Join(cachePath, "id_rsa"), filepath.Join(cachePath, "id_ecdsa"), filepath.Join(cachePath, "id_ed25519"), } } for _, keyPath := range keyPaths { private, err := loadPrivateKey(keyPath) if err != nil && len(s.opt.HostKeys) == 0 { fs.Debugf(nil, "Failed to load %q: %v", keyPath, err) // If loading a cached key failed, make the keys and retry err = file.MkdirAll(cachePath, 0700) if err != nil { return fmt.Errorf("failed to create cache path: %w", err) } if strings.HasSuffix(keyPath, string(os.PathSeparator)+"id_rsa") { const bits = 2048 fs.Logf(nil, "Generating %d bit key pair at %q", bits, keyPath) err = makeRSASSHKeyPair(bits, keyPath+".pub", keyPath) } else if strings.HasSuffix(keyPath, string(os.PathSeparator)+"id_ecdsa") { fs.Logf(nil, "Generating ECDSA p256 key pair at %q", keyPath) err = makeECDSASSHKeyPair(keyPath+".pub", keyPath) } else if strings.HasSuffix(keyPath, string(os.PathSeparator)+"id_ed25519") { fs.Logf(nil, "Generating Ed25519 key pair at %q", keyPath) err = makeEd25519SSHKeyPair(keyPath+".pub", keyPath) } else { return fmt.Errorf("don't know how to generate key pair %q", keyPath) } if err != nil { return fmt.Errorf("failed to create SSH key pair: %w", err) } // reload the new key private, err = loadPrivateKey(keyPath) } if err != nil { return err } fs.Debugf(nil, "Loaded private key from %q", keyPath) s.config.AddHostKey(private) } // Once 
a ServerConfig has been configured, connections can be // accepted. var listener net.Listener // In case we run in a socket-activated environment, listen on (the first) // passed FD. sdListeners, err := sdActivation.Listeners() if err != nil { return fmt.Errorf("unable to acquire listeners: %w", err) } if len(sdListeners) > 0 { if len(sdListeners) > 1 { fs.LogPrintf(fs.LogLevelWarning, nil, "more than one listener passed, ignoring all but the first.\n") } listener = sdListeners[0] } else { listener, err = net.Listen("tcp", s.opt.ListenAddr) if err != nil { return fmt.Errorf("failed to listen for connection: %w", err) } } s.listener = listener return nil } // Serve SFTP until the server is Shutdown func (s *server) Serve() (err error) { fs.Logf(nil, "SFTP server listening on %v\n", s.listener.Addr()) s.acceptConnections() close(s.stopped) return nil } // Addr returns the address the server is listening on func (s *server) Addr() net.Addr { return s.listener.Addr() } // Wait blocks while the listener is open. func (s *server) Wait() { <-s.stopped } // Shutdown shuts the running server down func (s *server) Shutdown() error { err := s.listener.Close() if errors.Is(err, io.ErrUnexpectedEOF) { err = nil } s.Wait() return err } func loadPrivateKey(keyPath string) (ssh.Signer, error) { privateBytes, err := os.ReadFile(keyPath) if err != nil { return nil, fmt.Errorf("failed to load private key: %w", err) } private, err := ssh.ParsePrivateKey(privateBytes) if err != nil { return nil, fmt.Errorf("failed to parse private key: %w", err) } return private, nil } // Public key authentication is done by comparing // the public key of a received connection // with the entries in the authorized_keys file. 
func loadAuthorizedKeys(authorizedKeysPath string) (authorizedKeysMap map[string]struct{}, err error) { authorizedKeysBytes, err := os.ReadFile(authorizedKeysPath) if err != nil { return nil, fmt.Errorf("failed to load authorized keys: %w", err) } authorizedKeysMap = make(map[string]struct{}) for len(authorizedKeysBytes) > 0 { pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(authorizedKeysBytes) if err == nil { authorizedKeysMap[string(pubKey.Marshal())] = struct{}{} authorizedKeysBytes = bytes.TrimSpace(rest) } } return authorizedKeysMap, nil } // makeRSASSHKeyPair make a pair of public and private keys for SSH access. // Public key is encoded in the format for inclusion in an OpenSSH authorized_keys file. // Private Key generated is PEM encoded // // Originally from: https://stackoverflow.com/a/34347463/164234 func makeRSASSHKeyPair(bits int, pubKeyPath, privateKeyPath string) (err error) { privateKey, err := rsa.GenerateKey(rand.Reader, bits) if err != nil { return err } // generate and write private key as PEM privateKeyFile, err := os.OpenFile(privateKeyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return err } defer fs.CheckClose(privateKeyFile, &err) privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)} if err := pem.Encode(privateKeyFile, privateKeyPEM); err != nil { return err } // generate and write public key pub, err := ssh.NewPublicKey(&privateKey.PublicKey) if err != nil { return err } return os.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0644) } // makeECDSASSHKeyPair make a pair of public and private keys for ECDSA SSH access. // Public key is encoded in the format for inclusion in an OpenSSH authorized_keys file. 
// Private Key generated is PEM encoded func makeECDSASSHKeyPair(pubKeyPath, privateKeyPath string) (err error) { privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { return err } // generate and write private key as PEM privateKeyFile, err := os.OpenFile(privateKeyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return err } defer fs.CheckClose(privateKeyFile, &err) buf, err := x509.MarshalECPrivateKey(privateKey) if err != nil { return err } privateKeyPEM := &pem.Block{Type: "EC PRIVATE KEY", Bytes: buf} if err := pem.Encode(privateKeyFile, privateKeyPEM); err != nil { return err } // generate and write public key pub, err := ssh.NewPublicKey(&privateKey.PublicKey) if err != nil { return err } return os.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0644) } // makeEd25519SSHKeyPair make a pair of public and private keys for Ed25519 SSH access. // Public key is encoded in the format for inclusion in an OpenSSH authorized_keys file. // Private Key generated is PEM encoded func makeEd25519SSHKeyPair(pubKeyPath, privateKeyPath string) (err error) { publicKey, privateKey, err := ed25519.GenerateKey(rand.Reader) if err != nil { return err } // generate and write private key as PEM privateKeyFile, err := os.OpenFile(privateKeyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return err } defer fs.CheckClose(privateKeyFile, &err) buf, err := x509.MarshalPKCS8PrivateKey(privateKey) if err != nil { return err } privateKeyPEM := &pem.Block{Type: "PRIVATE KEY", Bytes: buf} if err := pem.Encode(privateKeyFile, privateKeyPEM); err != nil { return err } // generate and write public key pub, err := ssh.NewPublicKey(publicKey) if err != nil { return err } return os.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0644) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/sftp/handler.go
cmd/serve/sftp/handler.go
//go:build !plan9

package sftp

import (
	"io"
	"os"
	"syscall"
	"time"

	"github.com/pkg/sftp"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/vfs"
)

// vfsHandler converts the VFS to be served by SFTP
type vfsHandler struct {
	*vfs.VFS
}

// vfsHandler returns a Handlers object with the test handlers.
func newVFSHandler(vfs *vfs.VFS) sftp.Handlers {
	// The same value implements all four handler interfaces.
	v := vfsHandler{VFS: vfs}
	return sftp.Handlers{
		FileGet:  v,
		FilePut:  v,
		FileCmd:  v,
		FileList: v,
	}
}

// Fileread opens the requested path for reading and returns it as an
// io.ReaderAt for the sftp library to serve download requests from.
func (v vfsHandler) Fileread(r *sftp.Request) (io.ReaderAt, error) {
	file, err := v.OpenFile(r.Filepath, os.O_RDONLY, 0777)
	if err != nil {
		return nil, err
	}
	return file, nil
}

// Filewrite creates/truncates the requested path for writing and returns it
// as an io.WriterAt for the sftp library to serve upload requests to.
func (v vfsHandler) Filewrite(r *sftp.Request) (io.WriterAt, error) {
	file, err := v.OpenFile(r.Filepath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
	if err != nil {
		return nil, err
	}
	return file, nil
}

// Filecmd dispatches non-data SFTP operations (rename, remove, mkdir, ...)
// onto the VFS. Requests are identified by the Method string set by the
// sftp library; unsupported operations return ErrSshFxOpUnsupported.
func (v vfsHandler) Filecmd(r *sftp.Request) error {
	switch r.Method {
	case "Setstat":
		// Only the modification time is honoured; other attribute
		// changes are silently accepted.
		attr := r.Attributes()
		if attr.Mtime != 0 {
			modTime := time.Unix(int64(attr.Mtime), 0)
			err := v.Chtimes(r.Filepath, modTime, modTime)
			if err != nil {
				return err
			}
		}
		return nil
	case "Rename":
		err := v.Rename(r.Filepath, r.Target)
		if err != nil {
			return err
		}
	case "Rmdir", "Remove":
		// VFS Remove handles both files and directories.
		err := v.Remove(r.Filepath)
		if err != nil {
			return err
		}
	case "Mkdir":
		err := v.Mkdir(r.Filepath, 0777)
		if err != nil {
			return err
		}
	case "Symlink":
		// FIXME
		// _, err := v.fetch(r.Filepath)
		// if err != nil {
		// 	return err
		// }
		// link := newMemFile(r.Target, false)
		// link.symlink = r.Filepath
		// v.files[r.Target] = link
		return sftp.ErrSshFxOpUnsupported
	case "Link":
		return sftp.ErrSshFxOpUnsupported
	default:
		return sftp.ErrSshFxOpUnsupported
	}
	return nil
}

// listerat adapts a slice of os.FileInfo to the sftp.ListerAt interface.
type listerat []os.FileInfo

// Modeled after strings.Reader's ReadAt() implementation
func (f listerat) ListAt(ls []os.FileInfo, offset int64) (int, error) {
	var n int
	if offset >= int64(len(f)) {
		return 0, io.EOF
	}
	n = copy(ls, f[offset:])
	if n < len(ls) {
		// Short copy means the listing is exhausted.
		return n, io.EOF
	}
	return n, nil
}

// Filelist serves directory listings ("List"), stat requests ("Stat") and
// (eventually) symlink resolution ("Readlink") from the VFS.
func (v vfsHandler) Filelist(r *sftp.Request) (l sftp.ListerAt, err error) {
	var node vfs.Node
	var handle vfs.Handle
	switch r.Method {
	case "List":
		node, err = v.Stat(r.Filepath)
		if err != nil {
			return nil, err
		}
		if !node.IsDir() {
			return nil, syscall.ENOTDIR
		}
		handle, err = node.Open(os.O_RDONLY)
		if err != nil {
			return nil, err
		}
		// CheckClose folds a close error into the named return err.
		defer fs.CheckClose(handle, &err)
		fis, err := handle.Readdir(-1)
		if err != nil {
			return nil, err
		}
		return listerat(fis), nil
	case "Stat":
		node, err = v.Stat(r.Filepath)
		if err != nil {
			return nil, err
		}
		return listerat([]os.FileInfo{node}), nil
	case "Readlink":
		// FIXME
		// if file.symlink != "" {
		// 	file, err = v.fetch(file.symlink)
		// 	if err != nil {
		// 		return nil, err
		// 	}
		// }
		// return listerat([]os.FileInfo{file}), nil
	}
	return nil, sftp.ErrSshFxOpUnsupported
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/docker/systemd.go
cmd/serve/docker/systemd.go
//go:build linux && !android package docker import ( "os" "github.com/coreos/go-systemd/v22/activation" "github.com/coreos/go-systemd/v22/util" ) func systemdActivationFiles() []*os.File { if util.IsRunningSystemd() { return activation.Files(false) } return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/docker/volume.go
cmd/serve/docker/volume.go
package docker

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"time"

	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/rc"
	"github.com/rclone/rclone/lib/file"
)

// Errors
var (
	ErrVolumeNotFound   = errors.New("volume not found")
	ErrVolumeExists     = errors.New("volume already exists")
	ErrMountpointExists = errors.New("non-empty mountpoint already exists")
)

// Volume keeps volume runtime state
// Public members get persisted in saved state
type Volume struct {
	Name       string    `json:"name"`
	MountPoint string    `json:"mountpoint"`
	CreatedAt  time.Time `json:"created"`
	Fs         string    `json:"fs"`             // remote[,connectString]:path
	Type       string    `json:"type,omitempty"` // same as ":backend:"
	Path       string    `json:"path,omitempty"` // for "remote:path" or ":backend:path"
	Options    VolOpts   `json:"options"`        // all options together
	Mounts     []string  `json:"mounts"`         // mountReqs as a string list
	mountReqs  map[string]any                    // set of docker mount request IDs currently holding the mount
	fsString   string                            // result of merging Fs, Type and Options
	persist    bool                              // whether to write the remote into the rclone config file
	mountType  string                            // requested mount implementation (e.g. "mount", "cmount")
	drv        *Driver                           // owning driver
	mnt        *mountlib.MountPoint              // underlying mount state
}

// VolOpts keeps volume options
type VolOpts map[string]string

// VolInfo represents a volume for Get and List requests
type VolInfo struct {
	Name       string
	Mountpoint string         `json:",omitempty"`
	CreatedAt  string         `json:",omitempty"`
	Status     map[string]any `json:",omitempty"`
}

// newVolume creates a Volume named name under drv.root, applies the docker
// volume options and runs setup. Returns an error if options are invalid or
// the filesystem cannot be initialized.
func newVolume(ctx context.Context, name string, volOpt VolOpts, drv *Driver) (*Volume, error) {
	path := filepath.Join(drv.root, name)
	mnt := &mountlib.MountPoint{
		MountPoint: path,
	}
	vol := &Volume{
		Name:       name,
		MountPoint: path,
		CreatedAt:  time.Now(),
		drv:        drv,
		mnt:        mnt,
		mountReqs:  make(map[string]any),
	}
	err := vol.applyOptions(volOpt)
	if err == nil {
		err = vol.setup(ctx)
	}
	if err != nil {
		return nil, err
	}
	return vol, nil
}

// getInfo returns short digest about volume
func (vol *Volume) getInfo() *VolInfo {
	// refresh vol.Mounts from mountReqs before reporting
	vol.prepareState()
	return &VolInfo{
		Name:       vol.Name,
		CreatedAt:  vol.CreatedAt.Format(time.RFC3339),
		Mountpoint: vol.MountPoint,
		Status:     rc.Params{"Mounts": vol.Mounts},
	}
}

// prepareState prepares volume for saving state
func (vol *Volume) prepareState() {
	// sorted for deterministic JSON output
	vol.Mounts = []string{}
	for id := range vol.mountReqs {
		vol.Mounts = append(vol.Mounts, id)
	}
	sort.Strings(vol.Mounts)
}

// restoreState updates volume from saved state
func (vol *Volume) restoreState(ctx context.Context, drv *Driver) error {
	vol.drv = drv
	vol.mnt = &mountlib.MountPoint{
		MountPoint: vol.MountPoint,
	}
	// re-merge persisted fs/type back into the option map before re-applying
	volOpt := vol.Options
	volOpt["fs"] = vol.Fs
	volOpt["type"] = vol.Type
	if err := vol.applyOptions(volOpt); err != nil {
		return err
	}
	if err := vol.validate(); err != nil {
		return err
	}
	if err := vol.setup(ctx); err != nil {
		return err
	}
	// re-establish every mount request that was active when state was saved
	for _, id := range vol.Mounts {
		if err := vol.mount(id); err != nil {
			return err
		}
	}
	return nil
}

// validate volume
func (vol *Volume) validate() error {
	if vol.Name == "" {
		return errors.New("volume name is required")
	}
	// exactly one of Fs ("remote:path") or Type (":backend:") must be set
	if (vol.Type != "" && vol.Fs != "") || (vol.Type == "" && vol.Fs == "") {
		return errors.New("volume must have either remote or backend type")
	}
	if vol.persist && vol.Type == "" {
		return errors.New("backend type is required to persist remotes")
	}
	if vol.persist && !canPersist {
		return errors.New("using backend type to persist remotes is prohibited")
	}
	if vol.MountPoint == "" {
		return errors.New("mount point is required")
	}
	if vol.mountReqs == nil {
		vol.mountReqs = make(map[string]any)
	}
	return nil
}

// checkMountpoint verifies that mount point is an existing empty directory
func (vol *Volume) checkMountpoint() error {
	path := vol.mnt.MountPoint
	if runtime.GOOS == "windows" {
		// on Windows the mount point itself must not pre-exist,
		// so only its parent directory is checked/created
		path = filepath.Dir(path)
	}
	_, err := os.Lstat(path)
	if os.IsNotExist(err) {
		if err = file.MkdirAll(path, 0700); err != nil {
			return fmt.Errorf("failed to create mountpoint: %s: %w", path, err)
		}
	} else if err != nil {
		return err
	}
	if runtime.GOOS != "windows" {
		if err := mountlib.CheckMountEmpty(path); err != nil {
			return ErrMountpointExists
		}
	}
	return nil
}

// setup volume filesystem
func (vol *Volume) setup(ctx context.Context) error {
	fs.Debugf(nil, "Setup volume %q as %q at path %s", vol.Name, vol.fsString, vol.MountPoint)
	if err := vol.checkMountpoint(); err != nil {
		return err
	}
	if vol.drv.dummy {
		// dummy mode skips real filesystem/mount creation
		return nil
	}
	_, mountFn := mountlib.ResolveMountMethod(vol.mountType)
	if mountFn == nil {
		if vol.mountType != "" {
			return fmt.Errorf("unsupported mount type %q", vol.mountType)
		}
		return errors.New("mount command unsupported by this build")
	}
	vol.mnt.MountFn = mountFn
	if vol.persist {
		// Add remote to config file
		params := rc.Params{}
		for key, val := range vol.Options {
			params[key] = val
		}
		updateMode := config.UpdateRemoteOpt{}
		_, err := config.CreateRemote(ctx, vol.Name, vol.Type, params, updateMode)
		if err != nil {
			return err
		}
	}
	// Use existing remote
	f, err := fs.NewFs(ctx, vol.fsString)
	if err == nil {
		vol.mnt.Fs = f
	}
	return err
}

// remove volume filesystem and mounts
func (vol *Volume) remove(ctx context.Context) error {
	count := len(vol.mountReqs)
	fs.Debugf(nil, "Remove volume %q (count %d)", vol.Name, count)
	if count > 0 {
		return errors.New("volume is in use")
	}
	if !vol.drv.dummy {
		// give the backend a chance to release resources
		shutdownFn := vol.mnt.Fs.Features().Shutdown
		if shutdownFn != nil {
			if err := shutdownFn(ctx); err != nil {
				return err
			}
		}
	}
	if vol.persist {
		// Remove remote from config file
		config.DeleteRemote(vol.Name)
	}
	return nil
}

// clearCache will clear VFS cache for the volume
func (vol *Volume) clearCache() error {
	VFS := vol.mnt.VFS
	if VFS == nil {
		// not mounted - nothing to clear
		return nil
	}
	root, err := VFS.Root()
	if err != nil {
		return fmt.Errorf("error reading root: %v: %w", VFS.Fs(), err)
	}
	root.ForgetAll()
	return nil
}

// mount volume filesystem
func (vol *Volume) mount(id string) error {
	drv := vol.drv
	count := len(vol.mountReqs)
	fs.Debugf(nil, "Mount volume %q for id %q at path %s (count %d)", vol.Name, id, vol.MountPoint, count)
	if _, found := vol.mountReqs[id]; found {
		return errors.New("volume is already mounted by this id")
	}
	if count > 0 {
		// already mounted
		vol.mountReqs[id] = nil
		return nil
	}
	if drv.dummy {
		vol.mountReqs[id] = nil
		return nil
	}
	if vol.mnt.Fs == nil {
		return errors.New("volume filesystem is not ready")
	}
	if _, err := vol.mnt.Mount(); err != nil {
		return err
	}
	vol.mountReqs[id] = nil
	vol.drv.monChan <- false // ask monitor to refresh channels
	return nil
}

// unmount volume
func (vol *Volume) unmount(id string) error {
	count := len(vol.mountReqs)
	fs.Debugf(nil, "Unmount volume %q from id %q at path %s (count %d)", vol.Name, id, vol.MountPoint, count)
	if count == 0 {
		return errors.New("volume is not mounted")
	}
	if _, found := vol.mountReqs[id]; !found {
		return errors.New("volume is not mounted by this id")
	}
	delete(vol.mountReqs, id)
	if len(vol.mountReqs) > 0 {
		return nil // more mounts left
	}
	if vol.drv.dummy {
		return nil
	}
	// last requester gone - tear the real mount down
	mnt := vol.mnt
	if mnt.UnmountFn != nil {
		if err := mnt.UnmountFn(); err != nil {
			return err
		}
	}
	mnt.ErrChan = nil
	mnt.UnmountFn = nil
	mnt.VFS = nil
	vol.drv.monChan <- false // ask monitor to refresh channels
	return nil
}

// unmountAll releases every mount request, returning the first error seen
// while still attempting all of the remaining unmounts.
func (vol *Volume) unmountAll() error {
	var firstErr error
	for id := range vol.mountReqs {
		err := vol.unmount(id)
		if firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/docker/driver.go
cmd/serve/docker/driver.go
package docker

import (
	"context"
	"encoding/json"
	"fmt"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"sort"
	"sync"
	"time"

	"github.com/coreos/go-systemd/v22/daemon"
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/file"
	"github.com/rclone/rclone/vfs/vfscommon"
)

// Driver implements docker driver api
type Driver struct {
	root      string             // base directory for volume mount points
	volumes   map[string]*Volume // all known volumes by name
	statePath string             // where persistent state is saved
	dummy     bool               // disables real mounting
	mntOpt    mountlib.Options
	vfsOpt    vfscommon.Options
	mu        sync.Mutex     // guards volumes and volume state transitions
	exitOnce  sync.Once      // ensures Exit runs only once
	hupChan   chan os.Signal // receives SIGHUP to trigger cache clearing
	monChan   chan bool      // exit if true for exit, refresh if false
}

// NewDriver makes a new docker driver.
// mntOpt/vfsOpt default to the global options when nil. dummy disables real
// mounting; forgetState skips restoring volumes from the saved state file.
func NewDriver(ctx context.Context, root string, mntOpt *mountlib.Options, vfsOpt *vfscommon.Options, dummy, forgetState bool) (*Driver, error) {
	// setup directories
	cacheDir := config.GetCacheDir()
	err := file.MkdirAll(cacheDir, 0700)
	if err != nil {
		return nil, fmt.Errorf("failed to create cache directory: %s: %w", cacheDir, err)
	}

	// setup driver state
	if mntOpt == nil {
		mntOpt = &mountlib.Opt
	}
	if vfsOpt == nil {
		vfsOpt = &vfscommon.Opt
	}
	drv := &Driver{
		root:      root,
		statePath: filepath.Join(cacheDir, stateFile),
		volumes:   map[string]*Volume{},
		mntOpt:    *mntOpt,
		vfsOpt:    *vfsOpt,
		dummy:     dummy,
	}
	// daemonizing makes no sense for a plugin process
	drv.mntOpt.Daemon = false

	// restore from saved state
	if !forgetState {
		if err = drv.restoreState(ctx); err != nil {
			return nil, fmt.Errorf("failed to restore state: %w", err)
		}
	}

	// start mount monitoring
	drv.hupChan = make(chan os.Signal, 1)
	drv.monChan = make(chan bool, 1)
	go drv.monitor()

	// unmount all volumes on exit
	atexit.Register(func() {
		drv.exitOnce.Do(drv.Exit)
	})

	// notify systemd
	if _, err := daemon.SdNotify(false, daemon.SdNotifyReady); err != nil {
		return nil, fmt.Errorf("failed to notify systemd: %w", err)
	}
	return drv, nil
}

// Exit will unmount all currently mounted volumes
func (drv *Driver) Exit() {
	fs.Debugf(nil, "Unmount all volumes")
	drv.mu.Lock()
	defer drv.mu.Unlock()
	reportErr(func() error {
		_, err := daemon.SdNotify(false, daemon.SdNotifyStopping)
		return err
	}())
	drv.monChan <- true // ask monitor to exit
	for _, vol := range drv.volumes {
		reportErr(vol.unmountAll())
		vol.Mounts = []string{} // never persist mounts at exit
	}
	reportErr(drv.saveState())
	drv.dummy = true // no more mounts
}

// monitor all mounts.
// It multiplexes the control channel, the SIGHUP channel and every mounted
// volume's error channel via reflect.Select, rebuilding the case list each
// time the set of mounts changes (signalled by a false on monChan).
func (drv *Driver) monitor() {
	for {
		// https://stackoverflow.com/questions/19992334/how-to-listen-to-n-channels-dynamic-select-statement
		monChan := reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(drv.monChan),
		}
		hupChan := reflect.SelectCase{
			Dir: reflect.SelectRecv,
			// BUGFIX: was drv.monChan, which left drv.hupChan unread so
			// SIGHUP never cleared the cache, and a single control message
			// could be consumed by the wrong select case.
			Chan: reflect.ValueOf(drv.hupChan),
		}
		sources := []reflect.SelectCase{monChan, hupChan}
		volumes := []*Volume{nil, nil} // index-aligned with sources; first two are control cases
		drv.mu.Lock()
		for _, vol := range drv.volumes {
			if vol.mnt.ErrChan != nil {
				errSource := reflect.SelectCase{
					Dir:  reflect.SelectRecv,
					Chan: reflect.ValueOf(vol.mnt.ErrChan),
				}
				sources = append(sources, errSource)
				volumes = append(volumes, vol)
			}
		}
		drv.mu.Unlock()
		fs.Debugf(nil, "Monitoring %d volumes", len(sources)-2)
		idx, val, _ := reflect.Select(sources)
		switch idx {
		case 0:
			// control message: true = exit, false = rebuild the case list
			if val.Bool() {
				fs.Debugf(nil, "Monitoring stopped")
				return
			}
		case 1:
			// user sent SIGHUP to clear the cache
			drv.clearCache()
		default:
			// a mount reported an error or its error channel closed
			vol := volumes[idx]
			if err := val.Interface(); err != nil {
				fs.Logf(nil, "Volume %q unmounted externally: %v", vol.Name, err)
			} else {
				fs.Infof(nil, "Volume %q unmounted externally", vol.Name)
			}
			drv.mu.Lock()
			reportErr(vol.unmountAll())
			drv.mu.Unlock()
		}
	}
}

// clearCache will clear cache of all volumes
func (drv *Driver) clearCache() {
	fs.Debugf(nil, "Clear all caches")
	drv.mu.Lock()
	defer drv.mu.Unlock()
	for _, vol := range drv.volumes {
		reportErr(vol.clearCache())
	}
}

// reportErr logs err (if any) against the docker plugin.
func reportErr(err error) {
	if err != nil {
		fs.Errorf("docker plugin", "%v", err)
	}
}

// Create volume
// To use subpath we are limited to defining a new volume definition via alias
func (drv *Driver) Create(req *CreateRequest) error {
	ctx := context.Background()
	drv.mu.Lock()
	defer drv.mu.Unlock()
	name := req.Name
	fs.Debugf(nil, "Create volume %q", name)
	if vol, _ := drv.getVolume(name); vol != nil {
		return ErrVolumeExists
	}
	vol, err := newVolume(ctx, name, req.Options, drv)
	if err != nil {
		return err
	}
	drv.volumes[name] = vol
	return drv.saveState()
}

// Remove volume
func (drv *Driver) Remove(req *RemoveRequest) error {
	ctx := context.Background()
	drv.mu.Lock()
	defer drv.mu.Unlock()
	vol, err := drv.getVolume(req.Name)
	if err != nil {
		return err
	}
	if err = vol.remove(ctx); err != nil {
		return err
	}
	delete(drv.volumes, vol.Name)
	return drv.saveState()
}

// List volumes handled by the driver
func (drv *Driver) List() (*ListResponse, error) {
	drv.mu.Lock()
	defer drv.mu.Unlock()
	volumeList := drv.listVolumes()
	fs.Debugf(nil, "List: %v", volumeList)
	res := &ListResponse{
		Volumes: []*VolInfo{},
	}
	for _, name := range volumeList {
		vol := drv.volumes[name]
		res.Volumes = append(res.Volumes, vol.getInfo())
	}
	return res, nil
}

// Get volume info
func (drv *Driver) Get(req *GetRequest) (*GetResponse, error) {
	drv.mu.Lock()
	defer drv.mu.Unlock()
	vol, err := drv.getVolume(req.Name)
	if err != nil {
		return nil, err
	}
	return &GetResponse{Volume: vol.getInfo()}, nil
}

// Path returns path of the requested volume
func (drv *Driver) Path(req *PathRequest) (*PathResponse, error) {
	drv.mu.Lock()
	defer drv.mu.Unlock()
	vol, err := drv.getVolume(req.Name)
	if err != nil {
		return nil, err
	}
	return &PathResponse{Mountpoint: vol.MountPoint}, nil
}

// Mount volume
func (drv *Driver) Mount(req *MountRequest) (*MountResponse, error) {
	drv.mu.Lock()
	defer drv.mu.Unlock()
	vol, err := drv.getVolume(req.Name)
	if err == nil {
		err = vol.mount(req.ID)
	}
	if err == nil {
		err = drv.saveState()
	}
	if err != nil {
		return nil, err
	}
	return &MountResponse{Mountpoint: vol.MountPoint}, nil
}

// Unmount volume
func (drv *Driver) Unmount(req *UnmountRequest) error {
	drv.mu.Lock()
	defer drv.mu.Unlock()
	vol, err := drv.getVolume(req.Name)
	if err == nil {
		err = vol.unmount(req.ID)
	}
	if err == nil {
		err = drv.saveState()
	}
	return err
}

// getVolume returns volume by name
func (drv *Driver) getVolume(name string) (*Volume, error) {
	vol := drv.volumes[name]
	if vol == nil {
		return nil, ErrVolumeNotFound
	}
	return vol, nil
}

// listVolumes returns a sorted list of volume names
func (drv *Driver) listVolumes() []string {
	names := []string{}
	for key := range drv.volumes {
		names = append(names, key)
	}
	sort.Strings(names)
	return names
}

// saveState saves volumes handled by driver to persistent store
func (drv *Driver) saveState() error {
	volumeList := drv.listVolumes()
	fs.Debugf(nil, "Save state %v to %s", volumeList, drv.statePath)
	state := []*Volume{}
	for _, key := range volumeList {
		vol := drv.volumes[key]
		vol.prepareState()
		state = append(state, vol)
	}
	data, err := json.Marshal(state)
	if err != nil {
		return fmt.Errorf("failed to marshal state: %w", err)
	}
	ctx := context.Background()
	retries := fs.GetConfig(ctx).LowLevelRetries
	// retry transient write failures with a short random backoff
	for i := 0; i <= retries; i++ {
		err = os.WriteFile(drv.statePath, data, 0600)
		if err == nil {
			return nil
		}
		time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)
	}
	return fmt.Errorf("failed to save state: %w", err)
}

// restoreState recreates volumes from saved driver state.
// A missing or corrupt state file is logged and ignored rather than
// preventing the plugin from starting.
func (drv *Driver) restoreState(ctx context.Context) error {
	fs.Debugf(nil, "Restore state from %s", drv.statePath)
	data, err := os.ReadFile(drv.statePath)
	if os.IsNotExist(err) {
		return nil
	}
	var state []*Volume
	if err == nil {
		err = json.Unmarshal(data, &state)
	}
	if err != nil {
		fs.Logf(nil, "Failed to restore plugin state: %v", err)
		return nil
	}
	for _, vol := range state {
		if err := vol.restoreState(ctx, drv); err != nil {
			fs.Logf(nil, "Failed to restore volume %q: %v", vol.Name, err)
			continue
		}
		drv.volumes[vol.Name] = vol
	}
	return nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/docker/api.go
cmd/serve/docker/api.go
package docker

import (
	"encoding/json"
	"net/http"

	"github.com/go-chi/chi/v5"

	"github.com/rclone/rclone/fs"
)

// Endpoint paths and content type defined by the docker volume plugin protocol.
const (
	contentType  = "application/vnd.docker.plugins.v1.1+json"
	activatePath = "/Plugin.Activate"
	createPath   = "/VolumeDriver.Create"
	getPath      = "/VolumeDriver.Get"
	listPath     = "/VolumeDriver.List"
	removePath   = "/VolumeDriver.Remove"
	pathPath     = "/VolumeDriver.Path"
	mountPath    = "/VolumeDriver.Mount"
	unmountPath  = "/VolumeDriver.Unmount"
	capsPath     = "/VolumeDriver.Capabilities"
)

// CreateRequest is the structure that docker's requests are deserialized to.
type CreateRequest struct {
	Name    string
	Options map[string]string `json:"Opts,omitempty"`
}

// RemoveRequest structure for a volume remove request
type RemoveRequest struct {
	Name string
}

// MountRequest structure for a volume mount request
type MountRequest struct {
	Name string
	ID   string
}

// MountResponse structure for a volume mount response
type MountResponse struct {
	Mountpoint string
}

// UnmountRequest structure for a volume unmount request
type UnmountRequest struct {
	Name string
	ID   string
}

// PathRequest structure for a volume path request
type PathRequest struct {
	Name string
}

// PathResponse structure for a volume path response
type PathResponse struct {
	Mountpoint string
}

// GetRequest structure for a volume get request
type GetRequest struct {
	Name string
}

// GetResponse structure for a volume get response
type GetResponse struct {
	Volume *VolInfo
}

// ListResponse structure for a volume list response
type ListResponse struct {
	Volumes []*VolInfo
}

// CapabilitiesResponse structure for a volume capability response
type CapabilitiesResponse struct {
	Capabilities Capability
}

// Capability represents the list of capabilities a volume driver can return
type Capability struct {
	Scope string
}

// ErrorResponse is a formatted error message that docker can understand
type ErrorResponse struct {
	Err string
}

// newRouter wires each docker volume plugin endpoint to the corresponding
// Driver method: decode request JSON, call the driver, encode the reply.
func newRouter(drv *Driver) http.Handler {
	r := chi.NewRouter()
	r.Post(activatePath, func(w http.ResponseWriter, r *http.Request) {
		// handshake: advertise this plugin as a VolumeDriver
		res := map[string]any{
			"Implements": []string{"VolumeDriver"},
		}
		encodeResponse(w, res, nil, activatePath)
	})
	r.Post(createPath, func(w http.ResponseWriter, r *http.Request) {
		var req CreateRequest
		if decodeRequest(w, r, &req) {
			err := drv.Create(&req)
			encodeResponse(w, nil, err, createPath)
		}
	})
	r.Post(removePath, func(w http.ResponseWriter, r *http.Request) {
		var req RemoveRequest
		if decodeRequest(w, r, &req) {
			err := drv.Remove(&req)
			encodeResponse(w, nil, err, removePath)
		}
	})
	r.Post(mountPath, func(w http.ResponseWriter, r *http.Request) {
		var req MountRequest
		if decodeRequest(w, r, &req) {
			res, err := drv.Mount(&req)
			encodeResponse(w, res, err, mountPath)
		}
	})
	r.Post(pathPath, func(w http.ResponseWriter, r *http.Request) {
		var req PathRequest
		if decodeRequest(w, r, &req) {
			res, err := drv.Path(&req)
			encodeResponse(w, res, err, pathPath)
		}
	})
	r.Post(getPath, func(w http.ResponseWriter, r *http.Request) {
		var req GetRequest
		if decodeRequest(w, r, &req) {
			res, err := drv.Get(&req)
			encodeResponse(w, res, err, getPath)
		}
	})
	r.Post(unmountPath, func(w http.ResponseWriter, r *http.Request) {
		var req UnmountRequest
		if decodeRequest(w, r, &req) {
			err := drv.Unmount(&req)
			encodeResponse(w, nil, err, unmountPath)
		}
	})
	r.Post(listPath, func(w http.ResponseWriter, r *http.Request) {
		res, err := drv.List()
		encodeResponse(w, res, err, listPath)
	})
	r.Post(capsPath, func(w http.ResponseWriter, r *http.Request) {
		res := &CapabilitiesResponse{
			Capabilities: Capability{Scope: pluginScope},
		}
		encodeResponse(w, res, nil, capsPath)
	})
	return r
}

// decodeRequest unmarshals the request body into req. On failure it writes
// a 400 response and returns false, in which case the caller must not reply.
func decodeRequest(w http.ResponseWriter, r *http.Request, req any) bool {
	if err := json.NewDecoder(r.Body).Decode(req); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return false
	}
	return true
}

// encodeResponse writes res (or an ErrorResponse derived from err) as JSON
// using the docker plugin content type; path is used for debug logging only.
func encodeResponse(w http.ResponseWriter, res any, err error, path string) {
	w.Header().Set("Content-Type", contentType)
	if err != nil {
		fs.Debugf(path, "Request returned error: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		res = &ErrorResponse{Err: err.Error()}
	} else if res == nil {
		// docker expects a JSON object even for empty replies
		res = struct{}{}
	}
	if err = json.NewEncoder(w).Encode(res); err != nil {
		fs.Debugf(path, "Response encoding failed: %v", err)
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/docker/serve.go
cmd/serve/docker/serve.go
package docker

import (
	"context"
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"runtime"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/file"
)

// Server connects plugin with docker daemon by protocol
type Server http.Server

// NewServer creates new docker plugin server
func NewServer(drv *Driver) *Server {
	return &Server{Handler: newRouter(drv)}
}

// Shutdown the server
func (s *Server) Shutdown(ctx context.Context) error {
	hs := (*http.Server)(s)
	return hs.Shutdown(ctx)
}

// serve runs the HTTP server on listener. If tempFile is non-empty it is a
// file created by us (spec file or unix socket) and is removed at exit.
func (s *Server) serve(listener net.Listener, addr, tempFile string) error {
	if tempFile != "" {
		atexit.Register(func() {
			// remove spec file or self-created unix socket
			fs.Debugf(nil, "Removing stale file %s", tempFile)
			_ = os.Remove(tempFile)
		})
	}
	hs := (*http.Server)(s)
	return hs.Serve(listener)
}

// ServeUnix makes the handler to listen for requests in a unix socket.
// It also creates the socket file in the right directory for docker to read.
func (s *Server) ServeUnix(path string, gid int) error {
	listener, socketPath, err := newUnixListener(path, gid)
	if err != nil {
		return err
	}
	// empty socketPath means the listener came from systemd socket
	// activation and there is no file of our own to clean up
	if socketPath != "" {
		path = socketPath
		fs.Infof(nil, "Serving unix socket: %s", path)
	} else {
		fs.Infof(nil, "Serving systemd socket")
	}
	return s.serve(listener, path, socketPath)
}

// ServeTCP makes the handler listen for request on a given TCP address.
// It also writes the spec file in the right directory for docker to read.
func (s *Server) ServeTCP(addr, specDir string, tlsConfig *tls.Config, noSpec bool) error {
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	if tlsConfig != nil {
		tlsConfig.NextProtos = []string{"http/1.1"}
		listener = tls.NewListener(listener, tlsConfig)
	}
	// re-read the address in case addr had port 0 (ephemeral)
	addr = listener.Addr().String()
	specFile := ""
	if !noSpec {
		specFile, err = writeSpecFile(addr, "tcp", specDir)
		if err != nil {
			return err
		}
	}
	fs.Infof(nil, "Serving TCP socket: %s", addr)
	return s.serve(listener, addr, specFile)
}

// writeSpecFile writes "proto://addr" into rclone.spec under specDir so the
// docker daemon can discover the plugin endpoint; returns the file path.
func writeSpecFile(addr, proto, specDir string) (string, error) {
	if specDir == "" && runtime.GOOS == "windows" {
		specDir = os.TempDir()
	}
	if specDir == "" {
		specDir = defSpecDir
	}
	if err := file.MkdirAll(specDir, 0755); err != nil {
		return "", err
	}
	specFile := filepath.Join(specDir, "rclone.spec")
	url := fmt.Sprintf("%s://%s", proto, addr)
	if err := os.WriteFile(specFile, []byte(url), 0644); err != nil {
		return "", err
	}
	fs.Debugf(nil, "Plugin spec has been written to %s", specFile)
	return specFile, nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/docker/docker.go
cmd/serve/docker/docker.go
// Package docker serves a remote suitable for use with docker volume api
package docker

import (
	"context"
	_ "embed"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/spf13/cobra"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/cmd/serve"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfsflags"
)

// Package-level settings, some adjustable via command line flags.
var (
	pluginName = "rclone"
	pluginScope = "local"
	baseDir = "/var/lib/docker-volumes/rclone" // default root for volume mount points
	sockDir = "/run/docker/plugins"
	//lint:ignore U1000 unused when not building linux
	defSpecDir = "/etc/docker/plugins"
	stateFile = "docker-plugin.state"
	socketAddr = "" // TCP listening address or empty string for Unix socket
	socketGid = syscall.Getgid()
	canPersist = false // allows writing to config file
	forgetState = false
	noSpec = false
)

//go:embed docker.md
var longHelp string

// help returns the help string cleaned up to simplify appending
func help() string {
	return strings.TrimSpace(longHelp) + "\n\n"
}

func init() {
	cmdFlags := Command.Flags()
	// Add command specific flags
	flags.StringVarP(cmdFlags, &baseDir, "base-dir", "", baseDir, "Base directory for volumes", "")
	flags.StringVarP(cmdFlags, &socketAddr, "socket-addr", "", socketAddr, "Address <host:port> or absolute path (default: /run/docker/plugins/rclone.sock)", "")
	flags.IntVarP(cmdFlags, &socketGid, "socket-gid", "", socketGid, "GID for unix socket (default: current process GID)", "")
	flags.BoolVarP(cmdFlags, &forgetState, "forget-state", "", forgetState, "Skip restoring previous state", "")
	flags.BoolVarP(cmdFlags, &noSpec, "no-spec", "", noSpec, "Do not write spec file", "")
	// Add common mount/vfs flags
	mountlib.AddFlags(cmdFlags)
	vfsflags.AddFlags(cmdFlags)
	// Register with parent command
	serve.Command.AddCommand(Command)
}

// Command definition for cobra
var Command = &cobra.Command{
	Use:   "docker",
	Short: `Serve any remote on docker's volume plugin API.`,
	Long:  help() + strings.TrimSpace(vfs.Help()),
	Annotations: map[string]string{
		"versionIntroduced": "v1.56",
		"groups":            "Filter",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(0, 0, command, args)
		cmd.Run(false, false, command, func() error {
			ctx := context.Background()
			drv, err := NewDriver(ctx, baseDir, nil, nil, false, forgetState)
			if err != nil {
				return err
			}
			srv := NewServer(drv)
			if socketAddr == "" {
				// Listen on unix socket at /run/docker/plugins/<pluginName>.sock
				return srv.ServeUnix(pluginName, socketGid)
			}
			if filepath.IsAbs(socketAddr) {
				// Listen on unix socket at given path
				return srv.ServeUnix(socketAddr, socketGid)
			}
			// otherwise treat socketAddr as host:port for TCP
			return srv.ServeTCP(socketAddr, "", nil, noSpec)
		})
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/docker/options_test.go
cmd/serve/docker/options_test.go
package docker import ( "testing" "time" "github.com/rclone/rclone/cmd/mountlib" "github.com/rclone/rclone/fs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" _ "github.com/rclone/rclone/backend/local" ) func TestApplyOptions(t *testing.T) { vol := &Volume{ Name: "testName", MountPoint: "testPath", drv: &Driver{ root: "testRoot", }, mnt: &mountlib.MountPoint{ MountPoint: "testPath", }, mountReqs: make(map[string]any), } // Happy path volOpt := VolOpts{ "remote": "/tmp/docker", "persist": "FALSE", "mount_type": "potato", // backend options "--local-case-sensitive": "true", "local_no_check_updated": "1", // mount options "debug-fuse": "true", "attr_timeout": "100s", "--async-read": "TRUE", // vfs options "no-modtime": "1", "no_checksum": "true", "--no-seek": "true", } err := vol.applyOptions(volOpt) require.NoError(t, err) // normal options assert.Equal(t, ":local,case_sensitive='true',no_check_updated='1':/tmp/docker", vol.fsString) assert.Equal(t, false, vol.persist) assert.Equal(t, "potato", vol.mountType) // mount options assert.Equal(t, true, vol.mnt.MountOpt.DebugFUSE) assert.Equal(t, fs.Duration(100*time.Second), vol.mnt.MountOpt.AttrTimeout) assert.Equal(t, true, vol.mnt.MountOpt.AsyncRead) // vfs options assert.Equal(t, true, vol.mnt.VFSOpt.NoModTime) assert.Equal(t, true, vol.mnt.VFSOpt.NoChecksum) assert.Equal(t, true, vol.mnt.VFSOpt.NoSeek) // Check errors err = vol.applyOptions(VolOpts{ "debug-fuse": "POTATO", }) require.ErrorContains(t, err, "cannot parse mount options") err = vol.applyOptions(VolOpts{ "no-modtime": "POTATO", }) require.ErrorContains(t, err, "cannot parse vfs options") err = vol.applyOptions(VolOpts{ "remote": "/tmp/docker", "local_not_found": "POTATO", }) require.ErrorContains(t, err, "unsupported backend option") }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/docker/options.go
cmd/serve/docker/options.go
package docker import ( "fmt" "strings" "github.com/rclone/rclone/cmd/mountlib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/vfs/vfscommon" ) // applyOptions configures volume from request options. // // There are 5 special options: // - "remote" aka "fs" determines existing remote from config file // with a path or on-the-fly remote using the ":backend:" syntax. // It is usually named "remote" in documentation but can be aliased as // "fs" to avoid confusion with the "remote" option of some backends. // - "type" is equivalent to the ":backend:" syntax (optional). // - "path" provides explicit on-remote path for "type" (optional). // - "mount-type" can be "mount", "cmount" or "mount2", defaults to // first found (optional). // - "persist" is reserved for future to create remotes persisted // in rclone.conf similar to rcd (optional). // // Unlike rcd we use the flat naming scheme for mount, vfs and backend // options without substructures. Dashes, underscores and mixed case // in option names can be used interchangeably. Option name conflicts // can be resolved in a manner similar to rclone CLI by adding prefixes: // "vfs-", primary mount backend type like "sftp-", and so on. // // After triaging the options are put in MountOpt, VFSOpt or connect // string for actual filesystem setup and in volume.Options for saving // the state. 
// applyOptions triages the raw volume options into mount options, vfs
// options, backend options and the special keys ("remote"/"fs", "type",
// "path", "persist", "mount-type"), then builds the connection string
// in vol.fsString and validates the result.
//
// It returns an error for unparsable paths/options, unknown backend
// types and options not supported by mount, vfs or the chosen backend.
func (vol *Volume) applyOptions(volOpt VolOpts) error {
	// copy options to override later
	mntOpt := &vol.mnt.MountOpt
	vfsOpt := &vol.mnt.VFSOpt
	*mntOpt = vol.drv.mntOpt
	*vfsOpt = vol.drv.vfsOpt

	// vol.Options has all options except "remote" and "type"
	vol.Options = VolOpts{}
	vol.fsString = ""

	var fsName, fsPath, fsType string
	var explicitPath string
	var fsOpt configmap.Simple

	// parse "remote" or "type"
	for key, str := range volOpt {
		switch key {
		case "":
			continue
		case "remote", "fs":
			if str != "" {
				p, err := fspath.Parse(str)
				// p.Name == ":" means an empty on-the-fly backend name,
				// which is invalid
				if err != nil || p.Name == ":" {
					return fmt.Errorf("cannot parse path %q: %w", str, err)
				}
				fsName, fsPath, fsOpt = p.Name, p.Path, p.Config
				vol.Fs = str
			}
		case "type":
			fsType = str
			vol.Type = str
		case "path":
			explicitPath = str
			vol.Path = str
		default:
			// everything else is triaged in the second pass below
			vol.Options[key] = str
		}
	}

	// find options supported by backend
	if strings.HasPrefix(fsName, ":") {
		// ":backend:" syntax - the name is really a backend type
		fsType = fsName[1:]
		fsName = ""
	}
	if fsType == "" {
		// no explicit type: default to local, or look up the named
		// remote's type in the config file
		fsType = "local"
		if fsName != "" {
			var ok bool
			fsType, ok = fs.ConfigMap("", nil, fsName, nil).Get("type")
			if !ok {
				return fs.ErrorNotFoundInConfigFile
			}
		}
	}
	if explicitPath != "" {
		if fsPath != "" {
			fs.Logf(nil, "Explicit path will override connection string")
		}
		fsPath = explicitPath
	}
	fsInfo, err := fs.Find(fsType)
	if err != nil {
		return fmt.Errorf("unknown filesystem type %q", fsType)
	}

	// handle remaining options, override fsOpt
	if fsOpt == nil {
		fsOpt = configmap.Simple{}
	}
	opt := rc.Params{}
	for key, val := range vol.Options {
		opt[key] = val
	}
	mntMap := configmap.Simple{}
	vfsMap := configmap.Simple{}
	for key := range opt {
		var ok bool
		var err error
		// option names are matched case-insensitively with "-"/"_"
		// and a leading "--" treated as equivalent
		normalKey := normalOptName(key)
		underscoreKey := strings.ReplaceAll(normalKey, "-", "_")
		switch normalKey {
		case "persist":
			vol.persist, err = opt.GetBool(key)
			ok = true
		case "mount-type":
			vol.mountType, err = opt.GetString(key)
			ok = true
		}
		if err != nil {
			return fmt.Errorf("cannot parse option %q: %w", key, err)
		}

		if !ok {
			// try to use as a mount option in mntMap
			if mountlib.OptionsInfo.Get(underscoreKey) != nil {
				mntMap[underscoreKey] = vol.Options[key]
				ok = true
			}
		}
		if !ok {
			// try as a vfs option in vfsMap
			if vfscommon.OptionsInfo.Get(underscoreKey) != nil {
				vfsMap[underscoreKey] = vol.Options[key]
				ok = true
			}
		}

		if !ok {
			// try as a backend option in fsOpt (backends use "_" instead of "-")
			fsOptName := strings.TrimPrefix(underscoreKey, fsType+"_")
			hasFsPrefix := underscoreKey != fsOptName
			if !hasFsPrefix || fsInfo.Options.Get(fsOptName) == nil {
				fs.Logf(nil, "Option %q is not supported by backend %q", key, fsType)
				return fmt.Errorf("unsupported backend option %q", key)
			}
			fsOpt[fsOptName], err = opt.GetString(key)
			if err != nil {
				return fmt.Errorf("cannot parse backend option %q: %w", key, err)
			}
		}
	}

	// Parse VFS options
	err = configstruct.Set(vfsMap, vfsOpt)
	if err != nil {
		return fmt.Errorf("cannot parse vfs options: %w", err)
	}

	// Parse Mount options
	err = configstruct.Set(mntMap, mntOpt)
	if err != nil {
		return fmt.Errorf("cannot parse mount options: %w", err)
	}

	// build remote string from fsName, fsType, fsOpt, fsPath
	colon := ":"
	comma := ","
	if fsName == "" {
		// on-the-fly remote: ":backend,opts:path" form
		fsName = ":" + fsType
	}
	connString := fsOpt.String()
	if fsName == "" && fsType == "" {
		colon = ""
		connString = ""
	}
	if connString == "" {
		comma = ""
	}
	vol.fsString = fsName + comma + connString + colon + fsPath

	return vol.validate()
}

// normalOptName normalises an option key: lower-case, strip a leading
// "--" and map "_" to "-" so spellings can be used interchangeably.
func normalOptName(key string) string {
	return strings.ReplaceAll(strings.TrimPrefix(strings.ToLower(key), "--"), "_", "-")
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/docker/docker_test.go
cmd/serve/docker/docker_test.go
//go:build !race package docker_test import ( "bytes" "context" "encoding/json" "fmt" "io" "net" "net/http" "os" "path/filepath" "runtime" "strings" "testing" "time" "github.com/rclone/rclone/cmd/mountlib" "github.com/rclone/rclone/cmd/serve/docker" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/testy" "github.com/rclone/rclone/lib/file" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" _ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/memory" _ "github.com/rclone/rclone/cmd/cmount" _ "github.com/rclone/rclone/cmd/mount" ) func initialise(ctx context.Context, t *testing.T) (string, fs.Fs) { fstest.Initialise() // Make test cache directory testDir, err := fstest.LocalRemote() require.NoError(t, err) err = file.MkdirAll(testDir, 0755) require.NoError(t, err) // Make test file system testFs, err := fs.NewFs(ctx, testDir) require.NoError(t, err) return testDir, testFs } func assertErrorContains(t *testing.T, err error, errString string, msgAndArgs ...any) { assert.Error(t, err) if err != nil { assert.Contains(t, err.Error(), errString, msgAndArgs...) 
} } func assertVolumeInfo(t *testing.T, v *docker.VolInfo, name, path string) { assert.Equal(t, name, v.Name) assert.Equal(t, path, v.Mountpoint) assert.NotEmpty(t, v.CreatedAt) _, err := time.Parse(time.RFC3339, v.CreatedAt) assert.NoError(t, err) } func TestDockerPluginLogic(t *testing.T) { ctx := context.Background() oldCacheDir := config.GetCacheDir() testDir, testFs := initialise(ctx, t) err := config.SetCacheDir(testDir) require.NoError(t, err) defer func() { _ = config.SetCacheDir(oldCacheDir) if !t.Failed() { fstest.Purge(testFs) _ = os.RemoveAll(testDir) } }() // Create dummy volume driver drv, err := docker.NewDriver(ctx, testDir, nil, nil, true, true) require.NoError(t, err) require.NotNil(t, drv) // 1st volume request volReq := &docker.CreateRequest{ Name: "vol1", Options: docker.VolOpts{}, } assertErrorContains(t, drv.Create(volReq), "volume must have either remote or backend") volReq.Options["remote"] = testDir assert.NoError(t, drv.Create(volReq)) path1 := filepath.Join(testDir, "vol1") assert.ErrorIs(t, drv.Create(volReq), docker.ErrVolumeExists) getReq := &docker.GetRequest{Name: "vol1"} getRes, err := drv.Get(getReq) assert.NoError(t, err) require.NotNil(t, getRes) assertVolumeInfo(t, getRes.Volume, "vol1", path1) // 2nd volume request volReq.Name = "vol2" assert.NoError(t, drv.Create(volReq)) path2 := filepath.Join(testDir, "vol2") listRes, err := drv.List() require.NoError(t, err) require.Equal(t, 2, len(listRes.Volumes)) assertVolumeInfo(t, listRes.Volumes[0], "vol1", path1) assertVolumeInfo(t, listRes.Volumes[1], "vol2", path2) // Try prohibited volume options volReq.Name = "vol99" volReq.Options["remote"] = testDir volReq.Options["type"] = "memory" err = drv.Create(volReq) assertErrorContains(t, err, "volume must have either remote or backend") volReq.Options["persist"] = "WrongBoolean" err = drv.Create(volReq) assertErrorContains(t, err, "cannot parse option") volReq.Options["persist"] = "true" delete(volReq.Options, "remote") err = 
drv.Create(volReq) assertErrorContains(t, err, "persist remotes is prohibited") volReq.Options["persist"] = "false" volReq.Options["memory-option-broken"] = "some-value" err = drv.Create(volReq) assertErrorContains(t, err, "unsupported backend option") getReq.Name = "vol99" getRes, err = drv.Get(getReq) assert.Error(t, err) assert.Nil(t, getRes) // Test mount requests mountReq := &docker.MountRequest{ Name: "vol2", ID: "id1", } mountRes, err := drv.Mount(mountReq) assert.NoError(t, err) require.NotNil(t, mountRes) assert.Equal(t, path2, mountRes.Mountpoint) mountRes, err = drv.Mount(mountReq) assert.Error(t, err) assert.Nil(t, mountRes) assertErrorContains(t, err, "already mounted by this id") mountReq.ID = "id2" mountRes, err = drv.Mount(mountReq) assert.NoError(t, err) require.NotNil(t, mountRes) assert.Equal(t, path2, mountRes.Mountpoint) unmountReq := &docker.UnmountRequest{ Name: "vol2", ID: "id1", } err = drv.Unmount(unmountReq) assert.NoError(t, err) err = drv.Unmount(unmountReq) assert.Error(t, err) assertErrorContains(t, err, "not mounted by this id") // Simulate plugin restart drv2, err := docker.NewDriver(ctx, testDir, nil, nil, true, false) assert.NoError(t, err) require.NotNil(t, drv2) // New plugin instance should pick up the saved state listRes, err = drv2.List() require.NoError(t, err) require.Equal(t, 2, len(listRes.Volumes)) assertVolumeInfo(t, listRes.Volumes[0], "vol1", path1) assertVolumeInfo(t, listRes.Volumes[1], "vol2", path2) rmReq := &docker.RemoveRequest{Name: "vol2"} err = drv.Remove(rmReq) assertErrorContains(t, err, "volume is in use") unmountReq.ID = "id1" err = drv.Unmount(unmountReq) assert.Error(t, err) assertErrorContains(t, err, "not mounted by this id") unmountReq.ID = "id2" err = drv.Unmount(unmountReq) assert.NoError(t, err) err = drv.Unmount(unmountReq) assert.EqualError(t, err, "volume is not mounted") err = drv.Remove(rmReq) assert.NoError(t, err) } const ( httpTimeout = 2 * time.Second tempDelay = 10 * time.Millisecond ) 
type APIClient struct { t *testing.T cli *http.Client host string } func newAPIClient(t *testing.T, host, unixPath string) *APIClient { tr := &http.Transport{ MaxIdleConns: 1, IdleConnTimeout: httpTimeout, DisableCompression: true, } if unixPath != "" { tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { return net.Dial("unix", unixPath) } } else { dialer := &net.Dialer{ Timeout: httpTimeout, KeepAlive: httpTimeout, } tr.DialContext = dialer.DialContext } cli := &http.Client{ Transport: tr, Timeout: httpTimeout, } return &APIClient{ t: t, cli: cli, host: host, } } func (a *APIClient) request(path string, in, out any, wantErr bool) { t := a.t var ( dataIn []byte dataOut []byte err error ) realm := "VolumeDriver" if path == "Activate" { realm = "Plugin" } url := fmt.Sprintf("http://%s/%s.%s", a.host, realm, path) if str, isString := in.(string); isString { dataIn = []byte(str) } else { dataIn, err = json.Marshal(in) require.NoError(t, err) } fs.Logf(path, "<-- %s", dataIn) req, err := http.NewRequest("POST", url, bytes.NewBuffer(dataIn)) require.NoError(t, err) req.Header.Set("Content-Type", "application/json") res, err := a.cli.Do(req) require.NoError(t, err) wantStatus := http.StatusOK if wantErr { wantStatus = http.StatusInternalServerError } assert.Equal(t, wantStatus, res.StatusCode) dataOut, err = io.ReadAll(res.Body) require.NoError(t, err) err = res.Body.Close() require.NoError(t, err) if strPtr, isString := out.(*string); isString || wantErr { require.True(t, isString, "must use string for error response") if wantErr { var errRes docker.ErrorResponse err = json.Unmarshal(dataOut, &errRes) require.NoError(t, err) *strPtr = errRes.Err } else { *strPtr = strings.TrimSpace(string(dataOut)) } } else { err = json.Unmarshal(dataOut, out) require.NoError(t, err) } fs.Logf(path, "--> %s", dataOut) time.Sleep(tempDelay) } func testMountAPI(t *testing.T, sockAddr string) { // Disable tests under macOS and linux in the CI since they are locking up 
if runtime.GOOS == "darwin" || runtime.GOOS == "linux" { testy.SkipUnreliable(t) } if _, mountFn := mountlib.ResolveMountMethod(""); mountFn == nil { t.Skip("Test requires working mount command") } ctx := context.Background() oldCacheDir := config.GetCacheDir() testDir, testFs := initialise(ctx, t) err := config.SetCacheDir(testDir) require.NoError(t, err) defer func() { _ = config.SetCacheDir(oldCacheDir) if !t.Failed() { fstest.Purge(testFs) _ = os.RemoveAll(testDir) } }() // Prepare API client var cli *APIClient var unixPath string if sockAddr != "" { cli = newAPIClient(t, sockAddr, "") } else { unixPath = filepath.Join(testDir, "rclone.sock") cli = newAPIClient(t, "localhost", unixPath) } // Create mounting volume driver and listen for requests drv, err := docker.NewDriver(ctx, testDir, nil, nil, false, true) require.NoError(t, err) require.NotNil(t, drv) defer drv.Exit() srv := docker.NewServer(drv) go func() { var errServe error if unixPath != "" { errServe = srv.ServeUnix(unixPath, os.Getgid()) } else { errServe = srv.ServeTCP(sockAddr, testDir, nil, false) } assert.ErrorIs(t, errServe, http.ErrServerClosed) }() defer func() { err := srv.Shutdown(ctx) assert.NoError(t, err) fs.Logf(nil, "Server stopped") time.Sleep(tempDelay) }() time.Sleep(tempDelay) // Let server start // Run test sequence path1 := filepath.Join(testDir, "path1") require.NoError(t, file.MkdirAll(path1, 0755)) mount1 := filepath.Join(testDir, "vol1") res := "" cli.request("Activate", "{}", &res, false) assert.Contains(t, res, `"VolumeDriver"`) createReq := docker.CreateRequest{ Name: "vol1", Options: docker.VolOpts{"remote": path1}, } cli.request("Create", createReq, &res, false) assert.Equal(t, "{}", res) cli.request("Create", createReq, &res, true) assert.Contains(t, res, "volume already exists") mountReq := docker.MountRequest{Name: "vol1", ID: "id1"} var mountRes docker.MountResponse cli.request("Mount", mountReq, &mountRes, false) assert.Equal(t, mount1, mountRes.Mountpoint) 
cli.request("Mount", mountReq, &res, true) assert.Contains(t, res, "already mounted by this id") removeReq := docker.RemoveRequest{Name: "vol1"} cli.request("Remove", removeReq, &res, true) assert.Contains(t, res, "volume is in use") text := []byte("banana") err = os.WriteFile(filepath.Join(mount1, "txt"), text, 0644) assert.NoError(t, err) time.Sleep(tempDelay) text2, err := os.ReadFile(filepath.Join(path1, "txt")) assert.NoError(t, err) if runtime.GOOS != "windows" { // this check sometimes fails on windows - ignore assert.Equal(t, text, text2) } unmountReq := docker.UnmountRequest{Name: "vol1", ID: "id1"} cli.request("Unmount", unmountReq, &res, false) assert.Equal(t, "{}", res) cli.request("Unmount", unmountReq, &res, true) assert.Equal(t, "volume is not mounted", res) cli.request("Remove", removeReq, &res, false) assert.Equal(t, "{}", res) cli.request("Remove", removeReq, &res, true) assert.Equal(t, "volume not found", res) var listRes docker.ListResponse cli.request("List", "{}", &listRes, false) assert.Empty(t, listRes.Volumes) } func TestDockerPluginMountTCP(t *testing.T) { testMountAPI(t, "localhost:53789") } func TestDockerPluginMountUnix(t *testing.T) { if runtime.GOOS != "linux" { t.Skip("Test is Linux-only") } testMountAPI(t, "") }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/docker/unix_unsupported.go
cmd/serve/docker/unix_unsupported.go
//go:build !linux && !freebsd package docker import ( "errors" "net" ) func newUnixListener(path string, gid int) (net.Listener, string, error) { return nil, "", errors.New("unix sockets require Linux or FreeBSD") }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/docker/unix.go
cmd/serve/docker/unix.go
//go:build linux || freebsd

package docker

import (
	"fmt"
	"net"
	"os"
	"path/filepath"

	"github.com/rclone/rclone/lib/file"
)

// newUnixListener returns a unix socket listener for the docker plugin.
//
// It first checks for a socket passed in via systemd socket activation
// and uses that if exactly one was supplied (returning an empty path as
// systemd owns the socket file).  Otherwise it creates the socket
// itself: the path gets a ".sock" extension if missing, relative paths
// are placed under sockDir, any stale socket file is removed, and the
// socket is chmodded 0660 (and chowned to root:gid when running as
// root) so the docker daemon can reach it.  The returned path is the
// socket file to clean up on shutdown, or "" when systemd owns it.
func newUnixListener(path string, gid int) (net.Listener, string, error) {
	// try systemd socket activation
	fds := systemdActivationFiles()
	switch len(fds) {
	case 0:
		// fall thru
	case 1:
		listener, err := net.FileListener(fds[0])
		return listener, "", err
	default:
		return nil, "", fmt.Errorf("expected only one socket from systemd, got %d", len(fds))
	}

	// create socket ourselves
	if filepath.Ext(path) == "" {
		path += ".sock"
	}
	if !filepath.IsAbs(path) {
		path = filepath.Join(sockDir, path)
	}

	if err := file.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, "", err
	}
	// remove a stale socket left over from a previous run; a missing
	// file is fine
	if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
		return nil, "", err
	}

	listener, err := net.Listen("unix", path)
	if err != nil {
		return nil, "", err
	}
	// restrict access to owner and group only
	if err = os.Chmod(path, 0660); err != nil {
		return nil, "", err
	}
	// only root may chown; hand group ownership to the requested gid
	if os.Geteuid() == 0 {
		if err = os.Chown(path, 0, gid); err != nil {
			return nil, "", err
		}
	}

	// we don't use spec file with unix sockets
	return listener, path, nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/docker/systemd_unsupported.go
cmd/serve/docker/systemd_unsupported.go
//go:build !linux || android package docker import ( "os" ) //lint:ignore U1000 unused when not building linux func systemdActivationFiles() []*os.File { return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/webdav/webdav_test.go
cmd/serve/webdav/webdav_test.go
// Serve webdav tests set up a server and run the integration tests // for the webdav remote against it. // // We skip tests on platforms with troublesome character mappings //go:build !windows && !darwin package webdav import ( "context" "flag" "io" "net/http" "os" "strings" "testing" "time" _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/cmd/serve/proxy" "github.com/rclone/rclone/cmd/serve/servetest" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/vfs/vfscommon" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/webdav" ) const ( testBindAddress = "localhost:0" testUser = "user" testPass = "pass" testTemplate = "../http/testdata/golden/testindex.html" ) // check interfaces var ( _ os.FileInfo = FileInfo{nil, nil} _ webdav.ETager = FileInfo{nil, nil} _ webdav.ContentTyper = FileInfo{nil, nil} ) // TestWebDav runs the webdav server then runs the unit tests for the // webdav remote against it. 
func TestWebDav(t *testing.T) { // Configure and start the server start := func(f fs.Fs) (configmap.Simple, func()) { opt := Opt opt.HTTP.ListenAddr = []string{testBindAddress} opt.HTTP.BaseURL = "/prefix" opt.Auth.BasicUser = testUser opt.Auth.BasicPass = testPass opt.Template.Path = testTemplate opt.EtagHash = "MD5" // Start the server w, err := newWebDAV(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt) require.NoError(t, err) go func() { require.NoError(t, w.Serve()) }() // Config for the backend we'll use to connect to the server config := configmap.Simple{ "type": "webdav", "vendor": "rclone", "url": w.server.URLs()[0], "user": testUser, "pass": obscure.MustObscure(testPass), } return config, func() { assert.NoError(t, w.Shutdown()) } } servetest.Run(t, "webdav", start) } // Test serve http functionality in serve webdav // While similar to http serve, there are some inconsistencies // in the handling of some requests such as POST requests var ( updateGolden = flag.Bool("updategolden", false, "update golden files for regression test") ) func TestHTTPFunction(t *testing.T) { ctx := context.Background() // exclude files called hidden.txt and directories called hidden fi := filter.GetConfig(ctx) require.NoError(t, fi.AddRule("- hidden.txt")) require.NoError(t, fi.AddRule("- hidden/**")) // Uses the same test files as http tests but with different golden. 
f, err := fs.NewFs(context.Background(), "../http/testdata/files") assert.NoError(t, err) opt := Opt opt.HTTP.ListenAddr = []string{testBindAddress} opt.Template.Path = testTemplate // Start the server w, err := newWebDAV(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt) assert.NoError(t, err) go func() { require.NoError(t, w.Serve()) }() defer func() { assert.NoError(t, w.Shutdown()) }() testURL := w.server.URLs()[0] pause := time.Millisecond i := 0 for ; i < 10; i++ { resp, err := http.Head(testURL) if err == nil { _ = resp.Body.Close() break } // t.Logf("couldn't connect, sleeping for %v: %v", pause, err) time.Sleep(pause) pause *= 2 } if i >= 10 { t.Fatal("couldn't connect to server") } HelpTestGET(t, testURL) } // check body against the file, or re-write body if -updategolden is // set. func checkGolden(t *testing.T, fileName string, got []byte) { if *updateGolden { t.Logf("Updating golden file %q", fileName) err := os.WriteFile(fileName, got, 0666) require.NoError(t, err) } else { want, err := os.ReadFile(fileName) require.NoError(t, err, "problem") wants := strings.Split(string(want), "\n") gots := strings.Split(string(got), "\n") assert.Equal(t, wants, gots, fileName) } } func HelpTestGET(t *testing.T, testURL string) { for _, test := range []struct { URL string Status int Golden string Method string Range string }{ { URL: "", Status: http.StatusOK, Golden: "testdata/golden/index.html", }, { URL: "notfound", Status: http.StatusNotFound, Golden: "testdata/golden/notfound.html", }, { URL: "dirnotfound/", Status: http.StatusNotFound, Golden: "testdata/golden/dirnotfound.html", }, { URL: "hidden/", Status: http.StatusNotFound, Golden: "testdata/golden/hiddendir.html", }, { URL: "one%25.txt", Status: http.StatusOK, Golden: "testdata/golden/one.txt", }, { URL: "hidden.txt", Status: http.StatusNotFound, Golden: "testdata/golden/hidden.txt", }, { URL: "three/", Status: http.StatusOK, Golden: "testdata/golden/three.html", }, { URL: "three/a.txt", Status: 
http.StatusOK, Golden: "testdata/golden/a.txt", }, { URL: "", Method: "HEAD", Status: http.StatusOK, Golden: "testdata/golden/indexhead.txt", }, { URL: "one%25.txt", Method: "HEAD", Status: http.StatusOK, Golden: "testdata/golden/onehead.txt", }, { URL: "", Method: "POST", Status: http.StatusMethodNotAllowed, Golden: "testdata/golden/indexpost.txt", }, { URL: "one%25.txt", Method: "POST", Status: http.StatusOK, Golden: "testdata/golden/onepost.txt", }, { URL: "two.txt", Status: http.StatusOK, Golden: "testdata/golden/two.txt", }, { URL: "two.txt", Status: http.StatusPartialContent, Range: "bytes=2-5", Golden: "testdata/golden/two2-5.txt", }, { URL: "two.txt", Status: http.StatusPartialContent, Range: "bytes=0-6", Golden: "testdata/golden/two-6.txt", }, { URL: "two.txt", Status: http.StatusPartialContent, Range: "bytes=3-", Golden: "testdata/golden/two3-.txt", }, } { method := test.Method if method == "" { method = "GET" } req, err := http.NewRequest(method, testURL+test.URL, nil) require.NoError(t, err) if test.Range != "" { req.Header.Add("Range", test.Range) } resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, test.Status, resp.StatusCode, test.Golden) body, err := io.ReadAll(resp.Body) require.NoError(t, err) checkGolden(t, test.Golden, body) } } func TestRc(t *testing.T) { servetest.TestRc(t, rc.Params{ "type": "webdav", "vfs_cache_mode": "off", }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/webdav/webdav.go
cmd/serve/webdav/webdav.go
// Package webdav implements a WebDAV server backed by rclone VFS package webdav import ( "context" "encoding/xml" "errors" "fmt" "mime" "net" "net/http" "os" "path" "strconv" "strings" "time" chi "github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5/middleware" "github.com/rclone/rclone/cmd" cmdserve "github.com/rclone/rclone/cmd/serve" "github.com/rclone/rclone/cmd/serve/proxy" "github.com/rclone/rclone/cmd/serve/proxy/proxyflags" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/rc" libhttp "github.com/rclone/rclone/lib/http" "github.com/rclone/rclone/lib/http/serve" "github.com/rclone/rclone/lib/systemd" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" "github.com/rclone/rclone/vfs/vfsflags" "github.com/spf13/cobra" "golang.org/x/net/webdav" ) // OptionsInfo describes the Options in use var OptionsInfo = fs.Options{{ Name: "etag_hash", Default: "", Help: "Which hash to use for the ETag, or auto or blank for off", }, { Name: "disable_dir_list", Default: false, Help: "Disable HTML directory list on GET request for a directory", }, { Name: "disable_zip", Default: false, Help: "Disable zip download of directories", }}. Add(libhttp.ConfigInfo). Add(libhttp.AuthConfigInfo). Add(libhttp.TemplateConfigInfo) // Options required for http server type Options struct { Auth libhttp.AuthConfig HTTP libhttp.Config Template libhttp.TemplateConfig EtagHash string `config:"etag_hash"` DisableDirList bool `config:"disable_dir_list"` DisableZip bool `config:"disable_zip"` } // Opt is options set by command line flags var Opt Options // flagPrefix is the prefix used to uniquely identify command line flags. // It is intentionally empty for this package. 
const flagPrefix = "" func init() { fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "webdav", Opt: &Opt, Options: OptionsInfo}) flagSet := Command.Flags() flags.AddFlagsFromOptions(flagSet, "", OptionsInfo) vfsflags.AddFlags(flagSet) proxyflags.AddFlags(flagSet) cmdserve.Command.AddCommand(Command) cmdserve.AddRc("webdav", func(ctx context.Context, f fs.Fs, in rc.Params) (cmdserve.Handle, error) { // Read VFS Opts var vfsOpt = vfscommon.Opt // set default opts err := configstruct.SetAny(in, &vfsOpt) if err != nil { return nil, err } // Read Proxy Opts var proxyOpt = proxy.Opt // set default opts err = configstruct.SetAny(in, &proxyOpt) if err != nil { return nil, err } // Read opts var opt = Opt // set default opts err = configstruct.SetAny(in, &opt) if err != nil { return nil, err } // Create server return newWebDAV(ctx, f, &opt, &vfsOpt, &proxyOpt) }) } // Command definition for cobra var Command = &cobra.Command{ Use: "webdav remote:path", Short: `Serve remote:path over WebDAV.`, Long: `Run a basic WebDAV server to serve a remote over HTTP via the WebDAV protocol. This can be viewed with a WebDAV client, through a web browser, or you can make a remote of type WebDAV to read and write it. ### WebDAV options #### --etag-hash This controls the ETag header. Without this flag the ETag will be based on the ModTime and Size of the object. If this flag is set to "auto" then rclone will choose the first supported hash on the backend or you can use a named hash such as "MD5" or "SHA-1". Use the [hashsum](/commands/rclone_hashsum/) command to see the full list. ### Access WebDAV on Windows WebDAV shared folder can be mapped as a drive on Windows, however the default settings prevent it. Windows will fail to connect to the server using insecure Basic authentication. It will not even display any login dialog. Windows requires SSL / HTTPS connection to be used with Basic. 
If you try to connect via Add Network Location Wizard you will get the following error: "The folder you entered does not appear to be valid. Please choose another". However, you still can connect if you set the following registry key on a client machine: ` + "`HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\WebClient\\Parameters\\BasicAuthLevel`" + ` to 2. The BasicAuthLevel can be set to the following values: ` + "```text" + ` 0 - Basic authentication disabled 1 - Basic authentication enabled for SSL connections only 2 - Basic authentication enabled for SSL connections and for non-SSL connections ` + "```" + ` If required, increase the FileSizeLimitInBytes to a higher value. Navigate to the Services interface, then restart the WebClient service. ### Access Office applications on WebDAV Navigate to following registry ` + "`HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\[14.0/15.0/16.0]\\Common\\Internet`" + ` Create a new DWORD BasicAuthLevel with value 2. ` + "```text" + ` 0 - Basic authentication disabled 1 - Basic authentication enabled for SSL connections only 2 - Basic authentication enabled for SSL and for non-SSL connections ` + "```" + ` <https://learn.microsoft.com/en-us/office/troubleshoot/powerpoint/office-opens-blank-from-sharepoint> ### Serving over a unix socket You can serve the webdav on a unix socket like this: ` + "```console" + ` rclone serve webdav --addr unix:///tmp/my.socket remote:path ` + "```" + ` and connect to it like this using rclone and the webdav backend: ` + "```console" + ` rclone --webdav-unix-socket /tmp/my.socket --webdav-url http://localhost lsf :webdav: ` + "```" + ` Note that there is no authentication on http protocol - this is expected to be done by the permissions on the socket. 
` + strings.TrimSpace(libhttp.Help(flagPrefix)+libhttp.TemplateHelp(flagPrefix)+libhttp.AuthHelp(flagPrefix)+vfs.Help()+proxy.Help), Annotations: map[string]string{ "versionIntroduced": "v1.39", "groups": "Filter", }, RunE: func(command *cobra.Command, args []string) error { var f fs.Fs if proxy.Opt.AuthProxy == "" { cmd.CheckArgs(1, 1, command, args) f = cmd.NewFsSrc(args) } else { cmd.CheckArgs(0, 0, command, args) } cmd.Run(false, false, command, func() error { s, err := newWebDAV(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt) if err != nil { return err } defer systemd.Notify()() return s.Serve() }) return nil }, } // WebDAV is a webdav.FileSystem interface // // A FileSystem implements access to a collection of named files. The elements // in a file path are separated by slash ('/', U+002F) characters, regardless // of host operating system convention. // // Each method has the same semantics as the os package's function of the same // name. // // Note that the os.Rename documentation says that "OS-specific restrictions // might apply". In particular, whether or not renaming a file or directory // overwriting another existing file or directory is an error is OS-dependent. 
type WebDAV struct { server *libhttp.Server opt Options f fs.Fs _vfs *vfs.VFS // don't use directly, use getVFS webdavhandler *webdav.Handler proxy *proxy.Proxy ctx context.Context // for global config etagHashType hash.Type } // check interface var _ webdav.FileSystem = (*WebDAV)(nil) // Make a new WebDAV to serve the remote func newWebDAV(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (w *WebDAV, err error) { w = &WebDAV{ f: f, ctx: ctx, opt: *opt, etagHashType: hash.None, } if opt.EtagHash == "auto" { w.etagHashType = f.Hashes().GetOne() } else if opt.EtagHash != "" { err := w.etagHashType.Set(opt.EtagHash) if err != nil { return nil, err } } if w.etagHashType != hash.None { fs.Debugf(f, "Using hash %v for ETag", w.etagHashType) } if proxyOpt.AuthProxy != "" { w.proxy = proxy.New(ctx, proxyOpt, vfsOpt) // override auth w.opt.Auth.CustomAuthFn = w.auth } else { w._vfs = vfs.New(f, vfsOpt) } w.server, err = libhttp.NewServer(ctx, libhttp.WithConfig(w.opt.HTTP), libhttp.WithAuth(w.opt.Auth), libhttp.WithTemplate(w.opt.Template), ) if err != nil { return nil, fmt.Errorf("failed to init server: %w", err) } // Make sure BaseURL starts with a / and doesn't end with one w.opt.HTTP.BaseURL = "/" + strings.Trim(w.opt.HTTP.BaseURL, "/") webdavHandler := &webdav.Handler{ Prefix: w.opt.HTTP.BaseURL, FileSystem: w, LockSystem: webdav.NewMemLS(), Logger: w.logRequest, // FIXME } w.webdavhandler = webdavHandler router := w.server.Router() router.Use( middleware.SetHeader("Accept-Ranges", "bytes"), middleware.SetHeader("Server", "rclone/"+fs.Version), ) router.Handle("/*", w) // Webdav only methods not defined in chi methods := []string{ "COPY", // Copies the resource. "LOCK", // Locks the resource. "MKCOL", // Creates the collection specified. "MOVE", // Moves the resource. "PROPFIND", // Performs a property find on the server. "PROPPATCH", // Sets or removes properties on the server. "UNLOCK", // Unlocks the resource. 
} for _, method := range methods { chi.RegisterMethod(method) router.Method(method, "/*", w) } return w, nil } // Gets the VFS in use for this request func (w *WebDAV) getVFS(ctx context.Context) (VFS *vfs.VFS, err error) { if w._vfs != nil { return w._vfs, nil } value := libhttp.CtxGetAuth(ctx) if value == nil { return nil, errors.New("no VFS found in context") } VFS, ok := value.(*vfs.VFS) if !ok { return nil, fmt.Errorf("context value is not VFS: %#v", value) } return VFS, nil } // auth does proxy authorization func (w *WebDAV) auth(user, pass string) (value any, err error) { VFS, _, err := w.proxy.Call(user, pass, false) if err != nil { return nil, err } return VFS, err } type webdavRW struct { http.ResponseWriter status int } func (rw *webdavRW) WriteHeader(statusCode int) { rw.status = statusCode rw.ResponseWriter.WriteHeader(statusCode) } func (rw *webdavRW) isSuccessfull() bool { return rw.status == 0 || (rw.status >= 200 && rw.status <= 299) } func (w *WebDAV) postprocess(r *http.Request, remote string) { // set modtime from requests, don't write to client because status is already written switch r.Method { case "COPY", "MOVE", "PUT": VFS, err := w.getVFS(r.Context()) if err != nil { fs.Errorf(nil, "Failed to get VFS: %v", err) return } // Get the node node, err := VFS.Stat(remote) if err != nil { fs.Errorf(nil, "Failed to stat node: %v", err) return } mh := r.Header.Get("X-OC-Mtime") if mh != "" { modtimeUnix, err := strconv.ParseInt(mh, 10, 64) if err == nil { err = node.SetModTime(time.Unix(modtimeUnix, 0)) if err != nil { fs.Errorf(nil, "Failed to set modtime: %v", err) } } else { fs.Errorf(nil, "Failed to parse modtime: %v", err) } } } } func (w *WebDAV) ServeHTTP(rw http.ResponseWriter, r *http.Request) { urlPath := r.URL.Path isDir := strings.HasSuffix(urlPath, "/") remote := strings.Trim(urlPath, "/") if !w.opt.DisableDirList && (r.Method == "GET" || r.Method == "HEAD") && isDir { w.serveDir(rw, r, remote) return } // Add URL Prefix back to path 
since webdavhandler needs to // return absolute references. r.URL.Path = w.opt.HTTP.BaseURL + r.URL.Path wrw := &webdavRW{ResponseWriter: rw} w.webdavhandler.ServeHTTP(wrw, r) if wrw.isSuccessfull() { w.postprocess(r, remote) } } // serveDir serves a directory index at dirRemote // This is similar to serveDir in serve http. func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote string) { ctx := r.Context() VFS, err := w.getVFS(r.Context()) if err != nil { http.Error(rw, "Root directory not found", http.StatusNotFound) fs.Errorf(nil, "Failed to serve directory: %v", err) return } // List the directory node, err := VFS.Stat(dirRemote) if err == vfs.ENOENT { http.Error(rw, "Directory not found", http.StatusNotFound) return } else if err != nil { serve.Error(ctx, dirRemote, rw, "Failed to list directory", err) return } if !node.IsDir() { http.Error(rw, "Not a directory", http.StatusNotFound) return } dir := node.(*vfs.Dir) if r.URL.Query().Get("download") == "zip" && !w.opt.DisableZip { fs.Infof(dirRemote, "%s: Zipping directory", r.RemoteAddr) zipName := path.Base(dirRemote) if dirRemote == "" { zipName = "root" } rw.Header().Set("Content-Disposition", "attachment; filename=\""+zipName+".zip\"") rw.Header().Set("Content-Type", "application/zip") rw.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat)) err := vfs.CreateZip(ctx, dir, rw) if err != nil { serve.Error(ctx, dirRemote, rw, "Failed to create zip", err) return } return } dirEntries, err := dir.ReadDirAll() if err != nil { serve.Error(ctx, dirRemote, rw, "Failed to list directory", err) return } // Make the entries for display directory := serve.NewDirectory(dirRemote, w.server.HTMLTemplate()) directory.DisableZip = w.opt.DisableZip for _, node := range dirEntries { if vfscommon.Opt.NoModTime { directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{}) } else { directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), node.ModTime().UTC()) } } 
sortParm := r.URL.Query().Get("sort") orderParm := r.URL.Query().Get("order") directory.ProcessQueryParams(sortParm, orderParm) directory.Serve(rw, r) } // Serve HTTP until the server is shutdown // // Use s.Close() and s.Wait() to shutdown server func (w *WebDAV) Serve() error { w.server.Serve() fs.Logf(w.f, "WebDav Server started on %s", w.server.URLs()) w.server.Wait() return nil } // Addr returns the first address of the server func (w *WebDAV) Addr() net.Addr { return w.server.Addr() } // Shutdown the server func (w *WebDAV) Shutdown() error { return w.server.Shutdown() } // logRequest is called by the webdav module on every request func (w *WebDAV) logRequest(r *http.Request, err error) { fs.Infof(r.URL.Path, "%s from %s", r.Method, r.RemoteAddr) } // Mkdir creates a directory func (w *WebDAV) Mkdir(ctx context.Context, name string, perm os.FileMode) (err error) { // defer log.Trace(name, "perm=%v", perm)("err = %v", &err) VFS, err := w.getVFS(ctx) if err != nil { return err } dir, leaf, err := VFS.StatParent(name) if err != nil { return err } _, err = dir.Mkdir(leaf) return err } // OpenFile opens a file or a directory func (w *WebDAV) OpenFile(ctx context.Context, name string, flags int, perm os.FileMode) (file webdav.File, err error) { // defer log.Trace(name, "flags=%v, perm=%v", flags, perm)("err = %v", &err) VFS, err := w.getVFS(ctx) if err != nil { return nil, err } f, err := VFS.OpenFile(name, flags, perm) if err != nil { return nil, err } return Handle{Handle: f, w: w, ctx: ctx}, nil } // RemoveAll removes a file or a directory and its contents func (w *WebDAV) RemoveAll(ctx context.Context, name string) (err error) { // defer log.Trace(name, "")("err = %v", &err) VFS, err := w.getVFS(ctx) if err != nil { return err } node, err := VFS.Stat(name) if err != nil { return err } err = node.RemoveAll() if err != nil { return err } return nil } // Rename a file or a directory func (w *WebDAV) Rename(ctx context.Context, oldName, newName string) (err error) 
{ // defer log.Trace(oldName, "newName=%q", newName)("err = %v", &err) VFS, err := w.getVFS(ctx) if err != nil { return err } return VFS.Rename(oldName, newName) } // Stat returns info about the file or directory func (w *WebDAV) Stat(ctx context.Context, name string) (fi os.FileInfo, err error) { // defer log.Trace(name, "")("fi=%+v, err = %v", &fi, &err) VFS, err := w.getVFS(ctx) if err != nil { return nil, err } fi, err = VFS.Stat(name) if err != nil { return nil, err } return FileInfo{FileInfo: fi, w: w}, nil } // Handle represents an open file type Handle struct { vfs.Handle w *WebDAV ctx context.Context } // Readdir reads directory entries from the handle func (h Handle) Readdir(count int) (fis []os.FileInfo, err error) { fis, err = h.Handle.Readdir(count) if err != nil { return nil, err } // Wrap each FileInfo for i := range fis { fis[i] = FileInfo{FileInfo: fis[i], w: h.w} } return fis, nil } // Stat the handle func (h Handle) Stat() (fi os.FileInfo, err error) { fi, err = h.Handle.Stat() if err != nil { return nil, err } return FileInfo{FileInfo: fi, w: h.w}, nil } // DeadProps returns extra properties about the handle func (h Handle) DeadProps() (map[xml.Name]webdav.Property, error) { var ( xmlName xml.Name property webdav.Property properties = make(map[xml.Name]webdav.Property) ) if h.w.etagHashType != hash.None { entry := h.Handle.Node().DirEntry() if o, ok := entry.(fs.Object); ok { hash, err := o.Hash(h.ctx, h.w.etagHashType) if err == nil { xmlName.Space = "http://owncloud.org/ns" xmlName.Local = "checksums" property.XMLName = xmlName property.InnerXML = append(property.InnerXML, "<checksum xmlns=\"http://owncloud.org/ns\">"...) property.InnerXML = append(property.InnerXML, strings.ToUpper(h.w.etagHashType.String())...) property.InnerXML = append(property.InnerXML, ':') property.InnerXML = append(property.InnerXML, hash...) property.InnerXML = append(property.InnerXML, "</checksum>"...) 
properties[xmlName] = property } else { fs.Errorf(nil, "failed to calculate hash: %v", err) } } } xmlName.Space = "DAV:" xmlName.Local = "lastmodified" property.XMLName = xmlName property.InnerXML = strconv.AppendInt(nil, h.Handle.Node().ModTime().Unix(), 10) properties[xmlName] = property return properties, nil } // Patch changes modtime of the underlying resources, it returns ok for all properties, the error is from setModtime if any // FIXME does not check for invalid property and SetModTime error func (h Handle) Patch(proppatches []webdav.Proppatch) ([]webdav.Propstat, error) { var ( stat webdav.Propstat err error ) stat.Status = http.StatusOK for _, patch := range proppatches { for _, prop := range patch.Props { stat.Props = append(stat.Props, webdav.Property{XMLName: prop.XMLName}) if prop.XMLName.Space == "DAV:" && prop.XMLName.Local == "lastmodified" { var modtimeUnix int64 modtimeUnix, err = strconv.ParseInt(string(prop.InnerXML), 10, 64) if err == nil { err = h.Handle.Node().SetModTime(time.Unix(modtimeUnix, 0)) } } } } return []webdav.Propstat{stat}, err } // FileInfo represents info about a file satisfying os.FileInfo and // also some additional interfaces for webdav for ETag and ContentType type FileInfo struct { os.FileInfo w *WebDAV } // ETag returns an ETag for the FileInfo func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) { // defer log.Trace(fi, "")("etag=%q, err=%v", &etag, &err) if fi.w.etagHashType == hash.None { return "", webdav.ErrNotImplemented } node, ok := (fi.FileInfo).(vfs.Node) if !ok { fs.Errorf(fi, "Expecting vfs.Node, got %T", fi.FileInfo) return "", webdav.ErrNotImplemented } entry := node.DirEntry() o, ok := entry.(fs.Object) if !ok { return "", webdav.ErrNotImplemented } hash, err := o.Hash(ctx, fi.w.etagHashType) if err != nil || hash == "" { return "", webdav.ErrNotImplemented } return `"` + hash + `"`, nil } // ContentType returns a content type for the FileInfo func (fi FileInfo) ContentType(ctx 
context.Context) (contentType string, err error) { // defer log.Trace(fi, "")("etag=%q, err=%v", &contentType, &err) node, ok := (fi.FileInfo).(vfs.Node) if !ok { fs.Errorf(fi, "Expecting vfs.Node, got %T", fi.FileInfo) return "application/octet-stream", nil } entry := node.DirEntry() // can be nil switch x := entry.(type) { case fs.Object: return fs.MimeType(ctx, x), nil case fs.Directory: return "inode/directory", nil case nil: return mime.TypeByExtension(path.Ext(node.Name())), nil } fs.Errorf(fi, "Expecting fs.Object or fs.Directory, got %T", entry) return "application/octet-stream", nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/dlna/cms.go
cmd/serve/dlna/cms.go
package dlna import ( "net/http" "github.com/anacrolix/dms/upnp" ) const defaultProtocolInfo = "http-get:*:video/mpeg:*,http-get:*:video/mp4:*,http-get:*:video/vnd.dlna.mpeg-tts:*,http-get:*:video/avi:*,http-get:*:video/x-matroska:*,http-get:*:video/x-ms-wmv:*,http-get:*:video/wtv:*,http-get:*:audio/mpeg:*,http-get:*:audio/mp3:*,http-get:*:audio/mp4:*,http-get:*:audio/x-ms-wma*,http-get:*:audio/wav:*,http-get:*:audio/L16:*,http-get:*image/jpeg:*,http-get:*image/png:*,http-get:*image/gif:*,http-get:*image/tiff:*" type connectionManagerService struct { *server upnp.Eventing } func (cms *connectionManagerService) Handle(action string, argsXML []byte, r *http.Request) (map[string]string, error) { switch action { case "GetProtocolInfo": return map[string]string{ "Source": defaultProtocolInfo, "Sink": "", }, nil default: return nil, upnp.InvalidActionError } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/dlna/cds_test.go
cmd/serve/dlna/cds_test.go
package dlna import ( "context" "sort" "testing" localBackend "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestMediaWithResources(t *testing.T) { fs, err := localBackend.NewFs(context.Background(), "testdatafiles", "testdata/files", configmap.New()) require.NoError(t, err) myvfs := vfs.New(fs, nil) { rootNode, err := myvfs.Stat("") require.NoError(t, err) rootDir := rootNode.(*vfs.Dir) dirEntries, err := rootDir.ReadDirAll() require.NoError(t, err) mediaItems, assocResources := mediaWithResources(dirEntries) // ensure mediaItems contains some items we care about. // We specifically check that the .mp4 file and a child directory is kept. var videoMp4 *vfs.Node foundSubdir := false for _, mediaItem := range mediaItems { if mediaItem.Name() == "video.mp4" { videoMp4 = &mediaItem } else if mediaItem.Name() == "subdir" { foundSubdir = true } } assert.True(t, videoMp4 != nil, "expected mp4 to be found") assert.True(t, foundSubdir, "expected subdir to be found") assocVideoResource, ok := assocResources[*videoMp4] require.True(t, ok, "expected video.mp4 to have assoc video resource") // ensure both video.en.srt and video.srt are in assocVideoResource. assocVideoResourceNames := make([]string, 0) for _, e := range assocVideoResource { assocVideoResourceNames = append(assocVideoResourceNames, e.Name()) } sort.Strings(assocVideoResourceNames) assert.Equal(t, []string{"video.en.srt", "video.srt"}, assocVideoResourceNames) } // Now test inside subdir2. // This directory only contains a video.mp4 file, but as it also contains a // "Subs" subdir, `mediaWithResources` is called with its children appended, // causing the media items are appropriately populated. 
{ rootNode, err := myvfs.Stat("subdir2") require.NoError(t, err) subtitleNode, err := myvfs.Stat("subdir2/Subs") require.NoError(t, err) rootDir := rootNode.(*vfs.Dir) subtitleDir := subtitleNode.(*vfs.Dir) dirEntries, err := rootDir.ReadDirAll() require.NoError(t, err) subtitleEntries, err := subtitleDir.ReadDirAll() require.NoError(t, err) dirEntries = append(dirEntries, subtitleEntries...) mediaItems, assocResources := mediaWithResources(dirEntries) // ensure mediaItems contains some items we care about. // We specifically check that the .mp4 file is kept. var videoMp4 *vfs.Node for _, mediaItem := range mediaItems { if mediaItem.Name() == "video.mp4" { videoMp4 = &mediaItem } } assert.True(t, videoMp4 != nil, "expected mp4 to be found") assocVideoResource, ok := assocResources[*videoMp4] require.True(t, ok, "expected video.mp4 to have assoc video resource") // ensure both video.en.srt and video.srt are in assocVideoResource. assocVideoResourceNames := make([]string, 0) for _, e := range assocVideoResource { assocVideoResourceNames = append(assocVideoResourceNames, e.Name()) } sort.Strings(assocVideoResourceNames) assert.Equal(t, []string{"video.en.srt", "video.srt"}, assocVideoResourceNames) } // Now test subdir3. It contains a video.mpv, as well as Sub/video.{idx,sub}. { rootNode, err := myvfs.Stat("subdir3") require.NoError(t, err) subtitleNode, err := myvfs.Stat("subdir3/Subs") require.NoError(t, err) rootDir := rootNode.(*vfs.Dir) subtitleDir := subtitleNode.(*vfs.Dir) dirEntries, err := rootDir.ReadDirAll() require.NoError(t, err) subtitleEntries, err := subtitleDir.ReadDirAll() require.NoError(t, err) dirEntries = append(dirEntries, subtitleEntries...) mediaItems, assocResources := mediaWithResources(dirEntries) // ensure mediaItems contains some items we care about. // We specifically check that the .mp4 file is kept. 
var videoMp4 *vfs.Node for _, mediaItem := range mediaItems { if mediaItem.Name() == "video.mp4" { videoMp4 = &mediaItem } } assert.True(t, videoMp4 != nil, "expected mp4 to be found") // test assocResources to point from the video file to the subtitles assocVideoResource, ok := assocResources[*videoMp4] require.True(t, ok, "expected video.mp4 to have assoc video resource") // ensure both video.idx and video.sub are in assocVideoResource. assocVideoResourceNames := make([]string, 0) for _, e := range assocVideoResource { assocVideoResourceNames = append(assocVideoResourceNames, e.Name()) } sort.Strings(assocVideoResourceNames) assert.Equal(t, []string{"video.idx", "video.sub"}, assocVideoResourceNames) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/dlna/dlna_test.go
cmd/serve/dlna/dlna_test.go
package dlna import ( "bytes" "context" "fmt" "html" "io" "net/http" "os" "strings" "testing" "github.com/anacrolix/dms/soap" "github.com/rclone/rclone/cmd/serve/servetest" "github.com/rclone/rclone/fs/config/configfile" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/fs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( dlnaServer *server baseURL string ) const ( testBindAddress = "localhost:0" ) func startServer(t *testing.T, f fs.Fs) { opt := Opt opt.ListenAddr = testBindAddress var err error dlnaServer, err = newServer(context.Background(), f, &opt, &vfscommon.Opt) assert.NoError(t, err) go func() { assert.NoError(t, dlnaServer.Serve()) }() baseURL = "http://" + dlnaServer.HTTPConn.Addr().String() } func TestInit(t *testing.T) { configfile.Install() f, err := fs.NewFs(context.Background(), "testdata/files") l, _ := f.List(context.Background(), "") fmt.Println(l) require.NoError(t, err) startServer(t, f) } // Make sure that it serves rootDesc.xml (SCPD in uPnP parlance). func TestRootSCPD(t *testing.T) { req, err := http.NewRequest("GET", baseURL+rootDescPath, nil) require.NoError(t, err) resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode) body, err := io.ReadAll(resp.Body) require.NoError(t, err) // Make sure that the SCPD contains a CDS service. require.Contains(t, string(body), "<serviceType>urn:schemas-upnp-org:service:ContentDirectory:1</serviceType>") // Make sure that the SCPD contains a CM service. require.Contains(t, string(body), "<serviceType>urn:schemas-upnp-org:service:ConnectionManager:1</serviceType>") // Ensure that the SCPD url is configured. require.Regexp(t, "<SCPDURL>/.*</SCPDURL>", string(body)) } // Make sure that it serves content from the remote. 
func TestServeContent(t *testing.T) { req, err := http.NewRequest("GET", baseURL+resPath+"video.mp4", nil) require.NoError(t, err) resp, err := http.DefaultClient.Do(req) require.NoError(t, err) defer fs.CheckClose(resp.Body, &err) assert.Equal(t, http.StatusOK, resp.StatusCode) actualContents, err := io.ReadAll(resp.Body) assert.NoError(t, err) // Now compare the contents with the golden file. node, err := dlnaServer.vfs.Stat("/video.mp4") assert.NoError(t, err) goldenFile := node.(*vfs.File) goldenReader, err := goldenFile.Open(os.O_RDONLY) assert.NoError(t, err) defer fs.CheckClose(goldenReader, &err) goldenContents, err := io.ReadAll(goldenReader) assert.NoError(t, err) require.Equal(t, goldenContents, actualContents) } // Check that ContentDirectory#Browse returns appropriate metadata on the root container. func TestContentDirectoryBrowseMetadata(t *testing.T) { // Sample from: https://github.com/rclone/rclone/issues/3253#issuecomment-524317469 req, err := http.NewRequest("POST", baseURL+serviceControlURL, strings.NewReader(` <?xml version="1.0" encoding="utf-8"?> <s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"> <s:Body> <u:Browse xmlns:u="urn:schemas-upnp-org:service:ContentDirectory:1"> <ObjectID>0</ObjectID> <BrowseFlag>BrowseMetadata</BrowseFlag> <Filter>*</Filter> <StartingIndex>0</StartingIndex> <RequestedCount>0</RequestedCount> <SortCriteria></SortCriteria> </u:Browse> </s:Body> </s:Envelope>`)) require.NoError(t, err) req.Header.Set("SOAPACTION", `"urn:schemas-upnp-org:service:ContentDirectory:1#Browse"`) resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode) body, err := io.ReadAll(resp.Body) require.NoError(t, err) // should contain an appropriate URN require.Contains(t, string(body), "urn:schemas-upnp-org:service:ContentDirectory:1") // expect a <container> element require.Contains(t, string(body), 
html.EscapeString("<container ")) require.NotContains(t, string(body), html.EscapeString("<item ")) // if there is a childCount, it better not be zero require.NotContains(t, string(body), html.EscapeString(" childCount=\"0\"")) // should have a dc:date element require.Contains(t, string(body), html.EscapeString("<dc:date>")) } // Check that the X_MS_MediaReceiverRegistrar is faked out properly. func TestMediaReceiverRegistrarService(t *testing.T) { env := soap.Envelope{ Body: soap.Body{ Action: []byte("RegisterDevice"), }, } req, err := http.NewRequest("POST", baseURL+serviceControlURL, bytes.NewReader(mustMarshalXML(env))) require.NoError(t, err) req.Header.Set("SOAPACTION", `"urn:microsoft.com:service:X_MS_MediaReceiverRegistrar:1#RegisterDevice"`) resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode) body, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Contains(t, string(body), "<RegistrationRespMsg>") } // Check that ContentDirectory#Browse returns the expected items. func TestContentDirectoryBrowseDirectChildren(t *testing.T) { // First the root... 
req, err := http.NewRequest("POST", baseURL+serviceControlURL, strings.NewReader(` <?xml version="1.0" encoding="utf-8"?> <s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"> <s:Body> <u:Browse xmlns:u="urn:schemas-upnp-org:service:ContentDirectory:1"> <ObjectID>0</ObjectID> <BrowseFlag>BrowseDirectChildren</BrowseFlag> <Filter>*</Filter> <StartingIndex>0</StartingIndex> <RequestedCount>0</RequestedCount> <SortCriteria></SortCriteria> </u:Browse> </s:Body> </s:Envelope>`)) require.NoError(t, err) req.Header.Set("SOAPACTION", `"urn:schemas-upnp-org:service:ContentDirectory:1#Browse"`) resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode) body, err := io.ReadAll(resp.Body) require.NoError(t, err) // expect video.mp4, video.srt, video.en.srt URLs to be in the DIDL require.Contains(t, string(body), "/r/video.mp4") require.Contains(t, string(body), "/r/video.srt") require.Contains(t, string(body), "/r/video.en.srt") // Then a subdirectory (subdir) { req, err = http.NewRequest("POST", baseURL+serviceControlURL, strings.NewReader(` <?xml version="1.0" encoding="utf-8"?> <s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"> <s:Body> <u:Browse xmlns:u="urn:schemas-upnp-org:service:ContentDirectory:1"> <ObjectID>%2Fsubdir</ObjectID> <BrowseFlag>BrowseDirectChildren</BrowseFlag> <Filter>*</Filter> <StartingIndex>0</StartingIndex> <RequestedCount>0</RequestedCount> <SortCriteria></SortCriteria> </u:Browse> </s:Body> </s:Envelope>`)) require.NoError(t, err) req.Header.Set("SOAPACTION", `"urn:schemas-upnp-org:service:ContentDirectory:1#Browse"`) resp, err = http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode) body, err = io.ReadAll(resp.Body) require.NoError(t, err) // expect video.mp4, video.srt, URLs to be in the DIDL 
require.Contains(t, string(body), "/r/subdir/video.mp4") require.Contains(t, string(body), "/r/subdir/video.srt") } // Then a subdirectory with subtitles separately (subdir2) { req, err = http.NewRequest("POST", baseURL+serviceControlURL, strings.NewReader(` <?xml version="1.0" encoding="utf-8"?> <s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"> <s:Body> <u:Browse xmlns:u="urn:schemas-upnp-org:service:ContentDirectory:1"> <ObjectID>%2Fsubdir2</ObjectID> <BrowseFlag>BrowseDirectChildren</BrowseFlag> <Filter>*</Filter> <StartingIndex>0</StartingIndex> <RequestedCount>0</RequestedCount> <SortCriteria></SortCriteria> </u:Browse> </s:Body> </s:Envelope>`)) require.NoError(t, err) req.Header.Set("SOAPACTION", `"urn:schemas-upnp-org:service:ContentDirectory:1#Browse"`) resp, err = http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode) body, err = io.ReadAll(resp.Body) require.NoError(t, err) // expect video.mp4, Subs/video.srt, URLs to be in the DIDL require.Contains(t, string(body), "/r/subdir2/video.mp4") require.Contains(t, string(body), "/r/subdir2/Subs/video.srt") } // Then a subdirectory with subtitles in Subs/*.{idx,sub} (subdir3) { req, err = http.NewRequest("POST", baseURL+serviceControlURL, strings.NewReader(` <?xml version="1.0" encoding="utf-8"?> <s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"> <s:Body> <u:Browse xmlns:u="urn:schemas-upnp-org:service:ContentDirectory:1"> <ObjectID>%2Fsubdir3</ObjectID> <BrowseFlag>BrowseDirectChildren</BrowseFlag> <Filter>*</Filter> <StartingIndex>0</StartingIndex> <RequestedCount>0</RequestedCount> <SortCriteria></SortCriteria> </u:Browse> </s:Body> </s:Envelope>`)) require.NoError(t, err) req.Header.Set("SOAPACTION", `"urn:schemas-upnp-org:service:ContentDirectory:1#Browse"`) resp, err = http.DefaultClient.Do(req) require.NoError(t, 
err) assert.Equal(t, http.StatusOK, resp.StatusCode) body, err = io.ReadAll(resp.Body) require.NoError(t, err) // expect video.mp4, Subs/video.srt, URLs to be in the DIDL require.Contains(t, string(body), "/r/subdir3/video.mp4") require.Contains(t, string(body), "/r/subdir3/Subs/video.idx") require.Contains(t, string(body), "/r/subdir3/Subs/video.sub") } } func TestRc(t *testing.T) { servetest.TestRc(t, rc.Params{ "type": "dlna", "vfs_cache_mode": "off", }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/dlna/cds.go
cmd/serve/dlna/cds.go
package dlna import ( "context" "encoding/xml" "errors" "fmt" "net/http" "net/url" "os" "path" "path/filepath" "regexp" "sort" "strings" "github.com/anacrolix/dms/dlna" "github.com/anacrolix/dms/upnp" "github.com/rclone/rclone/cmd/serve/dlna/upnpav" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/vfs" ) type contentDirectoryService struct { *server upnp.Eventing } func (cds *contentDirectoryService) updateIDString() string { return fmt.Sprintf("%d", uint32(os.Getpid())) } var mediaMimeTypeRegexp = regexp.MustCompile("^(video|audio|image)/") // Turns the given entry and DMS host into a UPnP object. A nil object is // returned if the entry is not of interest. func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo vfs.Node, resources vfs.Nodes, host string) (ret any, err error) { obj := upnpav.Object{ ID: cdsObject.ID(), Restricted: 1, ParentID: cdsObject.ParentID(), } if fileInfo.IsDir() { defaultChildCount := 1 obj.Class = "object.container.storageFolder" obj.Title = fileInfo.Name() return upnpav.Container{ Object: obj, ChildCount: &defaultChildCount, }, nil } if !fileInfo.Mode().IsRegular() { return } // Read the mime type from the fs.Object if possible, // otherwise fall back to working out what it is from the file path. var mimeType string if o, ok := fileInfo.DirEntry().(fs.Object); ok { mimeType = fs.MimeType(context.TODO(), o) // If backend doesn't know what the mime type is then // try getting it from the file name if mimeType == "application/octet-stream" { mimeType = fs.MimeTypeFromName(fileInfo.Name()) } } else { mimeType = fs.MimeTypeFromName(fileInfo.Name()) } mediaType := mediaMimeTypeRegexp.FindStringSubmatch(mimeType) if mediaType == nil { return } obj.Class = "object.item." 
+ mediaType[1] + "Item" obj.Title = fileInfo.Name() obj.Date = upnpav.Timestamp{Time: fileInfo.ModTime()} item := upnpav.Item{ Object: obj, Res: make([]upnpav.Resource, 0, 1), } item.Res = append(item.Res, upnpav.Resource{ URL: (&url.URL{ Scheme: "http", Host: host, Path: path.Join(resPath, cdsObject.Path), }).String(), ProtocolInfo: fmt.Sprintf("http-get:*:%s:%s", mimeType, dlna.ContentFeatures{ SupportRange: true, }.String()), Size: uint64(fileInfo.Size()), }) for _, resource := range resources { subtitleURL := (&url.URL{ Scheme: "http", Host: host, Path: path.Join(resPath, resource.Path()), }).String() // Read the mime type from the fs.Object if possible, // otherwise fall back to working out what it is from the file path. var mimeType string if o, ok := resource.DirEntry().(fs.Object); ok { mimeType = fs.MimeType(context.TODO(), o) // If backend doesn't know what the mime type is then // try getting it from the file name if mimeType == "application/octet-stream" { mimeType = fs.MimeTypeFromName(resource.Name()) } } else { mimeType = fs.MimeTypeFromName(resource.Name()) } item.Res = append(item.Res, upnpav.Resource{ URL: subtitleURL, ProtocolInfo: fmt.Sprintf("http-get:*:%s:*", mimeType), }) } ret = item return } // Returns all the upnpav objects in a directory. func (cds *contentDirectoryService) readContainer(o object, host string) (ret []any, err error) { node, err := cds.vfs.Stat(o.Path) if err != nil { return } if !node.IsDir() { err = errors.New("not a directory") return } dir := node.(*vfs.Dir) dirEntries, err := dir.ReadDirAll() if err != nil { err = errors.New("failed to list directory") return } // if there's a "Subs" child directory, add its children to the list as well, // so mediaWithResources is able to find them. 
for _, node := range dirEntries { if strings.EqualFold(node.Name(), "Subs") && node.IsDir() { subtitleDir := node.(*vfs.Dir) subtitleEntries, err := subtitleDir.ReadDirAll() if err != nil { err = errors.New("failed to list subtitle directory") return nil, err } dirEntries = append(dirEntries, subtitleEntries...) } } // Sort the directory entries by directories first then alphabetically by name sort.Slice(dirEntries, func(i, j int) bool { iNode, jNode := dirEntries[i], dirEntries[j] iIsDir, jIsDir := iNode.IsDir(), jNode.IsDir() if iIsDir && !jIsDir { return true } else if !iIsDir && jIsDir { return false } return strings.ToLower(iNode.Name()) < strings.ToLower(jNode.Name()) }) dirEntries, mediaResources := mediaWithResources(dirEntries) for _, de := range dirEntries { child := object{ path.Join(o.Path, de.Name()), } obj, err := cds.cdsObjectToUpnpavObject(child, de, mediaResources[de], host) if err != nil { fs.Errorf(cds, "error with %s: %s", child.FilePath(), err) continue } if obj == nil { fs.Debugf(cds, "unrecognized file type: %s", de) continue } ret = append(ret, obj) } return } // Given a list of nodes, separate them into potential media items and any associated resources (external subtitles, // for example.) // // The result is a slice of potential media nodes (in their original order) and a map containing associated // resources nodes of each media node, if any. func mediaWithResources(nodes vfs.Nodes) (vfs.Nodes, map[vfs.Node]vfs.Nodes) { media, mediaResources := vfs.Nodes{}, make(map[vfs.Node]vfs.Nodes) // First, separate out the subtitles and media into maps, keyed by their lowercase base names. 
mediaByName, subtitlesByName := make(map[string]vfs.Nodes), make(map[string]vfs.Nodes) for _, node := range nodes { baseName, ext := splitExt(strings.ToLower(node.Name())) switch ext { case ".srt", ".ass", ".ssa", ".sub", ".idx", ".sup", ".jss", ".txt", ".usf", ".cue", ".vtt", ".css": // .idx should be with .sub, .css should be with vtt otherwise they should be culled, // and their mimeTypes are not consistent, but anyway these negatives don't throw errors. subtitlesByName[baseName] = append(subtitlesByName[baseName], node) default: mediaByName[baseName] = append(mediaByName[baseName], node) media = append(media, node) } } // Find the associated media file for each subtitle for baseName, nodes := range subtitlesByName { // Find a media file with the same basename (video.mp4 for video.srt) mediaNodes, found := mediaByName[baseName] if !found { // Or basename of the basename (video.mp4 for video.en.srt) baseName, _ := splitExt(baseName) mediaNodes, found = mediaByName[baseName] } // Just advise if no match found if !found { fs.Infof(nodes, "could not find associated media for subtitle: %s", baseName) fs.Infof(mediaByName, "mediaByName is this, baseName is %s", baseName) continue } // Associate with all potential media nodes fs.Debugf(mediaNodes, "associating subtitle: %s", baseName) for _, mediaNode := range mediaNodes { mediaResources[mediaNode] = append(mediaResources[mediaNode], nodes...) } } return media, mediaResources } type browse struct { ObjectID string BrowseFlag string Filter string StartingIndex int RequestedCount int } // ContentDirectory object from ObjectID. 
func (cds *contentDirectoryService) objectFromID(id string) (o object, err error) { o.Path, err = url.QueryUnescape(id) if err != nil { return } if o.Path == "0" { o.Path = "/" } o.Path = path.Clean(o.Path) if !path.IsAbs(o.Path) { err = fmt.Errorf("bad ObjectID %v", o.Path) return } return } func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *http.Request) (map[string]string, error) { host := r.Host switch action { case "GetSystemUpdateID": return map[string]string{ "Id": cds.updateIDString(), }, nil case "GetSortCapabilities": return map[string]string{ "SortCaps": "dc:title", }, nil case "Browse": var browse browse if err := xml.Unmarshal(argsXML, &browse); err != nil { return nil, err } obj, err := cds.objectFromID(browse.ObjectID) if err != nil { return nil, upnp.Errorf(upnpav.NoSuchObjectErrorCode, "%s", err.Error()) } switch browse.BrowseFlag { case "BrowseDirectChildren": objs, err := cds.readContainer(obj, host) if err != nil { return nil, upnp.Errorf(upnpav.NoSuchObjectErrorCode, "%s", err.Error()) } totalMatches := len(objs) objs = objs[func() (low int) { low = min(browse.StartingIndex, len(objs)) return }():] if browse.RequestedCount != 0 && browse.RequestedCount < len(objs) { objs = objs[:browse.RequestedCount] } result, err := xml.Marshal(objs) if err != nil { return nil, err } return map[string]string{ "TotalMatches": fmt.Sprint(totalMatches), "NumberReturned": fmt.Sprint(len(objs)), "Result": didlLite(string(result)), "UpdateID": cds.updateIDString(), }, nil case "BrowseMetadata": node, err := cds.vfs.Stat(obj.Path) if err != nil { return nil, err } // TODO: External subtitles won't appear in the metadata here, but probably should. 
upnpObject, err := cds.cdsObjectToUpnpavObject(obj, node, vfs.Nodes{}, host) if err != nil { return nil, err } result, err := xml.Marshal(upnpObject) if err != nil { return nil, err } return map[string]string{ "TotalMatches": "1", "NumberReturned": "1", "Result": didlLite(string(result)), "UpdateID": cds.updateIDString(), }, nil default: return nil, upnp.Errorf(upnp.ArgumentValueInvalidErrorCode, "unhandled browse flag: %v", browse.BrowseFlag) } case "GetSearchCapabilities": return map[string]string{ "SearchCaps": "", }, nil // Samsung Extensions case "X_GetFeatureList": return map[string]string{ "FeatureList": `<Features xmlns="urn:schemas-upnp-org:av:avs" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="urn:schemas-upnp-org:av:avs http://www.upnp.org/schemas/av/avs.xsd"> <Feature name="samsung.com_BASICVIEW" version="1"> <container id="0" type="object.item.imageItem"/> <container id="0" type="object.item.audioItem"/> <container id="0" type="object.item.videoItem"/> </Feature> </Features>`}, nil case "X_SetBookmark": // just ignore return map[string]string{}, nil default: return nil, upnp.InvalidActionError } } // Represents a ContentDirectory object. type object struct { Path string // The cleaned, absolute path for the object relative to the server. } // Returns the actual local filesystem path for the object. func (o *object) FilePath() string { return filepath.FromSlash(o.Path) } // Returns the ObjectID for the object. This is used in various ContentDirectory actions. func (o object) ID() string { if !path.IsAbs(o.Path) { fs.Panicf(nil, "Relative object path: %s", o.Path) } if len(o.Path) == 1 { return "0" } return url.QueryEscape(o.Path) } func (o *object) IsRoot() bool { return o.Path == "/" } // Returns the object's parent ObjectID. Fortunately it can be deduced from the // ObjectID (for now). func (o object) ParentID() string { if o.IsRoot() { return "-1" } o.Path = path.Dir(o.Path) return o.ID() }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/dlna/dlna.go
cmd/serve/dlna/dlna.go
// Package dlna provides DLNA server. package dlna import ( "bytes" "context" "encoding/xml" "fmt" "net" "net/http" "net/url" "os" "strconv" "strings" "time" dms_dlna "github.com/anacrolix/dms/dlna" "github.com/anacrolix/dms/soap" "github.com/anacrolix/dms/ssdp" "github.com/anacrolix/dms/upnp" "github.com/anacrolix/log" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/serve" "github.com/rclone/rclone/cmd/serve/dlna/data" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/lib/systemd" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" "github.com/rclone/rclone/vfs/vfsflags" "github.com/spf13/cobra" ) // OptionsInfo descripts the Options in use var OptionsInfo = fs.Options{{ Name: "addr", Default: ":7879", Help: "The ip:port or :port to bind the DLNA http server to", }, { Name: "name", Default: "", Help: "Name of DLNA server", }, { Name: "log_trace", Default: false, Help: "Enable trace logging of SOAP traffic", }, { Name: "interface", Default: []string{}, Help: "The interface to use for SSDP (repeat as necessary)", }, { Name: "announce_interval", Default: fs.Duration(12 * time.Minute), Help: "The interval between SSDP announcements", }} // Options is the type for DLNA serving options. type Options struct { ListenAddr string `config:"addr"` FriendlyName string `config:"name"` LogTrace bool `config:"log_trace"` InterfaceNames []string `config:"interface"` AnnounceInterval fs.Duration `config:"announce_interval"` } // Opt contains the options for DLNA serving. 
var Opt Options func init() { fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "dlna", Opt: &Opt, Options: OptionsInfo}) flagSet := Command.Flags() flags.AddFlagsFromOptions(flagSet, "", OptionsInfo) vfsflags.AddFlags(flagSet) serve.Command.AddCommand(Command) serve.AddRc("dlna", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) { // Read VFS Opts var vfsOpt = vfscommon.Opt // set default opts err := configstruct.SetAny(in, &vfsOpt) if err != nil { return nil, err } // Read opts var opt = Opt // set default opts err = configstruct.SetAny(in, &opt) if err != nil { return nil, err } // Create server return newServer(ctx, f, &opt, &vfsOpt) }) } // Command definition for cobra. var Command = &cobra.Command{ Use: "dlna remote:path", Short: `Serve remote:path over DLNA`, Long: `Run a DLNA media server for media stored in an rclone remote. Many devices, such as the Xbox and PlayStation, can automatically discover this server in the LAN and play audio/video from it. VLC is also supported. Service discovery uses UDP multicast packets (SSDP) and will thus only work on LANs. Rclone will list all files present in the remote, without filtering based on media formats or file extensions. Additionally, there is no media transcoding support. This means that some players might show files that they are not able to play back correctly. Rclone will add external subtitle files (.srt) to videos if they have the same filename as the video file itself (except the extension), either in the same directory as the video, or in a "Subs" subdirectory. ### Server options Use ` + "`--addr`" + ` to specify which IP address and port the server should listen on, e.g. ` + "`--addr 1.2.3.4:8000` or `--addr :8080`" + ` to listen to all IPs. Use ` + "`--name`" + ` to choose the friendly server name, which is by default "rclone (hostname)". Use ` + "`--log-trace` in conjunction with `-vv`" + ` to enable additional debug logging of all UPNP traffic. 
` + strings.TrimSpace(vfs.Help()), Annotations: map[string]string{ "versionIntroduced": "v1.46", "groups": "Filter", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) f := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt) if err != nil { return err } defer systemd.Notify()() return s.Serve() }) }, } const ( serverField = "Linux/3.4 DLNADOC/1.50 UPnP/1.0 DMS/1.0" rootDescPath = "/rootDesc.xml" resPath = "/r/" serviceControlURL = "/ctl" ) type server struct { // The service SOAP handler keyed by service URN. services map[string]UPnPService Interfaces []net.Interface HTTPConn net.Listener httpListenAddr string handler http.Handler RootDeviceUUID string FriendlyName string // For waiting on the listener to close waitChan chan struct{} // Time interval between SSPD announces AnnounceInterval time.Duration f fs.Fs vfs *vfs.VFS } func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options) (*server, error) { friendlyName := opt.FriendlyName if friendlyName == "" { friendlyName = makeDefaultFriendlyName() } interfaces := make([]net.Interface, 0, len(opt.InterfaceNames)) for _, interfaceName := range opt.InterfaceNames { var err error intf, err := net.InterfaceByName(interfaceName) if err != nil { return nil, fmt.Errorf("failed to resolve interface name '%s': %w", interfaceName, err) } if !isAppropriatelyConfigured(*intf) { return nil, fmt.Errorf("interface '%s' is not appropriately configured (it should be UP, MULTICAST and MTU > 0)", interfaceName) } interfaces = append(interfaces, *intf) } if len(interfaces) == 0 { interfaces = listInterfaces() } s := &server{ AnnounceInterval: time.Duration(opt.AnnounceInterval), FriendlyName: friendlyName, RootDeviceUUID: makeDeviceUUID(friendlyName), Interfaces: interfaces, waitChan: make(chan struct{}), httpListenAddr: opt.ListenAddr, f: f, vfs: vfs.New(f, vfsOpt), } s.services = 
map[string]UPnPService{ "ContentDirectory": &contentDirectoryService{ server: s, }, "ConnectionManager": &connectionManagerService{ server: s, }, "X_MS_MediaReceiverRegistrar": &mediaReceiverRegistrarService{ server: s, }, } // Setup the various http routes. r := http.NewServeMux() r.Handle(resPath, http.StripPrefix(resPath, http.HandlerFunc(s.resourceHandler))) if opt.LogTrace { r.Handle(rootDescPath, traceLogging(http.HandlerFunc(s.rootDescHandler))) r.Handle(serviceControlURL, traceLogging(http.HandlerFunc(s.serviceControlHandler))) } else { r.HandleFunc(rootDescPath, s.rootDescHandler) r.HandleFunc(serviceControlURL, s.serviceControlHandler) } r.Handle("/static/", http.StripPrefix("/static/", withHeader("Cache-Control", "public, max-age=86400", http.FileServer(data.Assets)))) s.handler = logging(withHeader("Server", serverField, r)) // Currently, the SSDP server only listens on an IPv4 multicast address. // Differentiate between two INADDR_ANY addresses, // so that 0.0.0.0 can only listen on IPv4 addresses. network := "tcp4" if strings.Count(s.httpListenAddr, ":") > 1 { network = "tcp" } listener, err := net.Listen(network, s.httpListenAddr) if err != nil { return nil, err } s.HTTPConn = listener return s, nil } // UPnPService is the interface for the SOAP service. type UPnPService interface { Handle(action string, argsXML []byte, r *http.Request) (respArgs map[string]string, err error) Subscribe(callback []*url.URL, timeoutSeconds int) (sid string, actualTimeout int, err error) Unsubscribe(sid string) error } // Formats the server as a string (used for logging.) func (s *server) String() string { return fmt.Sprintf("DLNA server on %v", s.httpListenAddr) } // Returns rclone version number as the model number. func (s *server) ModelNumber() string { return fs.Version } // Renders the root device descriptor. 
func (s *server) rootDescHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() tmpl, err := data.GetTemplate() if err != nil { serveError(ctx, s, w, "Failed to load root descriptor template", err) return } buffer := new(bytes.Buffer) err = tmpl.Execute(buffer, s) if err != nil { serveError(ctx, s, w, "Failed to render root descriptor XML", err) return } w.Header().Set("content-type", `text/xml; charset="utf-8"`) w.Header().Set("cache-control", "private, max-age=60") w.Header().Set("content-length", strconv.FormatInt(int64(buffer.Len()), 10)) _, err = buffer.WriteTo(w) if err != nil { // Network error fs.Debugf(s, "Error writing rootDesc: %v", err) } } // Handle a service control HTTP request. func (s *server) serviceControlHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() soapActionString := r.Header.Get("SOAPACTION") soapAction, err := upnp.ParseActionHTTPHeader(soapActionString) if err != nil { serveError(ctx, s, w, "Could not parse SOAPACTION header", err) return } var env soap.Envelope if err := xml.NewDecoder(r.Body).Decode(&env); err != nil { serveError(ctx, s, w, "Could not parse SOAP request body", err) return } w.Header().Set("Content-Type", `text/xml; charset="utf-8"`) w.Header().Set("Ext", "") soapRespXML, code := func() ([]byte, int) { respArgs, err := s.soapActionResponse(soapAction, env.Body.Action, r) if err != nil { fs.Errorf(s, "Error invoking %v: %v", soapAction, err) upnpErr := upnp.ConvertError(err) return mustMarshalXML(soap.NewFault("UPnPError", upnpErr)), http.StatusInternalServerError } return marshalSOAPResponse(soapAction, respArgs), http.StatusOK }() bodyStr := fmt.Sprintf(`<?xml version="1.0" encoding="utf-8" standalone="yes"?><s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"><s:Body>%s</s:Body></s:Envelope>`, soapRespXML) w.WriteHeader(code) if _, err := w.Write([]byte(bodyStr)); err != nil { fs.Infof(s, "Error writing response: 
%v", err) } } // Handle a SOAP request and return the response arguments or UPnP error. func (s *server) soapActionResponse(sa upnp.SoapAction, actionRequestXML []byte, r *http.Request) (map[string]string, error) { service, ok := s.services[sa.Type] if !ok { // TODO: What's the invalid service error? return nil, upnp.Errorf(upnp.InvalidActionErrorCode, "Invalid service: %s", sa.Type) } return service.Handle(sa.Action, actionRequestXML, r) } // Serves actual resources (media files). func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() remotePath := r.URL.Path node, err := s.vfs.Stat(r.URL.Path) if err != nil { http.NotFound(w, r) return } w.Header().Set("Content-Length", strconv.FormatInt(node.Size(), 10)) // add some DLNA specific headers if r.Header.Get("getContentFeatures.dlna.org") != "" { w.Header().Set("contentFeatures.dlna.org", dms_dlna.ContentFeatures{ SupportRange: true, }.String()) } w.Header().Set("transferMode.dlna.org", "Streaming") file := node.(*vfs.File) in, err := file.Open(os.O_RDONLY) if err != nil { serveError(ctx, node, w, "Could not open resource", err) return } defer fs.CheckClose(in, &err) http.ServeContent(w, r, remotePath, node.ModTime(), in) } // Serve runs the server - returns the error only if the listener was // not started. Blocks until the server is closed. func (s *server) Serve() (err error) { go func() { s.startSSDP() }() go func() { fs.Logf(s.f, "Serving HTTP on %s", s.HTTPConn.Addr().String()) err := s.serveHTTP() if err != nil { fs.Logf(s.f, "Error on serving HTTP server: %v", err) } }() s.Wait() return nil } // Wait blocks while the listener is open. 
func (s *server) Wait() { <-s.waitChan } // Shutdown the DLNA server func (s *server) Shutdown() error { err := s.HTTPConn.Close() close(s.waitChan) if err != nil { return fmt.Errorf("failed to shutdown DLNA server: %w", err) } return nil } // Return the first address of the server func (s *server) Addr() net.Addr { return s.HTTPConn.Addr() } // Run SSDP (multicast for server discovery) on all interfaces. func (s *server) startSSDP() { active := 0 stopped := make(chan struct{}) for _, intf := range s.Interfaces { active++ go func(intf2 net.Interface) { defer func() { stopped <- struct{}{} }() s.ssdpInterface(intf2) }(intf) } for active > 0 { <-stopped active-- } } // Run SSDP server on an interface. func (s *server) ssdpInterface(intf net.Interface) { // Figure out whether should an ip be announced ipfilterFn := func(ip net.IP) bool { listenaddr := s.HTTPConn.Addr().String() listenip := listenaddr[:strings.LastIndex(listenaddr, ":")] switch listenip { case "0.0.0.0": if strings.Contains(ip.String(), ":") { // Any IPv6 address should not be announced // because SSDP only listen on IPv4 multicast address return false } return true case "[::]": // In the @Serve() section, the default settings have been made to not listen on IPv6 addresses. // If actually still listening on [::], then allow to announce any address. return true default: if listenip == ip.String() { return true } return false } } // Figure out which HTTP location to advertise based on the interface IP. advertiseLocationFn := func(ip net.IP) string { url := url.URL{ Scheme: "http", Host: (&net.TCPAddr{ IP: ip, Port: s.HTTPConn.Addr().(*net.TCPAddr).Port, }).String(), Path: rootDescPath, } return url.String() } _, err := intf.Addrs() if err != nil { panic(err) } fs.Logf(s, "Started SSDP on %v", intf.Name) // Note that the devices and services advertised here via SSDP should be // in agreement with the rootDesc XML descriptor that is defined above. 
ssdpServer := ssdp.Server{ Interface: intf, Devices: []string{ "urn:schemas-upnp-org:device:MediaServer:1"}, Services: []string{ "urn:schemas-upnp-org:service:ContentDirectory:1", "urn:schemas-upnp-org:service:ConnectionManager:1", "urn:microsoft.com:service:X_MS_MediaReceiverRegistrar:1"}, IPFilter: ipfilterFn, Location: advertiseLocationFn, Server: serverField, UUID: s.RootDeviceUUID, NotifyInterval: s.AnnounceInterval, Logger: log.Default, } // An interface with these flags should be valid for SSDP. const ssdpInterfaceFlags = net.FlagUp | net.FlagMulticast if err := ssdpServer.Init(); err != nil { if intf.Flags&ssdpInterfaceFlags != ssdpInterfaceFlags { // Didn't expect it to work anyway. return } if strings.Contains(err.Error(), "listen") { // OSX has a lot of dud interfaces. Failure to create a socket on // the interface are what we're expecting if the interface is no // good. return } fs.Errorf(s, "Error creating ssdp server on %s: %s", intf.Name, err) return } defer ssdpServer.Close() fs.Infof(s, "Started SSDP on %v", intf.Name) stopped := make(chan struct{}) go func() { defer close(stopped) if err := ssdpServer.Serve(); err != nil { fs.Errorf(s, "%q: %q\n", intf.Name, err) } }() select { case <-s.waitChan: // Returning will close the server. case <-stopped: } } func (s *server) serveHTTP() error { srv := &http.Server{ Handler: s.handler, } err := srv.Serve(s.HTTPConn) select { case <-s.waitChan: return nil default: return err } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/dlna/dlna_util.go
cmd/serve/dlna/dlna_util.go
package dlna import ( "context" "crypto/md5" "encoding/xml" "fmt" "io" "maps" "net" "net/http" "net/http/httptest" "net/http/httputil" "os" "github.com/anacrolix/dms/soap" "github.com/anacrolix/dms/upnp" "github.com/rclone/rclone/fs" ) // Return a default "friendly name" for the server. func makeDefaultFriendlyName() string { hostName, err := os.Hostname() if err != nil { hostName = "" } else { hostName = " (" + hostName + ")" } return "rclone" + hostName } func makeDeviceUUID(unique string) string { h := md5.New() if _, err := io.WriteString(h, unique); err != nil { fs.Panicf(nil, "makeDeviceUUID write failed: %s", err) } buf := h.Sum(nil) return upnp.FormatUUID(buf) } // Get all available active network interfaces. func listInterfaces() []net.Interface { ifs, err := net.Interfaces() if err != nil { fs.Logf(nil, "list network interfaces: %v", err) return []net.Interface{} } var active []net.Interface for _, intf := range ifs { if isAppropriatelyConfigured(intf) { active = append(active, intf) } } return active } func isAppropriatelyConfigured(intf net.Interface) bool { return intf.Flags&net.FlagUp != 0 && intf.Flags&net.FlagMulticast != 0 && intf.MTU > 0 } func didlLite(chardata string) string { return `<DIDL-Lite` + ` xmlns:dc="http://purl.org/dc/elements/1.1/"` + ` xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"` + ` xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"` + ` xmlns:dlna="urn:schemas-dlna-org:metadata-1-0/">` + chardata + `</DIDL-Lite>` } func mustMarshalXML(value any) []byte { ret, err := xml.MarshalIndent(value, "", " ") if err != nil { fs.Panicf(nil, "mustMarshalXML failed to marshal %v: %s", value, err) } return ret } // Marshal SOAP response arguments into a response XML snippet. 
func marshalSOAPResponse(sa upnp.SoapAction, args map[string]string) []byte { soapArgs := make([]soap.Arg, 0, len(args)) for argName, value := range args { soapArgs = append(soapArgs, soap.Arg{ XMLName: xml.Name{Local: argName}, Value: value, }) } return fmt.Appendf(nil, `<u:%[1]sResponse xmlns:u="%[2]s">%[3]s</u:%[1]sResponse>`, sa.Action, sa.ServiceURN.String(), mustMarshalXML(soapArgs)) } type loggingResponseWriter struct { http.ResponseWriter request *http.Request committed bool } func (lrw *loggingResponseWriter) logRequest(code int, err any) { // Choose appropriate log level based on response status code. var level fs.LogLevel if code < 400 && err == nil { level = fs.LogLevelInfo } else { level = fs.LogLevelError } if err == nil { err = "" } fs.LogLevelPrintf(level, lrw.request.URL, "%s %s %d %s %s", lrw.request.RemoteAddr, lrw.request.Method, code, lrw.request.Header.Get("SOAPACTION"), err) } func (lrw *loggingResponseWriter) WriteHeader(code int) { lrw.committed = true lrw.logRequest(code, nil) lrw.ResponseWriter.WriteHeader(code) } // HTTP handler that logs requests and any errors or panics. func logging(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { lrw := &loggingResponseWriter{ResponseWriter: w, request: r} defer func() { err := recover() if err != nil { if !lrw.committed { lrw.logRequest(http.StatusInternalServerError, err) http.Error(w, fmt.Sprint(err), http.StatusInternalServerError) } else { // Too late to send the error to client, but at least log it. fs.Errorf(r.URL.Path, "Recovered panic: %v", err) } } }() next.ServeHTTP(lrw, r) }) } // HTTP handler that logs complete request and response bodies for debugging. // Error recovery and general request logging are left to logging(). 
func traceLogging(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() dump, err := httputil.DumpRequest(r, true) if err != nil { serveError(ctx, nil, w, "error dumping request", err) return } fs.Debugf(nil, "%s", dump) recorder := httptest.NewRecorder() next.ServeHTTP(recorder, r) dump, err = httputil.DumpResponse(recorder.Result(), true) if err != nil { // log the error but ignore it fs.Errorf(nil, "error dumping response: %v", err) } else { fs.Debugf(nil, "%s", dump) } // copy from recorder to the real response writer maps.Copy(w.Header(), recorder.Header()) w.WriteHeader(recorder.Code) _, err = recorder.Body.WriteTo(w) if err != nil { // Network error fs.Debugf(nil, "Error writing response: %v", err) } }) } // HTTP handler that sets headers. func withHeader(name string, value string, next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set(name, value) next.ServeHTTP(w, r) }) } // serveError returns an http.StatusInternalServerError and logs the error func serveError(ctx context.Context, what any, w http.ResponseWriter, text string, err error) { err = fs.CountError(ctx, err) fs.Errorf(what, "%s: %v", text, err) http.Error(w, text+".", http.StatusInternalServerError) } // Splits a path into (root, ext) such that root + ext == path, and ext is empty // or begins with a period. Extended version of path.Ext(). func splitExt(path string) (string, string) { for i := len(path) - 1; i >= 0 && path[i] != '/'; i-- { if path[i] == '.' { return path[:i], path[i:] } } return path, "" }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/dlna/mrrs.go
cmd/serve/dlna/mrrs.go
package dlna import ( "net/http" "github.com/anacrolix/dms/upnp" ) type mediaReceiverRegistrarService struct { *server upnp.Eventing } func (mrrs *mediaReceiverRegistrarService) Handle(action string, argsXML []byte, r *http.Request) (map[string]string, error) { switch action { case "IsAuthorized", "IsValidated": return map[string]string{ "Result": "1", }, nil case "RegisterDevice": return map[string]string{ "RegistrationRespMsg": mrrs.RootDeviceUUID, }, nil default: return nil, upnp.InvalidActionError } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/dlna/upnpav/upnpav.go
cmd/serve/dlna/upnpav/upnpav.go
// Package upnpav provides utilities for DLNA server. package upnpav import ( "encoding/xml" "time" ) const ( // NoSuchObjectErrorCode : The specified ObjectID is invalid. NoSuchObjectErrorCode = 701 ) // Resource description type Resource struct { XMLName xml.Name `xml:"res"` ProtocolInfo string `xml:"protocolInfo,attr"` URL string `xml:",chardata"` Size uint64 `xml:"size,attr,omitempty"` Bitrate uint `xml:"bitrate,attr,omitempty"` Duration string `xml:"duration,attr,omitempty"` Resolution string `xml:"resolution,attr,omitempty"` } // Container description type Container struct { Object XMLName xml.Name `xml:"container"` ChildCount *int `xml:"childCount,attr"` } // Item description type Item struct { Object XMLName xml.Name `xml:"item"` Res []Resource InnerXML string `xml:",innerxml"` } // Object description type Object struct { ID string `xml:"id,attr"` ParentID string `xml:"parentID,attr"` Restricted int `xml:"restricted,attr"` // indicates whether the object is modifiable Class string `xml:"upnp:class"` Icon string `xml:"upnp:icon,omitempty"` Title string `xml:"dc:title"` Date Timestamp `xml:"dc:date"` Artist string `xml:"upnp:artist,omitempty"` Album string `xml:"upnp:album,omitempty"` Genre string `xml:"upnp:genre,omitempty"` AlbumArtURI string `xml:"upnp:albumArtURI,omitempty"` Searchable int `xml:"searchable,attr"` } // Timestamp wraps time.Time for formatting purposes type Timestamp struct { time.Time } // MarshalXML formats the Timestamp per DIDL-Lite spec func (t Timestamp) MarshalXML(e *xml.Encoder, start xml.StartElement) error { return e.EncodeElement(t.Format("2006-01-02"), start) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/dlna/data/data.go
cmd/serve/dlna/data/data.go
// Package data provides utilities for DLNA server. // The "go:generate" directive compiles static assets by running assets_generate.go // //go:generate go run assets_generate.go package data import ( "fmt" "io" "text/template" "github.com/rclone/rclone/fs" ) // GetTemplate returns the rootDesc XML template func GetTemplate() (tpl *template.Template, err error) { templateFile, err := Assets.Open("rootDesc.xml.tmpl") if err != nil { return nil, fmt.Errorf("get template open: %w", err) } defer fs.CheckClose(templateFile, &err) templateBytes, err := io.ReadAll(templateFile) if err != nil { return nil, fmt.Errorf("get template read: %w", err) } var templateString = string(templateBytes) tpl, err = template.New("rootDesc").Parse(templateString) if err != nil { return nil, fmt.Errorf("get template parse: %w", err) } return }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/dlna/data/assets_generate.go
cmd/serve/dlna/data/assets_generate.go
//go:generate go run assets_generate.go // The "go:generate" directive compiles static assets by running assets_generate.go //go:build ignore package main import ( "log" "net/http" "github.com/shurcooL/vfsgen" ) func main() { var AssetDir http.FileSystem = http.Dir("./static") err := vfsgen.Generate(AssetDir, vfsgen.Options{ PackageName: "data", BuildTags: "!dev", VariableName: "Assets", }) if err != nil { log.Fatalln(err) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/dlna/data/assets_vfsdata.go
cmd/serve/dlna/data/assets_vfsdata.go
// Code generated by vfsgen; DO NOT EDIT. //go:build !dev package data import ( "bytes" "compress/gzip" "fmt" "io" "net/http" "os" pathpkg "path" "time" ) // Assets statically implements the virtual filesystem provided to vfsgen. var Assets = func() http.FileSystem { fs := vfsgen۰FS{ "/": &vfsgen۰DirInfo{ name: "/", modTime: time.Date(2022, 10, 1, 4, 23, 36, 728914000, time.UTC), }, "/ConnectionManager.xml": &vfsgen۰CompressedFileInfo{ name: "ConnectionManager.xml", modTime: time.Date(2022, 10, 1, 4, 23, 36, 726914600, time.UTC), uncompressedSize: 5686, compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xdc\x57\x5f\x6f\xda\x30\x10\x7f\x4e\xa5\x7e\x07\x94\x77\x96\x56\xda\xc3\x54\x99\x54\x1d\xfd\x23\xb4\x55\x45\x2d\x45\xda\x53\xe5\x39\x57\xea\x35\x39\x47\xf6\xa5\xb4\xdf\x7e\x82\x10\x48\x20\x8c\x90\x39\xc0\xf6\x66\x5f\xee\xee\xf7\xcb\x9d\x7d\x77\x66\xe7\xef\x51\xd8\x7a\x03\x6d\xa4\xc2\x8e\x7b\xfa\xe9\xc4\x6d\x01\x0a\x15\x48\x1c\x75\xdc\xc7\xc1\x75\xfb\x8b\x7b\xee\x1f\x1f\x31\x23\xe2\xa0\xf5\x1e\x85\x68\x3a\x6e\xa2\xf1\xcc\x88\x17\x88\xb8\x69\x27\x31\xc6\x6d\xa5\x47\x67\x06\xf4\x9b\x14\xd0\x3e\x6d\x9f\xb8\xfe\xf1\x91\xc3\x4c\x0c\x62\x98\x3a\x9e\xec\x1d\x16\xf1\x5f\x4a\xfb\xa7\xcc\x4b\x17\xa9\x4c\xa2\xd2\xfe\x09\xf3\xd2\xc5\xc4\xce\x5b\x32\x64\x5c\x90\x54\xf8\x5d\x1a\x4a\x6d\xd2\xfd\x74\xed\x30\xe4\x11\xf8\x37\x40\x7d\xad\x48\x09\x15\xf6\xf0\x59\x31\x6f\x2a\x4d\x15\xb8\x1e\x25\x11\x20\xcd\xed\x73\xb2\xd9\x7e\xe6\xe5\x41\x25\x5a\x40\xde\xd8\x71\x58\x20\x35\xa4\x78\x2a\x21\xe6\x2d\xb6\x99\x82\x86\x90\x13\x04\x0f\xc4\x09\x86\x5c\x4b\xfe\x33\xcc\x5c\x15\x39\x95\x2a\xce\x08\x79\x4b\x8c\xd6\x31\x94\xf8\x6a\x87\x9f\xc4\xd7\xda\xec\x16\xfb\x45\x4e\xbc\x5c\x52\x4a\x12\xd4\xd7\x10\x73\x0d\xd7\x4a\x77\x15\x62\xca\xb0\x66\x96\xee\x21\x52\x04\xeb\xd2\x5d\x88\x88\xc4\xca\x01\xb9\x78\xba\xb8\xbf\x79\x1a\xfc\xe8\x5f\x3d\x59\xcf\x5a\x1f\x20\xf7\xdb\xb7\x1c\xf9\x08\xb4\x65\xd2\x25\xfe\xad\x33\xef\x5d\x36\x46\x7a\xe2\xda\x02\xdf\xcb\x8c\x82\x65\xa2\x39\xbf\x16\x58\x56\x8a\
xe8\x36\xb7\xb9\xc9\x90\x5e\x0c\x07\x9a\xa3\x89\x95\x26\xfb\x6c\x97\x9c\x5b\xa0\x7b\x2f\x8c\x7d\x9a\x33\xa7\xcd\x96\xc8\x45\xe6\xba\x2a\x8a\x43\x20\xa8\x59\x20\x0f\xf1\xc2\xd6\x88\xc7\x0d\x50\x37\xd1\x1a\x90\xf2\xa8\xc6\x42\x50\x8c\x95\xf3\x51\x4e\x6e\x0f\x31\xa9\x3f\xf0\xfc\x4b\xa5\xfd\x40\x2e\xf6\x7f\x52\x26\x2b\x8d\x4f\x35\xd9\xee\x79\x7e\xfa\xeb\xc6\xb9\x97\x01\xea\x50\xda\xfd\xe6\x09\xaa\x26\x53\xbb\x23\xd4\xc4\x36\x31\xcd\x45\x33\xf3\x6f\xaf\x9c\x67\xeb\xec\x23\x9b\xbd\x97\xa7\xae\x07\x73\xbf\xcc\xe4\xa1\x5a\x06\x30\xb8\x7a\x03\x24\xd3\x71\x3f\xc0\xb8\xf9\x66\x50\xf6\xc6\xcc\xb5\x81\x80\x13\x1f\x7c\xc4\xe0\x1b\xd2\x12\x47\xcc\x9b\x0b\x52\x6a\x66\xe5\x9f\xb6\xc1\x5e\x79\x3f\xee\x0a\x79\xe3\x4c\x60\x15\x1d\x55\x01\xfc\x8f\x67\xa5\x32\x07\x87\xf1\x30\x54\x63\x08\x86\x3c\x4c\xa0\xd0\xa6\x73\x72\xff\xee\x1b\xf3\x0a\x82\x32\xa5\xae\x42\x02\xa4\x6b\xa5\x23\x4e\xb7\xd2\x44\x9c\xc4\x4b\x05\xbb\x1e\x9a\xe4\xf9\x59\x0a\x09\x48\x5f\x39\x06\x63\x19\x50\x15\xbb\x47\xd4\x10\x4e\xe3\xd4\x7d\xe1\x88\x10\x56\xb2\x79\x45\x35\xc6\x32\xcd\xa2\x2c\x77\x71\xac\x67\xa9\xa4\x6d\xec\xea\xa8\x94\xd6\x54\x3b\x67\xa4\x87\xf1\xa4\xd4\x6d\x4c\xc1\x5d\x42\x6b\x14\x9b\xce\xc0\x6e\xaa\x44\x95\x13\x50\xec\xbe\x0b\x7c\xf9\xb9\x29\xec\xb5\x43\xe1\x2e\xc0\x57\x06\xe6\xed\x40\x99\x57\xd6\xa0\x98\x67\x44\x1c\xf8\xbf\x03\x00\x00\xff\xff\xb5\x30\x72\xd3\x36\x16\x00\x00"), }, "/ContentDirectory.xml": &vfsgen۰CompressedFileInfo{ name: "ContentDirectory.xml", modTime: time.Date(2022, 10, 1, 4, 23, 36, 726914600, time.UTC), uncompressedSize: 15030, compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x5a\x51\x6f\xa3\x38\x10\x7e\xce\x4a\xfb\x1f\xaa\xbc\xf7\x68\xa5\x7b\x5a\xb9\x59\xed\xa6\x69\x15\xa9\x6d\x22\x92\xad\xf6\x9e\x22\x17\xa6\xc4\xbb\x60\x73\xf6\xd0\x26\xff\xfe\x44\x80\x04\x9a\xd0\xa6\x74\x60\x69\x2f\x6f\x81\xe0\x6f\x3e\x7f\x8c\x67\xf0\x78\xd8\xd7\x45\xe0\x1f\x3d\x80\x36\x42\xc9\xb3\xee\xe9\x5f\x27\xdd\xaf\xbd\xcf\x9f\x98\x71\x42\xf7\x68\x11\xf8\xd2\x9c\x75\x23\x2d\xbf\x18\x67\x0e\x01\x37\xc7\x51\x28\xc3\x63\xa5\xbd\x2f\x06\xf4\x83\x70\xe0\xf8\xf4\xf8\xa4\xdb\xfb\xfc\xa9\xc3\x4c\x08\xce\x6d\x82\x13\x5f\x77\x58\xc0\x7f\x29\xdd\x3b\x65\x56\xf2\x23\xb9\x27\xa4\xd2\xbd\x13\x66\x25\x3f\xe2\x71\xd6\x93\x81\x8c\x3b\x28\x94\xbc\x12\x06\x93\x31\xc9\xf5\xea\x77\x87\x49\x1e\x40\xef\x12\x70\x02\x5c\x3b\xf3\x3e\x0f\xf9\x9d\xf0\x05\x0a\x30\xcc\x5a\xfd\x97\x3c\xc6\xb5\x17\x05\x20\x71\x8d\x92\xbb\x97\x5e\xa7\x58\x6b\xa0\x02\x40\xa7\xc3\x5c\xa1\x21\xb1\xac\x22\x64\xd6\xe6\x32\x7b\x40\x83\xcf\x11\xdc\x09\x72\x84\x5b\xae\x05\xbf\xf3\x73\x70\x39\x5e\x3b\x1f\x4c\x49\x59\x45\x56\x9b\xeb\xcd\xfc\xad\x9c\x00\x25\x62\x28\x8d\x14\x52\x24\x30\x44\x42\x6c\x71\x6a\x44\x86\xc1\x02\x41\xc6\xae\x44\xa4\x47\x1e\x8f\x4e\x98\x12\x96\xb5\x2b\x74\x01\x1c\x23\x0d\xf1\x90\x8a\x9a\x94\x20\x54\x56\xa3\x80\x57\xbf\x87\x2c\x0d\x42\xf0\x23\x74\x39\xc2\xf0\xbc\xa2\x04\x43\x97\xc6\x0f\x9e\x70\xa9\x77\xf2\xdf\xb5\x7a\x34\x50\x71\xc6\xa3\xbb\x5f\xe0\x60\x51\xb0\xc2\xbc\x85\xdc\x7b\xda\xdf\x66\xdf\xec\xcb\xd9\xf4\x9f\xf1\x60\xb6\x81\x7d\xc5\xdc\xcb\x38\x26\x13\xbc\xf0\xb9\x47\xcc\x32\x0f\x4c\xc0\xf3\x42\xf8\x08\x9a\x98\x63\x06\x4a\xc0\x6f\x82\x5c\xa3\x90\xde\x50\xba\xb0\x20\xa6\x99\x62\x12\xb0\xb4\xe1\xdf\x08\x0c\x82\xdb\x57\x91\x44\x62\x9a\x29\x26\x85\x98\x71\x12\xd4\x02\x41\x0b\x4e\x4c\xb2\x08\x4d\x22\xa9\x89\x7c\x9a\xa0\x9e\xa3\x99\xa1\x12\x10\xbc\x89\x82\x3b\xd0\x36\x60\xa4\x25\xb8\xd4\x44\xe9\x5e\xfa\x54\x21\xf7\xaf\x39\x3a\x73\x30\xed\x65\xb9\x2b\x09\x52\x30\x6c\x2a\xa1\x25\x1f\xda\x15\x13\x5a\x5f\x49\xe4\x42\x82\x6e\x77\x4e\x4b\x37\x13\x35\x05
\x90\x27\xe0\x87\xdc\x76\xc8\x6d\x87\xdc\x76\xc8\x6d\x87\xdc\xf6\xc7\x73\x5b\x5f\x03\x47\x48\x52\xc9\xc7\xce\x70\x03\x1f\xe2\x9b\x86\x98\x23\xe1\xca\x7c\x71\xef\x5b\xd1\x97\x48\x65\x6c\x4d\x7c\xab\xe0\xec\xe7\x60\x50\xab\xe5\x9b\xbc\xbd\x4d\x05\x8a\x0a\x12\x24\x71\xe5\xc3\x28\x50\x1a\x93\x22\xad\x41\xe2\x94\x7b\xb7\xdc\x8f\x80\x98\x6a\x06\xfb\xea\x52\x62\x69\x4e\x86\xc7\xb6\x51\xad\xe0\x5b\xd7\xea\xe1\xe3\x7b\xd6\x0d\x3c\x8e\x79\xec\x5b\xad\xa7\xd9\xaa\x74\x52\xc1\x9d\x86\x41\xa8\x34\xda\x60\x54\xa4\x1d\xa8\x7c\xb0\x12\x0f\xfe\x61\x0f\x89\x5f\xd6\x0a\x91\xe0\x3d\xc5\x29\x49\x48\x1e\xdb\x6e\x2f\xc9\xa9\xe6\xd2\xdc\x3f\xfb\x81\x57\xd1\x97\xf2\xc8\xf5\x7a\xd3\x60\x71\xf0\xa6\x83\x37\x91\x95\x04\x51\x85\x99\xb5\x37\xfa\xd4\x1e\x72\x54\x4c\xfd\x8d\xa9\x71\x0e\x3e\x20\xbc\x51\x87\x6c\xf8\x9f\x77\xdc\x6a\x07\xbe\x99\xdc\x63\xad\x3c\x0d\xc6\x7c\x10\x77\x78\x89\x67\x8c\x11\x99\xba\x56\x72\x86\x4e\x48\xf8\x0a\xa4\x87\xf3\xba\x08\x67\xe8\x84\x84\x57\x85\xb0\xba\xf8\xa6\xe0\x4d\x14\x99\x6c\xb8\x07\x0d\xb2\x72\x80\x78\x27\x75\xa6\x77\xb2\x89\x79\xbf\xfb\x82\x9f\xb3\x56\xb6\x17\xe5\xcf\x91\x12\x68\xbf\x81\x3d\xf7\xcf\xd9\x04\xf0\xbb\x52\xbf\x03\xae\x7f\x57\x5d\x58\x1c\xc1\x53\x7a\x39\x5d\x86\xd4\xe5\x87\x22\x34\x45\xf9\x91\x7c\x61\xd9\xff\xa3\x75\x3f\x56\x66\x02\x8e\x92\x2e\x31\xc9\x04\x97\xd2\xd7\xb3\xdf\xd9\x9f\x2c\x6d\x02\x5e\x41\x4f\xd7\xb8\xcc\xe4\x4d\x1d\x19\x90\xee\xe0\x01\x24\x9a\xb3\xae\x54\xdd\xed\x43\xfc\xd2\x56\x4d\x97\x23\x8f\x9d\xb4\x67\x50\x0b\xe9\x31\x6b\x7d\x23\x61\x66\xb6\xa6\xf4\x0a\xd3\xcf\xf5\xcc\xd6\x6d\xf8\xe5\x2e\x55\x52\x06\x4b\x30\x45\x0a\xe5\x6d\x90\x6b\x33\x91\xf8\x9b\xd6\xe8\xfa\x4b\x21\xb3\xdb\xe0\x84\x37\x9f\xd9\xcd\xbd\xe7\xb2\x64\x58\xa7\xcd\x9d\x21\xaa\x79\xdb\xdb\xc7\x51\x4d\x59\x2e\xef\x98\x69\x8a\xc1\xee\x3e\xd4\x17\xac\x77\x18\xf7\x7d\xf5\x08\xee\xfa\x58\x60\x9d\x32\x72\xf7\xd3\x1e\xd7\x6b\x40\x1e\x8f\x66\x56\xe1\xcf\xf2\x01\xe7\xab\xd4\xd1\x9f\x0b\xdf\xd5\x20\x77\x0d\x2b\xde\xcb\xc5\x7e\x22\x51\xb6\x1b\x83\x1a\x73\x88\x92\x2e\x95\xa6\xec\x6f
\xb5\x1a\x11\x46\xd8\x72\xab\x5b\x9d\x43\x8d\x58\xad\x39\xa1\x94\x1b\xde\x5d\x9e\x69\xd4\xf4\x76\xc5\x85\x66\xc9\xf7\x47\xd7\xe3\xab\xc1\x74\x70\xbe\xc7\x6a\x1f\xd8\xf6\xc8\xde\xe3\xb9\xe1\xcd\x6c\x6c\x8f\x2e\xed\xc1\x64\xb2\xc7\xd3\x93\xe9\x68\x3c\xde\x6d\xbf\xee\xb0\x51\x5e\x1d\x6a\x6a\xf9\x96\x96\x7b\x1a\x23\x50\x38\x2a\x6e\xde\xfe\x93\xc2\x6f\x6e\x4d\x69\x51\x5b\xf4\x2a\xd9\xfa\x3e\xb7\xa0\x3b\xcc\x85\x7b\x1e\xf9\xb8\x52\xeb\xc8\x22\xfe\xa0\xd9\x3f\xb2\xd4\x4b\x24\xdb\xd3\xb5\x81\x4b\xa1\xa0\xf2\x8a\xb8\xb7\x1f\x27\x66\xed\xda\x5c\x32\xcb\x38\xa1\xdb\xfb\x2f\x00\x00\xff\xff\x3e\xc6\x59\x3c\xb6\x3a\x00\x00"), }, "/X_MS_MediaReceiverRegistrar.xml": &vfsgen۰CompressedFileInfo{ name: "X_MS_MediaReceiverRegistrar.xml", modTime: time.Date(2022, 10, 1, 4, 23, 36, 727914200, time.UTC), uncompressedSize: 2572, compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x96\xd1\x6b\xdb\x30\x10\xc6\x9f\x5d\xe8\xff\x10\xfc\x9e\x2a\x81\xb1\x87\xa2\xb8\x04\x52\x4a\x60\x83\xe1\x66\x81\x3d\x05\xc5\x3a\xdc\xdb\xec\x93\xa7\x93\x43\xbb\xbf\x7e\x38\x72\x62\xaf\x71\x0a\x0b\x4e\xf3\xd2\x37\x9d\xd4\xfb\xbe\x5f\xf5\x45\x97\xc8\xbb\xe7\x3c\x1b\x6c\xc0\x32\x1a\x9a\x84\xe3\x9b\x51\x38\xb8\x8b\xae\xaf\x24\x27\x85\x1e\x3c\xe7\x19\xf1\x24\x2c\x2d\xdd\x72\xf2\x04\xb9\xe2\x61\x59\x50\x31\x34\x36\xbd\x65\xb0\x1b\x4c\x60\x38\x1e\x8e\xc2\xe8\xfa\x2a\x90\x5c\x40\xb2\xf4\x42\x55\x1d\xc8\x5c\xfd\x34\x36\x1a\x4b\xe1\x17\x7e\x0f\xc9\xd8\x68\x24\x85\x5f\x54\x7d\xe2\x55\xa3\x54\x89\x43\x43\x5f\x90\x9d\xef\xf1\xf5\x76\x1d\x48\x52\x39\x44\x73\x9e\x96\xee\xc9\x58\xfc\x03\x5a\x8a\xed\x96\x3f\x55\x36\x2d\x73\x20\xb7\x6f\x6e\xed\xd5\x75\x2d\x31\x83\x8a\x7e\x3e\x6b\xb7\x07\x81\xd4\x68\xc1\xdb\x21\x49\xd1\x54\xbb\x73\x0b\x99\x72\xa0\x1f\x9d\x72\xb0\x54\x16\xd5\x3a\x83\x68\xba\x9a\xc6\x0f\xab\xc5\x8f\x6f\xf7\xab\x46\xb6\xf3\x2f\x6b\x24\xf1\x8a\xe9\x08\x63\x0c\x5c\x66\xee\x28\xa1\x29\xdd\x29\x88\x3b\xd5\xff\x00\x6c\xea\x26\x15\xd1\x8a\xa5\x23\xa2\x18\x52\x64\x07\xd6\x5f\xc8\x89\x21\x79\x11\xab\x2a\xf1
\x18\x7e\x7f\xe5\xb4\xe7\xb8\xba\x0c\x7a\x09\xae\x2d\xcb\xc5\x5b\xe0\x27\xa7\xd8\x61\x71\xde\x48\xe7\xbc\x54\x19\xea\xca\xe1\xe3\xd1\x5d\xfc\xd1\xed\xd6\xbb\x43\x59\x0f\xe4\xad\xf4\x62\xaf\x2b\xb9\x6d\x35\x60\x20\x7d\xbf\x01\x72\x3c\x09\xc9\x84\xed\x78\x3b\x2f\xb4\x15\xb3\x56\x4e\x2d\x5e\x0a\x88\xd8\x59\xa4\x54\x8a\xfd\x86\x47\xe3\x83\xff\xe9\x24\xef\xc3\xfb\x6f\x9c\x91\xdc\xf9\x6c\xdf\x1a\x35\x0d\xc2\x1a\xe9\x66\xad\x18\x3e\x7f\x7a\x1f\x92\x83\xe1\xd1\x3f\xca\x0b\xf0\xbf\x2c\xf5\x77\xeb\x96\xe0\xc1\x2a\x72\xa0\xbf\x17\xd5\xbb\x3f\xf6\x89\x28\xf1\x9c\x08\x33\x20\x7c\x77\x82\x7a\xd4\xa1\xa1\xc7\x32\x49\x00\xf4\x05\x09\x62\xd8\x98\x5f\xbd\xf8\x4b\xd1\x35\x25\xa4\xa8\x7e\xed\x45\x7f\x03\x00\x00\xff\xff\x63\x7d\x2d\x0d\x0c\x0a\x00\x00"), }, "/rclone-120x120.png": &vfsgen۰FileInfo{ name: "rclone-120x120.png", modTime: time.Date(2022, 10, 1, 4, 23, 36, 727914200, time.UTC), content: []byte("\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\x00\x00\x78\x00\x00\x00\x78\x08\x03\x00\x00\x00\x0e\xba\xc6\xe0\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x06\xeb\x00\x00\x06\xeb\x01\x4c\x31\x9e\x4a\x00\x00\x00\x4e\x50\x4c\x54\x45\x47\x70\x4c\x70\xca\xf3\x3f\x79\xad\xb4\xe3\xf9\x40\x79\xae\x3f\x79\xad\x96\xd8\xf6\x77\xb6\xde\x75\xb6\xde\x8a\xd3\xf5\x70\xca\xf2\x3f\x79\xad\x70\xca\xf3\x70\xca\xf2\x70\xca\xf2\xb4\xe4\xfa\xb4\xe3\xf9\xb4\xe3\xf9\x3f\x79\xad\x3f\x79\xad\x9b\xda\xf7\xb4\xe3\xfa\x5d\xab\xd7\x3f\x79\xad\x70\xca\xf2\xb4\xe3\xf9\xb7\xab\x8d\x85\x00\x00\x00\x17\x74\x52\x4e\x53\x00\xef\xe8\xd0\x32\x7d\x51\x17\x09\x29\xb8\xc7\x96\x73\xd4\x6d\x8f\xea\xa6\x5d\x3c\xaf\x9c\x1b\x2f\x88\x80\x00\x00\x04\xc7\x49\x44\x41\x54\x68\xde\xed\x5b\xc9\x96\xa3\x30\x0c\x6c\xcc\xe2\x8d\xcd\x2c\x0f\xf3\xff\x3f\x3a\x40\xba\x5f\x02\xb1\x6c\xc9\xb8\x33\x73\x98\x3a\x93\x54\x24\x97\xe4\x92\x92\x7c\x7d\xfd\xc7\xbf\x0c\x5e\x75\xfd\x9c\xe7\xb9\x
c9\xf3\xb9\xef\x26\xfe\x11\x52\x51\xf5\xf9\x7a\x81\xe9\x2b\xf1\xcb\xb4\xd5\x6c\x56\x37\xe6\xe9\x17\x83\xed\xf2\xd5\x03\xd3\xfd\x52\xd8\x9d\x59\x03\x30\xdd\x2f\xd0\x4e\xf9\x8a\x40\x9e\x3a\xe1\xa2\x5f\x91\xe8\xc5\xe7\xc3\xfd\x0e\x5a\x26\xd4\xf2\x4a\x42\x95\x4c\x55\x2b\x11\x89\x34\xd6\xaf\x64\xf4\x7f\x89\x37\x09\x73\xb7\x46\xa1\xfb\xb0\xae\x92\x29\x4c\x9a\x58\x62\x73\xab\xaa\x44\xbe\x46\x23\x17\x9f\x16\x56\x02\x81\xc9\xf5\x16\xe2\x93\x9d\xdf\x23\x9e\x3f\xae\xe8\x9b\xca\x0e\x29\xcb\x98\x3c\xa0\xf9\x3c\x7d\xc0\xa6\x7f\x58\x3c\x31\x79\x5d\x49\x95\xf8\x84\xcf\xfe\x6a\x9a\xd3\x9e\x32\x28\x69\xf3\x16\x47\x65\x52\x0a\x1b\xaa\xe1\xd9\x61\xa3\x79\x9e\xb0\x96\x81\x28\x66\x67\x43\x12\x73\x32\x79\x4d\xb4\x46\x08\x95\x80\x4c\x74\x1d\x1a\x70\x5c\xe1\x26\xd1\xf5\x38\x53\xdf\xa7\x4b\xa4\x6b\x43\x3e\x32\x67\xb2\x4d\x9a\x62\xaa\xe8\x0d\x87\xa7\x68\x5b\xc6\x7b\xc5\x0a\x67\x92\xa6\x14\xda\x9a\x23\x64\xd1\xa5\x68\x1f\x5d\xc4\x87\x4d\x42\x3c\x45\x94\x7e\x9f\xa2\x9a\x78\x84\x20\x3f\x41\x2c\x52\x14\xf2\x41\x6c\xe6\xbe\xab\x26\x29\xf9\x37\x42\x3b\x99\x14\xc4\xfd\x3a\x77\x32\x45\xed\x53\x53\x2d\x45\xa2\x7b\x25\xd6\xe4\xf2\x62\x1c\xcb\x0b\x46\x68\x37\x93\x68\x88\x12\x63\xa9\xd9\xe2\x02\x27\x34\xeb\x8e\xcc\xaa\x17\x10\x03\x5e\x5b\xc4\x0b\xb9\x18\x16\x2f\x0a\xbc\x57\x12\x09\x69\x97\x45\xa3\x9d\xc0\x7e\x91\x36\xb8\xa8\x79\x90\x76\xc3\x88\x0d\x78\x17\x75\x6b\x15\xe2\x72\x2c\x19\x82\x77\x61\x02\xe9\x86\xb7\x1b\x9c\x5b\x6b\xb3\x26\x14\xae\x5e\x70\x28\x71\x56\x69\xef\xb2\x95\xdd\xe1\x0f\xba\x60\x0b\x16\x05\x6a\xde\xd9\x8f\x58\x1d\xc4\x36\xf3\xdc\x6d\xe3\x82\xc7\x80\x72\xb7\x5b\xa6\x45\x66\xbf\x01\xa6\xbb\x5c\x28\x28\x10\xbb\xa1\xdd\x2b\x55\xd6\x06\x98\x69\xbc\xa7\xf6\x05\x98\xea\x43\xd3\xca\x06\x98\x47\x1a\x6f\x89\x99\xb3\xb6\x02\x96\xd6\xfa\x99\x89\xbc\xa7\x7a\x9a\x3c\x23\x5b\x7d\x22\xb6\xd5\x0d\x3d\xbf\x77\x90\x19\x0e\x98\x67\x67\xe2\xec\xd2\xc5\x84\xa6\xf1\x6a\xcc\xea\xc0\x11\xb0\xb5\xed\xb9\xf5\x0c\x34\xde\x93\xa4\x05\xa0\xac\x7d\xc0\xbb\x06\xbc\xa1\x3e\x25\x9a\xc8\x3b\x60\xd6\xac\x9d\x2b\xe0\x0d\x
53\x7c\xa2\x19\x47\x0c\xa8\x7b\xd3\x92\x0e\xde\xd7\x64\x13\x2b\xf8\xda\xa8\x3b\x70\xc0\x53\x2e\xe2\x67\x4d\x09\x76\xa3\x94\xa0\x7e\xb9\xdb\xda\xca\xc9\x6b\x33\x81\x09\x58\x97\x63\x71\x01\xc7\x8c\x96\x5b\xd9\x88\xd6\x4d\xfc\x13\xb2\x27\x60\x56\xee\x24\x61\xf7\x32\x3b\x4b\xa9\x01\x78\x6d\x16\xea\x59\xa5\x88\xb5\xf2\xc7\x24\x0d\x12\x7f\xf7\x2f\xa8\x86\x59\x11\x3f\x5c\x3e\x4c\xad\x84\x72\xad\x8e\x6a\x80\x0e\x97\xb2\x44\xb8\x94\xd4\xcf\xae\x84\x2b\x80\x99\xc3\x99\xd6\xb4\x19\xa6\x73\x0f\xd2\x90\xbe\x2a\x30\xd3\x94\x3c\xbf\x8d\x11\x2f\x13\xa2\xa3\x67\xfe\xe4\x9a\x61\xfd\x6b\xe4\xf2\x19\xd2\x75\x41\x18\x52\xa2\xe0\x3e\x66\x09\x1c\x71\x91\x8e\x58\x02\x87\x5c\x62\x47\x94\x68\xb4\xee\xe6\x35\x50\x4f\x58\xd4\x2f\xc0\x4c\x45\x8d\x5b\x5d\x9a\x9a\xe9\xfa\xad\x13\xc4\xe4\xba\x75\x8a\x9a\xe1\xdf\x06\xf3\x75\x47\x86\x26\xd6\x78\x91\xb6\x22\xee\x90\xb3\x2f\xa2\xb6\x26\xf0\x56\x27\x16\x14\x95\xf8\xed\xd3\x67\x3c\x8a\x38\x23\xa6\xda\xa1\xd0\x3a\x32\xd5\x24\x71\x39\x3b\xaf\x8c\x11\x57\xe6\x2e\x27\x81\x29\xa5\xa7\x40\xfd\xfb\x23\xee\x7e\x91\x26\x34\x10\xe9\xf3\x13\xb4\x6b\x42\xb9\x3b\xd7\x40\xe9\xf7\x4f\xd7\x48\x78\x55\xed\xbe\x24\x98\xc0\x37\xbf\x50\x49\x71\xe0\x25\x05\x6e\xb7\xe2\xe9\xf7\xdf\x4e\x86\xd4\xaa\x37\x45\x0a\xc4\x90\x02\xb7\x8f\x70\xcb\x06\x2c\xc8\x96\x52\xbd\x10\x7a\x88\xb2\x54\x7d\x29\xf0\x93\x96\x0b\x21\xd9\x12\xb2\xe8\x92\x94\xe8\x43\x15\xc5\x42\x61\x6e\x68\xcc\xd0\xe3\xc7\xd3\x9a\xb2\x1c\x06\x8e\x0c\xd8\x9f\xd5\xc0\xc3\xad\x7f\x66\xd3\x05\xda\x35\x1e\xe9\xbb\xd6\xa0\x54\xd6\x5b\x7f\x3c\x30\xb3\x3d\x77\xe7\x87\xd9\x6e\x41\xe6\xf6\x44\xcd\x6b\xf0\xc1\x9f\x1b\xcd\xb7\x00\xd1\xe3\x23\x6c\x3e\x0e\xec\xa1\xf5\xc9\xc2\xc8\xea\xea\x38\x3d\x21\x1b\xe5\x79\xac\x46\x6e\x40\x18\x63\xaf\x4d\xbc\xb6\x7e\xb4\x6d\x16\x78\x42\xd2\x77\x3e\x7b\x2f\xe5\xa1\xf7\x0d\xe1\x79\x83\x73\x46\x5a\x7f\x34\xf7\x78\x5f\x3d\x4b\x49\xda\x70\x89\xf6\x16\x71\x13\xb7\xd8\x1b\x3c\xfd\x0b\x85\xb3\x2f\x25\x6c\xd8\x30\xfa\xf2\x25\x5a\xc6\x2e\x6f\x8f\x8b\x4b\x25\x49\x34\x51\xd9\x7b\xb2\x79\x7b\x5b\xd1\x11\xcc\x
bb\xb2\x65\x5c\x4d\xb9\x2e\x6e\x82\xc0\xf6\x66\x36\x65\xb7\x85\x45\xfe\xce\xe9\xb1\x20\x89\x60\x56\xd0\x2f\x94\x06\x92\xc0\xc8\xcc\x0a\x74\xa3\x78\xe6\x63\x09\x26\x69\x0a\xab\x7d\x2e\x78\x64\x94\x98\x05\xa1\xaa\xb2\x80\xed\x2f\xb0\x07\xfd\x58\xc0\x55\xd8\x74\xab\xf0\xd2\x02\x17\xf4\xc0\x03\x4b\xc3\x73\xb8\x88\x29\x1a\xf5\x05\xf2\xab\x29\x92\x41\xea\xac\xc6\xae\x45\x79\x89\xa6\xdd\xb7\xd9\x93\x4a\x43\xeb\xff\x6d\x02\x2b\x1d\x16\x50\xd6\xe0\x82\x98\xfe\xbf\xa0\x87\xcb\xba\x7e\x4f\x00\x2e\xa2\xa6\x46\x5d\x85\x96\xa9\x26\xf6\xaf\x50\xc5\x58\x0e\x5a\x6b\xc6\xb4\x1e\xca\xb1\x08\x7c\xf8\xcd\xe2\x35\xb5\x6a\x37\x28\x55\x37\x55\xc2\x7f\x88\x20\x8e\xe8\xeb\x3f\x20\xfc\x01\x6b\xf7\x98\xc9\x61\xe7\x4c\xda\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82"), }, "/rclone-48x48.png": &vfsgen۰FileInfo{ name: "rclone-48x48.png", modTime: time.Date(2022, 10, 1, 4, 23, 36, 727914200, time.UTC), content: []byte("\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\x00\x00\x30\x00\x00\x00\x30\x08\x03\x00\x00\x00\x60\xdc\x09\xb5\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x02\xc4\x00\x00\x02\xc4\x01\x5b\x91\x9d\x0b\x00\x00\x00\x5d\x50\x4c\x54\x45\x47\x70\x4c\x3f\x7a\xad\xb4\xe3\xf9\x89\xd3\xf5\x3f\x79\xad\x9e\xdb\xf7\xb4\xe4\xfa\x84\xc5\xea\x70\xca\xf2\xb4\xe4\xf9\x70\xcb\xf3\x3f\x79\xad\x3f\x79\xad\x70\xcb\xf2\x70\xca\xf3\x3f\x79\xad\x70\xca\xf2\xb5\xe4\xf9\x3f\x79\xad\x70\xca\xf3\x3f\x79\xae\x3f\x79\xad\x3e\x79\xad\x70\xca\xf3\xb4\xe3\xfa\xb4\xe3\xfa\xb4\xe3\xf9\xb4\xe3\xfa\xb4\xe3\xf9\x3f\x79\xad\x70\xca\xf2\x48\xd5\xe6\x94\x00\x00\x00\x1c\x74\x52\x4e\x53\x00\x36\xed\x38\x94\x27\x68\x12\xda\xa4\x53\xee\xac\xa3\x80\x65\xef\x42\x7a\x6a\x4e\xda\xc7\xbc\x95\x8b\xd2\xb7\x4c\xbf\xbe\x53\x00\x00\x01\xd0\x49\x44\x41\x54\x48\xc7\xad\x56\xd9\x96\xab\x20\x10\x74\x41\x11\x15\xf7\xb8\x24\xca\xff\x7f\xe6\xa8\x08\xcd\xd2\xc6\x99\x73\x6f\x3d\x25
\x48\xd9\x55\xd5\x8d\x1a\x04\xff\x01\xb4\x4d\xd9\x87\xb1\xa5\xa5\xbf\xda\x1e\xb3\x68\x55\x60\xf1\xf3\xdd\xd9\x6a\x81\x3d\x54\x69\xa3\xd5\x41\xd4\xfe\x6d\xff\x8e\x2f\x8c\x76\xc5\x10\xdd\x1a\xa1\xd1\x8a\x33\xee\x7c\xb0\xf5\x06\x33\xbe\x3f\x31\xb6\x7c\x96\xf9\x63\xfc\x4d\x1e\x0a\xcc\xd4\x49\x78\x41\x09\xda\x41\xaa\x56\x52\xed\xe2\x6b\x44\x0c\x29\x8a\x45\x9b\x22\x82\x13\xaf\xa8\x81\xf9\xba\xf6\x36\x17\xdf\x5f\x72\x62\xbe\x22\x58\xc4\x08\x0b\x4b\xe3\x84\xde\xdc\x05\xef\x44\x58\x13\x89\xce\x93\x84\xe4\x1a\x92\x62\xd3\x08\x1f\x4d\x87\xe5\x66\x62\xbc\x96\x75\xb7\x93\x9e\x5b\xdd\x1e\x8a\xcd\x46\x6d\xb7\x26\x0a\xb8\x68\x72\xd8\xdf\x6d\x2e\x5e\x96\x83\x75\xa1\x8d\x10\xa2\x52\xfb\x6b\x6f\xbf\xac\x90\xc2\xf0\xf5\xe2\x40\x7f\xe9\xdf\xf0\x02\x70\x3c\xe6\x20\x3b\x09\x42\x1e\xa5\x97\x4f\x18\x6c\x07\x54\x16\x10\x22\x73\x05\x8d\xe5\x09\x62\x67\x94\x9e\x0e\xb4\x28\x28\x40\x28\x7a\xa2\xde\x41\xae\xf6\x1f\x25\xb4\x83\x62\xf0\xa7\x45\x8d\x36\x30\x62\x88\xb4\x46\x9e\x0a\xe9\x81\xe3\x97\x32\xb1\x47\x3b\x5a\xc9\xdf\xe2\x8a\x49\x4c\x81\xea\x71\x67\x68\xcf\x0e\xd8\x86\x2a\x6d\xc2\x4a\x52\x62\x3a\x2f\x71\x8b\xa0\x5c\x34\x08\x21\x17\x66\x97\x3c\x82\x2f\x09\xe4\xa2\x92\x46\x67\xa2\x83\x58\x47\x98\xa3\xa6\x89\xa7\x89\xeb\x2e\x51\x4f\xd1\x1e\x2b\x34\x4e\x1d\x33\x18\x84\x49\x31\x92\x06\x1a\xa7\x4d\x6c\x85\x6c\x5d\xa8\xf5\xee\x35\xa4\xf1\xbe\x31\x46\xc3\x3c\x3d\x45\x49\xca\xd7\xd6\xd1\x4c\x00\x85\x57\xbc\x81\xbf\x15\x3a\xde\x14\x46\xc7\x81\x74\x35\xb8\x84\x12\x7c\x3b\xc8\x6f\x8e\x74\x6d\x8a\x12\x8e\xa0\x03\xc4\x21\x14\x21\xca\x30\x86\xa5\xf6\x19\xd3\xfd\xfd\xcf\x2c\x47\x9b\x50\x9b\x59\x9a\x09\x1b\xcf\x32\xa0\x14\x9d\x7c\x65\x55\xa0\x6b\xca\xb1\xb7\x6e\x4d\xca\xb1\x24\x5d\x68\x7c\x77\xf4\x7c\xe2\xbc\xca\xe9\x3f\x7c\xea\xfc\x00\xd9\x83\x7c\xed\x8a\x77\x2b\x47\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82"), }, "/rootDesc.xml.tmpl": &vfsgen۰CompressedFileInfo{ name: "rootDesc.xml.tmpl", modTime: time.Date(2022, 10, 1, 4, 23, 36, 728914000, time.UTC), uncompressedSize: 2586, compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xcc\x56\xd1\x6a\xe3\x3a\x10\x7d\x2f\xf4\x1f\x42\x9e\xee\x85\xc6\xb2\x4b\x2f\x04\xa3\xaa\x94\x98\x0b\x81\xa6\x5b\x92\xcd\xd2\xb7\xa0\xca\x13\x47\xbb\xb6\x64\x24\x39\x49\x29\xfd\xf7\x45\xb2\x9d\xca\x69\x9a\x16\x02\xcb\xfa\x25\x9a\x33\x73\x8e\x67\x46\x1a\xc5\xf8\x66\x5b\xe4\xbd\x35\x28\xcd\xa5\xb8\xee\x47\x41\xd8\xbf\x21\xe7\x67\x58\x49\x69\x7a\xdb\x22\x17\xfa\xba\x5f\x29\x11\x6b\xb6\x82\x82\xea\x41\x55\x8a\x72\x20\x55\x16\xa7\xb0\xe6\x0c\x06\xd1\x20\xec\x9f\x9f\xf5\xdc\xe3\xc2\xe3\x34\x17\xb4\xcb\xb1\xc8\x71\x8e\x06\x76\xdd\x5f\x19\x53\xc6\x08\x6d\x36\x9b\x40\x03\x0b\x98\x0c\x7e\x29\x64\xb9\x7d\x62\xa3\xb1\x2e\x81\xfd\xa8\x13\x25\x35\x1d\x17\xf4\xa7\x54\x24\xc2\xa8\x5e\xb4\x28\x17\x52\x91\x10\xa3\x7a\xe1\xc8\x68\x9f\x8d\xeb\x64\x5a\x4e\x6d\x7d\x7f\x2e\x81\x1c\x29\x37\x9e\x40\xca\xe9\x0c\xd4\x1a\x54\x1c\x61\xe4\xb1\x1a\x9d\xa5\xe2\x20\xd2\xfc\xf9\x9e\x16\x40\x5e\x5e\x82\xff\x3d\xfb\xf5\x15\xa3\x8e\x7f\x57\x85\xa8\x96\x94\x99\x4a\x81\x22\x8a\xe5\x52\x40\xef\x9f\xfa\x37\x90\x2a\xfb\xd7\x96\xe7\x45\x1c\x60\xcd\xa7\x77\xc4\xb6\x4f\xc7\x08\xbd\x11\x51\x97\x68\x83\x5a\xae\x4c\x21\x4f\x40\x33\xc5\x4b\x63\x3b\x52\x93\x30\x7a\xe7\xf0\x09\x2e\xe7\x4e\x64\xa7\x0a\x07\x54\xc5\x13\x28\x5b\xf8\xe4\xcd\xb4\x75\xfb\x5e\x9f\xf0\x71\xe6\xad\xb7\x89\xd6\xa0\x38\x6d\x15\xc2\xe6\xc1\xa8\x03\x37\xa1\xf3\xe4\xde\x66\x30\x95\xd2\x24\x6e\x83\xe6\xf3\x71\x62\x93\xb0\x8e\x76\xbf\x73\x41\xe3\xc7\x45\x72\x77\x7f\x3b\xba\x7d\x40\x07\xe0\xe4\xdb\x88\x24\x93\xd9\x20\x0a\xfe\x0b\x31\xda\x73\x1c\x0e\x9f\x0c\x3e\x23\x68\x60\xf1\x83\x92\x69\xc5\xcc\x88\x96\x44\x17\xfc\x22\x19\x4d\xa2\xf0\x22\x03\xe3\xce\xd6\x58\x2c\xa5\x3d\xfe\x16\x18\x51\xb7\x07\x2d\x64\x8b\xed\xb0\x3d\xcd\xc7\xc5\x69\xaa\x1d\x7e\xa3\xcb\x99\x14\x77\x5c\x1b\xd2\xce\xaa\x43\x76\x96\x1b\xb4\x02\x8c\x3d\xfc\xbc\xa0\x19\xa0\x52\x64\x76\xe6\x1a\xcc\x8b\xdb\xf0\xd4\xac\xc8\xd5\x10\xa3\x7a\xe5\xb9\x56\xc0\xb3\x95\x71\xbe\x66\xe9\x39\x53\x28\xcd\x8a\x0c\xed\x9c\x95\x5d\x5a\xa5\x72\x82\xb4\xa1\x86
\xb3\xe6\xd8\x0c\xae\x86\xdb\xab\x61\xe0\x72\xb0\xde\x5d\xd2\xa8\x93\xf5\x89\x35\x44\x97\xe1\xc7\x45\x38\xe7\x89\x55\x44\x97\xe1\x36\xba\x0c\x3f\xa9\xa3\x36\xbc\xcd\xb1\xe3\x61\x4f\x7a\x77\xbf\x1a\xd0\x7f\x65\x03\x7d\x7c\xd1\x35\x01\xf1\x48\x0a\x03\xc2\x24\x5c\x01\x33\x52\x3d\xdb\xeb\xce\x27\xbf\xd7\x1c\xa7\x4e\x71\x5f\x69\x9c\xbe\xd3\xda\x29\x8d\x53\x5f\x67\x36\x7a\x48\xec\xc4\xb7\x2d\xd9\xa7\x05\xdb\x22\xc7\xa8\x8d\xf2\x88\x4c\x0a\xa3\xa4\xbb\x2d\x10\x33\x39\x46\x1e\xe0\x85\xc1\x1a\x84\x99\x55\x4f\x16\xc6\xc8\xb7\xde\x9a\xbc\xdf\xb2\x53\x7b\x28\x80\xd9\x69\x9b\x50\x41\xb3\xfa\x3f\xe3\x84\x26\x76\xc5\xbe\xde\xc5\x2e\xef\x6f\x6d\x63\xc1\x99\x92\x5a\x2e\x4d\xc0\x64\xb1\xeb\xe1\xe3\x62\x32\x5b\xb8\x4b\x6c\x0a\x0c\xf8\x1a\xd4\x14\x32\xae\x8d\xa2\x5f\xef\xe6\x41\xe5\x71\x7a\x4c\xfb\x8b\xcd\x3d\xa2\xf0\xe7\xdb\xbc\xb3\xfd\x8b\xa1\x54\xa0\x41\xd8\x6c\xa5\x70\xaf\xc4\x68\x1f\xaa\x3f\x90\x76\xdf\x43\x18\xd9\x8f\x3f\xf2\x3b\x00\x00\xff\xff\xa9\x37\x2a\x77\x1a\x0a\x00\x00"), }, } fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ fs["/ConnectionManager.xml"].(os.FileInfo), fs["/ContentDirectory.xml"].(os.FileInfo), fs["/X_MS_MediaReceiverRegistrar.xml"].(os.FileInfo), fs["/rclone-120x120.png"].(os.FileInfo), fs["/rclone-48x48.png"].(os.FileInfo), fs["/rootDesc.xml.tmpl"].(os.FileInfo), } return fs }() type vfsgen۰FS map[string]interface{} func (fs vfsgen۰FS) Open(path string) (http.File, error) { path = pathpkg.Clean("/" + path) f, ok := fs[path] if !ok { return nil, &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist} } switch f := f.(type) { case *vfsgen۰CompressedFileInfo: gr, err := gzip.NewReader(bytes.NewReader(f.compressedContent)) if err != nil { // This should never happen because we generate the gzip bytes such that they are always valid. 
panic("unexpected error reading own gzip compressed bytes: " + err.Error()) } return &vfsgen۰CompressedFile{ vfsgen۰CompressedFileInfo: f, gr: gr, }, nil case *vfsgen۰FileInfo: return &vfsgen۰File{ vfsgen۰FileInfo: f, Reader: bytes.NewReader(f.content), }, nil case *vfsgen۰DirInfo: return &vfsgen۰Dir{ vfsgen۰DirInfo: f, }, nil default: // This should never happen because we generate only the above types. panic(fmt.Sprintf("unexpected type %T", f)) } } // vfsgen۰CompressedFileInfo is a static definition of a gzip compressed file. type vfsgen۰CompressedFileInfo struct { name string modTime time.Time compressedContent []byte uncompressedSize int64 } func (f *vfsgen۰CompressedFileInfo) Readdir(count int) ([]os.FileInfo, error) { return nil, fmt.Errorf("cannot Readdir from file %s", f.name) } func (f *vfsgen۰CompressedFileInfo) Stat() (os.FileInfo, error) { return f, nil } func (f *vfsgen۰CompressedFileInfo) GzipBytes() []byte { return f.compressedContent } func (f *vfsgen۰CompressedFileInfo) Name() string { return f.name } func (f *vfsgen۰CompressedFileInfo) Size() int64 { return f.uncompressedSize } func (f *vfsgen۰CompressedFileInfo) Mode() os.FileMode { return 0444 } func (f *vfsgen۰CompressedFileInfo) ModTime() time.Time { return f.modTime } func (f *vfsgen۰CompressedFileInfo) IsDir() bool { return false } func (f *vfsgen۰CompressedFileInfo) Sys() interface{} { return nil } // vfsgen۰CompressedFile is an opened compressedFile instance. type vfsgen۰CompressedFile struct { *vfsgen۰CompressedFileInfo gr *gzip.Reader grPos int64 // Actual gr uncompressed position. seekPos int64 // Seek uncompressed position. } func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) { if f.grPos > f.seekPos { // Rewind to beginning. err = f.gr.Reset(bytes.NewReader(f.compressedContent)) if err != nil { return 0, err } f.grPos = 0 } if f.grPos < f.seekPos { // Fast-forward. 
_, err = io.CopyN(io.Discard, f.gr, f.seekPos-f.grPos) if err != nil { return 0, err } f.grPos = f.seekPos } n, err = f.gr.Read(p) f.grPos += int64(n) f.seekPos = f.grPos return n, err } func (f *vfsgen۰CompressedFile) Seek(offset int64, whence int) (int64, error) { switch whence { case io.SeekStart: f.seekPos = 0 + offset case io.SeekCurrent: f.seekPos += offset case io.SeekEnd: f.seekPos = f.uncompressedSize + offset default: panic(fmt.Errorf("invalid whence value: %v", whence)) } return f.seekPos, nil } func (f *vfsgen۰CompressedFile) Close() error { return f.gr.Close() } // vfsgen۰FileInfo is a static definition of an uncompressed file (because it's not worth gzip compressing). type vfsgen۰FileInfo struct { name string modTime time.Time content []byte } func (f *vfsgen۰FileInfo) Readdir(count int) ([]os.FileInfo, error) { return nil, fmt.Errorf("cannot Readdir from file %s", f.name) } func (f *vfsgen۰FileInfo) Stat() (os.FileInfo, error) { return f, nil } func (f *vfsgen۰FileInfo) NotWorthGzipCompressing() {} func (f *vfsgen۰FileInfo) Name() string { return f.name } func (f *vfsgen۰FileInfo) Size() int64 { return int64(len(f.content)) } func (f *vfsgen۰FileInfo) Mode() os.FileMode { return 0444 } func (f *vfsgen۰FileInfo) ModTime() time.Time { return f.modTime } func (f *vfsgen۰FileInfo) IsDir() bool { return false } func (f *vfsgen۰FileInfo) Sys() interface{} { return nil } // vfsgen۰File is an opened file instance. type vfsgen۰File struct { *vfsgen۰FileInfo *bytes.Reader } func (f *vfsgen۰File) Close() error { return nil } // vfsgen۰DirInfo is a static definition of a directory. 
type vfsgen۰DirInfo struct { name string modTime time.Time entries []os.FileInfo } func (d *vfsgen۰DirInfo) Read([]byte) (int, error) { return 0, fmt.Errorf("cannot Read from directory %s", d.name) } func (d *vfsgen۰DirInfo) Close() error { return nil } func (d *vfsgen۰DirInfo) Stat() (os.FileInfo, error) { return d, nil } func (d *vfsgen۰DirInfo) Name() string { return d.name } func (d *vfsgen۰DirInfo) Size() int64 { return 0 } func (d *vfsgen۰DirInfo) Mode() os.FileMode { return 0755 | os.ModeDir } func (d *vfsgen۰DirInfo) ModTime() time.Time { return d.modTime } func (d *vfsgen۰DirInfo) IsDir() bool { return true } func (d *vfsgen۰DirInfo) Sys() interface{} { return nil } // vfsgen۰Dir is an opened dir instance. type vfsgen۰Dir struct { *vfsgen۰DirInfo pos int // Position within entries for Seek and Readdir. } func (d *vfsgen۰Dir) Seek(offset int64, whence int) (int64, error) { if offset == 0 && whence == io.SeekStart { d.pos = 0 return 0, nil } return 0, fmt.Errorf("unsupported Seek in directory %s", d.name) } func (d *vfsgen۰Dir) Readdir(count int) ([]os.FileInfo, error) { if d.pos >= len(d.entries) && count > 0 { return nil, io.EOF } if count <= 0 || count > len(d.entries)-d.pos { count = len(d.entries) - d.pos } e := d.entries[d.pos : d.pos+count] d.pos += count return e, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/restic/restic.go
cmd/serve/restic/restic.go
// Package restic serves a remote suitable for use with restic
package restic

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net"
	"net/http"
	"os"
	"path"
	"regexp"
	"strings"
	"time"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
	"github.com/rclone/rclone/cmd"
	cmdserve "github.com/rclone/rclone/cmd/serve"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/rc"
	"github.com/rclone/rclone/fs/walk"
	libhttp "github.com/rclone/rclone/lib/http"
	"github.com/rclone/rclone/lib/http/serve"
	"github.com/rclone/rclone/lib/systemd"
	"github.com/rclone/rclone/lib/terminal"
	"github.com/spf13/cobra"
	"golang.org/x/net/http2"
)

// OptionsInfo describes the Options in use.
// The four restic-specific options are extended with the generic HTTP
// server and HTTP auth option sets from lib/http.
var OptionsInfo = fs.Options{{
	Name:    "stdio",
	Default: false,
	Help:    "Run an HTTP2 server on stdin/stdout",
}, {
	Name:    "append_only",
	Default: false,
	Help:    "Disallow deletion of repository data",
}, {
	Name:    "private_repos",
	Default: false,
	Help:    "Users can only access their private repo",
}, {
	Name:    "cache_objects",
	Default: true,
	Help:    "Cache listed objects",
}}.
	Add(libhttp.ConfigInfo).
	Add(libhttp.AuthConfigInfo)

// Options required for http server
type Options struct {
	Auth         libhttp.AuthConfig // authentication settings for the HTTP server
	HTTP         libhttp.Config     // generic HTTP server settings
	Stdio        bool               `config:"stdio"`         // run an HTTP2 server on stdin/stdout
	AppendOnly   bool               `config:"append_only"`   // disallow deletion of repository data
	PrivateRepos bool               `config:"private_repos"` // users can only access their private repo
	CacheObjects bool               `config:"cache_objects"` // cache listed objects
}

// Opt is options set by command line flags
var Opt Options

// flagPrefix is the prefix used to uniquely identify command line flags.
// It is intentionally empty for this package.
const flagPrefix = "" func init() { fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "restic", Opt: &Opt, Options: OptionsInfo}) flagSet := Command.Flags() flags.AddFlagsFromOptions(flagSet, "", OptionsInfo) cmdserve.Command.AddCommand(Command) cmdserve.AddRc("restic", func(ctx context.Context, f fs.Fs, in rc.Params) (cmdserve.Handle, error) { // Read opts var opt = Opt // set default opts err := configstruct.SetAny(in, &opt) if err != nil { return nil, err } if opt.Stdio { return nil, errors.New("can't use --stdio via the rc") } // Create server return newServer(ctx, f, &opt) }) } // Command definition for cobra var Command = &cobra.Command{ Use: "restic remote:path", Short: `Serve the remote for restic's REST API.`, Long: `Run a basic web server to serve a remote over restic's REST backend API over HTTP. This allows restic to use rclone as a data storage mechanism for cloud providers that restic does not support directly. [Restic](https://restic.net/) is a command-line program for doing backups. The server will log errors. Use -v to see access logs. ` + "`--bwlimit`" + ` will be respected for file transfers. Use ` + "`--stats`" + ` to control the stats printing. ### Setting up rclone for use by restic First [set up a remote for your chosen cloud provider](/docs/#configure). Once you have set up the remote, check it is working with, for example "rclone lsd remote:". You may have called the remote something other than "remote:" - just substitute whatever you called it in the following instructions. Now start the rclone restic server ` + "```console" + ` rclone serve restic -v remote:backup ` + "```" + ` Where you can replace "backup" in the above by whatever path in the remote you wish to use. By default this will serve on "localhost:8080" you can change this with use of the ` + "`--addr`" + ` flag. You might wish to start this server on boot. Adding ` + "`--cache-objects=false`" + ` will cause rclone to stop caching objects returned from the List call. 
Caching is normally desirable as it speeds up downloading objects, saves transactions and uses very little memory. ### Setting up restic to use rclone Now you can [follow the restic instructions](http://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#rest-server) on setting up restic. Note that you will need restic 0.8.2 or later to interoperate with rclone. For the example above you will want to use "http://localhost:8080/" as the URL for the REST server. For example: ` + "```console" + ` $ export RESTIC_REPOSITORY=rest:http://localhost:8080/ $ export RESTIC_PASSWORD=yourpassword $ restic init created restic backend 8b1a4b56ae at rest:http://localhost:8080/ Please note that knowledge of your password is required to access the repository. Losing your password means that your data is irrecoverably lost. $ restic backup /path/to/files/to/backup scan [/path/to/files/to/backup] scanned 189 directories, 312 files in 0:00 [0:00] 100.00% 38.128 MiB / 38.128 MiB 501 / 501 items 0 errors ETA 0:00 duration: 0:00 snapshot 45c8fdd8 saved ` + "```" + ` #### Multiple repositories Note that you can use the endpoint to host multiple repositories. Do this by adding a directory name or path after the URL. Note that these **must** end with /. Eg ` + "```console" + ` $ export RESTIC_REPOSITORY=rest:http://localhost:8080/user1repo/ # backup user1 stuff $ export RESTIC_REPOSITORY=rest:http://localhost:8080/user2repo/ # backup user2 stuff ` + "```" + ` #### Private repositories The` + "`--private-repos`" + ` flag can be used to limit users to repositories starting with a path of ` + "`/<username>/`" + `. 
` + strings.TrimSpace(libhttp.Help(flagPrefix)+libhttp.AuthHelp(flagPrefix)), Annotations: map[string]string{ "versionIntroduced": "v1.40", }, Run: func(command *cobra.Command, args []string) { ctx := context.Background() cmd.CheckArgs(1, 1, command, args) f := cmd.NewFsSrc(args) cmd.Run(false, true, command, func() error { s, err := newServer(ctx, f, &Opt) if err != nil { return err } if s.opt.Stdio { if terminal.IsTerminal(int(os.Stdout.Fd())) { return errors.New("refusing to run HTTP2 server directly on a terminal, please let restic start rclone") } conn := &StdioConn{ stdin: os.Stdin, stdout: os.Stdout, } httpSrv := &http2.Server{} opts := &http2.ServeConnOpts{ Handler: s.server.Router(), } httpSrv.ServeConn(conn, opts) return nil } fs.Logf(s.f, "Serving restic REST API on %s", s.server.URLs()) defer systemd.Notify()() return s.Serve() }) }, } const ( resticAPIV2 = "application/vnd.x.restic.rest.v2" ) type contextRemoteType struct{} // ContextRemoteKey is a simple context key for storing the username of the request var ContextRemoteKey = &contextRemoteType{} // WithRemote makes a remote from a URL path. This implements the backend layout // required by restic. 
func WithRemote(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var urlpath string rctx := chi.RouteContext(r.Context()) if rctx != nil && rctx.RoutePath != "" { urlpath = rctx.RoutePath } else { urlpath = r.URL.Path } urlpath = strings.Trim(urlpath, "/") parts := matchData.FindStringSubmatch(urlpath) // if no data directory, layout is flat if parts != nil { // otherwise map // data/2159dd48 to // data/21/2159dd48 fileName := parts[1] prefix := urlpath[:len(urlpath)-len(fileName)] urlpath = prefix + fileName[:2] + "/" + fileName } ctx := context.WithValue(r.Context(), ContextRemoteKey, urlpath) next.ServeHTTP(w, r.WithContext(ctx)) }) } // Middleware to ensure authenticated user is accessing their own private folder func checkPrivate(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { user := chi.URLParam(r, "userID") userID, ok := libhttp.CtxGetUser(r.Context()) if ok && user != "" && user == userID { next.ServeHTTP(w, r) } else { http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden) } }) } // server contains everything to run the server type server struct { server *libhttp.Server f fs.Fs cache *cache opt Options } func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *server, err error) { s = &server{ f: f, cache: newCache(opt.CacheObjects), opt: *opt, } // Don't bind any HTTP listeners if running with --stdio if opt.Stdio { opt.HTTP.ListenAddr = nil } s.server, err = libhttp.NewServer(ctx, libhttp.WithConfig(opt.HTTP), libhttp.WithAuth(opt.Auth), ) if err != nil { return nil, fmt.Errorf("failed to init server: %w", err) } router := s.server.Router() s.Bind(router) return s, nil } // Serve restic until the server is shutdown func (s *server) Serve() error { s.server.Serve() s.server.Wait() return nil } // Return the first address of the server func (s *server) Addr() net.Addr { return s.server.Addr() } // Shutdown the server 
func (s *server) Shutdown() error { return s.server.Shutdown() } // bind helper for main Bind method func (s *server) bind(router chi.Router) { router.MethodFunc("GET", "/*", func(w http.ResponseWriter, r *http.Request) { urlpath := chi.URLParam(r, "*") if urlpath == "" || strings.HasSuffix(urlpath, "/") { s.listObjects(w, r) } else { s.serveObject(w, r) } }) router.MethodFunc("POST", "/*", func(w http.ResponseWriter, r *http.Request) { urlpath := chi.URLParam(r, "*") if urlpath == "" || strings.HasSuffix(urlpath, "/") { s.createRepo(w, r) } else { s.postObject(w, r) } }) router.MethodFunc("HEAD", "/*", s.serveObject) router.MethodFunc("DELETE", "/*", s.deleteObject) } // Bind restic server routes to passed router func (s *server) Bind(router chi.Router) { // FIXME // if m := authX.Auth(authX.Opt); m != nil { // router.Use(m) // } router.Use( middleware.SetHeader("Accept-Ranges", "bytes"), middleware.SetHeader("Server", "rclone/"+fs.Version), WithRemote, ) if s.opt.PrivateRepos { router.Route("/{userID}", func(r chi.Router) { r.Use(checkPrivate) s.bind(r) }) router.NotFound(func(w http.ResponseWriter, _ *http.Request) { http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden) }) } else { s.bind(router) } } var matchData = regexp.MustCompile("(?:^|/)data/([^/]{2,})$") // newObject returns an object with the remote given either from the // cache or directly func (s *server) newObject(ctx context.Context, remote string) (fs.Object, error) { o := s.cache.find(remote) if o != nil { return o, nil } o, err := s.f.NewObject(ctx, remote) if err != nil { return o, err } s.cache.add(remote, o) return o, nil } // get the remote func (s *server) serveObject(w http.ResponseWriter, r *http.Request) { remote, ok := r.Context().Value(ContextRemoteKey).(string) if !ok { http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) return } o, err := s.newObject(r.Context(), remote) if err != nil { fs.Debugf(remote, "%s request 
error: %v", r.Method, err) if errors.Is(err, fs.ErrorObjectNotFound) { http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) } else { http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) } return } serve.Object(w, r, o) } // postObject posts an object to the repository func (s *server) postObject(w http.ResponseWriter, r *http.Request) { remote, ok := r.Context().Value(ContextRemoteKey).(string) if !ok { http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) return } if s.opt.AppendOnly { // make sure the file does not exist yet _, err := s.newObject(r.Context(), remote) if err == nil { fs.Errorf(remote, "Post request: file already exists, refusing to overwrite in append-only mode") http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden) return } } o, err := operations.RcatSize(r.Context(), s.f, remote, r.Body, r.ContentLength, time.Now(), nil) if err != nil { err = accounting.Stats(r.Context()).Error(err) fs.Errorf(remote, "Post request rcat error: %v", err) http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) return } // if successfully uploaded add to cache s.cache.add(remote, o) } // delete the remote func (s *server) deleteObject(w http.ResponseWriter, r *http.Request) { remote, ok := r.Context().Value(ContextRemoteKey).(string) if !ok { http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) return } if s.opt.AppendOnly { parts := strings.Split(r.URL.Path, "/") // if path doesn't end in "/locks/:name", disallow the operation if len(parts) < 2 || parts[len(parts)-2] != "locks" { http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden) return } } o, err := s.newObject(r.Context(), remote) if err != nil { fs.Debugf(remote, "Delete request error: %v", err) http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) return } if err := 
o.Remove(r.Context()); err != nil { fs.Errorf(remote, "Delete request remove error: %v", err) if errors.Is(err, fs.ErrorObjectNotFound) { http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) } else { http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) } return } // remove object from cache s.cache.remove(remote) } // listItem is an element returned for the restic v2 list response type listItem struct { Name string `json:"name"` Size int64 `json:"size"` } // return type for list type listItems []listItem // add an fs.Object to the listItems func (ls *listItems) add(o fs.Object) { *ls = append(*ls, listItem{ Name: path.Base(o.Remote()), Size: o.Size(), }) } // listObjects lists all Objects of a given type in an arbitrary order. func (s *server) listObjects(w http.ResponseWriter, r *http.Request) { remote, ok := r.Context().Value(ContextRemoteKey).(string) if !ok { http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) return } if r.Header.Get("Accept") != resticAPIV2 { fs.Errorf(remote, "Restic v2 API required for List Objects") http.Error(w, "Restic v2 API required for List Objects", http.StatusBadRequest) return } fs.Debugf(remote, "list request") // make sure an empty list is returned, and not a 'nil' value ls := listItems{} // Remove all existing values from the cache s.cache.removePrefix(remote) // if remote supports ListR use that directly, otherwise use recursive Walk err := walk.ListR(r.Context(), s.f, remote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error { for _, entry := range entries { if o, ok := entry.(fs.Object); ok { ls.add(o) s.cache.add(o.Remote(), o) } } return nil }) if err != nil { if !errors.Is(err, fs.ErrorDirNotFound) { fs.Errorf(remote, "list failed: %#v %T", err, err) http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) return } } w.Header().Set("Content-Type", 
"application/vnd.x.restic.rest.v2") enc := json.NewEncoder(w) err = enc.Encode(ls) if err != nil { fs.Errorf(remote, "failed to write list: %v", err) http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) return } } // createRepo creates repository directories. // // We don't bother creating the data dirs as rclone will create them on the fly func (s *server) createRepo(w http.ResponseWriter, r *http.Request) { remote, ok := r.Context().Value(ContextRemoteKey).(string) if !ok { http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) return } fs.Infof(remote, "Creating repository") if r.URL.Query().Get("create") != "true" { http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) return } err := s.f.Mkdir(r.Context(), remote) if err != nil { fs.Errorf(remote, "Create repo failed to Mkdir: %v", err) http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) return } for _, name := range []string{"data", "index", "keys", "locks", "snapshots"} { dirRemote := path.Join(remote, name) err := s.f.Mkdir(r.Context(), dirRemote) if err != nil { fs.Errorf(dirRemote, "Create repo failed to Mkdir: %v", err) http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) return } } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/restic/cache.go
cmd/serve/restic/cache.go
package restic import ( "strings" "sync" "github.com/rclone/rclone/fs" ) // cache implements a simple object cache type cache struct { mu sync.RWMutex // protects the cache items map[string]fs.Object // cache of objects cacheObjects bool // whether we are actually caching } // create a new cache func newCache(cacheObjects bool) *cache { return &cache{ items: map[string]fs.Object{}, cacheObjects: cacheObjects, } } // find the object at remote or return nil func (c *cache) find(remote string) fs.Object { if !c.cacheObjects { return nil } c.mu.RLock() o := c.items[remote] c.mu.RUnlock() return o } // add the object to the cache func (c *cache) add(remote string, o fs.Object) { if !c.cacheObjects { return } c.mu.Lock() c.items[remote] = o c.mu.Unlock() } // remove the object from the cache func (c *cache) remove(remote string) { if !c.cacheObjects { return } c.mu.Lock() delete(c.items, remote) c.mu.Unlock() } // remove all the items with prefix from the cache func (c *cache) removePrefix(prefix string) { if !c.cacheObjects { return } c.mu.Lock() defer c.mu.Unlock() if !strings.HasSuffix(prefix, "/") { prefix += "/" } if prefix == "/" { c.items = map[string]fs.Object{} return } for key := range c.items { if strings.HasPrefix(key, prefix) { delete(c.items, key) } } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/restic/stdio_conn.go
cmd/serve/restic/stdio_conn.go
package restic import ( "net" "os" "time" ) // Addr implements net.Addr for stdin/stdout. type Addr struct{} // Network returns the network type as a string. func (a Addr) Network() string { return "stdio" } func (a Addr) String() string { return "stdio" } // StdioConn implements a net.Conn via stdin/stdout. type StdioConn struct { stdin *os.File stdout *os.File } func (s *StdioConn) Read(p []byte) (int, error) { return s.stdin.Read(p) } func (s *StdioConn) Write(p []byte) (int, error) { return s.stdout.Write(p) } // Close closes both streams. func (s *StdioConn) Close() error { err1 := s.stdin.Close() err2 := s.stdout.Close() if err1 != nil { return err1 } return err2 } // LocalAddr returns nil. func (s *StdioConn) LocalAddr() net.Addr { return Addr{} } // RemoteAddr returns nil. func (s *StdioConn) RemoteAddr() net.Addr { return Addr{} } // SetDeadline sets the read/write deadline. func (s *StdioConn) SetDeadline(t time.Time) error { err1 := s.stdin.SetReadDeadline(t) err2 := s.stdout.SetWriteDeadline(t) if err1 != nil { return err1 } return err2 } // SetReadDeadline sets the read/write deadline. func (s *StdioConn) SetReadDeadline(t time.Time) error { return s.stdin.SetReadDeadline(t) } // SetWriteDeadline sets the read/write deadline. func (s *StdioConn) SetWriteDeadline(t time.Time) error { return s.stdout.SetWriteDeadline(t) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/restic/cache_test.go
cmd/serve/restic/cache_test.go
package restic import ( "sort" "strings" "testing" "github.com/rclone/rclone/fstest/mockobject" "github.com/stretchr/testify/assert" ) func (c *cache) String() string { keys := []string{} c.mu.Lock() for k := range c.items { keys = append(keys, k) } c.mu.Unlock() sort.Strings(keys) return strings.Join(keys, ",") } func TestCacheCRUD(t *testing.T) { c := newCache(true) assert.Equal(t, "", c.String()) assert.Nil(t, c.find("potato")) o := mockobject.New("potato") c.add(o.Remote(), o) assert.Equal(t, "potato", c.String()) assert.Equal(t, o, c.find("potato")) c.remove("potato") assert.Equal(t, "", c.String()) assert.Nil(t, c.find("potato")) c.remove("notfound") } func TestCacheRemovePrefix(t *testing.T) { c := newCache(true) for _, remote := range []string{ "a", "b", "b/1", "b/2/3", "b/2/4", "b/2", "c", } { c.add(remote, mockobject.New(remote)) } assert.Equal(t, "a,b,b/1,b/2,b/2/3,b/2/4,c", c.String()) c.removePrefix("b") assert.Equal(t, "a,b,c", c.String()) c.removePrefix("/") assert.Equal(t, "", c.String()) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/restic/restic_test.go
cmd/serve/restic/restic_test.go
// Serve restic tests set up a server and run the integration tests // for restic against it. package restic import ( "context" "errors" "net/http" "net/http/httptest" "os" "os/exec" "testing" _ "github.com/rclone/rclone/backend/all" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/serve/servetest" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fstest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( testBindAddress = "localhost:0" resticSource = "../../../../../restic/restic" ) func newOpt() Options { opt := Opt opt.HTTP.ListenAddr = []string{testBindAddress} return opt } // TestRestic runs the restic server then runs the unit tests for the // restic remote against it. // // Requires the restic source code in the location indicated by resticSource. func TestResticIntegration(t *testing.T) { ctx := context.Background() _, err := os.Stat(resticSource) if err != nil { t.Skipf("Skipping test as restic source not found: %v", err) } opt := newOpt() fstest.Initialise() fremote, _, clean, err := fstest.RandomRemote() assert.NoError(t, err) defer clean() err = fremote.Mkdir(context.Background(), "") assert.NoError(t, err) // Start the server s, err := newServer(ctx, fremote, &opt) require.NoError(t, err) go func() { require.NoError(t, s.Serve()) }() testURL := s.server.URLs()[0] defer func() { _ = s.Shutdown() }() // Change directory to run the tests err = os.Chdir(resticSource) require.NoError(t, err, "failed to cd to restic source code") // Run the restic tests runTests := func(path string) { args := []string{"test", "./internal/backend/rest", "-run", "TestBackendRESTExternalServer", "-count=1"} if testing.Verbose() { args = append(args, "-v") } cmd := exec.Command("go", args...) 
cmd.Env = append(os.Environ(), "RESTIC_TEST_REST_REPOSITORY=rest:"+testURL+path, "GO111MODULE=on", ) out, err := cmd.CombinedOutput() if len(out) != 0 { t.Logf("\n----------\n%s----------\n", string(out)) } assert.NoError(t, err, "Running restic integration tests") } // Run the tests with no path runTests("") //... and again with a path runTests("potato/sausage/") } func TestMakeRemote(t *testing.T) { for _, test := range []struct { in, want string }{ {"/", ""}, {"/data", "data"}, {"/data/", "data"}, {"/data/1", "data/1"}, {"/data/12", "data/12/12"}, {"/data/123", "data/12/123"}, {"/data/123/", "data/12/123"}, {"/keys", "keys"}, {"/keys/1", "keys/1"}, {"/keys/12", "keys/12"}, {"/keys/123", "keys/123"}, } { r := httptest.NewRequest("GET", test.in, nil) w := httptest.NewRecorder() next := http.HandlerFunc(func(_ http.ResponseWriter, request *http.Request) { remote, ok := request.Context().Value(ContextRemoteKey).(string) assert.True(t, ok, "Failed to get remote from context") assert.Equal(t, test.want, remote, test.in) }) got := WithRemote(next) got.ServeHTTP(w, r) } } type listErrorFs struct { fs.Fs } func (f *listErrorFs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { return fs.DirEntries{}, errors.New("oops") } func TestListErrors(t *testing.T) { ctx := context.Background() // setup rclone with a local backend in a temporary directory tempdir := t.TempDir() opt := newOpt() // make a new file system in the temp dir f := &listErrorFs{Fs: cmd.NewFsSrc([]string{tempdir})} s, err := newServer(ctx, f, &opt) require.NoError(t, err) router := s.server.Router() req := newRequest(t, "GET", "/test/snapshots/", nil) checkRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusInternalServerError)}) } type newObjectErrorFs struct { fs.Fs err error } func (f *newObjectErrorFs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return nil, f.err } func TestServeErrors(t *testing.T) { ctx := context.Background() // setup rclone 
with a local backend in a temporary directory tempdir := t.TempDir() opt := newOpt() // make a new file system in the temp dir f := &newObjectErrorFs{Fs: cmd.NewFsSrc([]string{tempdir})} s, err := newServer(ctx, f, &opt) require.NoError(t, err) router := s.server.Router() f.err = errors.New("oops") req := newRequest(t, "GET", "/test/config", nil) checkRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusInternalServerError)}) f.err = fs.ErrorObjectNotFound checkRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusNotFound)}) } func TestRc(t *testing.T) { servetest.TestRc(t, rc.Params{ "type": "restic", }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/restic/restic_privaterepos_test.go
cmd/serve/restic/restic_privaterepos_test.go
package restic import ( "context" "crypto/rand" "io" "net/http" "strings" "testing" "github.com/rclone/rclone/cmd" "github.com/stretchr/testify/require" ) // newAuthenticatedRequest returns a new HTTP request with the given params. func newAuthenticatedRequest(t testing.TB, method, path string, body io.Reader, user, pass string) *http.Request { req := newRequest(t, method, path, body) req.SetBasicAuth(user, pass) req.Header.Add("Accept", resticAPIV2) return req } // TestResticPrivateRepositories runs tests on the restic handler code for private repositories func TestResticPrivateRepositories(t *testing.T) { ctx := context.Background() buf := make([]byte, 32) _, err := io.ReadFull(rand.Reader, buf) require.NoError(t, err) // setup rclone with a local backend in a temporary directory tempdir := t.TempDir() opt := newOpt() // set private-repos mode & test user opt.PrivateRepos = true opt.Auth.BasicUser = "test" opt.Auth.BasicPass = "password" // make a new file system in the temp dir f := cmd.NewFsSrc([]string{tempdir}) s, err := newServer(ctx, f, &opt) require.NoError(t, err) router := s.server.Router() // Requesting /test/ should allow access reqs := []*http.Request{ newAuthenticatedRequest(t, "POST", "/test/?create=true", nil, opt.Auth.BasicUser, opt.Auth.BasicPass), newAuthenticatedRequest(t, "POST", "/test/config", strings.NewReader("foobar test config"), opt.Auth.BasicUser, opt.Auth.BasicPass), newAuthenticatedRequest(t, "GET", "/test/config", nil, opt.Auth.BasicUser, opt.Auth.BasicPass), } for _, req := range reqs { checkRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusOK)}) } // Requesting with bad credentials should raise unauthorised errors reqs = []*http.Request{ newRequest(t, "GET", "/test/config", nil), newAuthenticatedRequest(t, "GET", "/test/config", nil, opt.Auth.BasicUser, ""), newAuthenticatedRequest(t, "GET", "/test/config", nil, "", opt.Auth.BasicPass), newAuthenticatedRequest(t, "GET", "/test/config", nil, opt.Auth.BasicUser+"x", 
opt.Auth.BasicPass), newAuthenticatedRequest(t, "GET", "/test/config", nil, opt.Auth.BasicUser, opt.Auth.BasicPass+"x"), } for _, req := range reqs { checkRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusUnauthorized)}) } // Requesting everything else should raise forbidden errors reqs = []*http.Request{ newAuthenticatedRequest(t, "GET", "/", nil, opt.Auth.BasicUser, opt.Auth.BasicPass), newAuthenticatedRequest(t, "POST", "/other_user", nil, opt.Auth.BasicUser, opt.Auth.BasicPass), newAuthenticatedRequest(t, "GET", "/other_user/config", nil, opt.Auth.BasicUser, opt.Auth.BasicPass), } for _, req := range reqs { checkRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusForbidden)}) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/restic/restic_appendonly_test.go
cmd/serve/restic/restic_appendonly_test.go
package restic import ( "context" "crypto/rand" "encoding/hex" "io" "net/http" "strings" "testing" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs/config/configfile" "github.com/stretchr/testify/require" ) // createOverwriteDeleteSeq returns a sequence which will create a new file at // path, and then try to overwrite and delete it. func createOverwriteDeleteSeq(t testing.TB, path string) []TestRequest { // add a file, try to overwrite and delete it req := []TestRequest{ { req: newRequest(t, "GET", path, nil), want: []wantFunc{wantCode(http.StatusNotFound)}, }, { req: newRequest(t, "POST", path, strings.NewReader("foobar test config")), want: []wantFunc{wantCode(http.StatusOK)}, }, { req: newRequest(t, "GET", path, nil), want: []wantFunc{ wantCode(http.StatusOK), wantBody("foobar test config"), }, }, { req: newRequest(t, "POST", path, strings.NewReader("other config")), want: []wantFunc{wantCode(http.StatusForbidden)}, }, { req: newRequest(t, "GET", path, nil), want: []wantFunc{ wantCode(http.StatusOK), wantBody("foobar test config"), }, }, { req: newRequest(t, "DELETE", path, nil), want: []wantFunc{wantCode(http.StatusForbidden)}, }, { req: newRequest(t, "GET", path, nil), want: []wantFunc{ wantCode(http.StatusOK), wantBody("foobar test config"), }, }, } return req } // TestResticHandler runs tests on the restic handler code, especially in append-only mode. 
func TestResticHandler(t *testing.T) { ctx := context.Background() configfile.Install() buf := make([]byte, 32) _, err := io.ReadFull(rand.Reader, buf) require.NoError(t, err) randomID := hex.EncodeToString(buf) var tests = []struct { seq []TestRequest }{ {createOverwriteDeleteSeq(t, "/config")}, {createOverwriteDeleteSeq(t, "/data/"+randomID)}, { // ensure we can add and remove lock files []TestRequest{ { req: newRequest(t, "GET", "/locks/"+randomID, nil), want: []wantFunc{wantCode(http.StatusNotFound)}, }, { req: newRequest(t, "POST", "/locks/"+randomID, strings.NewReader("lock file")), want: []wantFunc{wantCode(http.StatusOK)}, }, { req: newRequest(t, "GET", "/locks/"+randomID, nil), want: []wantFunc{ wantCode(http.StatusOK), wantBody("lock file"), }, }, { req: newRequest(t, "POST", "/locks/"+randomID, strings.NewReader("other lock file")), want: []wantFunc{wantCode(http.StatusForbidden)}, }, { req: newRequest(t, "DELETE", "/locks/"+randomID, nil), want: []wantFunc{wantCode(http.StatusOK)}, }, { req: newRequest(t, "GET", "/locks/"+randomID, nil), want: []wantFunc{wantCode(http.StatusNotFound)}, }, }, }, } // setup rclone with a local backend in a temporary directory tempdir := t.TempDir() // set append-only mode opt := newOpt() opt.AppendOnly = true // make a new file system in the temp dir f := cmd.NewFsSrc([]string{tempdir}) s, err := newServer(ctx, f, &opt) require.NoError(t, err) router := s.server.Router() // create the repo checkRequest(t, router.ServeHTTP, newRequest(t, "POST", "/?create=true", nil), []wantFunc{wantCode(http.StatusOK)}) for _, test := range tests { t.Run("", func(t *testing.T) { for i, seq := range test.seq { t.Logf("request %v: %v %v", i, seq.req.Method, seq.req.URL.Path) checkRequest(t, router.ServeHTTP, seq.req, seq.want) } }) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/restic/restic_utils_test.go
cmd/serve/restic/restic_utils_test.go
package restic import ( "io" "net/http" "net/http/httptest" "testing" "github.com/stretchr/testify/assert" ) // declare a few helper functions // wantFunc tests the HTTP response in res and marks the test as errored if something is incorrect. type wantFunc func(t testing.TB, res *httptest.ResponseRecorder) // newRequest returns a new HTTP request with the given params func newRequest(t testing.TB, method, path string, body io.Reader) *http.Request { req := httptest.NewRequest(method, path, body) req.Header.Add("Accept", resticAPIV2) return req } // wantCode returns a function which checks that the response has the correct HTTP status code. func wantCode(code int) wantFunc { return func(t testing.TB, res *httptest.ResponseRecorder) { assert.Equal(t, code, res.Code) } } // wantBody returns a function which checks that the response has the data in the body. func wantBody(body string) wantFunc { return func(t testing.TB, res *httptest.ResponseRecorder) { assert.NotNil(t, res.Body) assert.Equal(t, res.Body.Bytes(), []byte(body)) } } // checkRequest uses f to process the request and runs the checker functions on the result. func checkRequest(t testing.TB, f http.HandlerFunc, req *http.Request, want []wantFunc) { rr := httptest.NewRecorder() f(rr, req) for _, fn := range want { fn(t, rr) } } // TestRequest is a sequence of HTTP requests with (optional) tests for the response. type TestRequest struct { req *http.Request want []wantFunc }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/s3/s3_test.go
cmd/serve/s3/s3_test.go
// Serve s3 tests set up a server and run the integration tests // for the s3 remote against it. package s3 import ( "bytes" "context" "fmt" "io" "net/url" "path" "path/filepath" "slices" "testing" "time" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/cmd/serve/proxy" "github.com/rclone/rclone/cmd/serve/servetest" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/vfs/vfscommon" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( endpoint = "localhost:0" ) // Configure and serve the server func serveS3(t *testing.T, f fs.Fs) (testURL string, keyid string, keysec string, w *Server) { keyid = random.String(16) keysec = random.String(16) opt := Opt // copy default options opt.AuthKey = []string{fmt.Sprintf("%s,%s", keyid, keysec)} opt.HTTP.ListenAddr = []string{endpoint} w, _ = newServer(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt) go func() { require.NoError(t, w.Serve()) }() testURL = w.server.URLs()[0] return } // TestS3 runs the s3 server then runs the unit tests for the // s3 remote against it. 
func TestS3(t *testing.T) { start := func(f fs.Fs) (configmap.Simple, func()) { testURL, keyid, keysec, _ := serveS3(t, f) // Config for the backend we'll use to connect to the server config := configmap.Simple{ "type": "s3", "provider": "Rclone", "endpoint": testURL, "access_key_id": keyid, "secret_access_key": keysec, } return config, func() {} } servetest.Run(t, "s3", start) } // tests using the minio client func TestEncodingWithMinioClient(t *testing.T) { cases := []struct { description string bucket string path string filename string expected string }{ { description: "weird file in bucket root", bucket: "mybucket", path: "", filename: " file with w€r^d ch@r \\#~+§4%&'. txt ", }, { description: "weird file inside a weird folder", bucket: "mybucket", path: "ä#/नेपाल&/?/", filename: " file with w€r^d ch@r \\#~+§4%&'. txt ", }, } for _, tt := range cases { t.Run(tt.description, func(t *testing.T) { fstest.Initialise() f, _, clean, err := fstest.RandomRemote() assert.NoError(t, err) defer clean() err = f.Mkdir(context.Background(), path.Join(tt.bucket, tt.path)) assert.NoError(t, err) buf := bytes.NewBufferString("contents") uploadHash := hash.NewMultiHasher() in := io.TeeReader(buf, uploadHash) obji := object.NewStaticObjectInfo( path.Join(tt.bucket, tt.path, tt.filename), time.Now(), int64(buf.Len()), true, nil, nil, ) _, err = f.Put(context.Background(), in, obji) assert.NoError(t, err) endpoint, keyid, keysec, _ := serveS3(t, f) testURL, _ := url.Parse(endpoint) minioClient, err := minio.New(testURL.Host, &minio.Options{ Creds: credentials.NewStaticV4(keyid, keysec, ""), Secure: false, }) assert.NoError(t, err) buckets, err := minioClient.ListBuckets(context.Background()) assert.NoError(t, err) assert.Equal(t, buckets[0].Name, tt.bucket) objects := minioClient.ListObjects(context.Background(), tt.bucket, minio.ListObjectsOptions{ Recursive: true, }) for object := range objects { assert.Equal(t, path.Join(tt.path, tt.filename), object.Key) } }) } } type 
FileStuct struct { path string filename string } type TestCase struct { description string bucket string files []FileStuct keyID string keySec string shouldFail bool } func testListBuckets(t *testing.T, cases []TestCase, useProxy bool) { fstest.Initialise() var f fs.Fs if useProxy { // the backend config will be made by the proxy prog, err := filepath.Abs("../servetest/proxy_code.go") require.NoError(t, err) files, err := filepath.Abs("testdata") require.NoError(t, err) cmd := "go run " + prog + " " + files // FIXME: this is untidy setting a global variable! proxy.Opt.AuthProxy = cmd defer func() { proxy.Opt.AuthProxy = "" }() f = nil } else { // create a test Fs var err error f, err = fs.NewFs(context.Background(), "testdata") require.NoError(t, err) } for _, tt := range cases { t.Run(tt.description, func(t *testing.T) { endpoint, keyid, keysec, s := serveS3(t, f) defer func() { assert.NoError(t, s.server.Shutdown()) }() if tt.keyID != "" { keyid = tt.keyID } if tt.keySec != "" { keysec = tt.keySec } testURL, _ := url.Parse(endpoint) minioClient, err := minio.New(testURL.Host, &minio.Options{ Creds: credentials.NewStaticV4(keyid, keysec, ""), Secure: false, }) assert.NoError(t, err) buckets, err := minioClient.ListBuckets(context.Background()) if tt.shouldFail { require.Error(t, err) } else { require.NoError(t, err) require.NotEmpty(t, buckets) assert.Equal(t, buckets[0].Name, tt.bucket) o := minioClient.ListObjects(context.Background(), tt.bucket, minio.ListObjectsOptions{ Recursive: true, }) // save files after reading from channel objects := []string{} for object := range o { objects = append(objects, object.Key) } for _, tt := range tt.files { file := path.Join(tt.path, tt.filename) found := slices.Contains(objects, file) require.Equal(t, true, found, "Object not found: "+file) } } }) } } func TestListBuckets(t *testing.T) { var cases = []TestCase{ { description: "list buckets", bucket: "mybucket", files: []FileStuct{ { path: "", filename: "lorem.txt", }, { 
path: "foo", filename: "bar.txt", }, }, }, { description: "list buckets: wrong s3 key", bucket: "mybucket", keyID: "invalid", shouldFail: true, }, { description: "list buckets: wrong s3 secret", bucket: "mybucket", keySec: "invalid", shouldFail: true, }, } testListBuckets(t, cases, false) } func TestListBucketsAuthProxy(t *testing.T) { var cases = []TestCase{ { description: "list buckets", bucket: "mybucket", // request with random keyid // instead of what was set in 'authPair' keyID: random.String(16), files: []FileStuct{ { path: "", filename: "lorem.txt", }, { path: "foo", filename: "bar.txt", }, }, }, { description: "list buckets: wrong s3 secret", bucket: "mybucket", keySec: "invalid", shouldFail: true, }, } testListBuckets(t, cases, true) } func TestRc(t *testing.T) { servetest.TestRc(t, rc.Params{ "type": "s3", "vfs_cache_mode": "off", }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/s3/s3.go
cmd/serve/s3/s3.go
package s3

import (
	"context"
	_ "embed"
	"strings"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/serve"
	"github.com/rclone/rclone/cmd/serve/proxy"
	"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/rc"
	httplib "github.com/rclone/rclone/lib/http"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/rclone/rclone/vfs/vfsflags"
	"github.com/spf13/cobra"
)

// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
	Name:    "force_path_style",
	Default: true,
	Help:    "If true use path style access if false use virtual hosted style",
}, {
	Name:    "etag_hash",
	Default: "MD5",
	Help:    "Which hash to use for the ETag, or auto or blank for off",
}, {
	Name:    "auth_key",
	Default: []string{},
	Help:    "Set key pair for v4 authorization: access_key_id,secret_access_key",
}, {
	Name:    "no_cleanup",
	Default: false,
	Help:    "Not to cleanup empty folder after object is deleted",
}}.
	// Pull in the shared HTTP server and HTTP auth options too.
	Add(httplib.ConfigInfo).
	Add(httplib.AuthConfigInfo)

// Options contains options for the s3 Server
type Options struct {
	//TODO add more options
	ForcePathStyle bool     `config:"force_path_style"`
	EtagHash       string   `config:"etag_hash"`
	AuthKey        []string `config:"auth_key"`
	NoCleanup      bool     `config:"no_cleanup"`
	Auth           httplib.AuthConfig
	HTTP           httplib.Config
}

// Opt is options set by command line flags
var Opt Options

// flagPrefix is empty: the s3 flags are registered without a prefix.
const flagPrefix = ""

// init wires the s3 options into the global option registry, registers
// the command-line flags and cobra subcommand, and registers the rc
// "serve s3" handler.
func init() {
	fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "s3", Opt: &Opt, Options: OptionsInfo})
	flagSet := Command.Flags()
	flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
	vfsflags.AddFlags(flagSet)
	proxyflags.AddFlags(flagSet)
	serve.Command.AddCommand(Command)
	serve.AddRc("s3", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
		// Read VFS Opts
		var vfsOpt = vfscommon.Opt // set default opts
		err := configstruct.SetAny(in, &vfsOpt)
		if err != nil {
			return nil, err
		}
		// Read Proxy Opts
		var proxyOpt = proxy.Opt // set default opts
		err = configstruct.SetAny(in, &proxyOpt)
		if err != nil {
			return nil, err
		}
		// Read opts
		var opt = Opt // set default opts
		err = configstruct.SetAny(in, &opt)
		if err != nil {
			return nil, err
		}
		// Create server
		return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt)
	})
}

// serveS3Help holds the long help text embedded from serve_s3.md.
//
//go:embed serve_s3.md
var serveS3Help string

// help returns the help string cleaned up to simplify appending
func help() string {
	return strings.TrimSpace(serveS3Help) + "\n\n"
}

// Command definition for cobra
var Command = &cobra.Command{
	Annotations: map[string]string{
		"versionIntroduced": "v1.65",
		"groups":            "Filter",
		"status":            "Experimental",
	},
	Use:   "s3 remote:path",
	Short: `Serve remote:path over s3.`,
	Long:  help() + strings.TrimSpace(httplib.AuthHelp(flagPrefix)+httplib.Help(flagPrefix)+vfs.Help()),
	RunE: func(command *cobra.Command, args []string) error {
		var f fs.Fs
		// With an auth proxy the backend is created per-connection by the
		// proxy, so no remote argument is taken on the command line.
		if proxy.Opt.AuthProxy == "" {
			cmd.CheckArgs(1, 1, command, args)
			f = cmd.NewFsSrc(args)
		} else {
			cmd.CheckArgs(0, 0, command, args)
		}
		cmd.Run(false, false, command, func() error {
			s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
			if err != nil {
				return err
			}
			return s.Serve()
		})
		return nil
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/s3/utils.go
cmd/serve/s3/utils.go
package s3

import (
	"context"
	"encoding/hex"
	"errors"
	"io"
	"os"
	"path"
	"strings"

	"github.com/rclone/gofakes3"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/vfs"
)

// getDirEntries reads the directory at prefix from the VFS, mapping a
// missing or non-directory node to gofakes3.ErrNoSuchKey.
func getDirEntries(prefix string, VFS *vfs.VFS) (vfs.Nodes, error) {
	node, err := VFS.Stat(prefix)
	switch {
	case err == vfs.ENOENT:
		return nil, gofakes3.ErrNoSuchKey
	case err != nil:
		return nil, err
	case !node.IsDir():
		return nil, gofakes3.ErrNoSuchKey
	}
	entries, err := node.(*vfs.Dir).ReadDirAll()
	if err != nil {
		return nil, err
	}
	return entries, nil
}

// getFileHashByte returns the raw bytes of the node's hash, or nil if
// the hash is unavailable or not valid hex.
func getFileHashByte(node any, hashType hash.Type) []byte {
	raw, err := hex.DecodeString(getFileHash(node, hashType))
	if err != nil {
		return nil
	}
	return raw
}

// getFileHash returns the hex encoded hash of the given vfs.Node or
// fs.Object, or "" if it cannot be computed.
func getFileHash(node any, hashType hash.Type) string {
	if hashType == hash.None {
		return ""
	}

	var o fs.Object
	switch n := node.(type) {
	case vfs.Node:
		fsObj, ok := n.DirEntry().(fs.Object)
		if !ok {
			// Not uploaded yet - hash the data held in the VFS cache instead.
			fs.Debugf("serve s3", "File uploading - reading hash from VFS cache")
			in, err := n.Open(os.O_RDONLY)
			if err != nil {
				return ""
			}
			defer func() {
				_ = in.Close()
			}()
			hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
			if err != nil {
				return ""
			}
			if _, err = io.Copy(hasher, in); err != nil {
				return ""
			}
			return hasher.Sums()[hashType]
		}
		o = fsObj
	case fs.Object:
		o = n
	}

	sum, err := o.Hash(context.Background(), hashType)
	if err != nil {
		return ""
	}
	return sum
}

// prefixParser splits an S3 prefix at the final "/" into a directory
// part and the remaining leaf prefix.
func prefixParser(p *gofakes3.Prefix) (path, remaining string) {
	if idx := strings.LastIndexByte(p.Prefix, '/'); idx >= 0 {
		return p.Prefix[:idx], p.Prefix[idx+1:]
	}
	return "", p.Prefix
}

// mkdirRecursive creates path and all of its parents on the VFS.
//
// FIXME this could be implemented by VFS.MkdirAll()
func mkdirRecursive(path string, VFS *vfs.VFS) error {
	dir := ""
	for _, component := range strings.Split(strings.Trim(path, "/"), "/") {
		dir += "/" + component
		if _, err := VFS.Stat(dir); err != nil {
			if err := VFS.Mkdir(dir, 0777); err != nil {
				return err
			}
		}
	}
	return nil
}

// rmdirRecursive removes the parent directories of p while they still
// exist, stopping once the bucket (root) level is reached.
func rmdirRecursive(p string, VFS *vfs.VFS) {
	dir := path.Dir(p)
	if !strings.ContainsAny(dir, "/\\") {
		// might be bucket(root)
		return
	}
	if _, err := VFS.Stat(dir); err == nil {
		if err := VFS.Remove(dir); err != nil {
			return
		}
		rmdirRecursive(dir, VFS)
	}
}

// authlistResolver turns a list of "id,secret" pairs into a map of
// access key id -> secret.
func authlistResolver(list []string) (map[string]string, error) {
	creds := make(map[string]string, len(list))
	for _, pair := range list {
		parts := strings.Split(pair, ",")
		if len(parts) != 2 {
			return nil, errors.New("invalid auth pair: expecting a single comma")
		}
		creds[parts[0]] = parts[1]
	}
	return creds, nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/s3/list.go
cmd/serve/s3/list.go
package s3

import (
	"path"
	"strings"

	"github.com/rclone/gofakes3"
	"github.com/rclone/rclone/vfs"
)

// entryListR recursively walks bucket/fdPath adding matching entries to
// response. Only entries whose leaf name starts with name are included;
// when addPrefix is true directories are reported as common prefixes
// instead of being descended into.
func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPrefix bool, response *gofakes3.ObjectList) error {
	entries, err := getDirEntries(path.Join(bucket, fdPath), _vfs)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		leaf := entry.Name()
		// workaround for control-chars detect
		entryPath := path.Join(fdPath, leaf)
		if !strings.HasPrefix(leaf, name) {
			continue
		}
		if !entry.IsDir() {
			response.Add(&gofakes3.Content{
				Key:          entryPath,
				LastModified: gofakes3.NewContentTime(entry.ModTime()),
				ETag:         getFileHash(entry, b.s.etagHashType),
				Size:         entry.Size(),
				StorageClass: gofakes3.StorageStandard,
			})
			continue
		}
		if addPrefix {
			response.AddPrefix(entryPath + "/")
			continue
		}
		if err := b.entryListR(_vfs, bucket, entryPath, "", false, response); err != nil {
			return err
		}
	}
	return nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/s3/server.go
cmd/serve/s3/server.go
// Package s3 implements a fake s3 server for rclone package s3 import ( "context" "crypto/md5" "encoding/hex" "errors" "fmt" "math/rand" "net" "net/http" "strings" "github.com/go-chi/chi/v5" "github.com/rclone/gofakes3" "github.com/rclone/gofakes3/signature" "github.com/rclone/rclone/cmd/serve/proxy" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" httplib "github.com/rclone/rclone/lib/http" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" ) type ctxKey int const ( ctxKeyID ctxKey = iota ) // Server is a s3.FileSystem interface type Server struct { server *httplib.Server opt Options f fs.Fs _vfs *vfs.VFS // don't use directly, use getVFS faker *gofakes3.GoFakeS3 handler http.Handler proxy *proxy.Proxy ctx context.Context // for global config s3Secret string etagHashType hash.Type } // Make a new S3 Server to serve the remote func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (s *Server, err error) { w := &Server{ f: f, ctx: ctx, opt: *opt, etagHashType: hash.None, } if w.opt.EtagHash == "auto" { w.etagHashType = f.Hashes().GetOne() } else if w.opt.EtagHash != "" { err := w.etagHashType.Set(w.opt.EtagHash) if err != nil { return nil, err } } if w.etagHashType != hash.None { fs.Debugf(f, "Using hash %v for ETag", w.etagHashType) } if len(opt.AuthKey) == 0 { fs.Logf("serve s3", "No auth provided so allowing anonymous access") } else { w.s3Secret = getAuthSecret(opt.AuthKey) } authList, err := authlistResolver(opt.AuthKey) if err != nil { return nil, fmt.Errorf("parsing auth list failed: %q", err) } var newLogger logger w.faker = gofakes3.New( newBackend(w), gofakes3.WithHostBucket(!opt.ForcePathStyle), gofakes3.WithLogger(newLogger), gofakes3.WithRequestID(rand.Uint64()), gofakes3.WithoutVersioning(), gofakes3.WithV4Auth(authList), gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied ) w.handler = w.faker.Server() if proxy.Opt.AuthProxy != "" { w.proxy = 
proxy.New(ctx, proxyOpt, vfsOpt) // proxy auth middleware w.handler = proxyAuthMiddleware(w.handler, w) w.handler = authPairMiddleware(w.handler, w) } else { w._vfs = vfs.New(f, vfsOpt) if len(opt.AuthKey) > 0 { w.faker.AddAuthKeys(authList) } } w.server, err = httplib.NewServer(ctx, httplib.WithConfig(opt.HTTP), httplib.WithAuth(opt.Auth), ) if err != nil { return nil, fmt.Errorf("failed to init server: %w", err) } router := w.server.Router() w.Bind(router) return w, nil } func (w *Server) getVFS(ctx context.Context) (VFS *vfs.VFS, err error) { if w._vfs != nil { return w._vfs, nil } value := ctx.Value(ctxKeyID) if value == nil { return nil, errors.New("no VFS found in context") } VFS, ok := value.(*vfs.VFS) if !ok { return nil, fmt.Errorf("context value is not VFS: %#v", value) } return VFS, nil } // auth does proxy authorization func (w *Server) auth(accessKeyID string) (value any, err error) { VFS, _, err := w.proxy.Call(stringToMd5Hash(accessKeyID), accessKeyID, false) if err != nil { return nil, err } return VFS, err } // Bind register the handler to http.Router func (w *Server) Bind(router chi.Router) { router.Handle("/*", w.handler) } // Serve serves the s3 server until the server is shutdown func (w *Server) Serve() error { w.server.Serve() fs.Logf(w.f, "Starting s3 server on %s", w.server.URLs()) w.server.Wait() return nil } // Addr returns the first address of the server func (w *Server) Addr() net.Addr { return w.server.Addr() } // Shutdown the server func (w *Server) Shutdown() error { return w.server.Shutdown() } func authPairMiddleware(next http.Handler, ws *Server) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { accessKey, _ := parseAccessKeyID(r) // set the auth pair authPair := map[string]string{ accessKey: ws.s3Secret, } ws.faker.AddAuthKeys(authPair) next.ServeHTTP(w, r) }) } func proxyAuthMiddleware(next http.Handler, ws *Server) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { accessKey, _ := parseAccessKeyID(r) value, err := ws.auth(accessKey) if err != nil { fs.Infof(r.URL.Path, "%s: Auth failed: %v", r.RemoteAddr, err) } if value != nil { r = r.WithContext(context.WithValue(r.Context(), ctxKeyID, value)) } next.ServeHTTP(w, r) }) } func parseAccessKeyID(r *http.Request) (accessKey string, error signature.ErrorCode) { v4Auth := r.Header.Get("Authorization") req, err := signature.ParseSignV4(v4Auth) if err != signature.ErrNone { return "", err } return req.Credential.GetAccessKey(), signature.ErrNone } func stringToMd5Hash(s string) string { hasher := md5.New() hasher.Write([]byte(s)) return hex.EncodeToString(hasher.Sum(nil)) } func getAuthSecret(authPair []string) string { if len(authPair) == 0 { return "" } splited := strings.Split(authPair[0], ",") if len(splited) != 2 { return "" } secret := strings.TrimSpace(splited[1]) return secret }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/s3/backend.go
cmd/serve/s3/backend.go
// Package s3 implements an s3 server for rclone package s3 import ( "context" "encoding/hex" "io" "maps" "os" "path" "strings" "sync" "time" "github.com/ncw/swift/v2" "github.com/rclone/gofakes3" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/vfs" ) var ( emptyPrefix = &gofakes3.Prefix{} ) // s3Backend implements the gofacess3.Backend interface to make an S3 // backend for gofakes3 type s3Backend struct { s *Server meta *sync.Map } // newBackend creates a new SimpleBucketBackend. func newBackend(s *Server) gofakes3.Backend { return &s3Backend{ s: s, meta: new(sync.Map), } } // ListBuckets always returns the default bucket. func (b *s3Backend) ListBuckets(ctx context.Context) ([]gofakes3.BucketInfo, error) { _vfs, err := b.s.getVFS(ctx) if err != nil { return nil, err } dirEntries, err := getDirEntries("/", _vfs) if err != nil { return nil, err } var response []gofakes3.BucketInfo for _, entry := range dirEntries { if entry.IsDir() { response = append(response, gofakes3.BucketInfo{ Name: entry.Name(), CreationDate: gofakes3.NewContentTime(entry.ModTime()), }) } // FIXME: handle files in root dir } return response, nil } // ListBucket lists the objects in the given bucket. 
func (b *s3Backend) ListBucket(ctx context.Context, bucket string, prefix *gofakes3.Prefix, page gofakes3.ListBucketPage) (*gofakes3.ObjectList, error) {
	_vfs, err := b.s.getVFS(ctx)
	if err != nil {
		return nil, err
	}
	// The bucket must exist as a directory in the VFS root.
	_, err = _vfs.Stat(bucket)
	if err != nil {
		return nil, gofakes3.BucketNotFound(bucket)
	}
	if prefix == nil {
		prefix = emptyPrefix
	}

	// workaround
	// NOTE(review): this mutates the caller's prefix in place, clearing
	// the HasPrefix/HasDelimiter flags when the values are blank.
	if strings.TrimSpace(prefix.Prefix) == "" {
		prefix.HasPrefix = false
	}
	if strings.TrimSpace(prefix.Delimiter) == "" {
		prefix.HasDelimiter = false
	}

	response := gofakes3.NewObjectList()
	// Split the prefix into the directory to walk and the leaf-name
	// prefix to match. (The local "path" shadows the path package here,
	// which is not otherwise used in this function.)
	path, remaining := prefixParser(prefix)

	err = b.entryListR(_vfs, bucket, path, remaining, prefix.HasDelimiter, response)
	if err == gofakes3.ErrNoSuchKey {
		// AWS just returns an empty list
		response = gofakes3.NewObjectList()
	} else if err != nil {
		return nil, err
	}

	// Apply marker/max-keys paging to the accumulated list.
	return b.pager(response, page)
}

// formatHeaderTime makes an timestamp which is the same as that used by AWS.
//
// This is like RFC1123 always in UTC, but has GMT instead of UTC
func formatHeaderTime(t time.Time) string {
	return t.UTC().Format("Mon, 02 Jan 2006 15:04:05") + " GMT"
}

// HeadObject returns the fileinfo for the given object name.
//
// Note that the metadata is not supported yet.
func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName string) (*gofakes3.Object, error) { _vfs, err := b.s.getVFS(ctx) if err != nil { return nil, err } _, err = _vfs.Stat(bucketName) if err != nil { return nil, gofakes3.BucketNotFound(bucketName) } fp := path.Join(bucketName, objectName) node, err := _vfs.Stat(fp) if err != nil { return nil, gofakes3.KeyNotFound(objectName) } if !node.IsFile() { return nil, gofakes3.KeyNotFound(objectName) } entry := node.DirEntry() if entry == nil { return nil, gofakes3.KeyNotFound(objectName) } fobj := entry.(fs.Object) size := node.Size() hash := getFileHashByte(fobj, b.s.etagHashType) meta := map[string]string{ "Last-Modified": formatHeaderTime(node.ModTime()), "Content-Type": fs.MimeType(context.Background(), fobj), } if val, ok := b.meta.Load(fp); ok { metaMap := val.(map[string]string) maps.Copy(meta, metaMap) } return &gofakes3.Object{ Name: objectName, Hash: hash, Metadata: meta, Size: size, Contents: noOpReadCloser{}, }, nil } // GetObject fetches the object from the filesystem. 
func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (obj *gofakes3.Object, err error) {
	_vfs, err := b.s.getVFS(ctx)
	if err != nil {
		return nil, err
	}
	_, err = _vfs.Stat(bucketName)
	if err != nil {
		return nil, gofakes3.BucketNotFound(bucketName)
	}

	fp := path.Join(bucketName, objectName)
	node, err := _vfs.Stat(fp)
	if err != nil {
		return nil, gofakes3.KeyNotFound(objectName)
	}
	if !node.IsFile() {
		return nil, gofakes3.KeyNotFound(objectName)
	}
	entry := node.DirEntry()
	if entry == nil {
		return nil, gofakes3.KeyNotFound(objectName)
	}
	fobj := entry.(fs.Object)
	file := node.(*vfs.File)
	size := node.Size()
	hash := getFileHashByte(fobj, b.s.etagHashType)

	in, err := file.Open(os.O_RDONLY)
	if err != nil {
		return nil, gofakes3.ErrInternal
	}
	// On success the caller owns the handle via Object.Contents and is
	// responsible for closing it; the deferred close below only fires
	// on an error return (it inspects the named result err).
	defer func() {
		// If an error occurs, the caller may not have access to Object.Body in order to close it:
		if err != nil {
			_ = in.Close()
		}
	}()

	var rdr io.ReadCloser = in
	rnge, err := rangeRequest.Range(size)
	if err != nil {
		return nil, err
	}
	if rnge != nil {
		// Serve only the requested byte range: seek to the start and
		// limit the reader to the range length, keeping the close
		// behaviour of the underlying handle.
		if _, err := in.Seek(rnge.Start, io.SeekStart); err != nil {
			return nil, err
		}
		rdr = limitReadCloser(rdr, in.Close, rnge.Length)
	}

	// Standard headers plus any stored user metadata for this path.
	meta := map[string]string{
		"Last-Modified": formatHeaderTime(node.ModTime()),
		"Content-Type":  fs.MimeType(context.Background(), fobj),
	}
	if val, ok := b.meta.Load(fp); ok {
		metaMap := val.(map[string]string)
		maps.Copy(meta, metaMap)
	}

	return &gofakes3.Object{
		Name:     objectName,
		Hash:     hash,
		Metadata: meta,
		Size:     size,
		Range:    rnge,
		Contents: rdr,
	}, nil
}

// storeModtime sets both "mtime" and "X-Amz-Meta-Mtime" to val in b.meta.
// Call this whenever modtime is updated.
func (b *s3Backend) storeModtime(fp string, meta map[string]string, val string) {
	meta["X-Amz-Meta-Mtime"] = val
	meta["mtime"] = val
	b.meta.Store(fp, meta)
}

// TouchObject creates or updates meta on specified object.
func (b *s3Backend) TouchObject(ctx context.Context, fp string, meta map[string]string) (result gofakes3.PutObjectResult, err error) { _vfs, err := b.s.getVFS(ctx) if err != nil { return result, err } _, err = _vfs.Stat(fp) if err == vfs.ENOENT { f, err := _vfs.Create(fp) if err != nil { return result, err } _ = f.Close() return b.TouchObject(ctx, fp, meta) } else if err != nil { return result, err } _, err = _vfs.Stat(fp) if err != nil { return result, err } b.meta.Store(fp, meta) if val, ok := meta["X-Amz-Meta-Mtime"]; ok { ti, err := swift.FloatStringToTime(val) if err == nil { b.storeModtime(fp, meta, val) return result, _vfs.Chtimes(fp, ti, ti) } // ignore error since the file is successfully created } if val, ok := meta["mtime"]; ok { ti, err := swift.FloatStringToTime(val) if err == nil { b.storeModtime(fp, meta, val) return result, _vfs.Chtimes(fp, ti, ti) } // ignore error since the file is successfully created } return result, nil } // PutObject creates or overwrites the object with the given name. func (b *s3Backend) PutObject( ctx context.Context, bucketName, objectName string, meta map[string]string, input io.Reader, size int64, ) (result gofakes3.PutObjectResult, err error) { _vfs, err := b.s.getVFS(ctx) if err != nil { return result, err } _, err = _vfs.Stat(bucketName) if err != nil { return result, gofakes3.BucketNotFound(bucketName) } fp := path.Join(bucketName, objectName) objectDir := path.Dir(fp) // _, err = db.fs.Stat(objectDir) // if err == vfs.ENOENT { // fs.Errorf(objectDir, "PutObject failed: path not found") // return result, gofakes3.KeyNotFound(objectName) // } if objectDir != "." 
{ if err := mkdirRecursive(objectDir, _vfs); err != nil { return result, err } } f, err := _vfs.Create(fp) if err != nil { return result, err } if _, err := io.Copy(f, input); err != nil { // remove file when i/o error occurred (FsPutErr) _ = f.Close() _ = _vfs.Remove(fp) return result, err } if err := f.Close(); err != nil { // remove file when close error occurred (FsPutErr) _ = _vfs.Remove(fp) return result, err } _, err = _vfs.Stat(fp) if err != nil { return result, err } b.meta.Store(fp, meta) if val, ok := meta["X-Amz-Meta-Mtime"]; ok { ti, err := swift.FloatStringToTime(val) if err == nil { b.storeModtime(fp, meta, val) return result, _vfs.Chtimes(fp, ti, ti) } // ignore error since the file is successfully created if val, ok := meta["mtime"]; ok { b.storeModtime(fp, meta, val) return result, _vfs.Chtimes(fp, ti, ti) } // ignore error since the file is successfully created } return result, nil } // DeleteMulti deletes multiple objects in a single request. func (b *s3Backend) DeleteMulti(ctx context.Context, bucketName string, objects ...string) (result gofakes3.MultiDeleteResult, rerr error) { for _, object := range objects { if err := b.deleteObject(ctx, bucketName, object); err != nil { fs.Errorf("serve s3", "delete object failed: %v", err) result.Error = append(result.Error, gofakes3.ErrorResult{ Code: gofakes3.ErrInternal, Message: gofakes3.ErrInternal.Message(), Key: object, }) } else { result.Deleted = append(result.Deleted, gofakes3.ObjectID{ Key: object, }) } } return result, nil } // DeleteObject deletes the object with the given name. func (b *s3Backend) DeleteObject(ctx context.Context, bucketName, objectName string) (result gofakes3.ObjectDeleteResult, rerr error) { return result, b.deleteObject(ctx, bucketName, objectName) } // deleteObject deletes the object from the filesystem. 
func (b *s3Backend) deleteObject(ctx context.Context, bucketName, objectName string) error { _vfs, err := b.s.getVFS(ctx) if err != nil { return err } _, err = _vfs.Stat(bucketName) if err != nil { return gofakes3.BucketNotFound(bucketName) } fp := path.Join(bucketName, objectName) // S3 does not report an error when attempting to delete a key that does not exist, so // we need to skip IsNotExist errors. if err := _vfs.Remove(fp); err != nil && !os.IsNotExist(err) { return err } // FIXME: unsafe operation rmdirRecursive(fp, _vfs) return nil } // CreateBucket creates a new bucket. func (b *s3Backend) CreateBucket(ctx context.Context, name string) error { _vfs, err := b.s.getVFS(ctx) if err != nil { return err } _, err = _vfs.Stat(name) if err != nil && err != vfs.ENOENT { return gofakes3.ErrInternal } if err == nil { return gofakes3.ErrBucketAlreadyExists } if err := _vfs.Mkdir(name, 0755); err != nil { return gofakes3.ErrInternal } return nil } // DeleteBucket deletes the bucket with the given name. func (b *s3Backend) DeleteBucket(ctx context.Context, name string) error { _vfs, err := b.s.getVFS(ctx) if err != nil { return err } _, err = _vfs.Stat(name) if err != nil { return gofakes3.BucketNotFound(name) } if err := _vfs.Remove(name); err != nil { return gofakes3.ErrBucketNotEmpty } return nil } // BucketExists checks if the bucket exists. func (b *s3Backend) BucketExists(ctx context.Context, name string) (exists bool, err error) { _vfs, err := b.s.getVFS(ctx) if err != nil { return false, err } _, err = _vfs.Stat(name) if err != nil { return false, nil } return true, nil } // CopyObject copy specified object from srcKey to dstKey. 
func (b *s3Backend) CopyObject(ctx context.Context, srcBucket, srcKey, dstBucket, dstKey string, meta map[string]string) (result gofakes3.CopyObjectResult, err error) {
	_vfs, err := b.s.getVFS(ctx)
	if err != nil {
		return result, err
	}
	fp := path.Join(srcBucket, srcKey)
	if srcBucket == dstBucket && srcKey == dstKey {
		// Copy onto itself: just replace the stored metadata and, if a
		// modtime is supplied, apply it to the file.
		b.meta.Store(fp, meta)

		val, ok := meta["X-Amz-Meta-Mtime"]
		if !ok {
			if val, ok = meta["mtime"]; !ok {
				// no modtime supplied - nothing more to do (naked
				// return of the zero result and nil err)
				return
			}
		}
		// update modtime
		ti, err := swift.FloatStringToTime(val)
		if err != nil {
			// unparseable modtime is ignored, not an error
			return result, nil
		}

		b.storeModtime(fp, meta, val)
		return result, _vfs.Chtimes(fp, ti, ti)
	}

	cStat, err := _vfs.Stat(fp)
	if err != nil {
		return
	}

	c, err := b.GetObject(ctx, srcBucket, srcKey, nil)
	if err != nil {
		return
	}
	defer func() {
		_ = c.Contents.Close()
	}()

	// Carry over the source's metadata for keys the caller did not set,
	// except the ACL which must not be copied.
	for k, v := range c.Metadata {
		if _, found := meta[k]; !found && k != "X-Amz-Acl" {
			meta[k] = v
		}
	}
	if _, ok := meta["mtime"]; !ok {
		// default the destination's mtime to the source's
		meta["mtime"] = swift.TimeToFloatString(cStat.ModTime())
	}

	_, err = b.PutObject(ctx, dstBucket, dstKey, meta, c.Contents, c.Size)
	if err != nil {
		return
	}

	return gofakes3.CopyObjectResult{
		ETag:         `"` + hex.EncodeToString(c.Hash) + `"`,
		LastModified: gofakes3.NewContentTime(cStat.ModTime()),
	}, nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/s3/pager.go
cmd/serve/s3/pager.go
// Package s3 implements a fake s3 server for rclone package s3 import ( "sort" "github.com/rclone/gofakes3" ) // pager splits the object list into smulitply pages. func (db *s3Backend) pager(list *gofakes3.ObjectList, page gofakes3.ListBucketPage) (*gofakes3.ObjectList, error) { // sort by alphabet sort.Slice(list.CommonPrefixes, func(i, j int) bool { return list.CommonPrefixes[i].Prefix < list.CommonPrefixes[j].Prefix }) // sort by modtime sort.Slice(list.Contents, func(i, j int) bool { return list.Contents[i].LastModified.Before(list.Contents[j].LastModified.Time) }) tokens := page.MaxKeys if tokens == 0 { tokens = 1000 } if page.HasMarker { for i, obj := range list.Contents { if obj.Key == page.Marker { list.Contents = list.Contents[i+1:] break } } for i, obj := range list.CommonPrefixes { if obj.Prefix == page.Marker { list.CommonPrefixes = list.CommonPrefixes[i+1:] break } } } response := gofakes3.NewObjectList() for _, obj := range list.CommonPrefixes { if tokens <= 0 { break } response.AddPrefix(obj.Prefix) tokens-- } for _, obj := range list.Contents { if tokens <= 0 { break } response.Add(obj) tokens-- } if len(list.CommonPrefixes)+len(list.Contents) > int(page.MaxKeys) { response.IsTruncated = true if len(response.Contents) > 0 { response.NextMarker = response.Contents[len(response.Contents)-1].Key } else { response.NextMarker = response.CommonPrefixes[len(response.CommonPrefixes)-1].Prefix } } return response, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/s3/logger.go
cmd/serve/s3/logger.go
package s3

import (
	"fmt"
	"strings"

	"github.com/rclone/gofakes3"
	"github.com/rclone/rclone/fs"
)

// logger routes gofakes3 log output into rclone's logging system.
type logger struct{}

// Print implements the gofakes3 logger interface: it joins the
// arguments with single spaces and maps gofakes3 log levels onto
// rclone's Error/Info/Debug levels.
func (l logger) Print(level gofakes3.LogLevel, v ...any) {
	var b strings.Builder
	for i := range v {
		if i > 0 {
			b.WriteString(" ")
		}
		fmt.Fprint(&b, v[i])
	}
	s := b.String()
	// Pass the message as an argument ("%s"), not as the format string,
	// so any "%" characters in the logged text are not misinterpreted
	// as formatting verbs.
	switch level {
	default:
		fallthrough
	case gofakes3.LogErr:
		fs.Errorf("serve s3", "%s", s)
	case gofakes3.LogWarn:
		fs.Infof("serve s3", "%s", s)
	case gofakes3.LogInfo:
		fs.Debugf("serve s3", "%s", s)
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/s3/ioutils.go
cmd/serve/s3/ioutils.go
package s3 import "io" type noOpReadCloser struct{} type readerWithCloser struct { io.Reader closer func() error } var _ io.ReadCloser = &readerWithCloser{} func (d noOpReadCloser) Read(b []byte) (n int, err error) { return 0, io.EOF } func (d noOpReadCloser) Close() error { return nil } func limitReadCloser(rdr io.Reader, closer func() error, sz int64) io.ReadCloser { return &readerWithCloser{ Reader: io.LimitReader(rdr, sz), closer: closer, } } func (rwc *readerWithCloser) Close() error { if rwc.closer != nil { return rwc.closer() } return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/ftp/ftp.go
cmd/serve/ftp/ftp.go
//go:build !plan9 // Package ftp implements an FTP server for rclone package ftp import ( "context" "errors" "fmt" "io" iofs "io/fs" "net" "os" "os/user" "regexp" "strconv" "strings" "sync" "time" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/serve" "github.com/rclone/rclone/cmd/serve/proxy" "github.com/rclone/rclone/cmd/serve/proxy/proxyflags" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" "github.com/rclone/rclone/vfs/vfsflags" "github.com/spf13/cobra" "github.com/spf13/pflag" ftp "goftp.io/server/v2" ) // OptionsInfo descripts the Options in use var OptionsInfo = fs.Options{{ Name: "addr", Default: "localhost:2121", Help: "IPaddress:Port or :Port to bind server to", }, { Name: "public_ip", Default: "", Help: "Public IP address to advertise for passive connections", }, { Name: "passive_port", Default: "30000-32000", Help: "Passive port range to use", }, { Name: "user", Default: "anonymous", Help: "User name for authentication", }, { Name: "pass", Default: "", Help: "Password for authentication (empty value allow every password)", }, { Name: "cert", Default: "", Help: "TLS PEM key (concatenation of certificate and CA certificate)", }, { Name: "key", Default: "", Help: "TLS PEM Private key", }} // Options contains options for the http Server type Options struct { //TODO add more options ListenAddr string `config:"addr"` // Port to listen on PublicIP string `config:"public_ip"` // Passive ports range PassivePorts string `config:"passive_port"` // Passive ports range User string `config:"user"` // single username for basic auth if not using Htpasswd Pass string `config:"pass"` // password for User TLSCert string `config:"cert"` // TLS PEM key (concatenation 
of certificate and CA certificate) TLSKey string `config:"key"` // TLS PEM Private key } // Opt is options set by command line flags var Opt Options // AddFlags adds flags for ftp func AddFlags(flagSet *pflag.FlagSet) { flags.AddFlagsFromOptions(flagSet, "", OptionsInfo) } func init() { vfsflags.AddFlags(Command.Flags()) proxyflags.AddFlags(Command.Flags()) AddFlags(Command.Flags()) serve.Command.AddCommand(Command) serve.AddRc("ftp", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) { // Read VFS Opts var vfsOpt = vfscommon.Opt // set default opts err := configstruct.SetAny(in, &vfsOpt) if err != nil { return nil, err } // Read Proxy Opts var proxyOpt = proxy.Opt // set default opts err = configstruct.SetAny(in, &proxyOpt) if err != nil { return nil, err } // Read opts var opt = Opt // set default opts err = configstruct.SetAny(in, &opt) if err != nil { return nil, err } // Create server return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt) }) } // Command definition for cobra var Command = &cobra.Command{ Use: "ftp remote:path", Short: `Serve remote:path over FTP.`, Long: `Run a basic FTP server to serve a remote over FTP protocol. This can be viewed with a FTP client or you can make a remote of type FTP to read and write it. ### Server options Use --addr to specify which IP address and port the server should listen on, e.g. --addr 1.2.3.4:8000 or --addr :8080 to listen to all IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port. If you set --addr to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info. #### Authentication By default this will serve files without needing a login. You can set a single username and password with the --user and --pass flags. 
` + strings.TrimSpace(vfs.Help()+proxy.Help), Annotations: map[string]string{ "versionIntroduced": "v1.44", "groups": "Filter", }, Run: func(command *cobra.Command, args []string) { var f fs.Fs if proxy.Opt.AuthProxy == "" { cmd.CheckArgs(1, 1, command, args) f = cmd.NewFsSrc(args) } else { cmd.CheckArgs(0, 0, command, args) } cmd.Run(false, false, command, func() error { s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt) if err != nil { return err } return s.Serve() }) }, } // driver contains everything to run the driver for the FTP server type driver struct { f fs.Fs srv *ftp.Server ctx context.Context // for global config opt Options globalVFS *vfs.VFS // the VFS if not using auth proxy proxy *proxy.Proxy // may be nil if not in use useTLS bool userPassMu sync.Mutex // to protect userPass userPass map[string]string // cache of username => password when using vfs proxy } func init() { fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "ftp", Opt: &Opt, Options: OptionsInfo}) } var passivePortsRe = regexp.MustCompile(`^\s*\d+\s*-\s*\d+\s*$`) // Make a new FTP to serve the remote func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (*driver, error) { host, port, err := net.SplitHostPort(opt.ListenAddr) if err != nil { return nil, fmt.Errorf("failed to parse host:port from %q", opt.ListenAddr) } portNum, err := strconv.Atoi(port) if err != nil { return nil, fmt.Errorf("failed to parse port number from %q", port) } d := &driver{ f: f, ctx: ctx, opt: *opt, } if proxy.Opt.AuthProxy != "" { d.proxy = proxy.New(ctx, proxyOpt, vfsOpt) d.userPass = make(map[string]string, 16) } else { d.globalVFS = vfs.New(f, vfsOpt) } d.useTLS = d.opt.TLSKey != "" // Check PassivePorts format since the server library doesn't! 
if !passivePortsRe.MatchString(opt.PassivePorts) { return nil, fmt.Errorf("invalid format for passive ports %q", opt.PassivePorts) } ftpopt := &ftp.Options{ Name: "Rclone FTP Server", WelcomeMessage: "Welcome to Rclone " + fs.Version + " FTP Server", Driver: d, Hostname: host, Port: portNum, PublicIP: opt.PublicIP, PassivePorts: opt.PassivePorts, Auth: d, Perm: ftp.NewSimplePerm("ftp", "ftp"), // fake user and group Logger: &Logger{}, TLS: d.useTLS, CertFile: d.opt.TLSCert, KeyFile: d.opt.TLSKey, //TODO implement a maximum of https://godoc.org/goftp.io/server#ServerOpts } d.srv, err = ftp.NewServer(ftpopt) if err != nil { return nil, fmt.Errorf("failed to create new FTP server: %w", err) } return d, nil } // Serve runs the FTP server until it is shutdown func (d *driver) Serve() error { fs.Logf(d.f, "Serving FTP on %s", d.srv.Hostname+":"+strconv.Itoa(d.srv.Port)) err := d.srv.ListenAndServe() if err == ftp.ErrServerClosed { err = nil } return err } // Shutdown stops the ftp server // //lint:ignore U1000 unused when not building linux func (d *driver) Shutdown() error { fs.Logf(d.f, "Stopping FTP on %s", d.srv.Hostname+":"+strconv.Itoa(d.srv.Port)) return d.srv.Shutdown() } // Return the first address of the server func (d *driver) Addr() net.Addr { // The FTP server doesn't let us read the listener // so we have to synthesize the net.Addr here. // On errors we'll return a zero item or zero parts. addr := &net.TCPAddr{} // Split host and port host, port, err := net.SplitHostPort(d.opt.ListenAddr) if err != nil { fs.Errorf(nil, "ftp: addr: invalid address format: %v", err) return addr } // Parse port addr.Port, err = strconv.Atoi(port) if err != nil { fs.Errorf(nil, "ftp: addr: invalid port number: %v", err) } // Resolve the host to an IP address. 
ipAddrs, err := net.LookupIP(host) if err != nil { fs.Errorf(nil, "ftp: addr: failed to resolve host: %v", err) } else if len(ipAddrs) == 0 { fs.Errorf(nil, "ftp: addr: no IP addresses found for host: %s", host) } else { // Choose the first IP address. addr.IP = ipAddrs[0] } return addr } // Logger ftp logger output formatted message type Logger struct{} // Print log simple text message func (l *Logger) Print(sessionID string, message any) { fs.Infof(sessionID, "%s", message) } // Printf log formatted text message func (l *Logger) Printf(sessionID string, format string, v ...any) { fs.Infof(sessionID, format, v...) } // PrintCommand log formatted command execution func (l *Logger) PrintCommand(sessionID string, command string, params string) { if command == "PASS" { fs.Infof(sessionID, "> PASS ****") } else { fs.Infof(sessionID, "> %s %s", command, params) } } // PrintResponse log responses func (l *Logger) PrintResponse(sessionID string, code int, message string) { fs.Infof(sessionID, "< %d %s", code, message) } // CheckPasswd handle auth based on configuration func (d *driver) CheckPasswd(sctx *ftp.Context, user, pass string) (ok bool, err error) { if d.proxy != nil { _, _, err = d.proxy.Call(user, pass, false) if err != nil { fs.Infof(nil, "proxy login failed: %v", err) return false, nil } // Cache obscured password for later lookup. // // We don't cache the VFS directly in the driver as we want them // to be expired and the auth proxy does that for us. 
oPass, err := obscure.Obscure(pass) if err != nil { return false, err } d.userPassMu.Lock() d.userPass[user] = oPass d.userPassMu.Unlock() } else { ok = d.opt.User == user && (d.opt.Pass == "" || d.opt.Pass == pass) if !ok { fs.Infof(nil, "login failed: bad credentials") return false, nil } } return true, nil } // Get the VFS for this connection func (d *driver) getVFS(sctx *ftp.Context) (VFS *vfs.VFS, err error) { if d.proxy == nil { // If no proxy always use the same VFS return d.globalVFS, nil } user := sctx.Sess.LoginUser() d.userPassMu.Lock() oPass, ok := d.userPass[user] d.userPassMu.Unlock() if !ok { return nil, fmt.Errorf("proxy user not logged in") } pass, err := obscure.Reveal(oPass) if err != nil { return nil, err } VFS, _, err = d.proxy.Call(user, pass, false) if err != nil { return nil, fmt.Errorf("proxy login failed: %w", err) } return VFS, nil } // Stat get information on file or folder func (d *driver) Stat(sctx *ftp.Context, path string) (fi iofs.FileInfo, err error) { defer log.Trace(path, "")("fi=%+v, err = %v", &fi, &err) VFS, err := d.getVFS(sctx) if err != nil { return nil, err } n, err := VFS.Stat(path) if err != nil { return nil, err } return &FileInfo{n, n.Mode(), VFS.Opt.UID, VFS.Opt.GID}, err } // ChangeDir move current folder func (d *driver) ChangeDir(sctx *ftp.Context, path string) (err error) { defer log.Trace(path, "")("err = %v", &err) VFS, err := d.getVFS(sctx) if err != nil { return err } n, err := VFS.Stat(path) if err != nil { return err } if !n.IsDir() { return errors.New("not a directory") } return nil } // ListDir list content of a folder func (d *driver) ListDir(sctx *ftp.Context, path string, callback func(iofs.FileInfo) error) (err error) { defer log.Trace(path, "")("err = %v", &err) VFS, err := d.getVFS(sctx) if err != nil { return err } node, err := VFS.Stat(path) if err == vfs.ENOENT { return errors.New("directory not found") } else if err != nil { return err } if !node.IsDir() { return errors.New("not a directory") } 
dir := node.(*vfs.Dir) dirEntries, err := dir.ReadDirAll() if err != nil { return err } // Account the transfer tr := accounting.GlobalStats().NewTransferRemoteSize(path, node.Size(), d.f, nil) defer func() { tr.Done(d.ctx, err) }() for _, file := range dirEntries { err = callback(&FileInfo{file, file.Mode(), VFS.Opt.UID, VFS.Opt.GID}) if err != nil { return err } } return nil } // DeleteDir delete a folder and his content func (d *driver) DeleteDir(sctx *ftp.Context, path string) (err error) { defer log.Trace(path, "")("err = %v", &err) VFS, err := d.getVFS(sctx) if err != nil { return err } node, err := VFS.Stat(path) if err != nil { return err } if !node.IsDir() { return errors.New("not a directory") } err = node.Remove() if err != nil { return err } return nil } // DeleteFile delete a file func (d *driver) DeleteFile(sctx *ftp.Context, path string) (err error) { defer log.Trace(path, "")("err = %v", &err) VFS, err := d.getVFS(sctx) if err != nil { return err } node, err := VFS.Stat(path) if err != nil { return err } if !node.IsFile() { return errors.New("not a file") } err = node.Remove() if err != nil { return err } return nil } // Rename rename a file or folder func (d *driver) Rename(sctx *ftp.Context, oldName, newName string) (err error) { defer log.Trace(oldName, "newName=%q", newName)("err = %v", &err) VFS, err := d.getVFS(sctx) if err != nil { return err } return VFS.Rename(oldName, newName) } // MakeDir create a folder func (d *driver) MakeDir(sctx *ftp.Context, path string) (err error) { defer log.Trace(path, "")("err = %v", &err) VFS, err := d.getVFS(sctx) if err != nil { return err } dir, leaf, err := VFS.StatParent(path) if err != nil { return err } _, err = dir.Mkdir(leaf) return err } // GetFile download a file func (d *driver) GetFile(sctx *ftp.Context, path string, offset int64) (size int64, fr io.ReadCloser, err error) { defer log.Trace(path, "offset=%v", offset)("err = %v", &err) VFS, err := d.getVFS(sctx) if err != nil { return 0, nil, err } 
node, err := VFS.Stat(path) if err == vfs.ENOENT { fs.Infof(path, "File not found") return 0, nil, errors.New("file not found") } else if err != nil { return 0, nil, err } if !node.IsFile() { return 0, nil, errors.New("not a file") } handle, err := node.Open(os.O_RDONLY) if err != nil { return 0, nil, err } _, err = handle.Seek(offset, io.SeekStart) if err != nil { return 0, nil, err } // Account the transfer tr := accounting.GlobalStats().NewTransferRemoteSize(path, node.Size(), d.f, nil) defer tr.Done(d.ctx, nil) return node.Size(), handle, nil } // PutFile upload a file func (d *driver) PutFile(sctx *ftp.Context, path string, data io.Reader, offset int64) (n int64, err error) { defer log.Trace(path, "offset=%d", offset)("err = %v", &err) var isExist bool VFS, err := d.getVFS(sctx) if err != nil { return 0, err } fi, err := VFS.Stat(path) if err == nil { isExist = true if fi.IsDir() { return 0, errors.New("can't create file - directory exists") } } else { if os.IsNotExist(err) { isExist = false } else { return 0, err } } if offset > -1 && !isExist { offset = -1 } var f vfs.Handle if offset == -1 { if isExist { err = VFS.Remove(path) if err != nil { return 0, err } } f, err = VFS.Create(path) if err != nil { return 0, err } defer fs.CheckClose(f, &err) n, err = io.Copy(f, data) if err != nil { return 0, err } return n, nil } f, err = VFS.OpenFile(path, os.O_APPEND|os.O_RDWR, 0660) if err != nil { return 0, err } defer fs.CheckClose(f, &err) info, err := f.Stat() if err != nil { return 0, err } if offset > info.Size() { return 0, fmt.Errorf("offset %d is beyond file size %d", offset, info.Size()) } _, err = f.Seek(offset, io.SeekStart) if err != nil { return 0, err } bytes, err := io.Copy(f, data) if err != nil { return 0, err } return bytes, nil } // FileInfo struct to hold file info for ftp server type FileInfo struct { os.FileInfo mode os.FileMode owner uint32 group uint32 } // Mode return mode of file. 
func (f *FileInfo) Mode() os.FileMode {
	return f.mode
}

// Owner returns the owning user of the file as a name where the uid
// can be resolved, falling back to the numeric id otherwise.
func (f *FileInfo) Owner() string {
	uid := fmt.Sprint(f.owner)
	if u, err := user.LookupId(uid); err == nil {
		return u.Username
	}
	return uid // user not found - default to numerical value
}

// Group returns the owning group of the file as a name where the gid
// can be resolved, falling back to the numeric id otherwise.
func (f *FileInfo) Group() string {
	gid := fmt.Sprint(f.group)
	if g, err := user.LookupGroupId(gid); err == nil {
		return g.Name
	}
	return gid // group not found - default to numerical value
}

// ModTime returns the modification time converted to UTC
func (f *FileInfo) ModTime() time.Time {
	return f.FileInfo.ModTime().UTC()
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/ftp/ftp_test.go
cmd/serve/ftp/ftp_test.go
// Serve ftp tests set up a server and run the integration tests
// for the ftp remote against it.
//
// We skip tests on platforms with troublesome character mappings

//go:build !windows && !darwin && !plan9

package ftp

import (
	"context"
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/cmd/serve/proxy"
	"github.com/rclone/rclone/cmd/serve/servetest"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/rc"
	"github.com/rclone/rclone/lib/israce"
	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/stretchr/testify/assert"
)

// Fixed connection details used by the test server and client.
const (
	testHOST             = "localhost"
	testPORT             = "51780"
	testPASSIVEPORTRANGE = "30000-32000"
	testUSER             = "rclone"
	testPASS             = "password"
)

// TestFTP runs the ftp server then runs the unit tests for the
// ftp remote against it.
func TestFTP(t *testing.T) {
	// Configure and start the server
	start := func(f fs.Fs) (configmap.Simple, func()) {
		// Copy the global options so the test doesn't mutate them
		opt := Opt
		opt.ListenAddr = testHOST + ":" + testPORT
		opt.PassivePorts = testPASSIVEPORTRANGE
		opt.User = testUSER
		opt.Pass = testPASS

		w, err := newServer(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt)
		assert.NoError(t, err)

		// quit is closed when Serve returns so the stop function can
		// wait for the server goroutine to finish
		quit := make(chan struct{})
		go func() {
			assert.NoError(t, w.Serve())
			close(quit)
		}()

		// Config for the backend we'll use to connect to the server
		config := configmap.Simple{
			"type": "ftp",
			"host": testHOST,
			"port": testPORT,
			"user": testUSER,
			"pass": obscure.MustObscure(testPASS),
		}

		// return a stop function which shuts the server down and
		// waits for the Serve goroutine to exit
		return config, func() {
			err := w.Shutdown()
			assert.NoError(t, err)
			<-quit
		}
	}

	servetest.Run(t, "ftp", start)
}

// TestRc tests the ftp server can be controlled over the rc interface.
func TestRc(t *testing.T) {
	if israce.Enabled {
		t.Skip("Skipping under race detector as underlying library is racy")
	}
	servetest.TestRc(t, rc.Params{
		"type":           "ftp",
		"vfs_cache_mode": "off",
	})
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/ftp/ftp_unsupported.go
cmd/serve/ftp/ftp_unsupported.go
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9

// Package ftp implements an FTP server for rclone
package ftp

import "github.com/spf13/cobra"

// Command definition is nil to show not implemented on this platform
var Command *cobra.Command
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/http/http_test.go
cmd/serve/http/http_test.go
package http import ( "context" "flag" "io" stdfs "io/fs" "net/http" "os" "path/filepath" "strings" "testing" "time" _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/cmd/serve/proxy" "github.com/rclone/rclone/cmd/serve/servetest" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/rc" libhttp "github.com/rclone/rclone/lib/http" "github.com/rclone/rclone/vfs/vfscommon" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( updateGolden = flag.Bool("updategolden", false, "update golden files for regression test") ) const ( testBindAddress = "localhost:0" testUser = "user" testPass = "pass" testTemplate = "testdata/golden/testindex.html" ) func start(ctx context.Context, t *testing.T, f fs.Fs) (s *HTTP, testURL string) { opts := Options{ HTTP: libhttp.DefaultCfg(), Template: libhttp.TemplateConfig{ Path: testTemplate, }, } opts.HTTP.ListenAddr = []string{testBindAddress} if proxy.Opt.AuthProxy == "" { opts.Auth.BasicUser = testUser opts.Auth.BasicPass = testPass } s, err := newServer(ctx, f, &opts, &vfscommon.Opt, &proxy.Opt) require.NoError(t, err, "failed to start server") go func() { require.NoError(t, s.Serve()) }() urls := s.server.URLs() require.Len(t, urls, 1, "expected one URL") testURL = urls[0] // try to connect to the test server pause := time.Millisecond for range 10 { resp, err := http.Head(testURL) if err == nil { _ = resp.Body.Close() return } // t.Logf("couldn't connect, sleeping for %v: %v", pause, err) time.Sleep(pause) pause *= 2 } t.Fatal("couldn't connect to server") return s, testURL } // setAllModTimes walks root and sets atime/mtime to t for every file & directory. 
func setAllModTimes(root string, t time.Time) error { return filepath.WalkDir(root, func(path string, d stdfs.DirEntry, err error) error { if err != nil { return err } return os.Chtimes(path, t, t) }) } var ( datedObject = "two.txt" expectedTime = time.Date(2000, 1, 2, 3, 4, 5, 0, time.UTC) ) // check body against the file, or re-write body if -updategolden is // set. func checkGolden(t *testing.T, fileName string, got []byte) { if *updateGolden { t.Logf("Updating golden file %q", fileName) err := os.WriteFile(fileName, got, 0666) require.NoError(t, err) } else { want, err := os.ReadFile(fileName) require.NoError(t, err) wants := strings.Split(string(want), "\n") gots := strings.Split(string(got), "\n") assert.Equal(t, wants, gots, fileName) } } func testGET(t *testing.T, useProxy bool) { ctx := context.Background() // ci := fs.GetConfig(ctx) // ci.LogLevel = fs.LogLevelDebug // exclude files called hidden.txt and directories called hidden fi := filter.GetConfig(ctx) require.NoError(t, fi.AddRule("- hidden.txt")) require.NoError(t, fi.AddRule("- hidden/**")) var f fs.Fs if useProxy { // the backend config will be made by the proxy prog, err := filepath.Abs("../servetest/proxy_code.go") require.NoError(t, err) files, err := filepath.Abs("testdata/files") require.NoError(t, err) cmd := "go run " + prog + " " + files // FIXME this is untidy setting a global variable! 
proxy.Opt.AuthProxy = cmd defer func() { proxy.Opt.AuthProxy = "" }() f = nil } else { // set all the mod times to expectedTime require.NoError(t, setAllModTimes("testdata/files", expectedTime)) // Create a test Fs var err error f, err = fs.NewFs(context.Background(), "testdata/files") require.NoError(t, err) // set date of datedObject to expectedTime obj, err := f.NewObject(context.Background(), datedObject) require.NoError(t, err) require.NoError(t, obj.SetModTime(context.Background(), expectedTime)) } s, testURL := start(ctx, t, f) defer func() { assert.NoError(t, s.server.Shutdown()) }() for _, test := range []struct { URL string Status int Golden string Method string Range string }{ { URL: "", Status: http.StatusOK, Golden: "testdata/golden/index.html", }, { URL: "notfound", Status: http.StatusNotFound, Golden: "testdata/golden/notfound.html", }, { URL: "dirnotfound/", Status: http.StatusNotFound, Golden: "testdata/golden/dirnotfound.html", }, { URL: "hidden/", Status: http.StatusNotFound, Golden: "testdata/golden/hiddendir.html", }, { URL: "one%25.txt", Status: http.StatusOK, Golden: "testdata/golden/one.txt", }, { URL: "hidden.txt", Status: http.StatusNotFound, Golden: "testdata/golden/hidden.txt", }, { URL: "three/", Status: http.StatusOK, Golden: "testdata/golden/three.html", }, { URL: "three/a.txt", Status: http.StatusOK, Golden: "testdata/golden/a.txt", }, { URL: "", Method: "HEAD", Status: http.StatusOK, Golden: "testdata/golden/indexhead.txt", }, { URL: "one%25.txt", Method: "HEAD", Status: http.StatusOK, Golden: "testdata/golden/onehead.txt", }, { URL: "", Method: "POST", Status: http.StatusMethodNotAllowed, Golden: "testdata/golden/indexpost.txt", }, { URL: "one%25.txt", Method: "POST", Status: http.StatusMethodNotAllowed, Golden: "testdata/golden/onepost.txt", }, { URL: "two.txt", Status: http.StatusOK, Golden: "testdata/golden/two.txt", }, { URL: "two.txt", Status: http.StatusPartialContent, Range: "bytes=2-5", Golden: "testdata/golden/two2-5.txt", 
}, { URL: "two.txt", Status: http.StatusPartialContent, Range: "bytes=0-6", Golden: "testdata/golden/two-6.txt", }, { URL: "two.txt", Status: http.StatusPartialContent, Range: "bytes=3-", Golden: "testdata/golden/two3-.txt", }, { URL: "/?download=zip", Status: http.StatusOK, Golden: "testdata/golden/root.zip", }, { URL: "/three/?download=zip", Status: http.StatusOK, Golden: "testdata/golden/three.zip", }, } { method := test.Method if method == "" { method = "GET" } req, err := http.NewRequest(method, testURL+test.URL, nil) require.NoError(t, err) if test.Range != "" { req.Header.Add("Range", test.Range) } req.SetBasicAuth(testUser, testPass) resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, test.Status, resp.StatusCode, test.Golden) body, err := io.ReadAll(resp.Body) require.NoError(t, err) // Check we got a Last-Modified header and that it is a valid date if test.Status == http.StatusOK || test.Status == http.StatusPartialContent { lastModified := resp.Header.Get("Last-Modified") assert.NotEqual(t, "", lastModified, test.Golden) modTime, err := http.ParseTime(lastModified) assert.NoError(t, err, test.Golden) // check the actual date on our special file if test.URL == datedObject { assert.Equal(t, expectedTime, modTime, test.Golden) } } checkGolden(t, test.Golden, body) } } func TestGET(t *testing.T) { testGET(t, false) } func TestAuthProxy(t *testing.T) { testGET(t, true) } func TestRc(t *testing.T) { servetest.TestRc(t, rc.Params{ "type": "http", "vfs_cache_mode": "off", }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/http/http.go
cmd/serve/http/http.go
// Package http provides common functionality for http servers
package http

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/go-chi/chi/v5/middleware"
	"github.com/rclone/rclone/cmd"
	cmdserve "github.com/rclone/rclone/cmd/serve"
	"github.com/rclone/rclone/cmd/serve/proxy"
	"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/rc"
	libhttp "github.com/rclone/rclone/lib/http"
	"github.com/rclone/rclone/lib/http/serve"
	"github.com/rclone/rclone/lib/systemd"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/rclone/rclone/vfs/vfsflags"
	"github.com/spf13/cobra"
)

// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{}.
	Add(libhttp.ConfigInfo).
	Add(libhttp.AuthConfigInfo).
	Add(libhttp.TemplateConfigInfo)

// Options required for http server
type Options struct {
	Auth       libhttp.AuthConfig     // authentication (basic auth / custom auth fn)
	HTTP       libhttp.Config         // listener / TLS configuration
	Template   libhttp.TemplateConfig // directory listing template
	DisableZip bool                   // if set, disable ?download=zip of directories
}

// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
	Auth:     libhttp.DefaultAuthCfg(),
	HTTP:     libhttp.DefaultCfg(),
	Template: libhttp.DefaultTemplateCfg(),
}

// Opt is options set by command line flags
var Opt = DefaultOpt

func init() {
	fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "http", Opt: &Opt, Options: OptionsInfo})
}

// flagPrefix is the prefix used to uniquely identify command line flags.
// It is intentionally empty for this package.
const flagPrefix = ""

func init() {
	flagSet := Command.Flags()
	flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
	vfsflags.AddFlags(flagSet)
	proxyflags.AddFlags(flagSet)
	flagSet.BoolVar(&Opt.DisableZip, "disable-zip", false, "Disable zip download of directories")
	cmdserve.Command.AddCommand(Command)
	// Register the rc "serve/start" handler for type "http".  Each of
	// the three option blocks starts from the current global defaults
	// and is then overridden by whatever keys are present in `in`.
	cmdserve.AddRc("http", func(ctx context.Context, f fs.Fs, in rc.Params) (cmdserve.Handle, error) {
		// Read VFS Opts
		var vfsOpt = vfscommon.Opt // set default opts
		err := configstruct.SetAny(in, &vfsOpt)
		if err != nil {
			return nil, err
		}
		// Read Proxy Opts
		var proxyOpt = proxy.Opt // set default opts
		err = configstruct.SetAny(in, &proxyOpt)
		if err != nil {
			return nil, err
		}
		// Read opts
		var opt = Opt // set default opts
		err = configstruct.SetAny(in, &opt)
		if err != nil {
			return nil, err
		}
		// Create server
		return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt)
	})
}

// Command definition for cobra
var Command = &cobra.Command{
	Use:   "http remote:path",
	Short: `Serve the remote over HTTP.`,
	Long: `Run a basic web server to serve a remote over HTTP. This can be viewed in a web browser or you can make a remote of type http read from it. You can use the filter flags (e.g. ` + "`--include`, `--exclude`" + `) to control what is served. The server will log errors. Use ` + "`-v`" + ` to see access logs. ` + "`--bwlimit`" + ` will be respected for file transfers. Use ` + "`--stats`" + ` to control the stats printing. ` + strings.TrimSpace(libhttp.Help(flagPrefix)+libhttp.TemplateHelp(flagPrefix)+libhttp.AuthHelp(flagPrefix)+vfs.Help()+proxy.Help),
	Annotations: map[string]string{
		"versionIntroduced": "v1.39",
		"groups":            "Filter",
	},
	Run: func(command *cobra.Command, args []string) {
		var f fs.Fs
		// With an auth proxy the backend is created per-user on login,
		// so no remote argument is taken on the command line.
		if proxy.Opt.AuthProxy == "" {
			cmd.CheckArgs(1, 1, command, args)
			f = cmd.NewFsSrc(args)
		} else {
			cmd.CheckArgs(0, 0, command, args)
		}
		cmd.Run(false, true, command, func() error {
			s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
			if err != nil {
				fs.Fatal(nil, fmt.Sprint(err))
			}
			defer systemd.Notify()()
			return s.Serve()
		})
	},
}

// HTTP contains everything to run the server
type HTTP struct {
	f      fs.Fs
	_vfs   *vfs.VFS // don't use directly, use getVFS
	server *libhttp.Server
	opt    Options
	proxy  *proxy.Proxy
	ctx    context.Context // for global config
}

// Gets the VFS in use for this request.
//
// Without an auth proxy this is the single shared VFS (s._vfs); with a
// proxy the per-user VFS is stashed in the request context by the auth
// layer and retrieved here.
func (s *HTTP) getVFS(ctx context.Context) (VFS *vfs.VFS, err error) {
	if s._vfs != nil {
		return s._vfs, nil
	}
	value := libhttp.CtxGetAuth(ctx)
	if value == nil {
		return nil, errors.New("no VFS found in context")
	}
	VFS, ok := value.(*vfs.VFS)
	if !ok {
		return nil, fmt.Errorf("context value is not VFS: %#v", value)
	}
	return VFS, nil
}

// auth does proxy authorization
func (s *HTTP) auth(user, pass string) (value any, err error) {
	VFS, _, err := s.proxy.Call(user, pass, false)
	if err != nil {
		return nil, err
	}
	return VFS, err
}

// newServer builds the HTTP server: it wires up either a shared VFS or
// a per-user auth proxy, creates the underlying libhttp server and
// installs the GET/HEAD routes.
func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (s *HTTP, err error) {
	s = &HTTP{
		f:   f,
		ctx: ctx,
		opt: *opt,
	}
	if proxyOpt.AuthProxy != "" {
		s.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
		// override auth
		s.opt.Auth.CustomAuthFn = s.auth
	} else {
		s._vfs = vfs.New(f, vfsOpt)
	}
	s.server, err = libhttp.NewServer(ctx,
		libhttp.WithConfig(s.opt.HTTP),
		libhttp.WithAuth(s.opt.Auth),
		libhttp.WithTemplate(s.opt.Template),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to init server: %w", err)
	}
	router := s.server.Router()
	router.Use(
		middleware.SetHeader("Accept-Ranges", "bytes"),
		middleware.SetHeader("Server", "rclone/"+fs.Version),
	)
	router.Get("/*", s.handler)
	router.Head("/*", s.handler)
	return s, nil
}

// Serve HTTP until the server is shutdown
func (s *HTTP) Serve() error {
	s.server.Serve()
	fs.Logf(s.f, "HTTP Server started on %s", s.server.URLs())
	s.server.Wait()
	return nil
}

// Addr returns the first address of the server
func (s *HTTP) Addr() net.Addr {
	return s.server.Addr()
}

// Shutdown the server
func (s *HTTP) Shutdown() error {
	return s.server.Shutdown()
}

// handler reads incoming requests and dispatches them.
//
// A trailing "/" in the URL selects the directory listing path;
// anything else is treated as a file.
func (s *HTTP) handler(w http.ResponseWriter, r *http.Request) {
	isDir := strings.HasSuffix(r.URL.Path, "/")
	remote := strings.Trim(r.URL.Path, "/")
	if isDir {
		s.serveDir(w, r, remote)
	} else {
		s.serveFile(w, r, remote)
	}
}

// serveDir serves a directory index at dirRemote
func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string) {
	ctx := r.Context()
	VFS, err := s.getVFS(r.Context())
	if err != nil {
		http.Error(w, "Root directory not found", http.StatusNotFound)
		fs.Errorf(nil, "Failed to serve directory: %v", err)
		return
	}
	// List the directory
	node, err := VFS.Stat(dirRemote)
	if err == vfs.ENOENT {
		http.Error(w, "Directory not found", http.StatusNotFound)
		return
	} else if err != nil {
		serve.Error(ctx, dirRemote, w, "Failed to list directory", err)
		return
	}
	if !node.IsDir() {
		http.Error(w, "Not a directory", http.StatusNotFound)
		return
	}
	dir := node.(*vfs.Dir)
	// ?download=zip streams the whole directory as a zip archive
	// (unless disabled with --disable-zip).
	if r.URL.Query().Get("download") == "zip" && !s.opt.DisableZip {
		fs.Infof(dirRemote, "%s: Zipping directory", r.RemoteAddr)
		zipName := path.Base(dirRemote)
		if dirRemote == "" {
			zipName = "root"
		}
		w.Header().Set("Content-Disposition", "attachment; filename=\""+zipName+".zip\"")
		w.Header().Set("Content-Type", "application/zip")
		w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
		err := vfs.CreateZip(ctx, dir, w)
		if err != nil {
			serve.Error(ctx, dirRemote, w, "Failed to create zip", err)
			return
		}
		return
	}
	dirEntries, err := dir.ReadDirAll()
	if err != nil {
		serve.Error(ctx, dirRemote, w, "Failed to list directory", err)
		return
	}
	// Make the entries for display
	directory := serve.NewDirectory(dirRemote, s.server.HTMLTemplate())
	for _, node := range dirEntries {
		if vfscommon.Opt.NoModTime {
			directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{})
		} else {
			directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), node.ModTime().UTC())
		}
	}
	sortParm := r.URL.Query().Get("sort")
	orderParm := r.URL.Query().Get("order")
	directory.ProcessQueryParams(sortParm, orderParm)
	// Set the Last-Modified header to the timestamp
	w.Header().Set("Last-Modified", dir.ModTime().UTC().Format(http.TimeFormat))
	directory.DisableZip = s.opt.DisableZip
	directory.Serve(w, r)
}

// serveFile serves a file object at remote
func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string) {
	ctx := r.Context()
	VFS, err := s.getVFS(r.Context())
	if err != nil {
		http.Error(w, "File not found", http.StatusNotFound)
		fs.Errorf(nil, "Failed to serve file: %v", err)
		return
	}
	node, err := VFS.Stat(remote)
	if err == vfs.ENOENT {
		fs.Infof(remote, "%s: File not found", r.RemoteAddr)
		http.Error(w, "File not found", http.StatusNotFound)
		return
	} else if err != nil {
		serve.Error(ctx, remote, w, "Failed to find file", err)
		return
	}
	if !node.IsFile() {
		http.Error(w, "Not a file", http.StatusNotFound)
		return
	}
	entry := node.DirEntry()
	if entry == nil {
		// A file currently being uploaded has no backing object yet
		http.Error(w, "Can't open file being written", http.StatusNotFound)
		return
	}
	obj := entry.(fs.Object)
	file := node.(*vfs.File)

	// Set content length if we know how long the object is
	knownSize := obj.Size() >= 0
	if knownSize {
		w.Header().Set("Content-Length", strconv.FormatInt(node.Size(), 10))
	}

	// Set content type
	mimeType := fs.MimeType(r.Context(), obj)
	if mimeType == "application/octet-stream" && path.Ext(remote) == "" {
		// Leave header blank so http server guesses
	} else {
		w.Header().Set("Content-Type", mimeType)
	}

	// Set the Last-Modified header to the timestamp
	w.Header().Set("Last-Modified", file.ModTime().UTC().Format(http.TimeFormat))

	// If HEAD no need to read the object since we have set the headers
	if r.Method == "HEAD" {
		return
	}

	// open the object
	in, err := file.Open(os.O_RDONLY)
	if err != nil {
		serve.Error(ctx, remote, w, "Failed to open file", err)
		return
	}
	defer func() {
		err := in.Close()
		if err != nil {
			fs.Errorf(remote, "Failed to close file: %v", err)
		}
	}()

	// Account the transfer
	tr := accounting.Stats(r.Context()).NewTransfer(obj, nil)
	defer tr.Done(r.Context(), nil)
	// FIXME in = fs.NewAccount(in, obj).WithBuffer() // account the transfer

	// Serve the file
	if knownSize {
		http.ServeContent(w, r, remote, node.ModTime(), in)
	} else {
		// http.ServeContent can't serve unknown length files
		if rangeRequest := r.Header.Get("Range"); rangeRequest != "" {
			http.Error(w, "Can't use Range: on files of unknown length", http.StatusRequestedRangeNotSatisfiable)
			return
		}
		n, err := io.Copy(w, in)
		if err != nil {
			fs.Errorf(obj, "Didn't finish writing GET request (wrote %d/unknown bytes): %v", n, err)
			return
		}
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/servetest/rc.go
cmd/serve/servetest/rc.go
package servetest

import (
	"context"
	"fmt"
	"net"
	"strings"
	"testing"
	"time"

	"github.com/rclone/rclone/fs/rc"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// GetEphemeralPort opens a listening port on localhost:0, closes it,
// and returns the address as "localhost:port".
func GetEphemeralPort(t *testing.T) string {
	l, err := net.Listen("tcp", "localhost:0") // Listen on any available port
	require.NoError(t, err)
	defer func() {
		require.NoError(t, l.Close())
	}()
	return l.Addr().String()
}

// checkTCP attempts to establish a TCP connection to the given address,
// and closes it if successful. Returns an error if the connection fails.
func checkTCP(address string) error {
	c, err := net.DialTimeout("tcp", address, 5*time.Second)
	if err != nil {
		return fmt.Errorf("failed to connect to %s: %w", address, err)
	}
	if err := c.Close(); err != nil {
		return fmt.Errorf("failed to close connection to %s: %w", address, err)
	}
	return nil
}

// TestRc tests the rc interface for the servers
//
// in should contain any options necessary however this code will add
// "fs", "addr".
func TestRc(t *testing.T, in rc.Params) {
	ctx := context.Background()
	tmpDir := t.TempDir()
	startCall := rc.Calls.Get("serve/start")
	stopCall := rc.Calls.Get("serve/stop")
	serverType := in["type"].(string)
	addr := GetEphemeralPort(t)

	// Start the server on the ephemeral port
	in["fs"] = tmpDir
	in["addr"] = addr
	result, err := startCall.Fn(ctx, in)
	require.NoError(t, err)
	id := result["id"].(string)
	assert.True(t, strings.HasPrefix(id, serverType+"-"))
	gotAddr := result["addr"].(string)
	assert.Equal(t, addr, gotAddr)

	// Check we can make a TCP connection to the server
	t.Logf("Checking connection on %q", addr)
	assert.NoError(t, checkTCP(addr))

	// Stop the server
	_, err = stopCall.Fn(ctx, rc.Params{"id": id})
	require.NoError(t, err)

	// Check we can make no longer make connections to the server
	assert.Error(t, checkTCP(addr))
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/servetest/servetest.go
cmd/serve/servetest/servetest.go
// Package servetest provides infrastructure for running loopback
// tests of "rclone serve backend:" against the backend integration
// tests.
package servetest

import (
	"context"
	"flag"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"testing"

	"github.com/rclone/rclone/cmd/serve/proxy"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fstest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var subRun = flag.String("sub-run", "", "pass this to the -run command of the backend tests")

// StartFn describes the callback which should start the server with
// the Fs passed in.
// It should return a config for the backend used to connect to the
// server and a clean up function
type StartFn func(f fs.Fs) (configmap.Simple, func())

// run runs the server then runs the unit tests for the remote against
// it.
//
// It spawns "go test" in the backend's directory as a subprocess,
// configuring an on-the-fly remote purely through RCLONE_CONFIG_*
// environment variables.
func run(t *testing.T, name string, start StartFn, useProxy bool) {
	fremote, _, clean, err := fstest.RandomRemote()
	assert.NoError(t, err)
	defer clean()

	err = fremote.Mkdir(context.Background(), "")
	assert.NoError(t, err)

	f := fremote
	if useProxy {
		// If using a proxy don't pass in the backend
		f = nil

		// the backend config will be made by the proxy
		prog, err := filepath.Abs("../servetest/proxy_code.go")
		require.NoError(t, err)
		cmd := "go run " + prog + " " + fremote.Root()

		// FIXME this is untidy setting a global variable!
		proxy.Opt.AuthProxy = cmd
		defer func() {
			proxy.Opt.AuthProxy = ""
		}()
	}
	config, cleanup := start(f)
	defer cleanup()

	// Change directory to run the tests
	// (the backend integration tests must run from their own package dir)
	cwd, err := os.Getwd()
	require.NoError(t, err)
	err = os.Chdir("../../../backend/" + name)
	require.NoError(t, err, "failed to cd to "+name+" backend")
	defer func() {
		// Change back to the old directory
		require.NoError(t, os.Chdir(cwd))
	}()

	// Run the backend tests with an on the fly remote
	args := []string{"test"}
	if testing.Verbose() {
		args = append(args, "-v")
	}
	if *fstest.Verbose {
		args = append(args, "-verbose")
	}
	remoteName := "serve" + name + "test:"
	if *subRun != "" {
		args = append(args, "-run", *subRun)
	}
	args = append(args, "-remote", remoteName)
	args = append(args, "-list-retries", fmt.Sprint(*fstest.ListRetries))
	cmd := exec.Command("go", args...)

	// Configure the backend with environment variables
	// (remoteName minus its trailing ":" becomes the config section name)
	cmd.Env = os.Environ()
	prefix := "RCLONE_CONFIG_" + strings.ToUpper(remoteName[:len(remoteName)-1]) + "_"
	for k, v := range config {
		cmd.Env = append(cmd.Env, prefix+strings.ToUpper(k)+"="+v)
	}

	// Run the test
	out, err := cmd.CombinedOutput()
	if len(out) != 0 {
		t.Logf("\n----------\n%s----------\n", string(out))
	}
	assert.NoError(t, err, "Running "+name+" integration tests")
}

// Run runs the server then runs the unit tests for the remote against
// it.
func Run(t *testing.T, name string, start StartFn) {
	fstest.Initialise()
	t.Run("Normal", func(t *testing.T) {
		run(t, name, start, false)
	})
	t.Run("AuthProxy", func(t *testing.T) {
		run(t, name, start, true)
	})
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/serve/servetest/proxy_code.go
cmd/serve/servetest/proxy_code.go
//go:build ignore // A simple auth proxy for testing purposes package main import ( "encoding/json" "log" "os" ) func main() { if len(os.Args) < 2 { log.Fatalf("Syntax: %s <root>", os.Args[0]) } root := os.Args[1] // Read the input var in map[string]string err := json.NewDecoder(os.Stdin).Decode(&in) if err != nil { log.Fatal(err) } // Write the output var out = map[string]string{ "type": "local", "_root": root, "_obscure": "pass", } json.NewEncoder(os.Stdout).Encode(&out) if err != nil { log.Fatal(err) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/ls/ls.go
cmd/ls/ls.go
// Package ls provides the ls command.
package ls

import (
	"context"
	"os"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/ls/lshelp"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)

// init registers the ls command with the root command.
func init() {
	cmd.Root.AddCommand(commandDefinition)
}

// commandDefinition wires "rclone ls" to operations.List, which writes
// size and path of each object to stdout.
var commandDefinition = &cobra.Command{
	Use:   "ls remote:path",
	Short: `List the objects in the path with size and path.`,
	Long: `Lists the objects in the source path to standard output in a human readable format with size and path. Recurses by default. E.g. ` + "```console" + ` $ rclone ls swift:bucket 60295 bevajer5jef 90613 canole 94467 diwogej7 37600 fubuwic ` + "```" + ` ` + lshelp.Help,
	Annotations: map[string]string{
		"groups": "Filter,Listing",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		fsrc := cmd.NewFsSrc(args)
		cmd.Run(false, false, command, func() error {
			return operations.List(context.Background(), fsrc, os.Stdout)
		})
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/ls/lshelp/lshelp.go
cmd/ls/lshelp/lshelp.go
// Package lshelp provides common help for list commands.
package lshelp

import (
	"strings"
)

// Help describes the common help for all the list commands
// The text is written with "|" standing in for backticks so the raw
// string literal can be used, and the ReplaceAll below substitutes
// real backticks at init time.
var Help = strings.ReplaceAll(`Any of the filtering options can be applied to this command. There are several related list commands - |ls| to list size and path of objects only - |lsl| to list modification time, size and path of objects only - |lsd| to list directories only - |lsf| to list objects and directories in easy to parse format - |lsjson| to list objects and directories in JSON format |ls|,|lsl|,|lsd| are designed to be human-readable. |lsf| is designed to be human and machine-readable. |lsjson| is designed to be machine-readable. Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the recursion. The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default - use |-R| to make them recurse. List commands prefer a recursive method that uses more memory but fewer transactions by default. Use |--disable ListR| to suppress the behavior. See [|--fast-list|](/docs/#fast-list) for more details. Listing a nonexistent directory will produce an error except for remotes which can't have empty directories (e.g. s3, swift, or gcs - the bucket-based remotes).`, "|", "`")
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/listremotes/listremotes.go
cmd/listremotes/listremotes.go
// Package ls provides the listremotes command.
// (Note: the package is named ls but lives in cmd/listremotes.)
package ls

import (
	"encoding/json"
	"fmt"
	"os"
	"regexp"
	"sort"
	"strings"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/filter"
	"github.com/spf13/cobra"
)

// Command line flags for listremotes.
var (
	listLong          bool   // --long: show type and description too
	jsonOutput        bool   // --json: machine-readable output
	filterName        string // --name: filter by remote name
	filterType        string // --type: filter by remote type
	filterSource      string // --source: filter by config source
	filterDescription string // --description: filter by description
	orderBy           string // --order-by: sort specification
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &listLong, "long", "", false, "Show type and description in addition to name", "")
	flags.StringVarP(cmdFlags, &filterName, "name", "", "", "Filter remotes by name", "")
	flags.StringVarP(cmdFlags, &filterType, "type", "", "", "Filter remotes by type", "")
	flags.StringVarP(cmdFlags, &filterSource, "source", "", "", "Filter remotes by source, e.g. 'file' or 'environment'", "")
	flags.StringVarP(cmdFlags, &filterDescription, "description", "", "", "Filter remotes by description", "")
	flags.StringVarP(cmdFlags, &orderBy, "order-by", "", "", "Instructions on how to order the result, e.g. 'type,name=descending'", "")
	flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON", "")
}

// lessFn compares two remotes for order by
type lessFn func(a, b config.Remote) bool

// newLess returns a function for comparing remotes based on an order by string
//
// The orderBy string is a comma-separated list of "field" or
// "field=direction" terms. The comparators are composed right-to-left
// so that earlier terms take precedence and later terms break ties.
func newLess(orderBy string) (less lessFn, err error) {
	if orderBy == "" {
		return nil, nil
	}
	parts := strings.Split(strings.ToLower(orderBy), ",")
	n := len(parts)
	// Iterate from the least significant term to the most significant,
	// wrapping the accumulated comparator as the tie-breaker each time.
	for i := n - 1; i >= 0; i-- {
		fieldAndDirection := strings.SplitN(parts[i], "=", 2)
		descending := false
		if len(fieldAndDirection) > 1 {
			switch fieldAndDirection[1] {
			case "ascending", "asc":
			case "descending", "desc":
				descending = true
			default:
				return nil, fmt.Errorf("unknown --order-by direction %q", fieldAndDirection[1])
			}
		}
		var field func(o config.Remote) string
		switch fieldAndDirection[0] {
		case "name":
			field = func(o config.Remote) string { return o.Name }
		case "type":
			field = func(o config.Remote) string { return o.Type }
		case "source":
			field = func(o config.Remote) string { return o.Source }
		case "description":
			field = func(o config.Remote) string { return o.Description }
		default:
			return nil, fmt.Errorf("unknown --order-by field %q", fieldAndDirection[0])
		}
		var thisLess lessFn
		if descending {
			thisLess = func(a, b config.Remote) bool { return field(a) > field(b) }
		} else {
			thisLess = func(a, b config.Remote) bool { return field(a) < field(b) }
		}
		if i == n-1 {
			less = thisLess
		} else {
			// Equal on this field: defer to the previously-built
			// (less significant) comparator.
			nextLess := less
			less = func(a, b config.Remote) bool {
				if field(a) == field(b) {
					return nextLess(a, b)
				}
				return thisLess(a, b)
			}
		}
	}
	return less, nil
}

var commandDefinition = &cobra.Command{
	Use:   "listremotes [<filter>]",
	Short: `List all the remotes in the config file and defined in environment variables.`,
	Long: `Lists all the available remotes from the config file, or the remotes matching an optional filter. Prints the result in human-readable format by default, and as a simple list of remote names, or if used with flag ` + "`--long`" + ` a tabular format including the remote names, types and descriptions. Using flag ` + "`--json`" + ` produces machine-readable output instead, which always includes all attributes - including the source (file or environment). Result can be filtered by a filter argument which applies to all attributes, and/or filter flags specific for each attribute. The values must be specified according to regular rclone filtering pattern syntax.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.34",
	},
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(0, 1, command, args)
		// Optional positional argument is an "all attributes" filter
		var filterDefault string
		if len(args) > 0 {
			filterDefault = args[0]
		}
		// Compile each non-empty filter (glob syntax) into a regexp
		filters := make(map[string]*regexp.Regexp)
		for k, v := range map[string]string{
			"all":         filterDefault,
			"name":        filterName,
			"type":        filterType,
			"source":      filterSource,
			"description": filterDescription,
		} {
			if v != "" {
				filterRe, err := filter.GlobStringToRegexp(v, false, true)
				if err != nil {
					return fmt.Errorf("invalid %s filter argument: %w", k, err)
				}
				fs.Debugf(nil, "Filter for %s: %s", k, filterRe.String())
				filters[k] = filterRe
			}
		}
		remotes := config.GetRemotes()
		// Filter in place, tracking column widths for --long output
		maxName := 0
		maxType := 0
		i := 0
		for _, remote := range remotes {
			include := true
			for k, v := range filters {
				if k == "all" && !(v.MatchString(remote.Name) || v.MatchString(remote.Type) || v.MatchString(remote.Source) || v.MatchString(remote.Description)) {
					include = false
				} else if k == "name" && !v.MatchString(remote.Name) {
					include = false
				} else if k == "type" && !v.MatchString(remote.Type) {
					include = false
				} else if k == "source" && !v.MatchString(remote.Source) {
					include = false
				} else if k == "description" && !v.MatchString(remote.Description) {
					include = false
				}
			}
			if include {
				if len(remote.Name) > maxName {
					maxName = len(remote.Name)
				}
				if len(remote.Type) > maxType {
					maxType = len(remote.Type)
				}
				remotes[i] = remote
				i++
			}
		}
		remotes = remotes[:i]
		// Sort if an --order-by spec was given
		less, err := newLess(orderBy)
		if err != nil {
			return err
		}
		if less != nil {
			sliceLessFn := func(i, j int) bool {
				return less(remotes[i], remotes[j])
			}
			sort.SliceStable(remotes, sliceLessFn)
		}
		if jsonOutput {
			// Emit a JSON array, one marshalled remote per element
			fmt.Println("[")
			first := true
			for _, remote := range remotes {
				out, err := json.Marshal(remote)
				if err != nil {
					return fmt.Errorf("failed to marshal remote object: %w", err)
				}
				if first {
					first = false
				} else {
					fmt.Print(",\n")
				}
				_, err = os.Stdout.Write(out)
				if err != nil {
					return fmt.Errorf("failed to write to output: %w", err)
				}
			}
			if !first {
				fmt.Println()
			}
			fmt.Println("]")
		} else if listLong {
			for _, remote := range remotes {
				fmt.Printf("%-*s %-*s %s\n", maxName+1, remote.Name+":", maxType, remote.Type, remote.Description)
			}
		} else {
			for _, remote := range remotes {
				fmt.Printf("%s:\n", remote.Name)
			}
		}
		return nil
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/obscure/obscure.go
cmd/obscure/obscure.go
// Package obscure provides the obscure command.
package obscure

import (
	"bufio"
	"fmt"
	"os"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/spf13/cobra"
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
	Use:   "obscure password",
	Short: `Obscure password for use in the rclone config file.`,
	Long: `In the rclone config file, human-readable passwords are obscured. Obscuring them is done by encrypting them and writing them out in base64. This is **not** a secure way of encrypting these passwords as rclone can decrypt them - it is to prevent "eyedropping" - namely someone seeing a password in the rclone config file by accident. Many equally important things (like access tokens) are not obscured in the config file. However it is very hard to shoulder surf a 64 character hex token. This command can also accept a password through STDIN instead of an argument by passing a hyphen as an argument. This will use the first line of STDIN as the password not including the trailing newline. ` + "```console" + ` echo "secretpassword" | rclone obscure - ` + "```" + ` If there is no data on STDIN to read, rclone obscure will default to obfuscating the hyphen itself. If you want to encrypt the config file then please use config file encryption - see [rclone config](/commands/rclone_config/) for more info.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.36",
	},
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(1, 1, command, args)
		var password string
		fi, _ := os.Stdin.Stat()
		// Read the password from stdin only when "-" was passed AND
		// stdin is not a terminal (ModeCharDevice clear means data is
		// being piped in); otherwise obscure the literal argument.
		if args[0] == "-" && (fi.Mode()&os.ModeCharDevice) == 0 {
			scanner := bufio.NewScanner(os.Stdin)
			if scanner.Scan() {
				password = scanner.Text() // first line only, newline stripped
			}
			if err := scanner.Err(); err != nil {
				return err
			}
		} else {
			password = args[0]
		}
		cmd.Run(false, false, command, func() error {
			obscured := obscure.MustObscure(password)
			fmt.Println(obscured)
			return nil
		})
		return nil
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/size/size.go
cmd/size/size.go
// Package size provides the size command.
package size

import (
	"context"
	"encoding/json"
	"os"
	"strconv"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)

// jsonOutput is set by the --json flag to emit machine-readable output.
var jsonOutput bool

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON", "")
}

var commandDefinition = &cobra.Command{
	Use:   "size remote:path",
	Short: `Prints the total size and number of objects in remote:path.`,
	Long: `Counts objects in the path and calculates the total size. Prints the result to standard output. By default the output is in human-readable format, but shows values in both human-readable format as well as the raw numbers (global option ` + "`--human-readable`" + ` is not considered). Use option ` + "`--json`" + ` to format output as JSON instead. Recurses by default, use ` + "`--max-depth 1`" + ` to stop the recursion. Some backends do not always provide file sizes, see for example [Google Photos](/googlephotos/#size) and [Google Docs](/drive/#limitations-of-google-docs). Rclone will then show a notice in the log indicating how many such files were encountered, and count them in as empty files in the output of the size command.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.23",
		"groups":            "Filter,Listing",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		fsrc := cmd.NewFsSrc(args)
		cmd.Run(false, false, command, func() error {
			var err error
			// results doubles as the JSON output schema
			var results struct {
				Count    int64 `json:"count"`
				Bytes    int64 `json:"bytes"`
				Sizeless int64 `json:"sizeless"`
			}
			results.Count, results.Bytes, results.Sizeless, err = operations.Count(context.Background(), fsrc)
			if err != nil {
				return err
			}
			if results.Sizeless > 0 {
				fs.Logf(fsrc, "Size may be underestimated due to %d objects with unknown size", results.Sizeless)
			}
			if jsonOutput {
				return json.NewEncoder(os.Stdout).Encode(results)
			}
			// Only print the raw count in brackets when it differs
			// from the human-readable suffix form.
			count := strconv.FormatInt(results.Count, 10)
			countSuffix := fs.CountSuffix(results.Count).String()
			if count == countSuffix {
				operations.SyncPrintf("Total objects: %s\n", count)
			} else {
				operations.SyncPrintf("Total objects: %s (%s)\n", countSuffix, count)
			}
			operations.SyncPrintf("Total size: %s (%d Byte)\n", fs.SizeSuffix(results.Bytes).ByteUnit(), results.Bytes)
			if results.Sizeless > 0 {
				operations.SyncPrintf("Total objects with unknown size: %s (%d)\n", fs.CountSuffix(results.Sizeless), results.Sizeless)
			}
			return nil
		})
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/tree/tree.go
cmd/tree/tree.go
// Package tree provides the tree command.
package tree

import (
	"context"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/a8m/tree"
	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/dirtree"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/terminal"
	"github.com/spf13/cobra"
)

var (
	opts        tree.Options // options passed through to the a8m/tree library
	outFileName string       // --output: write to file instead of stdout
	noReport    bool         // --noreport: suppress the trailing count line
	sort        string       // --sort: named sort selection
	enc         = encoder.OS // encoder used to translate names for display
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	// List
	flags.BoolVarP(cmdFlags, &opts.All, "all", "a", false, "All files are listed (list . files too)", "")
	flags.BoolVarP(cmdFlags, &opts.DirsOnly, "dirs-only", "d", false, "List directories only", "")
	flags.BoolVarP(cmdFlags, &opts.FullPath, "full-path", "", false, "Print the full path prefix for each file", "")
	//flags.BoolVarP(cmdFlags, &opts.IgnoreCase, "ignore-case", "", false, "Ignore case when pattern matching", "")
	flags.BoolVarP(cmdFlags, &noReport, "noreport", "", false, "Turn off file/directory count at end of tree listing", "")
	// flags.BoolVarP(cmdFlags, &opts.FollowLink, "follow", "l", false, "Follow symbolic links like directories","")
	flags.IntVarP(cmdFlags, &opts.DeepLevel, "level", "", 0, "Descend only level directories deep", "")
	// flags.StringVarP(cmdFlags, &opts.Pattern, "pattern", "P", "", "List only those files that match the pattern given")
	// flags.StringVarP(cmdFlags, &opts.IPattern, "exclude", "", "", "Do not list files that match the given pattern")
	flags.StringVarP(cmdFlags, &outFileName, "output", "o", "", "Output to file instead of stdout", "")
	// Files
	flags.BoolVarP(cmdFlags, &opts.ByteSize, "size", "s", false, "Print the size in bytes of each file.", "")
	flags.BoolVarP(cmdFlags, &opts.FileMode, "protections", "p", false, "Print the protections for each file.", "")
	// flags.BoolVarP(cmdFlags, &opts.ShowUid, "uid", "", false, "Displays file owner or UID number.")
	// flags.BoolVarP(cmdFlags, &opts.ShowGid, "gid", "", false, "Displays file group owner or GID number.")
	flags.BoolVarP(cmdFlags, &opts.Quotes, "quote", "Q", false, "Quote filenames with double quotes.", "")
	flags.BoolVarP(cmdFlags, &opts.LastMod, "modtime", "D", false, "Print the date of last modification.", "")
	// flags.BoolVarP(cmdFlags, &opts.Inodes, "inodes", "", false, "Print inode number of each file.")
	// flags.BoolVarP(cmdFlags, &opts.Device, "device", "", false, "Print device ID number to which each file belongs.")
	// Sort
	flags.BoolVarP(cmdFlags, &opts.NoSort, "unsorted", "U", false, "Leave files unsorted", "")
	flags.BoolVarP(cmdFlags, &opts.VerSort, "version", "", false, "Sort files alphanumerically by version", "")
	flags.BoolVarP(cmdFlags, &opts.ModSort, "sort-modtime", "t", false, "Sort files by last modification time", "")
	flags.BoolVarP(cmdFlags, &opts.CTimeSort, "sort-ctime", "", false, "Sort files by last status change time", "")
	flags.BoolVarP(cmdFlags, &opts.ReverSort, "sort-reverse", "r", false, "Reverse the order of the sort", "")
	flags.BoolVarP(cmdFlags, &opts.DirSort, "dirsfirst", "", false, "List directories before files (-U disables)", "")
	flags.StringVarP(cmdFlags, &sort, "sort", "", "", "Select sort: name,version,size,mtime,ctime", "")
	// Graphics
	flags.BoolVarP(cmdFlags, &opts.NoIndent, "noindent", "", false, "Don't print indentation lines", "")
}

var commandDefinition = &cobra.Command{
	Use:   "tree remote:path",
	Short: `List the contents of the remote in a tree like fashion.`,
	Long: `Lists the contents of a remote in a similar way to the unix tree command. For example ` + "```text" + ` $ rclone tree remote:path / ├── file1 ├── file2 ├── file3 └── subdir ├── file4 └── file5 1 directories, 5 files ` + "```" + ` You can use any of the filtering options with the tree command (e.g. ` + "`--include` and `--exclude`" + `. You can also use ` + "`--fast-list`" + `. The tree command has many options for controlling the listing which are compatible with the tree command, for example you can include file sizes with ` + "`--size`" + `. Note that not all of them have short options as they conflict with rclone's short options. For a more interactive navigation of the remote see the [ncdu](/commands/rclone_ncdu/) command.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.38",
		"groups":            "Filter,Listing",
	},
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(1, 1, command, args)
		fsrc := cmd.NewFsSrc(args)
		ci := fs.GetConfig(context.Background())
		var outFile io.Writer
		if outFileName != "" {
			var err error
			outFile, err = os.Create(outFileName)
			if err != nil {
				return fmt.Errorf("failed to create output file: %w", err)
			}
			opts.Colorize = false
		} else {
			// Writing to the terminal: enable color output
			terminal.Start()
			outFile = terminal.Out
			opts.Colorize = true
		}
		// Map the --sort name onto the library's individual sort flags
		opts.VerSort = opts.VerSort || sort == "version"
		opts.ModSort = opts.ModSort || sort == "mtime"
		opts.CTimeSort = opts.CTimeSort || sort == "ctime"
		opts.NameSort = sort == "name"
		opts.SizeSort = sort == "size"
		opts.UnitSize = ci.HumanReadable
		if opts.DeepLevel == 0 {
			opts.DeepLevel = ci.MaxDepth
		}
		cmd.Run(false, false, command, func() error {
			return Tree(fsrc, outFile, &opts)
		})
		return nil
	},
}

// Tree lists fsrc to outFile using the Options passed in
func Tree(fsrc fs.Fs, outFile io.Writer, opts *tree.Options) error {
	dirs, err := walk.NewDirTree(context.Background(), fsrc, "", false, opts.DeepLevel)
	if err != nil {
		return err
	}
	opts.Fs = NewFs(dirs)
	opts.OutFile = outFile
	inf := tree.New("/")
	var nd, nf int
	if d, f := inf.Visit(opts); f != 0 {
		nd, nf = nd+d, nf+f
	}
	inf.Print(opts)
	// Print footer report
	if !noReport {
		footer := fmt.Sprintf("\n%d directories", nd)
		if !opts.DirsOnly {
			footer += fmt.Sprintf(", %d files", nf)
		}
		_, _ = fmt.Fprintln(outFile, footer)
	}
	return nil
}

// FileInfo maps an fs.DirEntry into an os.FileInfo
type FileInfo struct {
	entry fs.DirEntry
}

// Name is base name of the file
func (to *FileInfo) Name() string {
	return enc.FromStandardName(path.Base(to.entry.Remote()))
}

// Size in bytes for regular files; system-dependent for others
func (to *FileInfo) Size() int64 {
	return to.entry.Size()
}

// Mode is file mode bits
// Remotes have no permission bits, so fixed modes are reported.
func (to *FileInfo) Mode() os.FileMode {
	if to.IsDir() {
		return os.FileMode(0777)
	}
	return os.FileMode(0666)
}

// ModTime is modification time
func (to *FileInfo) ModTime() time.Time {
	return to.entry.ModTime(context.Background())
}

// IsDir is abbreviation for Mode().IsDir()
func (to *FileInfo) IsDir() bool {
	_, ok := to.entry.(fs.Directory)
	return ok
}

// Sys is underlying data source (can return nil)
func (to *FileInfo) Sys() any {
	return nil
}

// String returns the full path
func (to *FileInfo) String() string {
	return filepath.FromSlash(enc.FromStandardPath(to.entry.Remote()))
}

// Fs maps an fs.Fs into a tree.Fs
type Fs dirtree.DirTree

// NewFs creates a new tree
func NewFs(dirs dirtree.DirTree) Fs {
	return Fs(dirs)
}

// Stat returns info about the file
func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) {
	defer log.Trace(nil, "filePath=%q", filePath)("fi=%+v, err=%v", &fi, &err)
	// Normalise to a slash-separated, standard-encoded, rootless path
	filePath = filepath.ToSlash(filePath)
	filePath = enc.ToStandardPath(filePath)
	filePath = strings.TrimLeft(filePath, "/")
	if filePath == "" {
		// The root is synthesised - it is not in the directory cache
		return &FileInfo{fs.NewDir("", time.Now())}, nil
	}
	_, entry := dirtree.DirTree(dirs).Find(filePath)
	if entry == nil {
		return nil, fmt.Errorf("couldn't find %q in directory cache", filePath)
	}
	return &FileInfo{entry}, nil
}

// ReadDir returns info about the directory and fills up the directory cache
func (dirs Fs) ReadDir(dir string) (names []string, err error) {
	defer log.Trace(nil, "dir=%s", dir)("names=%+v, err=%v", &names, &err)
	// Normalise to a slash-separated, standard-encoded, rootless path
	dir = filepath.ToSlash(dir)
	dir = enc.ToStandardPath(dir)
	dir = strings.TrimLeft(dir, "/")
	entries, ok := dirs[dir]
	if !ok {
		return nil, fmt.Errorf("couldn't find directory %q", dir)
	}
	for _, entry := range entries {
		names = append(names, enc.FromStandardName(path.Base(entry.Remote())))
	}
	return
}

// check interfaces
var (
	_ tree.Fs     = (*Fs)(nil)
	_ os.FileInfo = (*FileInfo)(nil)
)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/tree/tree_test.go
cmd/tree/tree_test.go
package tree import ( "bytes" "context" "testing" "github.com/a8m/tree" _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestTree(t *testing.T) { fstest.Initialise() buf := new(bytes.Buffer) f, err := fs.NewFs(context.Background(), "testfiles") require.NoError(t, err) err = Tree(f, buf, new(tree.Options)) require.NoError(t, err) assert.Equal(t, `/ ├── file1 ├── file2 ├── file3 └── subdir ├── file4 └── file5 1 directories, 5 files `, buf.String()) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/reveal/reveal.go
cmd/reveal/reveal.go
// Package reveal provides the reveal command. package reveal import ( "fmt" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs/config/obscure" "github.com/spf13/cobra" ) func init() { cmd.Root.AddCommand(commandDefinition) } var commandDefinition = &cobra.Command{ Use: "reveal password", Short: `Reveal obscured password from rclone.conf`, Annotations: map[string]string{ "versionIntroduced": "v1.43", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) cmd.Run(false, false, command, func() error { revealed, err := obscure.Reveal(args[0]) if err != nil { return err } fmt.Println(revealed) return nil }) }, Hidden: true, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/genautocomplete/genautocomplete_fish.go
cmd/genautocomplete/genautocomplete_fish.go
package genautocomplete import ( "fmt" "os" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/spf13/cobra" ) func init() { completionDefinition.AddCommand(fishCommandDefinition) } var fishCommandDefinition = &cobra.Command{ Use: "fish [output_file]", Short: `Output fish completion script for rclone.`, Long: `Generates a fish autocompletion script for rclone. This writes to /etc/fish/completions/rclone.fish by default so will probably need to be run with sudo or as root, e.g. ` + "```console" + ` sudo rclone completion fish ` + "```" + ` Logout and login again to use the autocompletion scripts, or source them directly ` + "```console" + ` . /etc/fish/completions/rclone.fish ` + "```" + ` If you supply a command line argument the script will be written there. If output_file is "-", then the output will be written to stdout.`, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(0, 1, command, args) out := "/etc/fish/completions/rclone.fish" if len(args) > 0 { if args[0] == "-" { err := cmd.Root.GenFishCompletion(os.Stdout, true) if err != nil { fs.Fatal(nil, fmt.Sprint(err)) } return } out = args[0] } err := cmd.Root.GenFishCompletionFile(out, true) if err != nil { fs.Fatal(nil, fmt.Sprint(err)) } }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/genautocomplete/genautocomplete_powershell.go
cmd/genautocomplete/genautocomplete_powershell.go
package genautocomplete import ( "fmt" "os" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/spf13/cobra" ) func init() { completionDefinition.AddCommand(powershellCommandDefinition) } var powershellCommandDefinition = &cobra.Command{ Use: "powershell [output_file]", Short: `Output powershell completion script for rclone.`, Long: `Generate the autocompletion script for powershell. To load completions in your current shell session: ` + "```console" + ` rclone completion powershell | Out-String | Invoke-Expression ` + "```" + ` To load completions for every new session, add the output of the above command to your powershell profile. If output_file is "-" or missing, then the output will be written to stdout.`, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(0, 1, command, args) if len(args) == 0 || (len(args) > 0 && args[0] == "-") { err := cmd.Root.GenPowerShellCompletion(os.Stdout) if err != nil { fs.Fatal(nil, fmt.Sprint(err)) } return } err := cmd.Root.GenPowerShellCompletionFile(args[0]) if err != nil { fs.Fatal(nil, fmt.Sprint(err)) } }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/genautocomplete/genautocomplete_bash.go
cmd/genautocomplete/genautocomplete_bash.go
package genautocomplete import ( "fmt" "os" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/spf13/cobra" ) func init() { completionDefinition.AddCommand(bashCommandDefinition) } var bashCommandDefinition = &cobra.Command{ Use: "bash [output_file]", Short: `Output bash completion script for rclone.`, Long: `Generates a bash shell autocompletion script for rclone. By default, when run without any arguments, ` + "```console" + ` rclone completion bash ` + "```" + ` the generated script will be written to ` + "```console" + ` /etc/bash_completion.d/rclone ` + "```" + ` and so rclone will probably need to be run as root, or with sudo. If you supply a path to a file as the command line argument, then the generated script will be written to that file, in which case you should not need root privileges. If output_file is "-", then the output will be written to stdout. If you have installed the script into the default location, you can logout and login again to use the autocompletion script. Alternatively, you can source the script directly ` + "```console" + ` . /path/to/my_bash_completion_scripts/rclone ` + "```" + ` and the autocompletion functionality will be added to your current shell.`, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(0, 1, command, args) out := "/etc/bash_completion.d/rclone" if len(args) > 0 { if args[0] == "-" { err := cmd.Root.GenBashCompletionV2(os.Stdout, false) if err != nil { fs.Fatal(nil, fmt.Sprint(err)) } return } out = args[0] } err := cmd.Root.GenBashCompletionFileV2(out, false) if err != nil { fs.Fatal(nil, fmt.Sprint(err)) } }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/genautocomplete/genautocomplete_zsh.go
cmd/genautocomplete/genautocomplete_zsh.go
package genautocomplete import ( "fmt" "os" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/spf13/cobra" ) func init() { completionDefinition.AddCommand(zshCommandDefinition) } var zshCommandDefinition = &cobra.Command{ Use: "zsh [output_file]", Short: `Output zsh completion script for rclone.`, Long: `Generates a zsh autocompletion script for rclone. This writes to /usr/share/zsh/vendor-completions/_rclone by default so will probably need to be run with sudo or as root, e.g. ` + "```console" + ` sudo rclone completion zsh ` + "```" + ` Logout and login again to use the autocompletion scripts, or source them directly ` + "```console" + ` autoload -U compinit && compinit ` + "```" + ` If you supply a command line argument the script will be written there. If output_file is "-", then the output will be written to stdout.`, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(0, 1, command, args) out := "/usr/share/zsh/vendor-completions/_rclone" if len(args) > 0 { if args[0] == "-" { err := cmd.Root.GenZshCompletion(os.Stdout) if err != nil { fs.Fatal(nil, fmt.Sprint(err)) } return } out = args[0] } outFile, err := os.Create(out) if err != nil { fs.Fatal(nil, fmt.Sprint(err)) } defer func() { _ = outFile.Close() }() err = cmd.Root.GenZshCompletion(outFile) if err != nil { fs.Fatal(nil, fmt.Sprint(err)) } }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/genautocomplete/genautocomplete_test.go
cmd/genautocomplete/genautocomplete_test.go
package genautocomplete import ( "os" "testing" "github.com/stretchr/testify/assert" ) func TestCompletionBash(t *testing.T) { tempFile, err := os.CreateTemp("", "completion_bash") assert.NoError(t, err) defer func() { _ = tempFile.Close() _ = os.Remove(tempFile.Name()) }() bashCommandDefinition.Run(bashCommandDefinition, []string{tempFile.Name()}) bs, err := os.ReadFile(tempFile.Name()) assert.NoError(t, err) assert.NotEmpty(t, string(bs)) } func TestCompletionBashStdout(t *testing.T) { originalStdout := os.Stdout tempFile, err := os.CreateTemp("", "completion_zsh") assert.NoError(t, err) defer func() { _ = tempFile.Close() _ = os.Remove(tempFile.Name()) }() os.Stdout = tempFile defer func() { os.Stdout = originalStdout }() bashCommandDefinition.Run(bashCommandDefinition, []string{"-"}) output, err := os.ReadFile(tempFile.Name()) assert.NoError(t, err) assert.NotEmpty(t, string(output)) } func TestCompletionZsh(t *testing.T) { tempFile, err := os.CreateTemp("", "completion_zsh") assert.NoError(t, err) defer func() { _ = tempFile.Close() _ = os.Remove(tempFile.Name()) }() zshCommandDefinition.Run(zshCommandDefinition, []string{tempFile.Name()}) bs, err := os.ReadFile(tempFile.Name()) assert.NoError(t, err) assert.NotEmpty(t, string(bs)) } func TestCompletionZshStdout(t *testing.T) { originalStdout := os.Stdout tempFile, err := os.CreateTemp("", "completion_zsh") assert.NoError(t, err) defer func() { _ = tempFile.Close() _ = os.Remove(tempFile.Name()) }() os.Stdout = tempFile defer func() { os.Stdout = originalStdout }() zshCommandDefinition.Run(zshCommandDefinition, []string{"-"}) output, err := os.ReadFile(tempFile.Name()) assert.NoError(t, err) assert.NotEmpty(t, string(output)) } func TestCompletionFish(t *testing.T) { tempFile, err := os.CreateTemp("", "completion_fish") assert.NoError(t, err) defer func() { _ = tempFile.Close() _ = os.Remove(tempFile.Name()) }() fishCommandDefinition.Run(fishCommandDefinition, []string{tempFile.Name()}) bs, err := 
os.ReadFile(tempFile.Name()) assert.NoError(t, err) assert.NotEmpty(t, string(bs)) } func TestCompletionFishStdout(t *testing.T) { originalStdout := os.Stdout tempFile, err := os.CreateTemp("", "completion_zsh") assert.NoError(t, err) defer func() { _ = tempFile.Close() _ = os.Remove(tempFile.Name()) }() os.Stdout = tempFile defer func() { os.Stdout = originalStdout }() fishCommandDefinition.Run(fishCommandDefinition, []string{"-"}) output, err := os.ReadFile(tempFile.Name()) assert.NoError(t, err) assert.NotEmpty(t, string(output)) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/genautocomplete/genautocomplete.go
cmd/genautocomplete/genautocomplete.go
// Package genautocomplete provides the completion command. package genautocomplete import ( "github.com/rclone/rclone/cmd" "github.com/spf13/cobra" ) func init() { cmd.Root.AddCommand(completionDefinition) } var completionDefinition = &cobra.Command{ Use: "completion [shell]", Short: `Output completion script for a given shell.`, Long: `Generates a shell completion script for rclone. Run with ` + "`--help`" + ` to list the supported shells.`, Annotations: map[string]string{ "versionIntroduced": "v1.33", }, Aliases: []string{"genautocomplete"}, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/sync/sync.go
cmd/sync/sync.go
// Package sync provides the sync command. package sync import ( "context" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations/operationsflags" "github.com/rclone/rclone/fs/sync" "github.com/spf13/cobra" ) var ( createEmptySrcDirs = false loggerOpt = operations.LoggerOpt{} loggerFlagsOpt = operationsflags.AddLoggerFlagsOptions{} ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after sync", "") operationsflags.AddLoggerFlags(cmdFlags, &loggerOpt, &loggerFlagsOpt) loggerOpt.LoggerFn = operations.NewDefaultLoggerFn(&loggerOpt) } var commandDefinition = &cobra.Command{ Use: "sync source:path dest:path", Short: `Make source and dest identical, modifying destination only.`, // Warning! "|" will be replaced by backticks below Long: strings.ReplaceAll(`Sync the source to the destination, changing the destination only. Doesn't transfer files that are identical on source and destination, testing by size and modification time or MD5SUM. Destination is updated to match source, including deleting files if necessary (except duplicate objects, see below). If you don't want to delete files from destination, use the [copy](/commands/rclone_copy/) command instead. **Important**: Since this can cause data loss, test first with the |--dry-run| or the |--interactive|/|i| flag. |||sh rclone sync --interactive SOURCE remote:DESTINATION ||| Files in the destination won't be deleted if there were any errors at any point. Duplicate objects (files with the same name, on those providers that support it) are not yet handled. Files that are excluded won't be deleted unless |--delete-excluded| is used. Symlinks won't be transferred or deleted from local file systems unless |--links| is used. 
It is always the contents of the directory that is synced, not the directory itself. So when source:path is a directory, it's the contents of source:path that are copied, not the directory name and contents. See extended explanation in the [copy](/commands/rclone_copy/) command if unsure. If dest:path doesn't exist, it is created and the source:path contents go there. It is not possible to sync overlapping remotes. However, you may exclude the destination from the sync with a filter rule or by putting an exclude-if-present file inside the destination directory and sync to a destination that is inside the source directory. Rclone will sync the modification times of files and directories if the backend supports it. If metadata syncing is required then use the |--metadata| flag. Note that the modification time and metadata for the root directory will **not** be synced. See <https://github.com/rclone/rclone/issues/7652> for more info. **Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics **Note**: Use the |rclone dedupe| command to deal with "Duplicate object/directory found in source/destination - ignoring" errors. See [this forum post](https://forum.rclone.org/t/sync-not-clearing-duplicates/14372) for more info. `, "|", "`") + operationsflags.Help(), Annotations: map[string]string{ "groups": "Sync,Copy,Filter,Listing,Important", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(2, 2, command, args) fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args) cmd.Run(true, true, command, func() error { ctx := context.Background() close, err := operationsflags.ConfigureLoggers(ctx, fdst, command, &loggerOpt, loggerFlagsOpt) if err != nil { return err } defer close() if loggerFlagsOpt.AnySet() { ctx = operations.WithSyncLogger(ctx, loggerOpt) } if srcFileName == "" { return sync.Sync(ctx, fdst, fsrc, createEmptySrcDirs) } return operations.CopyFile(ctx, fdst, fsrc, srcFileName, srcFileName) }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/hashsum/hashsum.go
cmd/hashsum/hashsum.go
// Package hashsum provides the hashsum command. package hashsum import ( "context" "fmt" "os" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" "github.com/spf13/pflag" ) // Global hashsum flags for reuse in hashsum, md5sum, sha1sum var ( OutputBase64 = false DownloadFlag = false HashsumOutfile = "" ChecksumFile = "" ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() AddHashsumFlags(cmdFlags) } // AddHashsumFlags is a convenience function to add the command flags OutputBase64 and DownloadFlag to hashsum, md5sum, sha1sum func AddHashsumFlags(cmdFlags *pflag.FlagSet) { flags.BoolVarP(cmdFlags, &OutputBase64, "base64", "", OutputBase64, "Output base64 encoded hashsum", "") flags.StringVarP(cmdFlags, &HashsumOutfile, "output-file", "", HashsumOutfile, "Output hashsums to a file rather than the terminal", "") flags.StringVarP(cmdFlags, &ChecksumFile, "checkfile", "C", ChecksumFile, "Validate hashes against a given SUM file instead of printing them", "") flags.BoolVarP(cmdFlags, &DownloadFlag, "download", "", DownloadFlag, "Download the file and hash it locally; if this flag is not specified, the hash is requested from the remote", "") } // GetHashsumOutput opens and closes the output file when using the output-file flag func GetHashsumOutput(filename string) (out *os.File, close func(), err error) { out, err = os.Create(filename) if err != nil { err = fmt.Errorf("failed to open output file %v: %w", filename, err) return nil, nil, err } close = func() { err := out.Close() if err != nil { fs.Errorf(nil, "Failed to close output file %v: %v", filename, err) } } return out, close, nil } // CreateFromStdinArg checks args and produces hashsum from standard input if it is requested func CreateFromStdinArg(ht hash.Type, args []string, startArg int) (bool, error) { var stdinArg bool if 
len(args) == startArg { // Missing arg: Always read from stdin stdinArg = true } else if len(args) > startArg && args[startArg] == "-" { // Special arg: Read from stdin only if there is data available if fi, _ := os.Stdin.Stat(); fi.Mode()&os.ModeCharDevice == 0 { stdinArg = true } } if !stdinArg { return false, nil } if HashsumOutfile == "" { return true, operations.HashSumStream(ht, OutputBase64, os.Stdin, nil) } output, close, err := GetHashsumOutput(HashsumOutfile) if err != nil { return true, err } defer close() return true, operations.HashSumStream(ht, OutputBase64, os.Stdin, output) } var commandDefinition = &cobra.Command{ Use: "hashsum [<hash> remote:path]", Short: `Produces a hashsum file for all the objects in the path.`, Long: `Produces a hash file for all the objects in the path using the hash named. The output is in the same format as the standard md5sum/sha1sum tool. By default, the hash is requested from the remote. If the hash is not supported by the remote, no hash will be returned. With the download flag, the file will be downloaded from the remote and hashed locally enabling any hash for any remote. For the MD5 and SHA1 algorithms there are also dedicated commands, [md5sum](/commands/rclone_md5sum/) and [sha1sum](/commands/rclone_sha1sum/). This command can also hash data received on standard input (stdin), by not passing a remote:path, or by passing a hyphen as remote:path when there is data to read (if not, the hyphen will be treated literally, as a relative path). Run without a hash to see the list of all supported hashes, e.g. 
` + "```console" + ` $ rclone hashsum ` + hash.HelpString(0) + "```" + ` Then ` + "```console" + ` rclone hashsum MD5 remote:path ` + "```" + ` Note that hash names are case insensitive and values are output in lower case.`, Annotations: map[string]string{ "versionIntroduced": "v1.41", "groups": "Filter,Listing", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(0, 2, command, args) if len(args) == 0 { fmt.Print(hash.HelpString(0)) return nil } var ht hash.Type err := ht.Set(args[0]) if err != nil { fmt.Println(hash.HelpString(0)) return err } if found, err := CreateFromStdinArg(ht, args, 1); found { return err } fsrc := cmd.NewFsSrc(args[1:]) cmd.Run(false, false, command, func() error { if ChecksumFile != "" { fsum, sumFile := cmd.NewFsFile(ChecksumFile) return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, ht, nil, DownloadFlag) } if HashsumOutfile == "" { return operations.HashLister(context.Background(), ht, OutputBase64, DownloadFlag, fsrc, nil) } output, close, err := GetHashsumOutput(HashsumOutfile) if err != nil { return err } defer close() return operations.HashLister(context.Background(), ht, OutputBase64, DownloadFlag, fsrc, output) }) return nil }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/dropbox/dropbox_internal_test.go
backend/dropbox/dropbox_internal_test.go
package dropbox import ( "context" "io" "strings" "testing" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files" "github.com/rclone/rclone/fstest/fstests" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestInternalCheckPathLength(t *testing.T) { rep := func(n int, r rune) (out string) { rs := make([]rune, n) for i := range rs { rs[i] = r } return string(rs) } for _, test := range []struct { in string ok bool }{ {in: "", ok: true}, {in: rep(maxFileNameLength, 'a'), ok: true}, {in: rep(maxFileNameLength+1, 'a'), ok: false}, {in: rep(maxFileNameLength, '£'), ok: true}, {in: rep(maxFileNameLength+1, '£'), ok: false}, {in: rep(maxFileNameLength, '☺'), ok: true}, {in: rep(maxFileNameLength+1, '☺'), ok: false}, {in: rep(maxFileNameLength, '你'), ok: true}, {in: rep(maxFileNameLength+1, '你'), ok: false}, {in: "/ok/ok", ok: true}, {in: "/ok/" + rep(maxFileNameLength, 'a') + "/ok", ok: true}, {in: "/ok/" + rep(maxFileNameLength+1, 'a') + "/ok", ok: false}, {in: "/ok/" + rep(maxFileNameLength, '£') + "/ok", ok: true}, {in: "/ok/" + rep(maxFileNameLength+1, '£') + "/ok", ok: false}, {in: "/ok/" + rep(maxFileNameLength, '☺') + "/ok", ok: true}, {in: "/ok/" + rep(maxFileNameLength+1, '☺') + "/ok", ok: false}, {in: "/ok/" + rep(maxFileNameLength, '你') + "/ok", ok: true}, {in: "/ok/" + rep(maxFileNameLength+1, '你') + "/ok", ok: false}, } { err := checkPathLength(test.in) assert.Equal(t, test.ok, err == nil, test.in) } } func (f *Fs) importPaperForTest(t *testing.T) { content := `# test doc Lorem ipsum __dolor__ sit amet [link](http://google.com) ` arg := files.PaperCreateArg{ Path: f.slashRootSlash + "export.paper", ImportFormat: &files.ImportFormat{Tagged: dropbox.Tagged{Tag: files.ImportFormatMarkdown}}, } var err error err = f.pacer.Call(func() (bool, error) { reader := strings.NewReader(content) _, err = f.srv.PaperCreate(&arg, reader) return shouldRetry(context.Background(), 
err) }) require.NoError(t, err) } func (f *Fs) InternalTestPaperExport(t *testing.T) { ctx := context.Background() f.importPaperForTest(t) f.exportExts = []exportExtension{"html"} obj, err := f.NewObject(ctx, "export.html") require.NoError(t, err) rc, err := obj.Open(ctx) require.NoError(t, err) defer func() { require.NoError(t, rc.Close()) }() buf, err := io.ReadAll(rc) require.NoError(t, err) text := string(buf) for _, excerpt := range []string{ "Lorem ipsum", "<b>dolor</b>", `href="http://google.com"`, } { require.Contains(t, text, excerpt) } } func (f *Fs) InternalTest(t *testing.T) { t.Run("PaperExport", f.InternalTestPaperExport) } var _ fstests.InternalTester = (*Fs)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/dropbox/dropbox_test.go
backend/dropbox/dropbox_test.go
// Test Dropbox filesystem interface package dropbox import ( "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestDropbox:", NilObject: (*Object)(nil), ChunkedUpload: fstests.ChunkedUploadConfig{ MaxChunkSize: maxChunkSize, }, }) } func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadChunkSize(cs) } var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/dropbox/batcher.go
backend/dropbox/batcher.go
// This file contains the implementation of the sync batcher for uploads // // Dropbox rules say you can start as many batches as you want, but // you may only have one batch being committed and must wait for the // batch to be finished before committing another. package dropbox import ( "context" "fmt" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files" ) // finishBatch commits the batch, returning a batch status to poll or maybe complete func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) { var arg = &files.UploadSessionFinishBatchArg{ Entries: items, } err = f.pacer.Call(func() (bool, error) { complete, err = f.srv.UploadSessionFinishBatchV2(arg) if retry, err := shouldRetryExclude(ctx, err); !retry { return retry, err } // after the first chunk is uploaded, we retry everything except the excluded errors return err != nil, err }) if err != nil { return nil, fmt.Errorf("batch commit failed: %w", err) } return complete, nil } // Called by the batcher to commit a batch func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []*files.FileMetadata, errors []error) (err error) { // finalise the batch getting either a result or a job id to poll complete, err := f.finishBatch(ctx, items) if err != nil { return err } // Check we got the right number of entries entries := complete.Entries if len(entries) != len(results) { return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries)) } // Format results for return for i := range results { item := entries[i] if item.Tag == "success" { results[i] = item.Success } else { errorTag := item.Tag if item.Failure != nil { errorTag = item.Failure.Tag if item.Failure.LookupFailed != nil { errorTag += "/" + item.Failure.LookupFailed.Tag } if item.Failure.Path != nil { errorTag += "/" + item.Failure.Path.Tag } if item.Failure.PropertiesError != nil { errorTag += "/" + 
item.Failure.PropertiesError.Tag } } errors[i] = fmt.Errorf("upload failed: %s", errorTag) } } return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/dropbox/dropbox.go
backend/dropbox/dropbox.go
// Package dropbox provides an interface to Dropbox object storage package dropbox // FIXME dropbox for business would be quite easy to add /* The Case folding of PathDisplay problem From the docs: path_display String. The cased path to be used for display purposes only. In rare instances the casing will not correctly match the user's filesystem, but this behavior will match the path provided in the Core API v1, and at least the last path component will have the correct casing. Changes to only the casing of paths won't be returned by list_folder/continue. This field will be null if the file or folder is not mounted. This field is optional. We solve this by not implementing the ListR interface. The dropbox remote will recurse directory by directory only using the last element of path_display and all will be well. */ import ( "context" "errors" "fmt" "io" "path" "regexp" "strings" "time" "unicode/utf8" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/auth" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/common" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/sharing" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/team" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users" "github.com/rclone/rclone/backend/dropbox/dbhash" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/batcher" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/readers" "golang.org/x/oauth2" ) // Constants 
const ( rcloneClientID = "5jcck7diasz0rqy" rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g" defaultMinSleep = fs.Duration(10 * time.Millisecond) maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential // Upload chunk size - setting too small makes uploads slow. // Chunks are buffered into memory for retries. // // Speed vs chunk size uploading a 1 GiB file on 2017-11-22 // // Chunk Size MiB, Speed MiB/s, % of max // 1 1.364 11% // 2 2.443 19% // 4 4.288 33% // 8 6.79 52% // 16 8.916 69% // 24 10.195 79% // 32 10.427 81% // 40 10.96 85% // 48 11.828 91% // 56 11.763 91% // 64 12.047 93% // 96 12.302 95% // 128 12.945 100% // // Choose 48 MiB which is 91% of Maximum speed. rclone by // default does 4 transfers so this should use 4*48 MiB = 192 MiB // by default. defaultChunkSize = 48 * fs.Mebi maxChunkSize = 150 * fs.Mebi // Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing maxFileNameLength = 255 ) type exportAPIFormat string type exportExtension string // dotless var ( // Description of how to auth for this app dropboxConfig = &oauthutil.Config{ Scopes: []string{ "files.metadata.write", "files.content.write", "files.content.read", "sharing.write", "account_info.read", // needed for About // "file_requests.write", // "members.read", // needed for impersonate - but causes app to need to be approved by Dropbox Team Admin during the flow // "team_data.member" }, // Endpoint: oauth2.Endpoint{ // AuthURL: "https://www.dropbox.com/1/oauth2/authorize", // TokenURL: "https://api.dropboxapi.com/1/oauth2/token", // }, AuthURL: dropbox.OAuthEndpoint("").AuthURL, TokenURL: dropbox.OAuthEndpoint("").TokenURL, ClientID: rcloneClientID, ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), RedirectURL: oauthutil.RedirectLocalhostURL, } // A regexp matching path names for files Dropbox ignores // See https://www.dropbox.com/en/help/145 - Ignored files 
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`) // DbHashType is the hash.Type for Dropbox DbHashType hash.Type // Errors errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode")) // Configure the batcher defaultBatcherOptions = batcher.Options{ MaxBatchSize: 1000, DefaultTimeoutSync: 500 * time.Millisecond, DefaultTimeoutAsync: 10 * time.Second, DefaultBatchSizeAsync: 100, } exportKnownAPIFormats = map[exportAPIFormat]exportExtension{ "markdown": "md", "html": "html", } // Populated based on exportKnownAPIFormats exportKnownExtensions = map[exportExtension]exportAPIFormat{} paperExtension = ".paper" paperTemplateExtension = ".papert" ) // Gets an oauth config with the right scopes func getOauthConfig(m configmap.Mapper) *oauthutil.Config { // If not impersonating, use standard scopes if impersonate, _ := m.Get("impersonate"); impersonate == "" { return dropboxConfig } // Make a copy of the config config := *dropboxConfig // Make a copy of the scopes with extra scopes requires appended config.Scopes = append(config.Scopes, "members.read", "team_data.member") return &config } // Register with Fs func init() { DbHashType = hash.RegisterHash("dropbox", "DropboxHash", 64, dbhash.New) fs.Register(&fs.RegInfo{ Name: "dropbox", Description: "Dropbox", NewFs: NewFs, Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { return oauthutil.ConfigOut("", &oauthutil.Options{ OAuth2Config: getOauthConfig(m), NoOffline: true, OAuth2Opts: []oauth2.AuthCodeOption{ oauth2.SetAuthURLParam("token_access_type", "offline"), }, }) }, Options: append(append(oauthutil.SharedOptions, []fs.Option{{ Name: "chunk_size", Help: fmt.Sprintf(`Upload chunk size (< %v). Any files larger than this will be uploaded in chunks of this size. Note that chunks are buffered in memory (one at a time) so rclone can deal with retries. 
Setting this larger will increase the speed slightly (at most 10%% for 128 MiB in tests) at the cost of using more memory. It can be set smaller if you are tight on memory.`, maxChunkSize), Default: defaultChunkSize, Advanced: true, }, { Name: "impersonate", Help: `Impersonate this user when using a business account. Note that if you want to use impersonate, you should make sure this flag is set when running "rclone config" as this will cause rclone to request the "members.read" scope which it won't normally. This is needed to lookup a members email address into the internal ID that dropbox uses in the API. Using the "members.read" scope will require a Dropbox Team Admin to approve during the OAuth flow. You will have to use your own App (setting your own client_id and client_secret) to use this option as currently rclone's default set of permissions doesn't include "members.read". This can be added once v1.55 or later is in use everywhere. `, Default: "", Advanced: true, Sensitive: true, }, { Name: "shared_files", Help: `Instructs rclone to work on individual shared files. In this mode rclone's features are extremely limited - only list (ls, lsl, etc.) operations and read operations (e.g. downloading) are supported in this mode. All other operations will be disabled.`, Default: false, Advanced: true, }, { Name: "shared_folders", Help: `Instructs rclone to work on shared folders. When this flag is used with no path only the List operation is supported and all available shared folders will be listed. If you specify a path the first part will be interpreted as the name of shared folder. Rclone will then try to mount this shared to the root namespace. On success shared folder rclone proceeds normally. The shared folder is now pretty much a normal folder and all normal operations are supported. Note that we don't unmount the shared folder afterwards so the --dropbox-shared-folders can be omitted after the first use of a particular shared folder. 
See also --dropbox-root-namespace for an alternative way to work with shared folders.`, Default: false, Advanced: true, }, { Name: "pacer_min_sleep", Default: defaultMinSleep, Help: "Minimum time to sleep between API calls.", Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // https://www.dropbox.com/help/syncing-uploads/files-not-syncing lists / and \ // as invalid characters. // Testing revealed names with trailing spaces and the DEL character don't work. // Also encode invalid UTF-8 bytes as json doesn't handle them properly. Default: encoder.Base | encoder.EncodeBackSlash | encoder.EncodeDel | encoder.EncodeRightSpace | encoder.EncodeInvalidUtf8, }, { Name: "root_namespace", Help: "Specify a different Dropbox namespace ID to use as the root for all paths.", Default: "", Advanced: true, }, { Name: "export_formats", Help: `Comma separated list of preferred formats for exporting files Certain Dropbox files can only be accessed by exporting them to another format. These include Dropbox Paper documents. For each such file, rclone will choose the first format on this list that Dropbox considers valid. If none is valid, it will choose Dropbox's default format. Known formats include: "html", "md" (markdown)`, Default: fs.CommaSepList{"html", "md"}, Advanced: true, }, { Name: "skip_exports", Help: "Skip exportable files in all listings.\n\nIf given, exportable files practically become invisible to rclone.", Default: false, Advanced: true, }, { Name: "show_all_exports", Default: false, Help: `Show all exportable files in listings. Adding this flag will allow all exportable files to be server side copied. Note that rclone doesn't add extensions to the exportable file names in this mode. Do **not** use this flag when trying to download exportable files - rclone will fail to download them. 
`, Advanced: true, }, }...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...), }) for apiFormat, ext := range exportKnownAPIFormats { exportKnownExtensions[ext] = apiFormat } } // Options defines the configuration for this backend type Options struct { ChunkSize fs.SizeSuffix `config:"chunk_size"` Impersonate string `config:"impersonate"` SharedFiles bool `config:"shared_files"` SharedFolders bool `config:"shared_folders"` BatchMode string `config:"batch_mode"` BatchSize int `config:"batch_size"` BatchTimeout fs.Duration `config:"batch_timeout"` AsyncBatch bool `config:"async_batch"` PacerMinSleep fs.Duration `config:"pacer_min_sleep"` Enc encoder.MultiEncoder `config:"encoding"` RootNsid string `config:"root_namespace"` ExportFormats fs.CommaSepList `config:"export_formats"` SkipExports bool `config:"skip_exports"` ShowAllExports bool `config:"show_all_exports"` } // Fs represents a remote dropbox server type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options ci *fs.ConfigInfo // global config features *fs.Features // optional features srv files.Client // the connection to the dropbox server svc files.Client // the connection to the dropbox server (unauthorized) sharing sharing.Client // as above, but for generating sharing links users users.Client // as above, but for accessing user information team team.Client // for the Teams API slashRoot string // root with "/" prefix, lowercase slashRootSlash string // root with "/" prefix and postfix, lowercase pacer *fs.Pacer // To pace the API calls ns string // The namespace we are using or "" for none batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata] exportExts []exportExtension } type exportType int const ( notExport exportType = iota // a regular file exportHide // should be hidden exportListOnly // listable, but can't export exportExportable // can export ) 
// Object describes a dropbox object // // Dropbox Objects always have full metadata type Object struct { fs *Fs // what this object is part of id string url string remote string // The remote path bytes int64 // size of the object modTime time.Time // time it was last modified hash string // content_hash of the object exportType exportType exportAPIFormat exportAPIFormat } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("Dropbox root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // Some specific errors which should be excluded from retries func shouldRetryExclude(ctx context.Context, err error) (bool, error) { if err == nil { return false, err } if fserrors.ContextError(ctx, &err) { return false, err } // First check for specific errors // // These come back from the SDK in a whole host of different // error types, but there doesn't seem to be a consistent way // of reading the error cause, so here we just check using the // error string which isn't perfect but does the job. errString := err.Error() if strings.Contains(errString, "insufficient_space") { return false, fserrors.FatalError(err) } else if strings.Contains(errString, "malformed_path") { return false, fserrors.NoRetryError(err) } return true, err } // shouldRetry returns a boolean as to whether this err deserves to be // retried. It returns the err as a convenience func shouldRetry(ctx context.Context, err error) (bool, error) { if retry, err := shouldRetryExclude(ctx, err); !retry { return retry, err } // Then handle any official Retry-After header from Dropbox's SDK switch e := err.(type) { case auth.RateLimitAPIError: if e.RateLimitError.RetryAfter > 0 { fs.Logf(nil, "Error %v. 
Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter) err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second) } return true, err } // Keep old behavior for backward compatibility errString := err.Error() if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" { return true, err } return fserrors.ShouldRetry(err), err } func checkUploadChunkSize(cs fs.SizeSuffix) error { const minChunkSize = fs.SizeSuffixBase if cs < minChunkSize { return fmt.Errorf("%s is less than %s", cs, minChunkSize) } if cs > maxChunkSize { return fmt.Errorf("%s is greater than %s", cs, maxChunkSize) } return nil } func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadChunkSize(cs) if err == nil { old, f.opt.ChunkSize = f.opt.ChunkSize, cs } return } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } err = checkUploadChunkSize(opt.ChunkSize) if err != nil { return nil, fmt.Errorf("dropbox: chunk size: %w", err) } // Convert the old token if it exists. 
The old token was just // just a string, the new one is a JSON blob oldToken, ok := m.Get(config.ConfigToken) oldToken = strings.TrimSpace(oldToken) if ok && oldToken != "" && oldToken[0] != '{' { fs.Infof(name, "Converting token to new format") newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken) err := config.SetValueAndSave(name, config.ConfigToken, newToken) if err != nil { return nil, fmt.Errorf("NewFS convert token: %w", err) } } oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m)) if err != nil { return nil, fmt.Errorf("failed to configure dropbox: %w", err) } ci := fs.GetConfig(ctx) f := &Fs{ name: name, opt: *opt, ci: ci, pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } batcherOptions := defaultBatcherOptions batcherOptions.Mode = f.opt.BatchMode batcherOptions.Size = f.opt.BatchSize batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout) f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions) if err != nil { return nil, err } cfg := dropbox.Config{ LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo Client: oAuthClient, // maybe??? 
HeaderGenerator: f.headerGenerator, } for _, e := range opt.ExportFormats { ext := exportExtension(e) if exportKnownExtensions[ext] == "" { return nil, fmt.Errorf("dropbox: unknown export format '%s'", e) } f.exportExts = append(f.exportExts, ext) } // unauthorized config for endpoints that fail with auth ucfg := dropbox.Config{ LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo } // NOTE: needs to be created pre-impersonation so we can look up the impersonated user f.team = team.New(cfg) if opt.Impersonate != "" { user := team.UserSelectorArg{ Email: opt.Impersonate, } user.Tag = "email" members := []*team.UserSelectorArg{&user} args := team.NewMembersGetInfoArgs(members) memberIDs, err := f.team.MembersGetInfo(args) if err != nil { return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err) } if len(memberIDs) == 0 || memberIDs[0].MemberInfo == nil || memberIDs[0].MemberInfo.Profile == nil { return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate) } cfg.AsMemberID = memberIDs[0].MemberInfo.Profile.MemberProfile.TeamMemberId } f.srv = files.New(cfg) f.svc = files.New(ucfg) f.sharing = sharing.New(cfg) f.users = users.New(cfg) f.features = (&fs.Features{ CaseInsensitive: true, ReadMimeType: false, CanHaveEmptyDirectories: true, }) // do not fill features yet if f.opt.SharedFiles { f.setRoot(root) if f.root == "" { return f, nil } _, err := f.findSharedFile(ctx, f.root) f.root = "" if err == nil { return f, fs.ErrorIsFile } return f, nil } if f.opt.SharedFolders { f.setRoot(root) if f.root == "" { return f, nil // our root it empty so we probably want to list shared folders } dir := path.Dir(f.root) if dir == "." 
{ dir = f.root } // root is not empty so we have find the right shared folder if it exists id, err := f.findSharedFolder(ctx, dir) if err != nil { // if we didn't find the specified shared folder we have to bail out here return nil, err } // we found the specified shared folder so let's mount it // this will add it to the users normal root namespace and allows us // to actually perform operations on it using the normal api endpoints. err = f.mountSharedFolder(ctx, id) if err != nil { switch e := err.(type) { case sharing.MountFolderAPIError: if e.EndpointError == nil || (e.EndpointError != nil && e.EndpointError.Tag != sharing.MountFolderErrorAlreadyMounted) { return nil, err } default: return nil, err } // if the mount failed we have to abort here } // if the mount succeeded it's now a normal folder in the users root namespace // we disable shared folder mode and proceed normally f.opt.SharedFolders = false } f.features.Fill(ctx, f) if f.opt.RootNsid != "" { f.ns = f.opt.RootNsid fs.Debugf(f, "Overriding root namespace to %q", f.ns) } else if strings.HasPrefix(root, "/") { // If root starts with / then use the actual root var acc *users.FullAccount err = f.pacer.Call(func() (bool, error) { acc, err = f.users.GetCurrentAccount() return shouldRetry(ctx, err) }) if err != nil { return nil, fmt.Errorf("get current account failed: %w", err) } switch x := acc.RootInfo.(type) { case *common.TeamRootInfo: f.ns = x.RootNamespaceId case *common.UserRootInfo: f.ns = x.RootNamespaceId default: return nil, fmt.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo) } fs.Debugf(f, "Using root namespace %q", f.ns) } f.setRoot(root) // See if the root is actually an object if f.root != "" { _, err = f.getFileMetadata(ctx, f.slashRoot) if err == nil { newRoot := path.Dir(f.root) if newRoot == "." 
{ newRoot = "" } f.setRoot(newRoot) // return an error with an fs which points to the parent return f, fs.ErrorIsFile } } return f, nil } // headerGenerator for dropbox sdk func (f *Fs) headerGenerator(hostType string, namespace string, route string) map[string]string { if f.ns == "" { return map[string]string{} } return map[string]string{ "Dropbox-API-Path-Root": `{".tag": "namespace_id", "namespace_id": "` + f.ns + `"}`, } } // Sets root in f func (f *Fs) setRoot(root string) { f.root = strings.Trim(root, "/") f.slashRoot = "/" + f.root f.slashRootSlash = f.slashRoot if f.root != "" { f.slashRootSlash += "/" } } type getMetadataResult struct { entry files.IsMetadata notFound bool err error } // getMetadata gets the metadata for a file or directory func (f *Fs) getMetadata(ctx context.Context, objPath string) (res getMetadataResult) { res.err = f.pacer.Call(func() (bool, error) { res.entry, res.err = f.srv.GetMetadata(&files.GetMetadataArg{ Path: f.opt.Enc.FromStandardPath(objPath), }) return shouldRetry(ctx, res.err) }) if res.err != nil { switch e := res.err.(type) { case files.GetMetadataAPIError: if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound { res.notFound = true res.err = nil } } } return } // Get metadata such that the result would be exported with the given extension // Return a channel that will eventually receive the metadata func (f *Fs) getMetadataForExt(ctx context.Context, filePath string, wantExportExtension exportExtension) chan getMetadataResult { ch := make(chan getMetadataResult, 1) wantDownloadable := (wantExportExtension == "") go func() { defer close(ch) res := f.getMetadata(ctx, filePath) info, ok := res.entry.(*files.FileMetadata) if !ok { // Can't check anything about file, just return what we have ch <- res return } // Return notFound if downloadability or extension doesn't match if wantDownloadable != info.IsDownloadable { ch <- getMetadataResult{notFound: true} return } 
if !info.IsDownloadable { _, ext := f.chooseExportFormat(info) if ext != wantExportExtension { ch <- getMetadataResult{notFound: true} return } } // Return our real result or error ch <- res }() return ch } // For a given rclone-path, figure out what the Dropbox-path may be, in order of preference. // Multiple paths might be plausible, due to export path munging. func (f *Fs) possibleMetadatas(ctx context.Context, filePath string) (ret []<-chan getMetadataResult) { ret = []<-chan getMetadataResult{} // Prefer an exact match ret = append(ret, f.getMetadataForExt(ctx, filePath, "")) // Check if we're plausibly an export path, otherwise we're done if f.opt.SkipExports || f.opt.ShowAllExports { return } dotted := path.Ext(filePath) if dotted == "" { return } ext := exportExtension(dotted[1:]) if exportKnownExtensions[ext] == "" { return } // We might be an export path! Try all possibilities base := strings.TrimSuffix(filePath, dotted) // `foo.papert.md` will only come from `foo.papert`. Never check something like `foo.papert.paper` if strings.HasSuffix(base, paperTemplateExtension) { ret = append(ret, f.getMetadataForExt(ctx, base, ext)) return } // Otherwise, try both `foo.md` coming from `foo`, or from `foo.paper` ret = append(ret, f.getMetadataForExt(ctx, base, ext)) ret = append(ret, f.getMetadataForExt(ctx, base+paperExtension, ext)) return } // getFileMetadata gets the metadata for a file func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileMetadata, error) { var res getMetadataResult // Try all possible metadatas possibleMetadatas := f.possibleMetadatas(ctx, filePath) for _, ch := range possibleMetadatas { res = <-ch if res.err != nil { return nil, res.err } if !res.notFound { break } } if res.notFound { return nil, fs.ErrorObjectNotFound } fileInfo, ok := res.entry.(*files.FileMetadata) if !ok { if _, ok = res.entry.(*files.FolderMetadata); ok { return nil, fs.ErrorIsDir } return nil, fs.ErrorNotAFile } return fileInfo, nil } // 
getDirMetadata gets the metadata for a directory func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (*files.FolderMetadata, error) { res := f.getMetadata(ctx, dirPath) if res.err != nil { return nil, res.err } if res.notFound { return nil, fs.ErrorDirNotFound } dirInfo, ok := res.entry.(*files.FolderMetadata) if !ok { return nil, fs.ErrorIsFile } return dirInfo, nil } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *files.FileMetadata) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } var err error if info != nil { err = o.setMetadataFromEntry(info) } else { err = o.readEntryAndSetMetadata(ctx) } if err != nil { return nil, err } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { if f.opt.SharedFiles { return f.findSharedFile(ctx, remote) } return f.newObjectWithInfo(ctx, remote, nil) } // listSharedFolders lists all available shared folders mounted and not mounted // we'll need the id later so we have to return them in original format func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) error) (err error) { started := false var res *sharing.ListFoldersResult for { if !started { arg := sharing.ListFoldersArgs{ Limit: 100, } err := f.pacer.Call(func() (bool, error) { res, err = f.sharing.ListFolders(&arg) return shouldRetry(ctx, err) }) if err != nil { return err } started = true } else { arg := sharing.ListFoldersContinueArg{ Cursor: res.Cursor, } err := f.pacer.Call(func() (bool, error) { res, err = f.sharing.ListFoldersContinue(&arg) return shouldRetry(ctx, err) }) if err != nil { return fmt.Errorf("list continue: %w", err) } } for _, entry := range res.Entries { leaf := f.opt.Enc.ToStandardName(entry.Name) d := fs.NewDir(leaf, 
time.Time{}).SetID(entry.SharedFolderId) err = callback(d) if err != nil { return err } } if res.Cursor == "" { break } } return nil } // findSharedFolder find the id for a given shared folder name // somewhat annoyingly there is no endpoint to query a shared folder by it's name // so our only option is to iterate over all shared folders func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err error) { errFoundFile := errors.New("found file") err = f.listSharedFolders(ctx, func(entry fs.DirEntry) error { if entry.(*fs.Dir).Remote() == name { id = entry.(*fs.Dir).ID() return errFoundFile } return nil }) if errors.Is(err, errFoundFile) { return id, nil } else if err != nil { return "", err } return "", fs.ErrorDirNotFound } // mountSharedFolder mount a shared folder to the root namespace func (f *Fs) mountSharedFolder(ctx context.Context, id string) error { arg := sharing.MountFolderArg{ SharedFolderId: id, } err := f.pacer.Call(func() (bool, error) { _, err := f.sharing.MountFolder(&arg) return shouldRetry(ctx, err) }) return err } // listReceivedFiles lists shared the user as access to (note this means individual // files not files contained in shared folders) func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) error) (err error) { started := false var res *sharing.ListFilesResult for { if !started { arg := sharing.ListFilesArg{ Limit: 100, } err := f.pacer.Call(func() (bool, error) { res, err = f.sharing.ListReceivedFiles(&arg) return shouldRetry(ctx, err) }) if err != nil { return err } started = true } else { arg := sharing.ListFilesContinueArg{ Cursor: res.Cursor, } err := f.pacer.Call(func() (bool, error) { res, err = f.sharing.ListReceivedFilesContinue(&arg) return shouldRetry(ctx, err) }) if err != nil { return fmt.Errorf("list continue: %w", err) } } for _, entry := range res.Entries { fmt.Printf("%+v\n", entry) entryPath := entry.Name o := &Object{ fs: f, url: entry.PreviewUrl, remote: entryPath, modTime: 
*entry.TimeInvited, } if err != nil { return err } err = callback(o) if err != nil { return err } } if res.Cursor == "" { break } } return nil } func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err error) { errFoundFile := errors.New("found file") err = f.listReceivedFiles(ctx, func(entry fs.DirEntry) error { if entry.(*Object).remote == name { o = entry.(*Object) return errFoundFile } return nil }) if errors.Is(err, errFoundFile) { return o, nil } else if err != nil { return nil, err } return nil, fs.ErrorObjectNotFound } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { return list.WithListP(ctx, dir, f) } // ListP lists the objects and directories of the Fs starting // from dir non recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. 
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { list := list.NewHelper(callback) if f.opt.SharedFiles { err := f.listReceivedFiles(ctx, list.Add) if err != nil { return err } return list.Flush() } if f.opt.SharedFolders { err := f.listSharedFolders(ctx, list.Add) if err != nil { return err } return list.Flush() } root := f.slashRoot if dir != "" { root += "/" + dir } started := false var res *files.ListFolderResult for { if !started { arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(root)) arg.Recursive = false arg.Limit = 1000 if root == "/" { arg.Path = "" // Specify root folder as empty string } err = f.pacer.Call(func() (bool, error) { res, err = f.srv.ListFolder(arg) return shouldRetry(ctx, err) }) if err != nil { switch e := err.(type) { case files.ListFolderAPIError: if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound { err = fs.ErrorDirNotFound } } return err } started = true } else { arg := files.ListFolderContinueArg{ Cursor: res.Cursor, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/dropbox/dbhash/dbhash_test.go
backend/dropbox/dbhash/dbhash_test.go
package dbhash_test import ( "encoding/hex" "fmt" "testing" "github.com/rclone/rclone/backend/dropbox/dbhash" "github.com/stretchr/testify/assert" ) func testChunk(t *testing.T, chunk int) { data := make([]byte, chunk) for i := range chunk { data[i] = 'A' } for _, test := range []struct { n int want string }{ {0, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, {1, "1cd6ef71e6e0ff46ad2609d403dc3fee244417089aa4461245a4e4fe23a55e42"}, {2, "01e0655fb754d10418a73760f57515f4903b298e6d67dda6bf0987fa79c22c88"}, {4096, "8620913d33852befe09f16fff8fd75f77a83160d29f76f07e0276e9690903035"}, {4194303, "647c8627d70f7a7d13ce96b1e7710a771a55d41a62c3da490d92e56044d311fa"}, {4194304, "d4d63bac5b866c71620185392a8a6218ac1092454a2d16f820363b69852befa3"}, {4194305, "8f553da8d00d0bf509d8470e242888be33019c20c0544811f5b2b89e98360b92"}, {8388607, "83b30cf4fb5195b04a937727ae379cf3d06673bf8f77947f6a92858536e8369c"}, {8388608, "e08b3ba1f538804075c5f939accdeaa9efc7b5c01865c94a41e78ca6550a88e7"}, {8388609, "02c8a4aefc2bfc9036f89a7098001865885938ca580e5c9e5db672385edd303c"}, } { d := dbhash.New() var toWrite int for toWrite = test.n; toWrite >= chunk; toWrite -= chunk { n, err := d.Write(data) assert.Nil(t, err) assert.Equal(t, chunk, n) } n, err := d.Write(data[:toWrite]) assert.Nil(t, err) assert.Equal(t, toWrite, n) got := hex.EncodeToString(d.Sum(nil)) assert.Equal(t, test.want, got, fmt.Sprintf("when testing length %d", n)) } } func TestHashChunk16M(t *testing.T) { testChunk(t, 16*1024*1024) } func TestHashChunk8M(t *testing.T) { testChunk(t, 8*1024*1024) } func TestHashChunk4M(t *testing.T) { testChunk(t, 4*1024*1024) } func TestHashChunk2M(t *testing.T) { testChunk(t, 2*1024*1024) } func TestHashChunk1M(t *testing.T) { testChunk(t, 1*1024*1024) } func TestHashChunk64k(t *testing.T) { testChunk(t, 64*1024) } func TestHashChunk32k(t *testing.T) { testChunk(t, 32*1024) } func TestHashChunk2048(t *testing.T) { testChunk(t, 2048) } func TestHashChunk2047(t *testing.T) { 
testChunk(t, 2047) } func TestSumCalledTwice(t *testing.T) { d := dbhash.New() assert.NotPanics(t, func() { d.Sum(nil) }) d.Reset() assert.NotPanics(t, func() { d.Sum(nil) }) assert.NotPanics(t, func() { d.Sum(nil) }) _, _ = d.Write([]byte{1}) assert.Panics(t, func() { d.Sum(nil) }) } func TestSize(t *testing.T) { d := dbhash.New() assert.Equal(t, 32, d.Size()) } func TestBlockSize(t *testing.T) { d := dbhash.New() assert.Equal(t, 64, d.BlockSize()) } func TestSum(t *testing.T) { assert.Equal(t, [64]byte{ 0x1c, 0xd6, 0xef, 0x71, 0xe6, 0xe0, 0xff, 0x46, 0xad, 0x26, 0x09, 0xd4, 0x03, 0xdc, 0x3f, 0xee, 0x24, 0x44, 0x17, 0x08, 0x9a, 0xa4, 0x46, 0x12, 0x45, 0xa4, 0xe4, 0xfe, 0x23, 0xa5, 0x5e, 0x42, }, dbhash.Sum([]byte{'A'}), ) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/dropbox/dbhash/dbhash.go
backend/dropbox/dbhash/dbhash.go
// Package dbhash implements the dropbox hash as described in // // https://www.dropbox.com/developers/reference/content-hash package dbhash import ( "crypto/sha256" "hash" ) const ( // BlockSize of the checksum in bytes. BlockSize = sha256.BlockSize // Size of the checksum in bytes. Size = sha256.BlockSize bytesPerBlock = 4 * 1024 * 1024 hashReturnedError = "hash function returned error" ) type digest struct { n int // bytes written into blockHash so far blockHash hash.Hash totalHash hash.Hash sumCalled bool writtenMore bool } // New returns a new hash.Hash computing the Dropbox checksum. func New() hash.Hash { d := &digest{} d.Reset() return d } // writeBlockHash writes the current block hash into the total hash func (d *digest) writeBlockHash() { blockHash := d.blockHash.Sum(nil) _, err := d.totalHash.Write(blockHash) if err != nil { panic(hashReturnedError) } // reset counters for blockhash d.n = 0 d.blockHash.Reset() } // Write writes len(p) bytes from p to the underlying data stream. It returns // the number of bytes written from p (0 <= n <= len(p)) and any error // encountered that caused the write to stop early. Write must return a non-nil // error if it returns n < len(p). Write must not modify the slice data, even // temporarily. // // Implementations must not retain p. func (d *digest) Write(p []byte) (n int, err error) { n = len(p) for len(p) > 0 { d.writtenMore = true toWrite := min(bytesPerBlock-d.n, len(p)) _, err = d.blockHash.Write(p[:toWrite]) if err != nil { panic(hashReturnedError) } d.n += toWrite p = p[toWrite:] // Accumulate the total hash if d.n == bytesPerBlock { d.writeBlockHash() } } return n, nil } // Sum appends the current hash to b and returns the resulting slice. // It does not change the underlying hash state. // // TODO(ncw) Sum() can only be called once for this type of hash. // If you call Sum(), then Write() then Sum() it will result in // a panic. Calling Write() then Sum(), then Sum() is OK. 
func (d *digest) Sum(b []byte) []byte { if d.sumCalled && d.writtenMore { panic("digest.Sum() called more than once") } d.sumCalled = true d.writtenMore = false if d.n != 0 { d.writeBlockHash() } return d.totalHash.Sum(b) } // Reset resets the Hash to its initial state. func (d *digest) Reset() { d.n = 0 d.totalHash = sha256.New() d.blockHash = sha256.New() d.sumCalled = false d.writtenMore = false } // Size returns the number of bytes Sum will return. func (d *digest) Size() int { return d.totalHash.Size() } // BlockSize returns the hash's underlying block size. // The Write method must be able to accept any amount // of data, but it may operate more efficiently if all writes // are a multiple of the block size. func (d *digest) BlockSize() int { return d.totalHash.BlockSize() } // Sum returns the Dropbox checksum of the data. func Sum(data []byte) [Size]byte { var d digest d.Reset() _, _ = d.Write(data) var out [Size]byte d.Sum(out[:0]) return out } // must implement this interface var _ hash.Hash = (*digest)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/doi/zenodo.go
backend/doi/zenodo.go
// Implementation for Zenodo package doi import ( "context" "fmt" "net/url" "regexp" "github.com/rclone/rclone/backend/doi/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/lib/rest" ) var zenodoRecordRegex = regexp.MustCompile(`zenodo[.](.+)`) // Resolve the main API endpoint for a DOI hosted on Zenodo func resolveZenodoEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL, doi string) (provider Provider, endpoint *url.URL, err error) { match := zenodoRecordRegex.FindStringSubmatch(doi) if match == nil { return "", nil, fmt.Errorf("could not derive API endpoint URL from '%s'", resolvedURL.String()) } recordID := match[1] endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/records/" + recordID}) var result api.InvenioRecordResponse opts := rest.Opts{ Method: "GET", RootURL: endpointURL.String(), } err = pacer.Call(func() (bool, error) { res, err := srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, res, err) }) if err != nil { return "", nil, err } endpointURL, err = url.Parse(result.Links.Self) if err != nil { return "", nil, err } return Zenodo, endpointURL, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/doi/link_header.go
backend/doi/link_header.go
package doi

import (
	"regexp"
	"strings"
)

// linkRegex matches a URL reference enclosed in angle brackets, e.g. "<https://...>".
var linkRegex = regexp.MustCompile(`^<(.+)>$`)

// valueRegex matches a parameter value enclosed in double quotes.
var valueRegex = regexp.MustCompile(`^"(.+)"$`)

// headerLink represents a link as presented in HTTP headers
// MDN Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Link
type headerLink struct {
	Href   string
	Rel    string
	Type   string
	Extras map[string]string
}

// parseLinkHeader parses the value of a Link HTTP header into its
// individual links, skipping any entry that is malformed.
func parseLinkHeader(header string) (links []headerLink) {
	for _, raw := range strings.Split(header, ",") {
		if parsed := parseLink(strings.TrimSpace(raw)); parsed != nil {
			links = append(links, *parsed)
		}
	}
	return links
}

// parseLink parses a single link (a bracketed URL followed by optional
// ";"-separated parameters). It returns nil if the URL part is malformed.
func parseLink(link string) (parsedLink *headerLink) {
	rawParts := strings.Split(link, ";")
	parts := make([]string, 0, len(rawParts))
	for _, p := range rawParts {
		parts = append(parts, strings.TrimSpace(p))
	}

	urlMatch := linkRegex.FindStringSubmatch(parts[0])
	if urlMatch == nil {
		return nil
	}
	result := &headerLink{
		Href:   urlMatch[1],
		Extras: map[string]string{},
	}

	// Parameter names are matched case-insensitively for the well-known
	// "rel" and "type"; anything else is kept verbatim in Extras.
	for _, param := range parts[1:] {
		kv := parseKeyValue(param)
		if kv == nil {
			continue
		}
		key, value := kv[0], kv[1]
		switch strings.ToLower(key) {
		case "rel":
			result.Rel = value
		case "type":
			result.Type = value
		default:
			result.Extras[key] = value
		}
	}
	return result
}

// parseKeyValue splits a "key=value" parameter, stripping surrounding
// double quotes from the value. It returns nil when the key is empty or
// there is no "=" at all.
func parseKeyValue(keyValue string) []string {
	parts := strings.SplitN(keyValue, "=", 2)
	if parts[0] == "" || len(parts) < 2 {
		return nil
	}
	if m := valueRegex.FindStringSubmatch(parts[1]); m != nil {
		parts[1] = m[1]
	}
	return parts
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/doi/doi_internal_test.go
backend/doi/doi_internal_test.go
package doi

import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"encoding/json"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/hash"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var remoteName = "TestDoi"

// TestParseDoi checks the normalization of the various accepted DOI spellings.
func TestParseDoi(t *testing.T) {
	// 10.1000/182 -> 10.1000/182
	doi := "10.1000/182"
	parsed := parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// https://doi.org/10.1000/182 -> 10.1000/182
	doi = "https://doi.org/10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// https://dx.doi.org/10.1000/182 -> 10.1000/182
	// (the hostname was previously misspelled "dxdoi.org", which only
	// passed by accident via the "doi.org" suffix match and did not
	// exercise the real dx.doi.org resolver alias)
	doi = "https://dx.doi.org/10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// doi:10.1000/182 -> 10.1000/182
	doi = "doi:10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// doi://10.1000/182 -> 10.1000/182
	doi = "doi://10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)
}

// prepareMockDoiResolverServer prepares a test server to resolve DOIs
func prepareMockDoiResolverServer(t *testing.T, resolvedURL string) (doiResolverAPIURL string) {
	mux := http.NewServeMux()

	// Handle requests for resolving DOIs
	mux.HandleFunc("GET /api/handles/{handle...}", func(w http.ResponseWriter, r *http.Request) {
		// Check that we are resolving a DOI
		handle := strings.TrimPrefix(r.URL.Path, "/api/handles/")
		assert.NotEmpty(t, handle)
		index := r.URL.Query().Get("index")
		assert.Equal(t, "1", index)

		// Return the most basic response
		result := api.DoiResolverResponse{
			ResponseCode: 1,
			Handle:       handle,
			Values: []api.DoiResolverResponseValue{
				{
					Index: 1,
					Type:  "URL",
					Data: api.DoiResolverResponseValueData{
						Format: "string",
						Value:  resolvedURL,
					},
				},
			},
		}
		resultBytes, err := json.Marshal(result)
		require.NoError(t, err)
		w.Header().Add("Content-Type", "application/json")
		_, err = w.Write(resultBytes)
		require.NoError(t, err)
	})

	// Make the test server
	ts := httptest.NewServer(mux)
	// Close the server at the end of the test
	t.Cleanup(ts.Close)

	return ts.URL + "/api"
}

// md5Sum returns the hex-encoded MD5 digest of text.
func md5Sum(text string) string {
	hash := md5.Sum([]byte(text))
	return hex.EncodeToString(hash[:])
}

// prepareMockZenodoServer prepares a test server that mocks Zenodo.org
func prepareMockZenodoServer(t *testing.T, files map[string]string) *httptest.Server {
	mux := http.NewServeMux()

	// Handle requests for a single record
	mux.HandleFunc("GET /api/records/{recordID...}", func(w http.ResponseWriter, r *http.Request) {
		// Check that we are returning data about a single record
		recordID := strings.TrimPrefix(r.URL.Path, "/api/records/")
		assert.NotEmpty(t, recordID)

		// Return the most basic response
		selfURL, err := url.Parse("http://" + r.Host)
		require.NoError(t, err)
		selfURL = selfURL.JoinPath(r.URL.String())
		result := api.InvenioRecordResponse{
			Links: api.InvenioRecordResponseLinks{
				Self: selfURL.String(),
			},
		}
		resultBytes, err := json.Marshal(result)
		require.NoError(t, err)
		w.Header().Add("Content-Type", "application/json")
		_, err = w.Write(resultBytes)
		require.NoError(t, err)
	})

	// Handle requests for listing files in a record
	mux.HandleFunc("GET /api/records/{record}/files", func(w http.ResponseWriter, r *http.Request) {
		// Return the most basic response
		filesBaseURL, err := url.Parse("http://" + r.Host)
		require.NoError(t, err)
		filesBaseURL = filesBaseURL.JoinPath("/api/files/")
		entries := []api.InvenioFilesResponseEntry{}
		for filename, contents := range files {
			entries = append(entries,
				api.InvenioFilesResponseEntry{
					Key:      filename,
					Checksum: md5Sum(contents),
					Size:     int64(len(contents)),
					Updated:  time.Now().UTC().Format(time.RFC3339),
					MimeType: "text/plain; charset=utf-8",
					Links: api.InvenioFilesResponseEntryLinks{
						Content: filesBaseURL.JoinPath(filename).String(),
					},
				},
			)
		}
		result := api.InvenioFilesResponse{
			Entries: entries,
		}
		resultBytes, err := json.Marshal(result)
		require.NoError(t, err)
		w.Header().Add("Content-Type", "application/json")
		_, err = w.Write(resultBytes)
		require.NoError(t, err)
	})

	// Handle requests for file contents
	mux.HandleFunc("/api/files/{file}", func(w http.ResponseWriter, r *http.Request) {
		// Check that we are returning the contents of a file
		filename := strings.TrimPrefix(r.URL.Path, "/api/files/")
		assert.NotEmpty(t, filename)

		contents, found := files[filename]
		if !found {
			w.WriteHeader(404)
			return
		}

		// Return the most basic response
		_, err := w.Write([]byte(contents))
		require.NoError(t, err)
	})

	// Make the test server
	ts := httptest.NewServer(mux)
	// Close the server at the end of the test
	t.Cleanup(ts.Close)

	return ts
}

// TestZenodoRemote runs listing and read tests against a mocked Zenodo server.
func TestZenodoRemote(t *testing.T) {
	recordID := "2600782"
	doi := "10.5281/zenodo.2600782"

	// The files in the dataset
	files := map[string]string{
		"README.md": "This is a dataset.",
		"data.txt":  "Some data",
	}

	ts := prepareMockZenodoServer(t, files)
	resolvedURL := ts.URL + "/record/" + recordID

	doiResolverAPIURL := prepareMockDoiResolverServer(t, resolvedURL)

	testConfig := configmap.Simple{
		"type":                 "doi",
		"doi":                  doi,
		"provider":             "zenodo",
		"doi_resolver_api_url": doiResolverAPIURL,
	}
	f, err := NewFs(context.Background(), remoteName, "", testConfig)
	require.NoError(t, err)

	// Test listing the DOI files
	entries, err := f.List(context.Background(), "")
	require.NoError(t, err)
	sort.Sort(entries)

	require.Equal(t, len(files), len(entries))

	e := entries[0]
	assert.Equal(t, "README.md", e.Remote())
	assert.Equal(t, int64(18), e.Size())
	_, ok := e.(*Object)
	assert.True(t, ok)

	e = entries[1]
	assert.Equal(t, "data.txt", e.Remote())
	assert.Equal(t, int64(9), e.Size())
	_, ok = e.(*Object)
	assert.True(t, ok)

	// Test reading the DOI files
	o, err := f.NewObject(context.Background(), "README.md")
	require.NoError(t, err)
	assert.Equal(t, int64(18), o.Size())
	md5Hash, err := o.Hash(context.Background(), hash.MD5)
	require.NoError(t, err)
	assert.Equal(t, "464352b1cab5240e44528a56fda33d9d", md5Hash)
	fd, err := o.Open(context.Background())
	require.NoError(t, err)
	data, err := io.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, []byte(files["README.md"]), data)
	do, ok := o.(fs.MimeTyper)
	require.True(t, ok)
	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))

	o, err = f.NewObject(context.Background(), "data.txt")
	require.NoError(t, err)
	assert.Equal(t, int64(9), o.Size())
	md5Hash, err = o.Hash(context.Background(), hash.MD5)
	require.NoError(t, err)
	assert.Equal(t, "5b82f8bf4df2bfb0e66ccaa7306fd024", md5Hash)
	fd, err = o.Open(context.Background())
	require.NoError(t, err)
	data, err = io.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, []byte(files["data.txt"]), data)
	do, ok = o.(fs.MimeTyper)
	require.True(t, ok)
	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/doi/invenio.go
backend/doi/invenio.go
// Implementation for InvenioRDM

package doi

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"regexp"
	"strings"
	"time"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

// invenioRecordRegex extracts the record ID from an InvenioRDM record URL path,
// matching both "/record/<id>" and "/records/<id>" forms.
var invenioRecordRegex = regexp.MustCompile(`\/records?\/(.+)`)

// Returns true if resolvedURL is likely a DOI hosted on an InvenioRDM installation
//
// Detection is by probing: if the endpoint can be resolved, the host is
// assumed to be InvenioRDM.
func activateInvenio(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (isActive bool) {
	_, _, err := resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
	return err == nil
}

// Resolve the main API endpoint for a DOI hosted on an InvenioRDM installation
//
// Two strategies are tried in order:
//  1. follow the "linkset" relation advertised in the Link response header;
//  2. extract the record ID from the final (post-redirect) URL and guess
//     the conventional "/api/records/<id>" path.
func resolveInvenioEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
	var res *http.Response
	opts := rest.Opts{
		Method:  "GET",
		RootURL: resolvedURL.String(),
	}
	err = pacer.Call(func() (bool, error) {
		res, err = srv.Call(ctx, &opts)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return "", nil, err
	}

	// First, attempt to grab the API URL from the headers
	var linksetURL *url.URL
	links := parseLinkHeader(res.Header.Get("Link"))
	for _, link := range links {
		if link.Rel == "linkset" && link.Type == "application/linkset+json" {
			parsed, err := url.Parse(link.Href)
			if err == nil {
				linksetURL = parsed
				break
			}
		}
	}
	if linksetURL != nil {
		endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, linksetURL)
		if err == nil {
			return Invenio, endpoint, nil
		}
		fs.Logf(nil, "using linkset URL failed: %s", err.Error())
	}

	// If there is no linkset header, try to grab the record ID from the URL
	// (res.Request.URL is the URL after any redirects were followed)
	recordID := ""
	resURL := res.Request.URL
	match := invenioRecordRegex.FindStringSubmatch(resURL.EscapedPath())
	if match != nil {
		recordID = match[1]
		guessedURL := res.Request.URL.ResolveReference(&url.URL{
			Path: "/api/records/" + recordID,
		})
		endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, guessedURL)
		if err == nil {
			return Invenio, endpoint, nil
		}
		fs.Logf(nil, "guessing the URL failed: %s", err.Error())
	}
	return "", nil, fmt.Errorf("could not resolve the Invenio API endpoint for '%s'", resolvedURL.String())
}

// checkInvenioAPIURL validates a candidate record API URL by fetching it and
// returns the canonical self link reported by the API.
func checkInvenioAPIURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (endpoint *url.URL, err error) {
	var result api.InvenioRecordResponse
	opts := rest.Opts{
		Method:  "GET",
		RootURL: resolvedURL.String(),
	}
	err = pacer.Call(func() (bool, error) {
		res, err := srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, err
	}
	if result.Links.Self == "" {
		return nil, fmt.Errorf("could not parse API response from '%s'", resolvedURL.String())
	}
	return url.Parse(result.Links.Self)
}

// invenioProvider implements the doiProvider interface for InvenioRDM installations
type invenioProvider struct {
	f *Fs
}

// ListEntries returns the full list of entries found at the remote, regardless of root
//
// Results are cached under the "files" key; cached Objects are copied on the
// way in and out so callers never share mutable state with the cache.
func (ip *invenioProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
	// Use the cache if populated
	cachedEntries, found := ip.f.cache.GetMaybe("files")
	if found {
		parsedEntries, ok := cachedEntries.([]Object)
		if ok {
			for _, entry := range parsedEntries {
				newEntry := entry
				entries = append(entries, &newEntry)
			}
			return entries, nil
		}
	}

	filesURL := ip.f.endpoint.JoinPath("files")
	var result api.InvenioFilesResponse
	opts := rest.Opts{
		Method: "GET",
		Path:   strings.TrimLeft(filesURL.EscapedPath(), "/"),
	}
	err = ip.f.pacer.Call(func() (bool, error) {
		res, err := ip.f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, fmt.Errorf("readDir failed: %w", err)
	}
	for _, file := range result.Entries {
		// A bad timestamp is tolerated: fall back to timeUnset rather than fail.
		modTime, modTimeErr := time.Parse(time.RFC3339, file.Updated)
		if modTimeErr != nil {
			fs.Logf(ip.f, "error: could not parse last update time %v", modTimeErr)
			modTime = timeUnset
		}
		entry := &Object{
			fs:          ip.f,
			remote:      file.Key,
			contentURL:  file.Links.Content,
			size:        file.Size,
			modTime:     modTime,
			contentType: file.MimeType,
			md5:         strings.TrimPrefix(file.Checksum, "md5:"),
		}
		entries = append(entries, entry)
	}

	// Populate the cache
	cacheEntries := []Object{}
	for _, entry := range entries {
		cacheEntries = append(cacheEntries, *entry)
	}
	ip.f.cache.Put("files", cacheEntries)

	return entries, nil
}

// newInvenioProvider creates a doiProvider backed by an InvenioRDM installation.
func newInvenioProvider(f *Fs) doiProvider {
	return &invenioProvider{
		f: f,
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/doi/doi_test.go
backend/doi/doi_test.go
// Test DOI filesystem interface

package doi

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
//
// The remote is read only, so a nil *Object is passed for the NilObject
// checks and no write tests are configured.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDoi:",
		NilObject:  (*Object)(nil),
	})
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/doi/doi.go
backend/doi/doi.go
// Package doi provides a filesystem interface for digital objects identified by DOIs.
//
// See: https://www.doi.org/the-identifier/what-is-a-doi/
package doi

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"strings"
	"time"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/cache"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
)

const (
	// the URL of the DOI resolver
	//
	// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
	doiResolverAPIURL = "https://doi.org/api"

	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential
)

var (
	errorReadOnly = errors.New("doi remotes are read only")
	timeUnset     = time.Unix(0, 0)
)

func init() {
	fsi := &fs.RegInfo{
		Name:        "doi",
		Description: "DOI datasets",
		NewFs:       NewFs,
		CommandHelp: commandHelp,
		Options: []fs.Option{{
			Name:     "doi",
			Help:     "The DOI or the doi.org URL.",
			Required: true,
		}, {
			Name: fs.ConfigProvider,
			Help: `DOI provider.

The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
			Examples: []fs.OptionExample{
				{
					Value: "auto",
					Help:  "Auto-detect provider",
				}, {
					Value: string(Zenodo),
					Help:  "Zenodo",
				}, {
					Value: string(Dataverse),
					Help:  "Dataverse",
				}, {
					Value: string(Invenio),
					Help:  "Invenio",
				}},
			Required: false,
			Advanced: true,
		}, {
			Name: "doi_resolver_api_url",
			Help: `The URL of the DOI resolver API to use.

The DOI resolver can be set for testing or for cases when the the canonical DOI resolver API cannot be used.

Defaults to "https://doi.org/api".`,
			Required: false,
			Advanced: true,
		}},
	}
	fs.Register(fsi)
}

// Provider defines the type of provider hosting the DOI
type Provider string

const (
	// Zenodo provider, see https://zenodo.org
	Zenodo Provider = "zenodo"
	// Dataverse provider, see https://dataverse.harvard.edu
	Dataverse Provider = "dataverse"
	// Invenio provider, see https://inveniordm.docs.cern.ch
	Invenio Provider = "invenio"
)

// Options defines the configuration for this backend
type Options struct {
	Doi               string `config:"doi"`                  // The DOI, a digital identifier of an object, usually a dataset
	Provider          string `config:"provider"`             // The DOI provider
	DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use.
}

// Fs stores the interface to the remote HTTP files
type Fs struct {
	name        string       // name of this remote
	root        string       // the path we are working on
	provider    Provider     // the DOI provider
	doiProvider doiProvider  // the interface used to interact with the DOI provider
	features    *fs.Features // optional features
	opt         Options      // options for this backend
	ci          *fs.ConfigInfo // global config
	endpoint    *url.URL     // the main API endpoint for this remote
	endpointURL string       // endpoint as a string
	srv         *rest.Client // the connection to the server
	pacer       *fs.Pacer    // pacer for API calls
	cache       *cache.Cache // a cache for the remote metadata
}

// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
	fs          *Fs       // what this object is part of
	remote      string    // the remote path
	contentURL  string    // the URL where the contents of the file can be downloaded
	size        int64     // size of the object
	modTime     time.Time // modification time of the object
	contentType string    // content type of the object
	md5         string    // MD5 hash of the object content
}

// doiProvider is the interface used to list objects in a DOI
type doiProvider interface {
	// ListEntries returns the full list of entries found at the remote, regardless of root
	ListEntries(ctx context.Context) (entries []*Object, err error)
}

// Parse the input string as a DOI
// Examples:
// 10.1000/182 -> 10.1000/182
// https://doi.org/10.1000/182 -> 10.1000/182
// doi:10.1000/182 -> 10.1000/182
func parseDoi(doi string) string {
	doiURL, err := url.Parse(doi)
	if err != nil {
		return doi
	}
	if doiURL.Scheme == "doi" {
		// also handles "doi://..." - TrimLeft removes the remaining slashes
		return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
	}
	if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
		return strings.TrimLeft(doiURL.Path, "/")
	}
	return doi
}

// Resolve a DOI to a URL
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
	resolverURL := opt.DoiResolverAPIURL
	if resolverURL == "" {
		resolverURL = doiResolverAPIURL
	}

	var result api.DoiResolverResponse
	params := url.Values{}
	params.Add("index", "1")
	opts := rest.Opts{
		Method:     "GET",
		RootURL:    resolverURL,
		Path:       "/handles/" + opt.Doi,
		Parameters: params,
	}
	err = pacer.Call(func() (bool, error) {
		res, err := srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, err
	}

	if result.ResponseCode != 1 {
		return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
	}
	// take the (last) value of type URL with a string payload
	resolvedURLStr := ""
	for _, value := range result.Values {
		if value.Type == "URL" && value.Data.Format == "string" {
			valueStr, ok := value.Data.Value.(string)
			if !ok {
				return nil, fmt.Errorf("could not resolve DOI (incorrect response format)")
			}
			resolvedURLStr = valueStr
		}
	}
	resolvedURL, err := url.Parse(resolvedURLStr)
	if err != nil {
		return nil, err
	}
	return resolvedURL, nil
}

// Resolve the passed configuration into a provider and endpoint
//
// An explicitly configured provider is honoured first; otherwise the
// provider is auto-detected from the resolved URL.
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
	resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
	if err != nil {
		return "", nil, err
	}

	switch opt.Provider {
	case string(Dataverse):
		return resolveDataverseEndpoint(resolvedURL)
	case string(Invenio):
		return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
	case string(Zenodo):
		return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
	}

	hostname := strings.ToLower(resolvedURL.Hostname())
	if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
		return resolveDataverseEndpoint(resolvedURL)
	}
	if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
		return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
	}
	if activateInvenio(ctx, srv, pacer, resolvedURL) {
		return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
	}
	return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
}

// Make the http connection from the passed options
//
// Returns isFile true when the configured root turns out to be a single
// file rather than a directory.
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
	provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
	if err != nil {
		return false, err
	}

	// Update f with the new parameters
	f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
	f.endpoint = endpoint
	f.endpointURL = endpoint.String()
	f.provider = provider
	f.opt.Provider = string(provider)

	switch f.provider {
	case Dataverse:
		f.doiProvider = newDataverseProvider(f)
	case Invenio, Zenodo:
		// Zenodo is an InvenioRDM installation so shares the provider
		f.doiProvider = newInvenioProvider(f)
	default:
		return false, fmt.Errorf("provider type '%s' not supported", f.provider)
	}

	// Determine if the root is a file
	entries, err := f.doiProvider.ListEntries(ctx)
	if err != nil {
		return false, err
	}
	for _, entry := range entries {
		if entry.remote == f.root {
			isFile = true
			break
		}
	}
	return isFile, nil
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this res and err
// deserve to be retried.  It returns the err as a convenience.
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}

// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	root = strings.Trim(root, "/")

	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	opt.Doi = parseDoi(opt.Doi)

	client := fshttp.NewClient(ctx)
	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		ci:    ci,
		srv:   rest.NewClient(client),
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		cache: cache.New(),
	}
	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)

	isFile, err := f.httpConnection(ctx, opt)
	if err != nil {
		return nil, err
	}

	if isFile {
		// return an error with an fs which points to the parent
		newRoot := path.Dir(f.root)
		if newRoot == "." {
			newRoot = ""
		}
		f.root = newRoot
		return f, fs.ErrorIsFile
	}

	return f, nil
}

// Name returns the configured name of the file system
func (f *Fs) Name() string {
	return f.name
}

// Root returns the root for the filesystem
func (f *Fs) Root() string {
	return f.root
}

// String returns the URL for the filesystem
func (f *Fs) String() string {
	return fmt.Sprintf("DOI %s", f.opt.Doi)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
	// return hash.Set(hash.None)
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return errorReadOnly
}

// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
	return errorReadOnly
}

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return errorReadOnly
}

// NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	entries, err := f.doiProvider.ListEntries(ctx)
	if err != nil {
		return nil, err
	}

	remoteFullPath := remote
	if f.root != "" {
		remoteFullPath = path.Join(f.root, remote)
	}

	for _, entry := range entries {
		if entry.Remote() == remoteFullPath {
			return entry, nil
		}
	}

	return nil, fs.ErrorObjectNotFound
}

// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	fileEntries, err := f.doiProvider.ListEntries(ctx)
	if err != nil {
		return nil, fmt.Errorf("error listing %q: %w", dir, err)
	}

	fullDir := path.Join(f.root, dir)
	if fullDir != "" {
		fullDir += "/"
	}

	dirPaths := map[string]bool{}
	for _, entry := range fileEntries {
		// First, filter out files not in `fullDir`
		if !strings.HasPrefix(entry.remote, fullDir) {
			continue
		}
		// Then, find entries in subfolders
		remotePath := entry.remote
		if fullDir != "" {
			remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
		}
		parts := strings.SplitN(remotePath, "/", 2)
		if len(parts) == 1 {
			// a direct child file of dir
			newEntry := *entry
			newEntry.remote = path.Join(dir, remotePath)
			entries = append(entries, &newEntry)
		} else {
			// a file in a subdirectory - record the directory once
			dirPaths[path.Join(dir, parts[0])] = true
		}
	}

	for dirPath := range dirPaths {
		entry := fs.NewDir(dirPath, time.Time{})
		entries = append(entries, entry)
	}

	return entries, nil
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return nil, errorReadOnly
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return nil, errorReadOnly
}

// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
	return o.fs
}

// String returns the URL to the remote HTTP file
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support
// remote calculation of hashes
// (in practice this backend returns the MD5 recorded in the object's
// provider metadata; any other hash type yields hash.ErrUnsupported)
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5, nil
}

// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
	return o.size
}

// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
//
// The backend is read-only, so this always fails.
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return errorReadOnly
}

// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
	return true
}

// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// Clamp any Range options to the known object size.
	fs.FixRangeOption(options, o.size)
	opts := rest.Opts{
		Method:  "GET",
		RootURL: o.contentURL,
		Options: options,
	}
	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, fmt.Errorf("Open failed: %w", err)
	}

	// Handle non-compliant redirects: some providers answer with a
	// Location header on a non-3xx status, which the HTTP client will
	// not follow automatically, so re-issue the request by hand.
	if res.Header.Get("Location") != "" {
		newURL, err := res.Location()
		if err == nil {
			opts.RootURL = newURL.String()
			err = o.fs.pacer.Call(func() (bool, error) {
				res, err = o.fs.srv.Call(ctx, &opts)
				return shouldRetry(ctx, res, err)
			})
			if err != nil {
				return nil, fmt.Errorf("Open failed: %w", err)
			}
		}
	}
	return res.Body, nil
}

// Update in to the object with the modTime given of the given size
//
// The backend is read-only, so this always fails.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errorReadOnly
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.contentType
}

var commandHelp = []fs.CommandHelp{{
	Name:  "metadata",
	Short: "Show metadata about the DOI.",
	Long: `This command returns a JSON object with some information about the DOI.

Usage example:

` + "```console" + `
rclone backend metadata doi:
` + "```" + `

It returns a JSON object representing metadata about the DOI.`,
}, {
	Name:  "set",
	Short: "Set command for updating the config parameters.",
	Long: `This set command can be used to update the config parameters
for a running doi backend.

Usage examples:

` + "```console" + `
rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
` + "```" + `

The option keys are named as they are in the config file.

This rebuilds the connection to the doi backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.

It doesn't return anything.`,
}}

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
	switch name {
	case "metadata":
		return f.ShowMetadata(ctx)
	case "set":
		// Apply the supplied options on top of a copy of the current
		// config, then rebuild the connection before committing them.
		newOpt := f.opt
		err := configstruct.Set(configmap.Simple(opt), &newOpt)
		if err != nil {
			return nil, fmt.Errorf("reading config: %w", err)
		}
		_, err = f.httpConnection(ctx, &newOpt)
		if err != nil {
			return nil, fmt.Errorf("updating session: %w", err)
		}
		f.opt = newOpt
		keys := []string{}
		for k := range opt {
			keys = append(keys, k)
		}
		fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
		return nil, nil
	default:
		return nil, fs.ErrorCommandNotFound
	}
}

// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
	doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
	if err != nil {
		return nil, err
	}
	info := map[string]any{}
	info["DOI"] = f.opt.Doi
	info["URL"] = doiURL.String()
	info["metadataURL"] = f.endpointURL
	info["provider"] = f.provider
	return info, nil
}

// Check the interfaces are satisfied
var (
	_ fs.Fs          = (*Fs)(nil)
	_ fs.PutStreamer = (*Fs)(nil)
	_ fs.Commander   = (*Fs)(nil)
	_ fs.Object      = (*Object)(nil)
	_ fs.MimeTyper   = (*Object)(nil)
)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/doi/dataverse.go
backend/doi/dataverse.go
// Implementation for Dataverse

package doi

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"path"
	"strings"
	"time"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

// activateDataverse returns true if resolvedURL is likely a DOI hosted
// on a Dataverse installation (such URLs carry a persistentId query
// parameter).
func activateDataverse(resolvedURL *url.URL) (isActive bool) {
	queryValues := resolvedURL.Query()
	persistentID := queryValues.Get("persistentId")
	return persistentID != ""
}

// resolveDataverseEndpoint resolves the main API endpoint for a DOI
// hosted on a Dataverse installation, carrying the persistentId over to
// the dataset API URL.
func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
	queryValues := resolvedURL.Query()
	persistentID := queryValues.Get("persistentId")

	query := url.Values{}
	query.Add("persistentId", persistentID)
	endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})

	return Dataverse, endpointURL, nil
}

// dataverseProvider implements the doiProvider interface for Dataverse installations
type dataverseProvider struct {
	f *Fs
}

// ListEntries returns the full list of entries found at the remote, regardless of root
//
// Results are cached (as values, under the "files" key) so repeat calls
// don't hit the Dataverse API; fresh *Object copies are handed out on
// every call so callers can mutate them safely.
func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
	// Use the cache if populated
	cachedEntries, found := dp.f.cache.GetMaybe("files")
	if found {
		parsedEntries, ok := cachedEntries.([]Object)
		if ok {
			for _, entry := range parsedEntries {
				newEntry := entry
				entries = append(entries, &newEntry)
			}
			return entries, nil
		}
	}

	// Fetch the dataset description (including its file list) from the
	// Dataverse dataset API, retrying via the pacer.
	filesURL := dp.f.endpoint
	var res *http.Response
	var result api.DataverseDatasetResponse
	opts := rest.Opts{
		Method:     "GET",
		Path:       strings.TrimLeft(filesURL.EscapedPath(), "/"),
		Parameters: filesURL.Query(),
	}
	err = dp.f.pacer.Call(func() (bool, error) {
		res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, fmt.Errorf("readDir failed: %w", err)
	}

	// The dataset's last update time is used as the modtime of every file.
	modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
	if modTimeErr != nil {
		fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
		modTime = timeUnset
	}

	for _, file := range result.Data.LatestVersion.Files {
		// Request the original format rather than any Dataverse-converted
		// derivative (e.g. tabular ingests).
		contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
		query := url.Values{}
		query.Add("format", "original")
		contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
		entry := &Object{
			fs:          dp.f,
			remote:      path.Join(file.DirectoryLabel, file.DataFile.Filename),
			contentURL:  contentURL.String(),
			size:        file.DataFile.FileSize,
			modTime:     modTime,
			md5:         file.DataFile.MD5,
			contentType: file.DataFile.ContentType,
		}
		// Prefer the original (pre-ingest) name, size and type when present.
		if file.DataFile.OriginalFileName != "" {
			entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
			entry.size = file.DataFile.OriginalFileSize
			entry.contentType = file.DataFile.OriginalFileFormat
		}
		entries = append(entries, entry)
	}

	// Populate the cache
	cacheEntries := []Object{}
	for _, entry := range entries {
		cacheEntries = append(cacheEntries, *entry)
	}
	dp.f.cache.Put("files", cacheEntries)

	return entries, nil
}

// newDataverseProvider creates the doiProvider implementation for
// Dataverse-hosted DOIs.
func newDataverseProvider(f *Fs) doiProvider {
	return &dataverseProvider{
		f: f,
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/doi/link_header_internal_test.go
backend/doi/link_header_internal_test.go
package doi import ( "testing" "github.com/stretchr/testify/assert" ) func TestParseLinkHeader(t *testing.T) { header := "<https://zenodo.org/api/records/15063252> ; rel=\"linkset\" ; type=\"application/linkset+json\"" links := parseLinkHeader(header) expected := headerLink{ Href: "https://zenodo.org/api/records/15063252", Rel: "linkset", Type: "application/linkset+json", Extras: map[string]string{}, } assert.Contains(t, links, expected) header = "<https://api.example.com/issues?page=2>; rel=\"prev\", <https://api.example.com/issues?page=4>; rel=\"next\", <https://api.example.com/issues?page=10>; rel=\"last\", <https://api.example.com/issues?page=1>; rel=\"first\"" links = parseLinkHeader(header) expectedList := []headerLink{{ Href: "https://api.example.com/issues?page=2", Rel: "prev", Type: "", Extras: map[string]string{}, }, { Href: "https://api.example.com/issues?page=4", Rel: "next", Type: "", Extras: map[string]string{}, }, { Href: "https://api.example.com/issues?page=10", Rel: "last", Type: "", Extras: map[string]string{}, }, { Href: "https://api.example.com/issues?page=1", Rel: "first", Type: "", Extras: map[string]string{}, }} assert.Equal(t, links, expectedList) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/doi/api/dataversetypes.go
backend/doi/api/dataversetypes.go
// Type definitions specific to Dataverse package api // DataverseDatasetResponse is returned by the Dataverse dataset API type DataverseDatasetResponse struct { Status string `json:"status"` Data DataverseDataset `json:"data"` } // DataverseDataset is the representation of a dataset type DataverseDataset struct { LatestVersion DataverseDatasetVersion `json:"latestVersion"` } // DataverseDatasetVersion is the representation of a dataset version type DataverseDatasetVersion struct { LastUpdateTime string `json:"lastUpdateTime"` Files []DataverseFile `json:"files"` } // DataverseFile is the representation of a file found in a dataset type DataverseFile struct { DirectoryLabel string `json:"directoryLabel"` DataFile DataverseDataFile `json:"dataFile"` } // DataverseDataFile represents file metadata details type DataverseDataFile struct { ID int64 `json:"id"` Filename string `json:"filename"` ContentType string `json:"contentType"` FileSize int64 `json:"filesize"` OriginalFileFormat string `json:"originalFileFormat"` OriginalFileSize int64 `json:"originalFileSize"` OriginalFileName string `json:"originalFileName"` MD5 string `json:"md5"` }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/doi/api/types.go
backend/doi/api/types.go
// Package api has general type definitions for doi package api // DoiResolverResponse is returned by the DOI resolver API // // Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation type DoiResolverResponse struct { ResponseCode int `json:"responseCode"` Handle string `json:"handle"` Values []DoiResolverResponseValue `json:"values"` } // DoiResolverResponseValue is a single handle record value type DoiResolverResponseValue struct { Index int `json:"index"` Type string `json:"type"` Data DoiResolverResponseValueData `json:"data"` TTL int `json:"ttl"` Timestamp string `json:"timestamp"` } // DoiResolverResponseValueData is the data held in a handle value type DoiResolverResponseValueData struct { Format string `json:"format"` Value any `json:"value"` }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/doi/api/inveniotypes.go
backend/doi/api/inveniotypes.go
// Type definitions specific to InvenioRDM package api // InvenioRecordResponse is the representation of a record stored in InvenioRDM type InvenioRecordResponse struct { Links InvenioRecordResponseLinks `json:"links"` } // InvenioRecordResponseLinks represents a record's links type InvenioRecordResponseLinks struct { Self string `json:"self"` } // InvenioFilesResponse is the representation of a record's files type InvenioFilesResponse struct { Entries []InvenioFilesResponseEntry `json:"entries"` } // InvenioFilesResponseEntry is the representation of a file entry type InvenioFilesResponseEntry struct { Key string `json:"key"` Checksum string `json:"checksum"` Size int64 `json:"size"` Updated string `json:"updated"` MimeType string `json:"mimetype"` Links InvenioFilesResponseEntryLinks `json:"links"` } // InvenioFilesResponseEntryLinks represents file links details type InvenioFilesResponseEntryLinks struct { Content string `json:"content"` }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/drive/drive_internal_test.go
backend/drive/drive_internal_test.go
package drive import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "mime" "os" "path" "path/filepath" "strings" "testing" "time" _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/sync" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/lib/random" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/api/drive/v3" "google.golang.org/api/googleapi" ) func TestDriveScopes(t *testing.T) { for _, test := range []struct { in string want []string wantFlag bool }{ {"", []string{ "https://www.googleapis.com/auth/drive", }, false}, {" drive.file , drive.readonly", []string{ "https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive.readonly", }, false}, {" drive.file , drive.appfolder", []string{ "https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive.appfolder", }, true}, } { got := driveScopes(test.in) assert.Equal(t, test.want, got, test.in) gotFlag := driveScopesContainsAppFolder(got) assert.Equal(t, test.wantFlag, gotFlag, test.in) } } /* var additionalMimeTypes = map[string]string{ "application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm", "application/vnd.ms-excel.template.macroenabled.12": ".xltm", "application/vnd.ms-powerpoint.presentation.macroenabled.12": ".pptm", "application/vnd.ms-powerpoint.slideshow.macroenabled.12": ".ppsm", "application/vnd.ms-powerpoint.template.macroenabled.12": ".potm", "application/vnd.ms-powerpoint": ".ppt", "application/vnd.ms-word.document.macroenabled.12": ".docm", "application/vnd.ms-word.template.macroenabled.12": ".dotm", "application/vnd.openxmlformats-officedocument.presentationml.template": ".potx", "application/vnd.openxmlformats-officedocument.spreadsheetml.template": 
".xltx", "application/vnd.openxmlformats-officedocument.wordprocessingml.template": ".dotx", "application/vnd.sun.xml.writer": ".sxw", "text/richtext": ".rtf", } */ // Load the example export formats into exportFormats for testing func TestInternalLoadExampleFormats(t *testing.T) { fetchFormatsOnce.Do(func() {}) buf, err := os.ReadFile(filepath.FromSlash("test/about.json")) var about struct { ExportFormats map[string][]string `json:"exportFormats,omitempty"` ImportFormats map[string][]string `json:"importFormats,omitempty"` } require.NoError(t, err) require.NoError(t, json.Unmarshal(buf, &about)) _exportFormats = fixMimeTypeMap(about.ExportFormats) _importFormats = fixMimeTypeMap(about.ImportFormats) } func TestInternalParseExtensions(t *testing.T) { for _, test := range []struct { in string want []string wantErr error }{ {"doc", []string{".doc"}, nil}, {" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil}, {"docx,svg,Docx", []string{".docx", ".svg"}, nil}, {"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)}, } { extensions, _, gotErr := parseExtensions(test.in) if test.wantErr == nil { assert.NoError(t, gotErr) } else { assert.EqualError(t, gotErr, test.wantErr.Error()) } assert.Equal(t, test.want, extensions) } // Test it is appending extensions, _, gotErr := parseExtensions("docx,svg", "docx,svg,xlsx") assert.NoError(t, gotErr) assert.Equal(t, []string{".docx", ".svg", ".xlsx"}, extensions) } func TestInternalFindExportFormat(t *testing.T) { ctx := context.Background() item := &drive.File{ Name: "file", MimeType: "application/vnd.google-apps.document", } for _, test := range []struct { extensions []string wantExtension string wantMimeType string }{ {[]string{}, "", ""}, {[]string{".pdf"}, ".pdf", "application/pdf"}, {[]string{".pdf", ".rtf", ".xls"}, ".pdf", "application/pdf"}, {[]string{".xls", ".rtf", ".pdf"}, ".rtf", "application/rtf"}, {[]string{".xls", ".csv", ".svg"}, "", 
""}, } { f := new(Fs) f.exportExtensions = test.extensions gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(ctx, item) assert.Equal(t, test.wantExtension, gotExtension) if test.wantExtension != "" { assert.Equal(t, item.Name+gotExtension, gotFilename) } else { assert.Equal(t, "", gotFilename) } assert.Equal(t, test.wantMimeType, gotMimeType) assert.Equal(t, true, gotIsDocument) } } func TestMimeTypesToExtension(t *testing.T) { for mimeType, extension := range _mimeTypeToExtension { extensions, err := mime.ExtensionsByType(mimeType) assert.NoError(t, err) assert.Contains(t, extensions, extension) } } func TestExtensionToMimeType(t *testing.T) { for mimeType, extension := range _mimeTypeToExtension { gotMimeType := mime.TypeByExtension(extension) mediatype, _, err := mime.ParseMediaType(gotMimeType) assert.NoError(t, err) assert.Equal(t, mimeType, mediatype) } } func TestExtensionsForExportFormats(t *testing.T) { if _exportFormats == nil { t.Error("exportFormats == nil") } for fromMT, toMTs := range _exportFormats { for _, toMT := range toMTs { if !isInternalMimeType(toMT) { extensions, err := mime.ExtensionsByType(toMT) assert.NoError(t, err, "invalid MIME type %q", toMT) assert.NotEmpty(t, extensions, "No extension found for %q (from: %q)", fromMT, toMT) } } } } func TestExtensionsForImportFormats(t *testing.T) { t.Skip() if _importFormats == nil { t.Error("_importFormats == nil") } for fromMT := range _importFormats { if !isInternalMimeType(fromMT) { extensions, err := mime.ExtensionsByType(fromMT) assert.NoError(t, err, "invalid MIME type %q", fromMT) assert.NotEmpty(t, extensions, "No extension found for %q", fromMT) } } } func (f *Fs) InternalTestShouldRetry(t *testing.T) { ctx := context.Background() gatewayTimeout := googleapi.Error{ Code: 503, } timeoutRetry, timeoutError := f.shouldRetry(ctx, &gatewayTimeout) assert.True(t, timeoutRetry) assert.Equal(t, &gatewayTimeout, timeoutError) generic403 := googleapi.Error{ Code: 403, } 
rLEItem := googleapi.ErrorItem{ Reason: "rateLimitExceeded", Message: "User rate limit exceeded.", } generic403.Errors = append(generic403.Errors, rLEItem) oldStopUpload := f.opt.StopOnUploadLimit oldStopDownload := f.opt.StopOnDownloadLimit f.opt.StopOnUploadLimit = true f.opt.StopOnDownloadLimit = true defer func() { f.opt.StopOnUploadLimit = oldStopUpload f.opt.StopOnDownloadLimit = oldStopDownload }() expectedRLError := fserrors.FatalError(&generic403) rateLimitRetry, rateLimitErr := f.shouldRetry(ctx, &generic403) assert.False(t, rateLimitRetry) assert.Equal(t, rateLimitErr, expectedRLError) dQEItem := googleapi.ErrorItem{ Reason: "downloadQuotaExceeded", } generic403.Errors[0] = dQEItem expectedDQError := fserrors.FatalError(&generic403) downloadQuotaRetry, downloadQuotaError := f.shouldRetry(ctx, &generic403) assert.False(t, downloadQuotaRetry) assert.Equal(t, downloadQuotaError, expectedDQError) tDFLEItem := googleapi.ErrorItem{ Reason: "teamDriveFileLimitExceeded", } generic403.Errors[0] = tDFLEItem expectedTDFLError := fserrors.FatalError(&generic403) teamDriveFileLimitRetry, teamDriveFileLimitError := f.shouldRetry(ctx, &generic403) assert.False(t, teamDriveFileLimitRetry) assert.Equal(t, teamDriveFileLimitError, expectedTDFLError) qEItem := googleapi.ErrorItem{ Reason: "quotaExceeded", } generic403.Errors[0] = qEItem expectedQuotaError := fserrors.FatalError(&generic403) quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403) assert.False(t, quotaExceededRetry) assert.Equal(t, quotaExceededError, expectedQuotaError) sqEItem := googleapi.ErrorItem{ Reason: "storageQuotaExceeded", } generic403.Errors[0] = sqEItem expectedStorageQuotaError := fserrors.FatalError(&generic403) storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403) assert.False(t, storageQuotaExceededRetry) assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError) } func (f *Fs) InternalTestDocumentImport(t *testing.T) { oldAllow 
:= f.opt.AllowImportNameChange f.opt.AllowImportNameChange = true defer func() { f.opt.AllowImportNameChange = oldAllow }() testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files")) require.NoError(t, err) testFilesFs, err := fs.NewFs(context.Background(), testFilesPath) require.NoError(t, err) _, f.importMimeTypes, err = parseExtensions("odt,ods,doc") require.NoError(t, err) err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc") require.NoError(t, err) } func (f *Fs) InternalTestDocumentUpdate(t *testing.T) { testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files")) require.NoError(t, err) testFilesFs, err := fs.NewFs(context.Background(), testFilesPath) require.NoError(t, err) _, f.importMimeTypes, err = parseExtensions("odt,ods,doc") require.NoError(t, err) err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods") require.NoError(t, err) } func (f *Fs) InternalTestDocumentExport(t *testing.T) { var buf bytes.Buffer var err error f.exportExtensions, _, err = parseExtensions("txt") require.NoError(t, err) obj, err := f.NewObject(context.Background(), "example2.txt") require.NoError(t, err) rc, err := obj.Open(context.Background()) require.NoError(t, err) defer func() { require.NoError(t, rc.Close()) }() _, err = io.Copy(&buf, rc) require.NoError(t, err) text := buf.String() for _, excerpt := range []string{ "Lorem ipsum dolor sit amet, consectetur", "porta at ultrices in, consectetur at augue.", } { require.Contains(t, text, excerpt) } } func (f *Fs) InternalTestDocumentLink(t *testing.T) { var buf bytes.Buffer var err error f.exportExtensions, _, err = parseExtensions("link.html") require.NoError(t, err) obj, err := f.NewObject(context.Background(), "example2.link.html") require.NoError(t, err) rc, err := obj.Open(context.Background()) require.NoError(t, err) defer func() { require.NoError(t, rc.Close()) }() _, err = io.Copy(&buf, rc) require.NoError(t, err) text 
:= buf.String() require.True(t, strings.HasPrefix(text, "<html>")) require.True(t, strings.HasSuffix(text, "</html>\n")) for _, excerpt := range []string{ `<meta http-equiv="refresh"`, `Loading <a href="`, } { require.Contains(t, text, excerpt) } } const ( // from fstest/fstests/fstests.go existingDir = "hello? sausage" existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt` existingSubDir = "êé" ) // TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts func (f *Fs) InternalTestShortcuts(t *testing.T) { ctx := context.Background() srcObj, err := f.NewObject(ctx, existingFile) require.NoError(t, err) srcHash, err := srcObj.Hash(ctx, hash.MD5) require.NoError(t, err) assert.NotEqual(t, "", srcHash) t.Run("Errors", func(t *testing.T) { _, err := f.makeShortcut(ctx, "", f, "") assert.Error(t, err) assert.Contains(t, err.Error(), "can't be root") _, err = f.makeShortcut(ctx, "notfound", f, "dst") assert.Error(t, err) assert.Contains(t, err.Error(), "can't find source") _, err = f.makeShortcut(ctx, existingFile, f, existingFile) assert.Error(t, err) assert.Contains(t, err.Error(), "not overwriting") assert.Contains(t, err.Error(), "existing file") _, err = f.makeShortcut(ctx, existingFile, f, existingDir) assert.Error(t, err) assert.Contains(t, err.Error(), "not overwriting") assert.Contains(t, err.Error(), "existing directory") }) t.Run("File", func(t *testing.T) { dstObj, err := f.makeShortcut(ctx, existingFile, f, "shortcut.txt") require.NoError(t, err) require.NotNil(t, dstObj) assert.Equal(t, "shortcut.txt", dstObj.Remote()) dstHash, err := dstObj.Hash(ctx, hash.MD5) require.NoError(t, err) assert.Equal(t, srcHash, dstHash) require.NoError(t, dstObj.Remove(ctx)) }) t.Run("Dir", func(t *testing.T) { dstObj, err := f.makeShortcut(ctx, existingDir, f, "shortcutdir") require.NoError(t, err) require.Nil(t, dstObj) entries, err := f.List(ctx, "shortcutdir") require.NoError(t, err) require.Equal(t, 1, len(entries)) require.Equal(t, 
"shortcutdir/"+existingSubDir, entries[0].Remote()) require.NoError(t, f.Rmdir(ctx, "shortcutdir")) }) t.Run("Command", func(t *testing.T) { _, err := f.Command(ctx, "shortcut", []string{"one"}, nil) require.Error(t, err) require.Contains(t, err.Error(), "need exactly 2 arguments") _, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{ "target": "doesnotexistremote:", }) require.Error(t, err) require.Contains(t, err.Error(), "couldn't find target") _, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{ "target": ".", }) require.Error(t, err) require.Contains(t, err.Error(), "target is not a drive backend") dstObjI, err := f.Command(ctx, "shortcut", []string{existingFile, "shortcut2.txt"}, map[string]string{ "target": fs.ConfigString(f), }) require.NoError(t, err) dstObj := dstObjI.(*Object) assert.Equal(t, "shortcut2.txt", dstObj.Remote()) dstHash, err := dstObj.Hash(ctx, hash.MD5) require.NoError(t, err) assert.Equal(t, srcHash, dstHash) require.NoError(t, dstObj.Remove(ctx)) dstObjI, err = f.Command(ctx, "shortcut", []string{existingFile, "shortcut3.txt"}, nil) require.NoError(t, err) dstObj = dstObjI.(*Object) assert.Equal(t, "shortcut3.txt", dstObj.Remote()) dstHash, err = dstObj.Hash(ctx, hash.MD5) require.NoError(t, err) assert.Equal(t, srcHash, dstHash) require.NoError(t, dstObj.Remove(ctx)) }) } // TestIntegration/FsMkdir/FsPutFiles/Internal/UnTrash func (f *Fs) InternalTestUnTrash(t *testing.T) { ctx := context.Background() // Make some objects, one in a subdir contents := random.String(100) file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now()) obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false) file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now()) _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false) // Check objects checkObjects := func() { fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{ file1, file2, }, []string{ 
"trashDir/subdir", }, f.Precision()) } checkObjects() // Make sure we are using the trash require.Equal(t, true, f.opt.UseTrash) // Remove the object and the dir require.NoError(t, obj1.Remove(ctx)) require.NoError(t, f.Purge(ctx, "trashDir/subdir")) // Check objects gone fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{}, []string{}, f.Precision()) // Restore the object and directory r, err := f.unTrashDir(ctx, "trashDir", true) require.NoError(t, err) assert.Equal(t, unTrashResult{Errors: 0, Untrashed: 2}, r) // Check objects restored checkObjects() // Remove the test dir require.NoError(t, f.Purge(ctx, "trashDir")) } // TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) { ctx := context.Background() obj, err := f.NewObject(ctx, existingFile) require.NoError(t, err) o := obj.(*Object) dir := t.TempDir() checkFile := func(name string) { filePath := filepath.Join(dir, name) fi, err := os.Stat(filePath) require.NoError(t, err) assert.Equal(t, int64(100), fi.Size()) err = os.Remove(filePath) require.NoError(t, err) } t.Run("BadID", func(t *testing.T) { err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/") require.Error(t, err) assert.Contains(t, err.Error(), "couldn't find id") }) t.Run("Directory", func(t *testing.T) { rootID, err := f.dirCache.RootID(ctx, false) require.NoError(t, err) err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/") require.Error(t, err) assert.Contains(t, err.Error(), "can't moveid directory") }) t.Run("MoveWithoutDestName", func(t *testing.T) { err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/") require.NoError(t, err) checkFile(path.Base(existingFile)) }) t.Run("CopyWithoutDestName", func(t *testing.T) { err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/") require.NoError(t, err) checkFile(path.Base(existingFile)) }) t.Run("MoveWithDestName", func(t *testing.T) { err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt") require.NoError(t, err) 
checkFile("potato.txt") }) t.Run("CopyWithDestName", func(t *testing.T) { err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt") require.NoError(t, err) checkFile("potato.txt") }) } // TestIntegration/FsMkdir/FsPutFiles/Internal/Query func (f *Fs) InternalTestQuery(t *testing.T) { ctx := context.Background() var err error t.Run("BadQuery", func(t *testing.T) { _, err = f.query(ctx, "this is a bad query") require.Error(t, err) assert.Contains(t, err.Error(), "failed to execute query") }) t.Run("NoMatch", func(t *testing.T) { results, err := f.query(ctx, fmt.Sprintf("name='%s' and name!='%s'", existingSubDir, existingSubDir)) require.NoError(t, err) assert.Len(t, results, 0) }) t.Run("GoodQuery", func(t *testing.T) { pathSegments := strings.Split(existingFile, "/") var parent string for _, item := range pathSegments { // the file name contains ' characters which must be escaped escapedItem := f.opt.Enc.FromStandardName(item) escapedItem = strings.ReplaceAll(escapedItem, `\`, `\\`) escapedItem = strings.ReplaceAll(escapedItem, `'`, `\'`) results, err := f.query(ctx, fmt.Sprintf("%strashed=false and name='%s'", parent, escapedItem)) require.NoError(t, err) require.True(t, len(results) > 0) for _, result := range results { assert.True(t, len(result.Id) > 0) assert.Equal(t, result.Name, item) } parent = fmt.Sprintf("'%s' in parents and ", results[0].Id) } }) } // TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery func (f *Fs) InternalTestAgeQuery(t *testing.T) { // Check set up for filtering assert.True(t, f.Features().FilterAware) opt := &filter.Options{} err := opt.MaxAge.Set("1h") assert.NoError(t, err) flt, err := filter.NewFilter(opt) assert.NoError(t, err) defCtx := context.Background() fltCtx := filter.ReplaceConfig(defCtx, flt) testCtx1 := fltCtx testCtx2 := filter.SetUseFilter(testCtx1, true) testCtx3, testCancel := context.WithCancel(testCtx2) testCtx4 := filter.SetUseFilter(testCtx3, false) testCancel() assert.False(t, filter.GetUseFilter(testCtx1)) 
assert.True(t, filter.GetUseFilter(testCtx2)) assert.True(t, filter.GetUseFilter(testCtx3)) assert.False(t, filter.GetUseFilter(testCtx4)) subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), "agequery-testdir") subFsResult, err := fs.NewFs(defCtx, subRemote) require.NoError(t, err) subFs, isDriveFs := subFsResult.(*Fs) require.True(t, isDriveFs) tempDir1 := t.TempDir() tempFs1, err := fs.NewFs(defCtx, tempDir1) require.NoError(t, err) tempDir2 := t.TempDir() tempFs2, err := fs.NewFs(defCtx, tempDir2) require.NoError(t, err) file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"} _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true) // validate sync/copy const timeQuery = "(modifiedTime >= '" assert.NoError(t, sync.CopyDir(defCtx, subFs, tempFs1, false)) assert.NotContains(t, subFs.lastQuery, timeQuery) assert.NoError(t, sync.CopyDir(fltCtx, subFs, tempFs1, false)) assert.Contains(t, subFs.lastQuery, timeQuery) assert.NoError(t, sync.CopyDir(fltCtx, tempFs2, subFs, false)) assert.Contains(t, subFs.lastQuery, timeQuery) assert.NoError(t, sync.CopyDir(defCtx, tempFs2, subFs, false)) assert.NotContains(t, subFs.lastQuery, timeQuery) // validate list/walk devNull, errOpen := os.OpenFile(os.DevNull, os.O_WRONLY, 0) require.NoError(t, errOpen) defer func() { _ = devNull.Close() }() assert.NoError(t, operations.List(defCtx, subFs, devNull)) assert.NotContains(t, subFs.lastQuery, timeQuery) assert.NoError(t, operations.List(fltCtx, subFs, devNull)) assert.Contains(t, subFs.lastQuery, timeQuery) } func (f *Fs) InternalTest(t *testing.T) { // These tests all depend on each other so run them as nested tests t.Run("DocumentImport", func(t *testing.T) { f.InternalTestDocumentImport(t) t.Run("DocumentUpdate", func(t *testing.T) { f.InternalTestDocumentUpdate(t) t.Run("DocumentExport", func(t *testing.T) { f.InternalTestDocumentExport(t) t.Run("DocumentLink", func(t *testing.T) { f.InternalTestDocumentLink(t) }) }) }) }) t.Run("Shortcuts", 
f.InternalTestShortcuts) t.Run("UnTrash", f.InternalTestUnTrash) t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID) t.Run("Query", f.InternalTestQuery) t.Run("AgeQuery", f.InternalTestAgeQuery) t.Run("ShouldRetry", f.InternalTestShouldRetry) } var _ fstests.InternalTester = (*Fs)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/drive/drive.go
backend/drive/drive.go
// Package drive interfaces with the Google Drive object storage system package drive // FIXME need to deal with some corner cases // * multiple files with the same name // * files can be in multiple directories // * can have directory loops // * files with / in name import ( "bytes" "context" "crypto/tls" "errors" "fmt" "io" "mime" "net/http" "os" "path" "slices" "sort" "strconv" "strings" "sync" "sync/atomic" "text/template" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/readers" "golang.org/x/oauth2" "golang.org/x/oauth2/google" drive_v2 "google.golang.org/api/drive/v2" drive "google.golang.org/api/drive/v3" "google.golang.org/api/googleapi" "google.golang.org/api/option" ) // Constants const ( rcloneClientID = "202264815644.apps.googleusercontent.com" rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg" driveFolderType = "application/vnd.google-apps.folder" shortcutMimeType = "application/vnd.google-apps.shortcut" shortcutMimeTypeDangling = "application/vnd.google-apps.shortcut.dangling" // synthetic mime type for internal use timeFormatIn = time.RFC3339 timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00" defaultMinSleep = fs.Duration(100 * time.Millisecond) defaultBurst = 100 defaultExportExtensions = "docx,xlsx,pptx,svg" scopePrefix = 
"https://www.googleapis.com/auth/" defaultScope = "drive" // chunkSize is the size of the chunks created during a resumable upload and should be a power of two. // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum. minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize) defaultChunkSize = 8 * fs.Mebi partialFields = "id,name,size,md5Checksum,sha1Checksum,sha256Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey" listRGrouping = 50 // number of IDs to search at once when using ListR listRInputBuffer = 1000 // size of input buffer when using ListR defaultXDGIcon = "text-html" ) // Globals var ( // Description of how to auth for this app driveConfig = &oauthutil.Config{ Scopes: []string{scopePrefix + "drive"}, AuthURL: google.Endpoint.AuthURL, TokenURL: google.Endpoint.TokenURL, ClientID: rcloneClientID, ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), RedirectURL: oauthutil.RedirectURL, } _mimeTypeToExtensionDuplicates = map[string]string{ "application/x-vnd.oasis.opendocument.presentation": ".odp", "application/x-vnd.oasis.opendocument.spreadsheet": ".ods", "application/x-vnd.oasis.opendocument.text": ".odt", "image/jpg": ".jpg", "image/x-bmp": ".bmp", "image/x-png": ".png", "text/rtf": ".rtf", } _mimeTypeToExtension = map[string]string{ "application/epub+zip": ".epub", "application/json": ".json", "application/msword": ".doc", "application/pdf": ".pdf", "application/rtf": ".rtf", "application/vnd.ms-excel": ".xls", "application/vnd.oasis.opendocument.presentation": ".odp", "application/vnd.oasis.opendocument.spreadsheet": ".ods", "application/vnd.oasis.opendocument.text": ".odt", "application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx", "application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx", 
"application/x-msmetafile": ".wmf", "application/zip": ".zip", "image/bmp": ".bmp", "image/jpeg": ".jpg", "image/pjpeg": ".pjpeg", "image/png": ".png", "image/svg+xml": ".svg", "text/csv": ".csv", "text/html": ".html", "text/plain": ".txt", "text/tab-separated-values": ".tsv", "text/markdown": ".md", } _mimeTypeToExtensionLinks = map[string]string{ "application/x-link-desktop": ".desktop", "application/x-link-html": ".link.html", "application/x-link-url": ".url", "application/x-link-webloc": ".webloc", } _mimeTypeCustomTransform = map[string]string{ "application/vnd.google-apps.script+json": "application/json", } _mimeTypeToXDGLinkIcons = map[string]string{ "application/vnd.google-apps.document": "x-office-document", "application/vnd.google-apps.drawing": "x-office-drawing", "application/vnd.google-apps.presentation": "x-office-presentation", "application/vnd.google-apps.spreadsheet": "x-office-spreadsheet", } fetchFormatsOnce sync.Once // make sure we fetch the export/import formats only once _exportFormats map[string][]string // allowed export MIME type conversions _importFormats map[string][]string // allowed import MIME type conversions templatesOnce sync.Once // parse link templates only once _linkTemplates map[string]*template.Template // available link types ) // rwChoices type for fs.Bits type rwChoices struct{} func (rwChoices) Choices() []fs.BitsChoicesInfo { return []fs.BitsChoicesInfo{ {Bit: uint64(rwOff), Name: "off"}, {Bit: uint64(rwRead), Name: "read"}, {Bit: uint64(rwWrite), Name: "write"}, {Bit: uint64(rwFailOK), Name: "failok"}, } } // rwChoice type alias type rwChoice = fs.Bits[rwChoices] const ( rwRead rwChoice = 1 << iota rwWrite rwFailOK rwOff rwChoice = 0 ) // Examples for the options var rwExamples = fs.OptionExamples{{ Value: rwOff.String(), Help: "Do not read or write the value", }, { Value: rwRead.String(), Help: "Read the value only", }, { Value: rwWrite.String(), Help: "Write the value only", }, { Value: rwFailOK.String(), Help: "If 
writing fails log errors only, don't fail the transfer", }, { Value: (rwRead | rwWrite).String(), Help: "Read and Write the value.", }} // Parse the scopes option returning a slice of scopes func driveScopes(scopesString string) (scopes []string) { if scopesString == "" { scopesString = defaultScope } for scope := range strings.SplitSeq(scopesString, ",") { scope = strings.TrimSpace(scope) scopes = append(scopes, scopePrefix+scope) } return scopes } // Returns true if one of the scopes was "drive.appfolder" func driveScopesContainsAppFolder(scopes []string) bool { return slices.Contains(scopes, scopePrefix+"drive.appfolder") } func driveOAuthOptions() []fs.Option { opts := []fs.Option{} for _, opt := range oauthutil.SharedOptions { if opt.Name == config.ConfigClientID { opt.Help = "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance." 
} opts = append(opts, opt) } return opts } // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "drive", Description: "Google Drive", NewFs: NewFs, CommandHelp: commandHelp, Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, fmt.Errorf("couldn't parse config into struct: %w", err) } switch config.State { case "": // Fill in the scopes driveConfig.Scopes = driveScopes(opt.Scope) // Set the root_folder_id if using drive.appfolder if driveScopesContainsAppFolder(driveConfig.Scopes) { m.Set("root_folder_id", "appDataFolder") } if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth { return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{ OAuth2Config: driveConfig, }) } return fs.ConfigGoto("teamdrive") case "teamdrive": if opt.TeamDriveID == "" { return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", "Configure this as a Shared Drive (Team Drive)?\n") } return fs.ConfigConfirm("teamdrive_change", false, "config_change_team_drive", fmt.Sprintf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID)) case "teamdrive_ok": if config.Result == "false" { m.Set("team_drive", "") return nil, nil } return fs.ConfigGoto("teamdrive_config") case "teamdrive_change": if config.Result == "false" { return nil, nil } return fs.ConfigGoto("teamdrive_config") case "teamdrive_config": f, err := newFs(ctx, name, "", m) if err != nil { return nil, fmt.Errorf("failed to make Fs to list Shared Drives: %w", err) } teamDrives, err := f.listTeamDrives(ctx) if err != nil { return nil, err } if len(teamDrives) == 0 { return fs.ConfigError("", "No Shared Drives found in your account") } return fs.ConfigChoose("teamdrive_final", "config_team_drive", "Shared Drive", len(teamDrives), func(i int) (string, string) { teamDrive := teamDrives[i] 
return teamDrive.Id, teamDrive.Name }) case "teamdrive_final": driveID := config.Result m.Set("team_drive", driveID) m.Set("root_folder_id", "") opt.TeamDriveID = driveID opt.RootFolderID = "" return nil, nil } return nil, fmt.Errorf("unknown state %q", config.State) }, MetadataInfo: &fs.MetadataInfo{ System: systemMetadataInfo, Help: `User metadata is stored in the properties field of the drive object. Metadata is supported on files and directories. `, }, Options: append(driveOAuthOptions(), []fs.Option{{ Name: "scope", Help: "Comma separated list of scopes that rclone should use when requesting access from drive.", Examples: []fs.OptionExample{{ Value: "drive", Help: "Full access all files, excluding Application Data Folder.", }, { Value: "drive.readonly", Help: "Read-only access to file metadata and file contents.", }, { Value: "drive.file", Help: "Access to files created by rclone only.\nThese are visible in the drive website.\nFile authorization is revoked when the user deauthorizes the app.", }, { Value: "drive.appfolder", Help: "Allows read and write access to the Application Data folder.\nThis is not visible in the drive website.", }, { Value: "drive.metadata.readonly", Help: "Allows read-only access to file metadata but\ndoes not allow any access to read or download file content.", }}, }, { Name: "root_folder_id", Help: `ID of the root folder. Leave blank normally. Fill in to access "Computers" folders (see docs), or for rclone to use a non root folder as its starting point. `, Advanced: true, Sensitive: true, }, { Name: "service_account_file", Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." 
+ env.ShellExpandHelp, }, { Name: "service_account_credentials", Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", Hide: fs.OptionHideConfigurator, Advanced: true, Sensitive: true, }, { Name: "team_drive", Help: "ID of the Shared Drive (Team Drive).", Hide: fs.OptionHideConfigurator, Advanced: true, Sensitive: true, }, { Name: "auth_owner_only", Default: false, Help: "Only consider files owned by the authenticated user.", Advanced: true, }, { Name: "use_trash", Default: true, Help: "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.", Advanced: true, }, { Name: "copy_shortcut_content", Default: false, Help: `Server side copy contents of shortcuts instead of the shortcut. When doing server side copies, normally rclone will copy shortcuts as shortcuts. If this flag is used then rclone will copy the contents of shortcuts rather than shortcuts themselves when doing server side copies.`, Advanced: true, }, { Name: "skip_gdocs", Default: false, Help: "Skip google documents in all listings.\n\nIf given, gdocs practically become invisible to rclone.", Advanced: true, }, { Name: "show_all_gdocs", Default: false, Help: `Show all Google Docs including non-exportable ones in listings. If you try a server side copy on a Google Form without this flag, you will get this error: No export formats found for "application/vnd.google-apps.form" However adding this flag will allow the form to be server side copied. Note that rclone doesn't add extensions to the Google Docs file names in this mode. Do **not** use this flag when trying to download Google Docs - rclone will fail to download them. `, Advanced: true, }, { Name: "skip_checksum_gphotos", Default: false, Help: `Skip checksums on Google photos and videos only. 
Use this if you get checksum errors when transferring Google photos or videos. Setting this flag will cause Google photos and videos to return a blank checksums. Google photos are identified by being in the "photos" space. Corrupted checksums are caused by Google modifying the image/video but not updating the checksum.`, Advanced: true, }, { Name: "shared_with_me", Default: false, Help: `Only show files that are shared with me. Instructs rclone to operate on your "Shared with me" folder (where Google Drive lets you access the files and folders others have shared with you). This works both with the "list" (lsd, lsl, etc.) and the "copy" commands (copy, sync, etc.), and with all other commands too.`, Advanced: true, }, { Name: "trashed_only", Default: false, Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.", Advanced: true, }, { Name: "starred_only", Default: false, Help: "Only show files that are starred.", Advanced: true, }, { Name: "formats", Default: "", Help: "Deprecated: See export_formats.", Advanced: true, Hide: fs.OptionHideConfigurator, }, { Name: "export_formats", Default: defaultExportExtensions, Help: "Comma separated list of preferred formats for downloading Google docs.", Advanced: true, }, { Name: "import_formats", Default: "", Help: "Comma separated list of preferred formats for uploading Google docs.", Advanced: true, }, { Name: "allow_import_name_change", Default: false, Help: "Allow the filetype to change when uploading Google docs.\n\nE.g. file.doc to file.docx. This will confuse sync and reupload every time.", Advanced: true, }, { Name: "use_created_date", Default: false, Help: `Use file created date instead of modified date. Useful when downloading data and you want the creation date used in place of the last modified date. **WARNING**: This flag may have some unexpected consequences. 
When uploading to your drive all files will be overwritten unless they haven't been modified since their creation. And the inverse will occur while downloading. This side effect can be avoided by using the "--checksum" flag. This feature was implemented to retain photos capture date as recorded by google photos. You will first need to check the "Create a Google Photos folder" option in your google drive settings. You can then copy or move the photos locally and use the date the image was taken (created) set as the modification date.`, Advanced: true, Hide: fs.OptionHideConfigurator, }, { Name: "use_shared_date", Default: false, Help: `Use date file was shared instead of modified date. Note that, as with "--drive-use-created-date", this flag may have unexpected consequences when uploading/downloading files. If both this flag and "--drive-use-created-date" are set, the created date is used.`, Advanced: true, Hide: fs.OptionHideConfigurator, }, { Name: "list_chunk", Default: 1000, Help: "Size of listing chunk 100-1000, 0 to disable.", Advanced: true, }, { Name: "impersonate", Default: "", Help: `Impersonate this user when using a service account.`, Advanced: true, Sensitive: true, }, { Name: "alternate_export", Default: false, Help: "Deprecated: No longer needed.", Hide: fs.OptionHideBoth, }, { Name: "upload_cutoff", Default: defaultChunkSize, Help: "Cutoff for switching to chunked upload.", Advanced: true, }, { Name: "chunk_size", Default: defaultChunkSize, Help: `Upload chunk size. Must a power of 2 >= 256k. Making this larger will improve performance, but note that each chunk is buffered in memory one per transfer. Reducing this will reduce memory usage but decrease performance.`, Advanced: true, }, { Name: "acknowledge_abuse", Default: false, Help: `Set to allow files which return cannotDownloadAbusiveFile to be downloaded. 
If downloading a file returns the error "This file has been identified as malware or spam and cannot be downloaded" with the error code "cannotDownloadAbusiveFile" then supply this flag to rclone to indicate you acknowledge the risks of downloading the file and rclone will download it anyway. Note that if you are using service account it will need Manager permission (not Content Manager) to for this flag to work. If the SA does not have the right permission, Google will just ignore the flag.`, Advanced: true, }, { Name: "keep_revision_forever", Default: false, Help: "Keep new head revision of each file forever.", Advanced: true, }, { Name: "size_as_quota", Default: false, Help: `Show sizes as storage quota usage, not actual size. Show the size of a file as the storage quota used. This is the current version plus any older versions that have been set to keep forever. **WARNING**: This flag may have some unexpected consequences. It is not recommended to set this flag in your config - the recommended usage is using the flag form --drive-size-as-quota when doing rclone ls/lsl/lsf/lsjson/etc only. If you do use this flag for syncing (not recommended) then you will need to use --ignore size also.`, Advanced: true, Hide: fs.OptionHideConfigurator, }, { Name: "v2_download_min_size", Default: fs.SizeSuffix(-1), Help: "If Object's are greater, use drive v2 API to download.", Advanced: true, }, { Name: "pacer_min_sleep", Default: defaultMinSleep, Help: "Minimum time to sleep between API calls.", Advanced: true, }, { Name: "pacer_burst", Default: defaultBurst, Help: "Number of API calls to allow without sleeping.", Advanced: true, }, { Name: "server_side_across_configs", Default: false, Help: `Deprecated: use --server-side-across-configs instead. Allow server-side operations (e.g. copy) to work across different drive configs. This can be useful if you wish to do a server-side copy between two different Google drives. 
Note that this isn't enabled by default because it isn't easy to tell if it will work between any two configurations.`, Advanced: true, }, { Name: "disable_http2", Default: true, Help: `Disable drive using http2. There is currently an unsolved issue with the google drive backend and HTTP/2. HTTP/2 is therefore disabled by default for the drive backend but can be re-enabled here. When the issue is solved this flag will be removed. See: https://github.com/rclone/rclone/issues/3631 `, Advanced: true, }, { Name: "stop_on_upload_limit", Default: false, Help: `Make upload limit errors be fatal. At the time of writing it is only possible to upload 750 GiB of data to Google Drive a day (this is an undocumented limit). When this limit is reached Google Drive produces a slightly different error message. When this flag is set it causes these errors to be fatal. These will stop the in-progress sync. Note that this detection is relying on error message strings which Google don't document so it may break in the future. See: https://github.com/rclone/rclone/issues/3857 `, Advanced: true, }, { Name: "stop_on_download_limit", Default: false, Help: `Make download limit errors be fatal. At the time of writing it is only possible to download 10 TiB of data from Google Drive a day (this is an undocumented limit). When this limit is reached Google Drive produces a slightly different error message. When this flag is set it causes these errors to be fatal. These will stop the in-progress sync. Note that this detection is relying on error message strings which Google don't document so it may break in the future. `, Advanced: true, }, { Name: "skip_shortcuts", Help: `If set skip shortcut files. Normally rclone dereferences shortcut files making them appear as if they are the original file (see [the shortcuts section](#shortcuts)). If this flag is set then rclone will ignore shortcut files completely. 
`, Advanced: true, Default: false, }, { Name: "skip_dangling_shortcuts", Help: `If set skip dangling shortcut files. If this is set then rclone will not show any dangling shortcuts in listings. `, Advanced: true, Default: false, }, { Name: "resource_key", Help: `Resource key for accessing a link-shared file. If you need to access files shared with a link like this https://drive.google.com/drive/folders/XXX?resourcekey=YYY&usp=sharing Then you will need to use the first part "XXX" as the "root_folder_id" and the second part "YYY" as the "resource_key" otherwise you will get 404 not found errors when trying to access the directory. See: https://developers.google.com/drive/api/guides/resource-keys This resource key requirement only applies to a subset of old files. Note also that opening the folder once in the web interface (with the user you've authenticated rclone with) seems to be enough so that the resource key is not needed. `, Advanced: true, Sensitive: true, }, { Name: "fast_list_bug_fix", Help: `Work around a bug in Google Drive listing. Normally rclone will work around a bug in Google Drive when using --fast-list (ListR) where the search "(A in parents) or (B in parents)" returns nothing sometimes. See #3114, #4289 and https://issuetracker.google.com/issues/149522397 Rclone detects this by finding no items in more than one directory when listing and retries them as lists of individual directories. This means that if you have a lot of empty directories rclone will end up listing them all individually and this can take many more API calls. This flag allows the work-around to be disabled. This is **not** recommended in normal use - only if you have a particular case you are having trouble with like many empty directories. `, Advanced: true, Default: true, }, { Name: "metadata_owner", Help: `Control whether owner should be read or written in metadata. Owner is a standard part of the file metadata so is easy to read. 
But it isn't always desirable to set the owner from the metadata. Note that you can't set the owner on Shared Drives, and that setting ownership will generate an email to the new owner (this can't be disabled), and you can't transfer ownership to someone outside your organization. `, Advanced: true, Default: rwRead, Examples: rwExamples, }, { Name: "metadata_permissions", Help: `Control whether permissions should be read or written in metadata. Reading permissions metadata from files can be done quickly, but it isn't always desirable to set the permissions from the metadata. Note that rclone drops any inherited permissions on Shared Drives and any owner permission on My Drives as these are duplicated in the owner metadata. `, Advanced: true, Default: rwOff, Examples: rwExamples, }, { Name: "metadata_labels", Help: `Control whether labels should be read or written in metadata. Reading labels metadata from files takes an extra API transaction and will slow down listings. It isn't always desirable to set the labels from the metadata. The format of labels is documented in the drive API documentation at https://developers.google.com/drive/api/reference/rest/v3/Label - rclone just provides a JSON dump of this format. When setting labels, the label and fields must already exist - rclone will not create them. This means that if you are transferring labels from two different accounts you will have to create the labels in advance and use the metadata mapper to translate the IDs between the two accounts. `, Advanced: true, Default: rwOff, Examples: rwExamples, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // Encode invalid UTF-8 bytes as json doesn't handle them properly. // Don't encode / as it's a valid name character in drive. 
Default: encoder.EncodeInvalidUtf8, }, { Name: "env_auth", Help: "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.", Default: false, Advanced: true, Examples: []fs.OptionExample{{ Value: "false", Help: "Enter credentials in the next step.", }, { Value: "true", Help: "Get GCP IAM credentials from the environment (env vars or IAM).", }}, }}...), }) // register duplicate MIME types first // this allows them to be used with mime.ExtensionsByType() but // mime.TypeByExtension() will return the later registered MIME type for _, m := range []map[string]string{ _mimeTypeToExtensionDuplicates, _mimeTypeToExtension, _mimeTypeToExtensionLinks, } { for mimeType, extension := range m { if err := mime.AddExtensionType(extension, mimeType); err != nil { fs.Errorf("Failed to register MIME type %q: %v", mimeType, err) } } } } // Options defines the configuration for this backend type Options struct { Scope string `config:"scope"` RootFolderID string `config:"root_folder_id"` ServiceAccountFile string `config:"service_account_file"` ServiceAccountCredentials string `config:"service_account_credentials"` TeamDriveID string `config:"team_drive"` AuthOwnerOnly bool `config:"auth_owner_only"` UseTrash bool `config:"use_trash"` CopyShortcutContent bool `config:"copy_shortcut_content"` SkipGdocs bool `config:"skip_gdocs"` ShowAllGdocs bool `config:"show_all_gdocs"` SkipChecksumGphotos bool `config:"skip_checksum_gphotos"` SharedWithMe bool `config:"shared_with_me"` TrashedOnly bool `config:"trashed_only"` StarredOnly bool `config:"starred_only"` Extensions string `config:"formats"` ExportExtensions string `config:"export_formats"` ImportExtensions string `config:"import_formats"` AllowImportNameChange bool `config:"allow_import_name_change"` UseCreatedDate bool `config:"use_created_date"` UseSharedDate bool `config:"use_shared_date"` ListChunk int64 
`config:"list_chunk"` Impersonate string `config:"impersonate"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` ChunkSize fs.SizeSuffix `config:"chunk_size"` AcknowledgeAbuse bool `config:"acknowledge_abuse"` KeepRevisionForever bool `config:"keep_revision_forever"` SizeAsQuota bool `config:"size_as_quota"` V2DownloadMinSize fs.SizeSuffix `config:"v2_download_min_size"` PacerMinSleep fs.Duration `config:"pacer_min_sleep"` PacerBurst int `config:"pacer_burst"` ServerSideAcrossConfigs bool `config:"server_side_across_configs"` DisableHTTP2 bool `config:"disable_http2"` StopOnUploadLimit bool `config:"stop_on_upload_limit"` StopOnDownloadLimit bool `config:"stop_on_download_limit"` SkipShortcuts bool `config:"skip_shortcuts"` SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"` ResourceKey string `config:"resource_key"` FastListBugFix bool `config:"fast_list_bug_fix"` MetadataOwner rwChoice `config:"metadata_owner"` MetadataPermissions rwChoice `config:"metadata_permissions"` MetadataLabels rwChoice `config:"metadata_labels"` Enc encoder.MultiEncoder `config:"encoding"` EnvAuth bool `config:"env_auth"` } // Fs represents a remote drive server type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options ci *fs.ConfigInfo // global config features *fs.Features // optional features svc *drive.Service // the connection to the drive server v2Svc *drive_v2.Service // used to create download links for the v2 api client *http.Client // authorized client rootFolderID string // the id of the root folder dirCache *dircache.DirCache // Map of directory path to directory id lastQuery string // Last query string to check in unit tests pacer *fs.Pacer // To pace the API calls exportExtensions []string // preferred extensions to download docs importMimeTypes []string // MIME types to convert to docs isTeamDrive bool // true if this is a team drive m configmap.Mapper grouping int32 // number of IDs to search 
at once in ListR - read with atomic listRmu *sync.Mutex // protects listRempties listRempties map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable dirResourceKeys *sync.Map // map directory ID to resource key permissionsMu *sync.Mutex // protect the below permissions map[string]*drive.Permission // map permission IDs to Permissions } type baseObject struct { fs *Fs // what this object is part of remote string // The remote path id string // Drive Id of this object modifiedDate string // RFC3339 time it was last modified mimeType string // The object MIME type bytes int64 // size of the object parents []string // IDs of the parent directories resourceKey *string // resourceKey is needed for link shared objects metadata *fs.Metadata // metadata if known } type documentObject struct { baseObject url string // Download URL of this object
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/drive/metadata.go
backend/drive/metadata.go
package drive import ( "context" "encoding/json" "fmt" "maps" "strconv" "strings" "sync" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/lib/errcount" "golang.org/x/sync/errgroup" drive "google.golang.org/api/drive/v3" "google.golang.org/api/googleapi" ) // system metadata keys which this backend owns var systemMetadataInfo = map[string]fs.MetadataHelp{ "content-type": { Help: "The MIME type of the file.", Type: "string", Example: "text/plain", }, "mtime": { Help: "Time of last modification with mS accuracy.", Type: "RFC 3339", Example: "2006-01-02T15:04:05.999Z07:00", }, "btime": { Help: "Time of file birth (creation) with mS accuracy. Note that this is only writable on fresh uploads - it can't be written for updates.", Type: "RFC 3339", Example: "2006-01-02T15:04:05.999Z07:00", }, "copy-requires-writer-permission": { Help: "Whether the options to copy, print, or download this file, should be disabled for readers and commenters.", Type: "boolean", Example: "true", }, "writers-can-share": { Help: "Whether users with only writer permission can modify the file's permissions. Not populated and ignored when setting for items in shared drives.", Type: "boolean", Example: "false", }, "viewed-by-me": { Help: "Whether the file has been viewed by this user.", Type: "boolean", Example: "true", ReadOnly: true, }, "owner": { Help: "The owner of the file. Usually an email address. Enable with --drive-metadata-owner.", Type: "string", Example: "user@example.com", }, "permissions": { Help: "Permissions in a JSON dump of Google drive format. On shared drives these will only be present if they aren't inherited. 
Enable with --drive-metadata-permissions.", Type: "JSON", Example: "{}", }, "folder-color-rgb": { Help: "The color for a folder or a shortcut to a folder as an RGB hex string.", Type: "string", Example: "881133", }, "description": { Help: "A short description of the file.", Type: "string", Example: "Contract for signing", }, "starred": { Help: "Whether the user has starred the file.", Type: "boolean", Example: "false", }, "labels": { Help: "Labels attached to this file in a JSON dump of Googled drive format. Enable with --drive-metadata-labels.", Type: "JSON", Example: "[]", }, } // Extra fields we need to fetch to implement the system metadata above var metadataFields = googleapi.Field(strings.Join([]string{ "copyRequiresWriterPermission", "description", "folderColorRgb", "hasAugmentedPermissions", "owners", "permissionIds", "permissions", "properties", "starred", "viewedByMe", "viewedByMeTime", "writersCanShare", }, ",")) // Fields we need to read from permissions var permissionsFields = googleapi.Field(strings.Join([]string{ "*", "permissionDetails/*", }, ",")) // getPermission returns permissions for the fileID and permissionID passed in func (f *Fs) getPermission(ctx context.Context, fileID, permissionID string, useCache bool) (perm *drive.Permission, inherited bool, err error) { f.permissionsMu.Lock() defer f.permissionsMu.Unlock() if useCache { perm = f.permissions[permissionID] if perm != nil { return perm, false, nil } } fs.Debugf(f, "Fetching permission %q", permissionID) err = f.pacer.Call(func() (bool, error) { perm, err = f.svc.Permissions.Get(fileID, permissionID). Fields(permissionsFields). SupportsAllDrives(true). 
Context(ctx).Do() return f.shouldRetry(ctx, err) }) if err != nil { return nil, false, err } inherited = len(perm.PermissionDetails) > 0 && perm.PermissionDetails[0].Inherited cleanPermission(perm) // cache the permission f.permissions[permissionID] = perm return perm, inherited, err } // Set the permissions on the info func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions []*drive.Permission) (err error) { errs := errcount.New() for _, perm := range permissions { if perm.Role == "owner" { // ignore owner permissions - these are set with owner continue } cleanPermissionForWrite(perm) err := f.pacer.Call(func() (bool, error) { _, err := f.svc.Permissions.Create(info.Id, perm). SupportsAllDrives(true). SendNotificationEmail(false). Context(ctx).Do() return f.shouldRetry(ctx, err) }) if err != nil { fs.Errorf(f, "Failed to set permission %s for %q: %v", perm.Role, perm.EmailAddress, err) errs.Add(err) } } err = errs.Err("failed to set permission") if err != nil { err = fserrors.NoRetryError(err) } return err } // Clean attributes from permissions which we can't write func cleanPermissionForWrite(perm *drive.Permission) { perm.Deleted = false perm.DisplayName = "" perm.Id = "" perm.Kind = "" perm.PermissionDetails = nil perm.TeamDrivePermissionDetails = nil } // Clean and cache the permission if not already cached func (f *Fs) cleanAndCachePermission(perm *drive.Permission) { f.permissionsMu.Lock() defer f.permissionsMu.Unlock() cleanPermission(perm) if _, found := f.permissions[perm.Id]; !found { f.permissions[perm.Id] = perm } } // Clean fields we don't need to keep from the permission func cleanPermission(perm *drive.Permission) { // DisplayName: Output only. The "pretty" name of the value of the // permission. The following is a list of examples for each type of // permission: * `user` - User's full name, as defined for their Google // account, such as "Joe Smith." 
* `group` - Name of the Google Group, // such as "The Company Administrators." * `domain` - String domain // name, such as "thecompany.com." * `anyone` - No `displayName` is // present. perm.DisplayName = "" // Kind: Output only. Identifies what kind of resource this is. Value: // the fixed string "drive#permission". perm.Kind = "" // PermissionDetails: Output only. Details of whether the permissions on // this shared drive item are inherited or directly on this item. This // is an output-only field which is present only for shared drive items. perm.PermissionDetails = nil // PhotoLink: Output only. A link to the user's profile photo, if // available. perm.PhotoLink = "" // TeamDrivePermissionDetails: Output only. Deprecated: Output only. Use // `permissionDetails` instead. perm.TeamDrivePermissionDetails = nil } // Fields we need to read from labels var labelsFields = googleapi.Field(strings.Join([]string{ "*", }, ",")) // getLabels returns labels for the fileID passed in func (f *Fs) getLabels(ctx context.Context, fileID string) (labels []*drive.Label, err error) { fs.Debugf(f, "Fetching labels for %q", fileID) listLabels := f.svc.Files.ListLabels(fileID). Fields(labelsFields). Context(ctx) for { var info *drive.LabelList err = f.pacer.Call(func() (bool, error) { info, err = listLabels.Do() return f.shouldRetry(ctx, err) }) if err != nil { return nil, err } labels = append(labels, info.Labels...) 
if info.NextPageToken == "" { break } listLabels.PageToken(info.NextPageToken) } for _, label := range labels { cleanLabel(label) } return labels, nil } // Set the labels on the info func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.Label) (err error) { if len(labels) == 0 { return nil } req := drive.ModifyLabelsRequest{} for _, label := range labels { req.LabelModifications = append(req.LabelModifications, &drive.LabelModification{ FieldModifications: labelFieldsToFieldModifications(label.Fields), LabelId: label.Id, }) } err = f.pacer.Call(func() (bool, error) { _, err = f.svc.Files.ModifyLabels(info.Id, &req). Context(ctx).Do() return f.shouldRetry(ctx, err) }) if err != nil { return fmt.Errorf("failed to set labels: %w", err) } return nil } // Convert label fields into something which can set the fields func labelFieldsToFieldModifications(fields map[string]drive.LabelField) (out []*drive.LabelFieldModification) { for id, field := range fields { var emails []string for _, user := range field.User { emails = append(emails, user.EmailAddress) } out = append(out, &drive.LabelFieldModification{ // FieldId: The ID of the field to be modified. FieldId: id, // SetDateValues: Replaces the value of a dateString Field with these // new values. The string must be in the RFC 3339 full-date format: // YYYY-MM-DD. SetDateValues: field.DateString, // SetIntegerValues: Replaces the value of an `integer` field with these // new values. SetIntegerValues: field.Integer, // SetSelectionValues: Replaces a `selection` field with these new // values. SetSelectionValues: field.Selection, // SetTextValues: Sets the value of a `text` field. SetTextValues: field.Text, // SetUserValues: Replaces a `user` field with these new values. The // values must be valid email addresses. 
SetUserValues: emails, }) } return out } // Clean fields we don't need to keep from the label func cleanLabel(label *drive.Label) { // Kind: This is always drive#label label.Kind = "" for name, field := range label.Fields { // Kind: This is always drive#labelField. field.Kind = "" // Note the fields are copies so we need to write them // back to the map label.Fields[name] = field } } // Parse the metadata from drive item // // It should return nil if there is no Metadata func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err error) { metadata := make(fs.Metadata, 16) // Dump user metadata first as it overrides system metadata maps.Copy(metadata, info.Properties) // System metadata metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission) metadata["writers-can-share"] = fmt.Sprint(info.WritersCanShare) metadata["viewed-by-me"] = fmt.Sprint(info.ViewedByMe) metadata["content-type"] = info.MimeType // Owners: Output only. The owner of this file. Only certain legacy // files may have more than one owner. This field isn't populated for // items in shared drives. if o.fs.opt.MetadataOwner.IsSet(rwRead) && len(info.Owners) > 0 { user := info.Owners[0] if len(info.Owners) > 1 { fs.Logf(o, "Ignoring more than 1 owner") } if user != nil { id := user.EmailAddress if id == "" { id = user.DisplayName } metadata["owner"] = id } } if o.fs.opt.MetadataPermissions.IsSet(rwRead) { // We only write permissions out if they are not inherited. // // On My Drives permissions seem to be attached to every item // so they will always be written out. // // On Shared Drives only non-inherited permissions will be // written out. // To read the inherited permissions flag will mean we need to // read the permissions for each object and the cache will be // useless. However shared drives don't return permissions // only permissionIds so will need to fetch them for each // object. 
We use HasAugmentedPermissions to see if there are // special permissions before fetching them to save transactions. // HasAugmentedPermissions: Output only. Whether there are permissions // directly on this file. This field is only populated for items in // shared drives. if o.fs.isTeamDrive && !info.HasAugmentedPermissions { // Don't process permissions if there aren't any specifically set fs.Debugf(o, "Ignoring %d permissions and %d permissionIds as is shared drive with hasAugmentedPermissions false", len(info.Permissions), len(info.PermissionIds)) info.Permissions = nil info.PermissionIds = nil } // PermissionIds: Output only. List of permission IDs for users with // access to this file. // // Only process these if we have no Permissions if len(info.PermissionIds) > 0 && len(info.Permissions) == 0 { info.Permissions = make([]*drive.Permission, 0, len(info.PermissionIds)) g, gCtx := errgroup.WithContext(ctx) g.SetLimit(o.fs.ci.Checkers) var mu sync.Mutex // protect the info.Permissions from concurrent writes for _, permissionID := range info.PermissionIds { g.Go(func() error { // must fetch the team drive ones individually to check the inherited flag perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive) if err != nil { return fmt.Errorf("failed to read permission: %w", err) } // Don't write inherited permissions out if inherited { return nil } // Don't write owner role out - these are covered by the owner metadata if perm.Role == "owner" { return nil } mu.Lock() info.Permissions = append(info.Permissions, perm) mu.Unlock() return nil }) } err = g.Wait() if err != nil { return err } } else { // Clean the fetched permissions for _, perm := range info.Permissions { o.fs.cleanAndCachePermission(perm) } } // Permissions: Output only. The full list of permissions for the file. // This is only available if the requesting user can share the file. Not // populated for items in shared drives. 
if len(info.Permissions) > 0 { buf, err := json.Marshal(info.Permissions) if err != nil { return fmt.Errorf("failed to marshal permissions: %w", err) } metadata["permissions"] = string(buf) } // Permission propagation // https://developers.google.com/drive/api/guides/manage-sharing#permission-propagation // Leads me to believe that in non shared drives, permissions // are added to each item when you set permissions for a // folder whereas in shared drives they are inherited and // placed on the item directly. } if info.FolderColorRgb != "" { metadata["folder-color-rgb"] = info.FolderColorRgb } if info.Description != "" { metadata["description"] = info.Description } metadata["starred"] = fmt.Sprint(info.Starred) metadata["btime"] = info.CreatedTime metadata["mtime"] = info.ModifiedTime if o.fs.opt.MetadataLabels.IsSet(rwRead) { // FIXME would be really nice if we knew if files had labels // before listing but we need to know all possible label IDs // to get it in the listing. labels, err := o.fs.getLabels(ctx, actualID(info.Id)) if err != nil { return fmt.Errorf("failed to fetch labels: %w", err) } buf, err := json.Marshal(labels) if err != nil { return fmt.Errorf("failed to marshal labels: %w", err) } metadata["labels"] = string(buf) } o.metadata = &metadata return nil } // Set the owner on the info func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err error) { perm := drive.Permission{ Role: "owner", EmailAddress: owner, // Type: The type of the grantee. Valid values are: * `user` * `group` * // `domain` * `anyone` When creating a permission, if `type` is `user` // or `group`, you must provide an `emailAddress` for the user or group. // When `type` is `domain`, you must provide a `domain`. There isn't // extra information required for an `anyone` type. Type: "user", } err = f.pacer.Call(func() (bool, error) { _, err = f.svc.Permissions.Create(info.Id, &perm). SupportsAllDrives(true). TransferOwnership(true). 
// SendNotificationEmail(false). - required apparently! Context(ctx).Do() return f.shouldRetry(ctx, err) }) if err != nil { return fmt.Errorf("failed to set owner: %w", err) } return nil } // Call back to set metadata that can't be set on the upload/update // // The *drive.File passed in holds the current state of the drive.File // and this should update it with any modifications. type updateMetadataFn func(context.Context, *drive.File) error // read the metadata from meta and write it into updateInfo // // update should be true if this is being used to create metadata for // an update/PATCH call as the rules on what can be updated are // slightly different there. // // It returns a callback which should be called to finish the updates // after the data is uploaded. func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) { callbackFns := []updateMetadataFn{} callback = func(ctx context.Context, info *drive.File) error { for _, fn := range callbackFns { err := fn(ctx, info) if err != nil { return err } } return nil } // merge metadata into request and user metadata for k, v := range meta { // parse a boolean from v and write into out parseBool := func(out *bool) error { b, err := strconv.ParseBool(v) if err != nil { return fmt.Errorf("can't parse metadata %q = %q: %w", k, v, err) } *out = b return nil } switch k { case "copy-requires-writer-permission": if isFolder { fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v) } else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil { return nil, err } case "writers-can-share": if !f.isTeamDrive { if err := parseBool(&updateInfo.WritersCanShare); err != nil { return nil, err } } else { fs.Debugf(f, "Ignoring %s=%s as can't set on shared drives", k, v) } case "viewed-by-me": // Can't write this case "content-type": updateInfo.MimeType = v case "owner": if !f.opt.MetadataOwner.IsSet(rwWrite) { 
continue } // Can't set Owner on upload so need to set afterwards callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error { err := f.setOwner(ctx, info, v) if err != nil && f.opt.MetadataOwner.IsSet(rwFailOK) { fs.Errorf(f, "Ignoring error as failok is set: %v", err) return nil } return err }) case "permissions": if !f.opt.MetadataPermissions.IsSet(rwWrite) { continue } var perms []*drive.Permission err := json.Unmarshal([]byte(v), &perms) if err != nil { return nil, fmt.Errorf("failed to unmarshal permissions: %w", err) } // Can't set Permissions on upload so need to set afterwards callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error { err := f.setPermissions(ctx, info, perms) if err != nil && f.opt.MetadataPermissions.IsSet(rwFailOK) { // We've already logged the permissions errors individually here fs.Debugf(f, "Ignoring error as failok is set: %v", err) return nil } return err }) case "labels": if !f.opt.MetadataLabels.IsSet(rwWrite) { continue } var labels []*drive.Label err := json.Unmarshal([]byte(v), &labels) if err != nil { return nil, fmt.Errorf("failed to unmarshal labels: %w", err) } // Can't set Labels on upload so need to set afterwards callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error { err := f.setLabels(ctx, info, labels) if err != nil && f.opt.MetadataLabels.IsSet(rwFailOK) { fs.Errorf(f, "Ignoring error as failok is set: %v", err) return nil } return err }) case "folder-color-rgb": updateInfo.FolderColorRgb = v case "description": updateInfo.Description = v case "starred": if err := parseBool(&updateInfo.Starred); err != nil { return nil, err } case "btime": if update { fs.Debugf(f, "Skipping btime metadata as can't update it on an existing file: %v", v) } else { updateInfo.CreatedTime = v } case "mtime": updateInfo.ModifiedTime = v default: if updateInfo.Properties == nil { updateInfo.Properties = make(map[string]string, 1) } updateInfo.Properties[k] = v 
} } return callback, nil } // Fetch metadata and update updateInfo if --metadata is in use func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *drive.File, update bool) (callback updateMetadataFn, err error) { meta, err := fs.GetMetadataOptions(ctx, f, src, options) if err != nil { return nil, fmt.Errorf("failed to read metadata from source object: %w", err) } callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false) if err != nil { return nil, fmt.Errorf("failed to update metadata from source object: %w", err) } return callback, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/drive/drive_test.go
backend/drive/drive_test.go
// Test Drive filesystem interface package drive import ( "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestDrive:", NilObject: (*Object)(nil), ChunkedUpload: fstests.ChunkedUploadConfig{ MinChunkSize: minChunkSize, CeilChunkSize: fstests.NextPowerOfTwo, }, }) } func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadChunkSize(cs) } func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadCutoff(cs) } var ( _ fstests.SetUploadChunkSizer = (*Fs)(nil) _ fstests.SetUploadCutoffer = (*Fs)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/drive/upload.go
backend/drive/upload.go
// Upload for drive // // Docs // Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable // Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices // Files insert: https://developers.google.com/drive/v2/reference/files/insert // Files update: https://developers.google.com/drive/v2/reference/files/update // // This contains code adapted from google.golang.org/api (C) the GO AUTHORS package drive import ( "bytes" "context" "encoding/json" "fmt" "io" "net/http" "net/url" "strconv" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/lib/readers" "google.golang.org/api/drive/v3" "google.golang.org/api/googleapi" ) const ( // statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete. statusResumeIncomplete = 308 ) // resumableUpload is used by the generated APIs to provide resumable uploads. // It is not used by developers directly. type resumableUpload struct { f *Fs remote string // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". URI string // Media is the object being uploaded. Media io.Reader // MediaType defines the media type, e.g. "image/jpeg". MediaType string // ContentLength is the full size of the object being uploaded. 
ContentLength int64 // Return value ret *drive.File } // Upload the io.Reader in of size bytes with contentType and info func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) { params := url.Values{ "alt": {"json"}, "uploadType": {"resumable"}, "fields": {partialFields}, } params.Set("supportsAllDrives", "true") if f.opt.KeepRevisionForever { params.Set("keepRevisionForever", "true") } urls := "https://www.googleapis.com/upload/drive/v3/files" method := "POST" if fileID != "" { params.Set("setModifiedDate", "true") urls += "/{fileId}" method = "PATCH" } urls += "?" + params.Encode() var res *http.Response var err error err = f.pacer.Call(func() (bool, error) { var body io.Reader body, err = googleapi.WithoutDataWrapper.JSONReader(info) if err != nil { return false, err } var req *http.Request req, err = http.NewRequestWithContext(ctx, method, urls, body) if err != nil { return false, err } googleapi.Expand(req.URL, map[string]string{ "fileId": fileID, }) req.Header.Set("Content-Type", "application/json; charset=UTF-8") req.Header.Set("X-Upload-Content-Type", contentType) if size >= 0 { req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size)) } res, err = f.client.Do(req) if err == nil { defer googleapi.CloseBody(res) err = googleapi.CheckResponse(res) } return f.shouldRetry(ctx, err) }) if err != nil { return nil, err } loc := res.Header.Get("Location") rx := &resumableUpload{ f: f, remote: remote, URI: loc, Media: in, MediaType: contentType, ContentLength: size, } return rx.Upload(ctx) } // Make an http.Request for the range passed in func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request { req, _ := http.NewRequestWithContext(ctx, "POST", rx.URI, body) req.ContentLength = reqSize totalSize := "*" if rx.ContentLength >= 0 { totalSize = strconv.FormatInt(rx.ContentLength, 10) } if reqSize != 0 { 
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, totalSize)) } else { req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", totalSize)) } req.Header.Set("Content-Type", rx.MediaType) return req } // Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) { _, _ = chunk.Seek(0, io.SeekStart) req := rx.makeRequest(ctx, start, chunk, chunkSize) res, err := rx.f.client.Do(req) if err != nil { return 599, err } defer googleapi.CloseBody(res) if res.StatusCode == statusResumeIncomplete { return res.StatusCode, nil } err = googleapi.CheckResponse(res) if err != nil { return res.StatusCode, err } // When the entire file upload is complete, the server // responds with an HTTP 201 Created along with any metadata // associated with this resource. If this request had been // updating an existing entity rather than creating a new one, // the HTTP response code for a completed upload would have // been 200 OK. // // So parse the response out of the body. 
We aren't expecting // any other 2xx codes, so we parse it unconditionally on // StatusCode if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil { return 598, err } return res.StatusCode, nil } // Upload uploads the chunks from the input // It retries each chunk using the pacer and --low-level-retries func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) { start := int64(0) var StatusCode int var err error buf := make([]byte, int(rx.f.opt.ChunkSize)) for finished := false; !finished; { var reqSize int64 var chunk io.ReadSeeker if rx.ContentLength >= 0 { // If size known use repeatable reader for smoother bwlimit if start >= rx.ContentLength { break } reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize)) chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize) } else { // If size unknown read into buffer var n int n, err = readers.ReadFill(rx.Media, buf) if err == io.EOF { // Send the last chunk with the correct ContentLength // otherwise Google doesn't know we've finished rx.ContentLength = start + int64(n) finished = true } else if err != nil { return nil, err } reqSize = int64(n) chunk = bytes.NewReader(buf[:reqSize]) } // Transfer the chunk err = rx.f.pacer.Call(func() (bool, error) { fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize) StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize) again, err := rx.f.shouldRetry(ctx, err) if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK { again = false err = nil } return again, err }) if err != nil { return nil, err } start += reqSize } // Resume or retry uploads that fail due to connection interruptions or // any 5xx errors, including: // // 500 Internal Server Error // 502 Bad Gateway // 503 Service Unavailable // 504 Gateway Timeout // // Use an exponential backoff strategy if any 5xx server error is // returned when resuming or retrying upload requests. 
These errors can // occur if a server is getting overloaded. Exponential backoff can help // alleviate these kinds of problems during periods of high volume of // requests or heavy network traffic. Other kinds of requests should not // be handled by exponential backoff but you can still retry a number of // them. When retrying these requests, limit the number of times you // retry them. For example your code could limit to ten retries or less // before reporting an error. // // Handle 404 Not Found errors when doing resumable uploads by starting // the entire upload over from the beginning. if rx.ret == nil { return nil, fserrors.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode) } return rx.ret, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/smb/smb_test.go
backend/smb/smb_test.go
// Test smb filesystem interface package smb_test import ( "path/filepath" "testing" "github.com/rclone/rclone/backend/smb" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestSMB:rclone", NilObject: (*smb.Object)(nil), }) } func TestIntegration2(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("skipping as -remote is set") } krb5Dir := t.TempDir() t.Setenv("KRB5_CONFIG", filepath.Join(krb5Dir, "krb5.conf")) t.Setenv("KRB5CCNAME", filepath.Join(krb5Dir, "ccache")) fstests.Run(t, &fstests.Opt{ RemoteName: "TestSMBKerberos:rclone", NilObject: (*smb.Object)(nil), }) } func TestIntegration3(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("skipping as -remote is set") } krb5Dir := t.TempDir() t.Setenv("KRB5_CONFIG", filepath.Join(krb5Dir, "krb5.conf")) ccache := filepath.Join(krb5Dir, "ccache") t.Setenv("RCLONE_TEST_CUSTOM_CCACHE_LOCATION", ccache) name := "TestSMBKerberosCcache" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":rclone", NilObject: (*smb.Object)(nil), ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "kerberos_ccache", Value: ccache}, }, }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/smb/connpool.go
backend/smb/connpool.go
package smb import ( "context" "errors" "fmt" "net" "os" "time" smb2 "github.com/cloudsoda/go-smb2" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fshttp" "golang.org/x/sync/errgroup" ) // dial starts a client connection to the given SMB server. It is a // convenience function that connects to the given network address, // initiates the SMB handshake, and then sets up a Client. // // The context is only used for establishing the connection, not after. func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) { dialer := fshttp.NewDialer(ctx) tconn, err := dialer.DialContext(ctx, network, addr) if err != nil { return nil, err } pass := "" if f.opt.Pass != "" { pass, err = obscure.Reveal(f.opt.Pass) if err != nil { return nil, err } } d := &smb2.Dialer{} if f.opt.UseKerberos { cl, err := NewKerberosFactory().GetClient(f.opt.KerberosCCache) if err != nil { return nil, err } spn := f.opt.SPN if spn == "" { spn = "cifs/" + f.opt.Host } d.Initiator = &smb2.Krb5Initiator{ Client: cl, TargetSPN: spn, } } else { d.Initiator = &smb2.NTLMInitiator{ User: f.opt.User, Password: pass, Domain: f.opt.Domain, TargetSPN: f.opt.SPN, } } session, err := d.DialConn(ctx, tconn, addr) if err != nil { return nil, err } return &conn{ smbSession: session, conn: &tconn, }, nil } // conn encapsulates a SMB client and corresponding SMB client type conn struct { conn *net.Conn smbSession *smb2.Session smbShare *smb2.Share shareName string } // Closes the connection func (c *conn) close() (err error) { if c.smbShare != nil { err = c.smbShare.Umount() } sessionLogoffErr := c.smbSession.Logoff() if err != nil { return err } return sessionLogoffErr } // True if it's closed func (c *conn) closed() bool { return c.smbSession.Echo() != nil } // Show that we are using a SMB session // // Call removeSession() when done func (f *Fs) addSession() { f.sessions.Add(1) } // Show the SMB session 
is no longer in use func (f *Fs) removeSession() { f.sessions.Add(-1) } // getSessions shows whether there are any sessions in use func (f *Fs) getSessions() int32 { return f.sessions.Load() } // Open a new connection to the SMB server. // // The context is only used for establishing the connection, not after. func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) { c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port) if err != nil { return nil, fmt.Errorf("couldn't connect SMB: %w", err) } if share != "" { // mount the specified share as well if user requested err = c.mountShare(share) if err != nil { _ = c.smbSession.Logoff() return nil, fmt.Errorf("couldn't initialize SMB: %w", err) } } return c, nil } // Ensure the specified share is mounted or the session is unmounted func (c *conn) mountShare(share string) (err error) { if c.shareName == share { return nil } if c.smbShare != nil { err = c.smbShare.Umount() c.smbShare = nil } if err != nil { return } if share != "" { c.smbShare, err = c.smbSession.Mount(share) if err != nil { return } } c.shareName = share return nil } // Get a SMB connection from the pool, or open a new one func (f *Fs) getConnection(ctx context.Context, share string) (c *conn, err error) { accounting.LimitTPS(ctx) f.poolMu.Lock() for len(f.pool) > 0 { c = f.pool[0] f.pool = f.pool[1:] err = c.mountShare(share) if err == nil { break } fs.Debugf(f, "Discarding unusable SMB connection: %v", err) c = nil } f.poolMu.Unlock() if c != nil { return c, nil } err = f.pacer.Call(func() (bool, error) { c, err = f.newConnection(ctx, share) if err != nil { return true, err } return false, nil }) return c, err } // Return a SMB connection to the pool // // It nils the pointed to connection out so it can't be reused // // if err is not nil then it checks the connection is alive using an // ECHO request func (f *Fs) putConnection(pc **conn, err error) { if pc == nil { return } c := *pc if c == nil { return } *pc = nil if err != 
nil { // If not a regular SMB error then check the connection if !(errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrExist) || errors.Is(err, os.ErrPermission)) { echoErr := c.smbSession.Echo() if echoErr != nil { fs.Debugf(f, "Connection failed, closing: %v", echoErr) _ = c.close() return } fs.Debugf(f, "Connection OK after error: %v", err) } } f.poolMu.Lock() f.pool = append(f.pool, c) if f.opt.IdleTimeout > 0 { f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer } f.poolMu.Unlock() } // Drain the pool of any connections func (f *Fs) drainPool(ctx context.Context) (err error) { f.poolMu.Lock() defer f.poolMu.Unlock() if sessions := f.getSessions(); sessions != 0 { fs.Debugf(f, "Not closing %d unused connections as %d sessions active", len(f.pool), sessions) if f.opt.IdleTimeout > 0 { f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer } return nil } if f.opt.IdleTimeout > 0 { f.drain.Stop() } if len(f.pool) != 0 { fs.Debugf(f, "Closing %d unused connections", len(f.pool)) } g, _ := errgroup.WithContext(ctx) for i, c := range f.pool { g.Go(func() (err error) { if !c.closed() { err = c.close() } f.pool[i] = nil return err }) } err = g.Wait() f.pool = nil return err }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/smb/smb.go
backend/smb/smb.go
// Package smb provides an interface to SMB servers package smb import ( "context" "errors" "fmt" "io" "os" "path" "strings" "sync" "sync/atomic" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/readers" ) const ( minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential ) var ( currentUser = env.CurrentUser() ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "smb", Description: "SMB / CIFS", NewFs: NewFs, Options: []fs.Option{{ Name: "host", Help: "SMB server hostname to connect to.\n\nE.g. \"example.com\".", Required: true, Sensitive: true, }, { Name: "user", Help: "SMB username.", Default: currentUser, Sensitive: true, }, { Name: "port", Help: "SMB port number.", Default: 445, }, { Name: "pass", Help: "SMB password.", IsPassword: true, }, { Name: "domain", Help: "Domain name for NTLM authentication.", Default: "WORKGROUP", Sensitive: true, }, { Name: "spn", Help: `Service principal name. Rclone presents this name to the server. Some servers use this as further authentication, and it often needs to be set for clusters. For example: cifs/remotehost:1020 Leave blank if not sure. `, Sensitive: true, }, { Name: "use_kerberos", Help: `Use Kerberos authentication. If set, rclone will use Kerberos authentication instead of NTLM. This requires a valid Kerberos configuration and credentials cache to be available, either in the default locations or as specified by the KRB5_CONFIG and KRB5CCNAME environment variables. `, Default: false, }, { Name: "idle_timeout", Default: fs.Duration(60 * time.Second), Help: `Max time before closing idle connections. 
If no connections have been returned to the connection pool in the time given, rclone will empty the connection pool. Set to 0 to keep connections indefinitely. `, Advanced: true, }, { Name: "hide_special_share", Help: "Hide special shares (e.g. print$) which users aren't supposed to access.", Default: true, Advanced: true, }, { Name: "case_insensitive", Help: "Whether the server is configured to be case-insensitive.\n\nAlways true on Windows shares.", Default: true, Advanced: true, }, { Name: "kerberos_ccache", Help: `Path to the Kerberos credential cache (krb5cc). Overrides the default KRB5CCNAME environment variable and allows this instance of the SMB backend to use a different Kerberos cache file. This is useful when mounting multiple SMB with different credentials or running in multi-user environments. Supported formats: - FILE:/path/to/ccache – Use the specified file. - DIR:/path/to/ccachedir – Use the primary file inside the specified directory. - /path/to/ccache – Interpreted as a file path.`, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: encoder.EncodeZero | // path separator encoder.EncodeSlash | encoder.EncodeBackSlash | // windows encoder.EncodeWin | encoder.EncodeCtl | encoder.EncodeDot | // the file turns into 8.3 names (and cannot be converted back) encoder.EncodeRightSpace | encoder.EncodeRightPeriod | // encoder.EncodeInvalidUtf8, }, }}) } // Options defines the configuration for this backend type Options struct { Host string `config:"host"` Port string `config:"port"` User string `config:"user"` Pass string `config:"pass"` Domain string `config:"domain"` SPN string `config:"spn"` UseKerberos bool `config:"use_kerberos"` KerberosCCache string `config:"kerberos_ccache"` HideSpecial bool `config:"hide_special_share"` CaseInsensitive bool `config:"case_insensitive"` IdleTimeout fs.Duration `config:"idle_timeout"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a SMB remote 
type Fs struct { name string // name of this remote root string // the path we are working on if any opt Options // parsed config options features *fs.Features // optional features pacer *fs.Pacer // pacer for operations sessions atomic.Int32 poolMu sync.Mutex pool []*conn drain *time.Timer // used to drain the pool when we stop using the connections ctx context.Context } // Object describes a file at the server type Object struct { fs *Fs // reference to Fs remote string // the remote path statResult os.FileInfo } // NewFs constructs an Fs from the path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } // if root is empty or ends with / (must be a directory) isRootDir := isPathDir(root) root = strings.Trim(root, "/") f := &Fs{ name: name, opt: *opt, ctx: ctx, root: root, } f.features = (&fs.Features{ CaseInsensitive: opt.CaseInsensitive, CanHaveEmptyDirectories: true, BucketBased: true, PartialUploads: true, }).Fill(ctx, f) f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))) // set the pool drainer timer going if opt.IdleTimeout > 0 { f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) }) } // test if the root exists as a file share, dir := f.split("") if share == "" || dir == "" { return f, nil } // Skip stat check if root is already a directory if isRootDir { return f, nil } cn, err := f.getConnection(ctx, share) if err != nil { return nil, err } stat, err := cn.smbShare.Stat(f.toSambaPath(dir)) f.putConnection(&cn, err) if err != nil { // ignore stat error here return f, nil } if !stat.IsDir() { f.root, err = path.Dir(root), fs.ErrorIsFile } fs.Debugf(f, "Using root directory %q", f.root) return f, err } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name 
} // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { bucket, file := f.split("") if bucket == "" { return fmt.Sprintf("smb://%s@%s:%s/", f.opt.User, f.opt.Host, f.opt.Port) } return fmt.Sprintf("smb://%s@%s:%s/%s/%s", f.opt.User, f.opt.Host, f.opt.Port, bucket, file) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // Hashes returns nothing as SMB itself doesn't have a way to tell checksums func (f *Fs) Hashes() hash.Set { return hash.NewHashSet() } // Precision returns the precision of mtime func (f *Fs) Precision() time.Duration { return time.Millisecond } // NewObject creates a new file object func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { share, path := f.split(remote) return f.findObjectSeparate(ctx, share, path) } func (f *Fs) findObjectSeparate(ctx context.Context, share, path string) (fs.Object, error) { if share == "" || path == "" { return nil, fs.ErrorIsDir } cn, err := f.getConnection(ctx, share) if err != nil { return nil, err } stat, err := cn.smbShare.Stat(f.toSambaPath(path)) f.putConnection(&cn, err) if err != nil { return nil, translateError(err, false) } if stat.IsDir() { return nil, fs.ErrorIsDir } return f.makeEntry(share, path, stat), nil } // Mkdir creates a directory on the server func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { share, path := f.split(dir) if share == "" || path == "" { return nil } cn, err := f.getConnection(ctx, share) if err != nil { return err } err = cn.smbShare.MkdirAll(f.toSambaPath(path), 0o755) f.putConnection(&cn, err) return err } // Rmdir removes an empty directory on the server func (f *Fs) Rmdir(ctx context.Context, dir string) error { share, path := f.split(dir) if share == "" || path == "" { return nil } cn, err := f.getConnection(ctx, share) if err != nil { return err } err = 
cn.smbShare.Remove(f.toSambaPath(path)) f.putConnection(&cn, err) return err } // Put uploads a file func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { o := &Object{ fs: f, remote: src.Remote(), } err := o.Update(ctx, in, src, options...) if err == nil { return o, nil } return nil, err } // PutStream uploads to the remote path with the modTime given of indeterminate size // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { o := &Object{ fs: f, remote: src.Remote(), } err := o.Update(ctx, in, src, options...) if err == nil { return o, nil } return nil, err } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (_ fs.Object, err error) { dstShare, dstPath := f.split(remote) srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } srcShare, srcPath := srcObj.split() if dstShare != srcShare { fs.Debugf(src, "Can't move - must be on the same share") return nil, fs.ErrorCantMove } err = f.ensureDirectory(ctx, dstShare, dstPath) if err != nil { return nil, fmt.Errorf("failed to make parent directories: %w", err) } cn, err := f.getConnection(ctx, dstShare) if err != nil { return nil, err } err = cn.smbShare.Rename(f.toSambaPath(srcPath), f.toSambaPath(dstPath)) f.putConnection(&cn, err) if err != nil { return nil, translateError(err, false) } return f.findObjectSeparate(ctx, dstShare, dstPath) } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) { dstShare, dstPath := f.split(dstRemote) srcFs, ok := src.(*Fs) if !ok { fs.Debugf(src, "Can't move - not same remote type") return fs.ErrorCantDirMove } srcShare, srcPath := srcFs.split(srcRemote) if dstShare != srcShare { fs.Debugf(src, "Can't move - must be on the same share") return fs.ErrorCantDirMove } err = f.ensureDirectory(ctx, dstShare, dstPath) if err != nil { return fmt.Errorf("failed to make parent directories: %w", err) } cn, err := f.getConnection(ctx, dstShare) if err != nil { return err } defer f.putConnection(&cn, err) _, err = cn.smbShare.Stat(dstPath) if os.IsNotExist(err) { err = cn.smbShare.Rename(f.toSambaPath(srcPath), f.toSambaPath(dstPath)) return translateError(err, true) } return fs.ErrorDirExists } // List files and directories in a directory func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { share, _path := f.split(dir) cn, err := f.getConnection(ctx, share) if err != nil { return nil, err } defer f.putConnection(&cn, err) if share == "" { shares, err := cn.smbSession.ListSharenames() for _, shh := range shares { shh = f.toNativePath(shh) if strings.HasSuffix(shh, "$") && f.opt.HideSpecial { continue } entries = append(entries, fs.NewDir(shh, time.Time{})) } return entries, err } dirents, err := cn.smbShare.ReadDir(f.toSambaPath(_path)) if err != nil { return entries, translateError(err, true) } for _, file := range dirents { nfn := f.toNativePath(file.Name()) if file.IsDir() { entries = append(entries, fs.NewDir(path.Join(dir, nfn), file.ModTime())) } else { entries = append(entries, f.makeEntryRelative(share, _path, nfn, file)) } } return entries, nil } // About returns things about remaining and used spaces func (f *Fs) About(ctx 
context.Context) (_ *fs.Usage, err error) { share, dir := f.split("/") if share == "" { // Just return empty info rather than an error if called on the root return &fs.Usage{}, nil } dir = f.toSambaPath(dir) cn, err := f.getConnection(ctx, share) if err != nil { return nil, err } stat, err := cn.smbShare.Statfs(dir) f.putConnection(&cn, err) if err != nil { return nil, err } bs := stat.BlockSize() usage := &fs.Usage{ Total: fs.NewUsageValue(bs * stat.TotalBlockCount()), Used: fs.NewUsageValue(bs * (stat.TotalBlockCount() - stat.FreeBlockCount())), Free: fs.NewUsageValue(bs * stat.AvailableBlockCount()), } return usage, nil } type smbWriterAt struct { pool *filePool closed bool closeMu sync.Mutex wg sync.WaitGroup } func (w *smbWriterAt) WriteAt(p []byte, off int64) (int, error) { w.closeMu.Lock() if w.closed { w.closeMu.Unlock() return 0, errors.New("writer already closed") } w.wg.Add(1) w.closeMu.Unlock() defer w.wg.Done() f, err := w.pool.get() if err != nil { return 0, fmt.Errorf("failed to get file from pool: %w", err) } n, writeErr := f.WriteAt(p, off) w.pool.put(f, writeErr) if writeErr != nil { return n, fmt.Errorf("failed to write at offset %d: %w", off, writeErr) } return n, writeErr } func (w *smbWriterAt) Close() error { w.closeMu.Lock() defer w.closeMu.Unlock() if w.closed { return nil } w.closed = true // Wait for all pending writes to finish w.wg.Wait() var errs []error // Drain the pool if err := w.pool.drain(); err != nil { errs = append(errs, fmt.Errorf("failed to drain file pool: %w", err)) } // Remove session w.pool.fs.removeSession() if len(errs) > 0 { return errors.Join(errs...) } return nil } // OpenWriterAt opens with a handle for random access writes // // Pass in the remote desired and the size if known. 
// // It truncates any existing object func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) { o := &Object{ fs: f, remote: remote, } share, filename := o.split() if share == "" || filename == "" { return nil, fs.ErrorIsDir } err := o.fs.ensureDirectory(ctx, share, filename) if err != nil { return nil, fmt.Errorf("failed to make parent directories: %w", err) } smbPath := o.fs.toSambaPath(filename) // One-time truncate cn, err := o.fs.getConnection(ctx, share) if err != nil { return nil, err } file, err := cn.smbShare.OpenFile(smbPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) if err != nil { o.fs.putConnection(&cn, err) return nil, err } if size > 0 { if truncateErr := file.Truncate(size); truncateErr != nil { _ = file.Close() o.fs.putConnection(&cn, truncateErr) return nil, fmt.Errorf("failed to truncate file: %w", truncateErr) } } if closeErr := file.Close(); closeErr != nil { o.fs.putConnection(&cn, closeErr) return nil, fmt.Errorf("failed to close file after truncate: %w", closeErr) } o.fs.putConnection(&cn, nil) // Add a new session o.fs.addSession() return &smbWriterAt{ pool: newFilePool(ctx, o.fs, share, smbPath), }, nil } // Shutdown the backend, closing any background tasks and any // cached connections. func (f *Fs) Shutdown(ctx context.Context) error { return f.drainPool(ctx) } func (f *Fs) makeEntry(share, _path string, stat os.FileInfo) *Object { remote := path.Join(share, _path) return &Object{ fs: f, remote: trimPathPrefix(remote, f.root), statResult: stat, } } func (f *Fs) makeEntryRelative(share, _path, relative string, stat os.FileInfo) *Object { return f.makeEntry(share, path.Join(_path, relative), stat) } func (f *Fs) ensureDirectory(ctx context.Context, share, _path string) error { dir := path.Dir(_path) if dir == "." 
{ return nil } cn, err := f.getConnection(ctx, share) if err != nil { return err } err = cn.smbShare.MkdirAll(f.toSambaPath(dir), 0o755) f.putConnection(&cn, err) return err } /// Object // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // ModTime is the last modified time (read-only) func (o *Object) ModTime(ctx context.Context) time.Time { return o.statResult.ModTime() } // Size is the file length func (o *Object) Size() int64 { return o.statResult.Size() } // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Hash always returns empty value func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) { return "", hash.ErrUnsupported } // Storable returns if this object is storable func (o *Object) Storable() bool { return true } // SetModTime sets modTime on a particular file func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) { share, reqDir := o.split() if share == "" || reqDir == "" { return fs.ErrorCantSetModTime } reqDir = o.fs.toSambaPath(reqDir) cn, err := o.fs.getConnection(ctx, share) if err != nil { return err } defer o.fs.putConnection(&cn, err) err = cn.smbShare.Chtimes(reqDir, t, t) if err != nil { return err } fi, err := cn.smbShare.Stat(reqDir) if err != nil { return fmt.Errorf("SetModTime: stat: %w", err) } o.statResult = fi return err } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { share, filename := o.split() if share == "" || filename == "" { return nil, fs.ErrorIsDir } filename = o.fs.toSambaPath(filename) var offset, limit int64 = 0, -1 for _, option := range options { switch x := option.(type) { case *fs.SeekOption: offset = x.Offset case *fs.RangeOption: offset, limit = x.Decode(o.Size()) default: if option.Mandatory() { fs.Logf(o, "Unsupported mandatory option: %v", option) } } } o.fs.addSession() // Show session in use defer o.fs.removeSession() cn, err := 
o.fs.getConnection(ctx, share) if err != nil { return nil, err } fl, err := cn.smbShare.OpenFile(filename, os.O_RDONLY, 0) if err != nil { o.fs.putConnection(&cn, err) return nil, fmt.Errorf("failed to open: %w", err) } pos, err := fl.Seek(offset, io.SeekStart) if err != nil { o.fs.putConnection(&cn, err) return nil, fmt.Errorf("failed to seek: %w", err) } if pos != offset { err = fmt.Errorf("failed to seek: wrong position (expected=%d, reported=%d)", offset, pos) o.fs.putConnection(&cn, err) return nil, err } in = readers.NewLimitedReadCloser(fl, limit) in = &boundReadCloser{ rc: in, close: func() error { o.fs.putConnection(&cn, nil) return nil }, } return in, nil } // Update the Object from in with modTime and size func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { share, filename := o.split() if share == "" || filename == "" { return fs.ErrorIsDir } err = o.fs.ensureDirectory(ctx, share, filename) if err != nil { return fmt.Errorf("failed to make parent directories: %w", err) } filename = o.fs.toSambaPath(filename) o.fs.addSession() // Show session in use defer o.fs.removeSession() cn, err := o.fs.getConnection(ctx, share) if err != nil { return err } defer func() { o.fs.putConnection(&cn, err) }() fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644) if err != nil { return fmt.Errorf("failed to open: %w", err) } // remove the file if upload failed remove := func() { // Windows doesn't allow removal of files without closing file removeErr := fl.Close() if removeErr != nil { fs.Debugf(src, "failed to close the file for delete: %v", removeErr) // try to remove the file anyway; the file may be already closed } removeErr = cn.smbShare.Remove(filename) if removeErr != nil { fs.Debugf(src, "failed to remove: %v", removeErr) } else { fs.Debugf(src, "removed after failed upload: %v", err) } } _, err = fl.ReadFrom(in) if err != nil { remove() return fmt.Errorf("Update 
ReadFrom failed: %w", err) } err = fl.Close() if err != nil { remove() return fmt.Errorf("Update Close failed: %w", err) } // Set the modified time and also o.statResult err = o.SetModTime(ctx, src.ModTime(ctx)) if err != nil { return fmt.Errorf("Update SetModTime failed: %w", err) } return nil } // Remove an object func (o *Object) Remove(ctx context.Context) (err error) { share, filename := o.split() if share == "" || filename == "" { return fs.ErrorIsDir } filename = o.fs.toSambaPath(filename) cn, err := o.fs.getConnection(ctx, share) if err != nil { return err } err = cn.smbShare.Remove(filename) o.fs.putConnection(&cn, err) return err } // String converts this Object to a string func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } /// Misc // split returns share name and path in the share from the rootRelativePath // relative to f.root func (f *Fs) split(rootRelativePath string) (shareName, filepath string) { return bucket.Split(path.Join(f.root, rootRelativePath)) } // split returns share name and path in the share from the object func (o *Object) split() (shareName, filepath string) { return o.fs.split(o.remote) } func (f *Fs) toSambaPath(path string) string { // 1. encode via Rclone's escaping system // 2. convert to backslash-separated path return strings.ReplaceAll(f.opt.Enc.FromStandardPath(path), "/", "\\") } func (f *Fs) toNativePath(path string) string { // 1. convert *back* to slash-separated path // 2. encode via Rclone's escaping system return f.opt.Enc.ToStandardPath(strings.ReplaceAll(path, "\\", "/")) } func ensureSuffix(s, suffix string) string { if strings.HasSuffix(s, suffix) { return s } return s + suffix } // isPathDir determines if a path represents a directory based on trailing slash func isPathDir(path string) bool { return path == "" || strings.HasSuffix(path, "/") } func trimPathPrefix(s, prefix string) string { // we need to clean the paths to make tests pass! 
s = betterPathClean(s) prefix = betterPathClean(prefix) if s == prefix || s == prefix+"/" { return "" } prefix = ensureSuffix(prefix, "/") return strings.TrimPrefix(s, prefix) } func betterPathClean(p string) string { d := path.Clean(p) if d == "." { return "" } return d } type boundReadCloser struct { rc io.ReadCloser close func() error } func (r *boundReadCloser) Read(p []byte) (n int, err error) { return r.rc.Read(p) } func (r *boundReadCloser) Close() error { err1 := r.rc.Close() err2 := r.close() if err1 != nil { return err1 } return err2 } func translateError(e error, dir bool) error { if os.IsNotExist(e) { if dir { return fs.ErrorDirNotFound } return fs.ErrorObjectNotFound } return e } var ( _ fs.Fs = &Fs{} _ fs.PutStreamer = &Fs{} _ fs.Mover = &Fs{} _ fs.DirMover = &Fs{} _ fs.Abouter = &Fs{} _ fs.Shutdowner = &Fs{} _ fs.Object = &Object{} _ io.ReadCloser = &boundReadCloser{} )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/smb/kerberos_test.go
backend/smb/kerberos_test.go
package smb import ( "os" "path/filepath" "testing" "time" "github.com/jcmturner/gokrb5/v8/client" "github.com/jcmturner/gokrb5/v8/config" "github.com/jcmturner/gokrb5/v8/credentials" "github.com/stretchr/testify/assert" ) func TestResolveCcachePath(t *testing.T) { tmpDir := t.TempDir() // Setup: files for FILE and DIR modes fileCcache := filepath.Join(tmpDir, "file_ccache") err := os.WriteFile(fileCcache, []byte{}, 0600) assert.NoError(t, err) dirCcache := filepath.Join(tmpDir, "dir_ccache") err = os.Mkdir(dirCcache, 0755) assert.NoError(t, err) err = os.WriteFile(filepath.Join(dirCcache, "primary"), []byte("ticket"), 0600) assert.NoError(t, err) dirCcacheTicket := filepath.Join(dirCcache, "ticket") err = os.WriteFile(dirCcacheTicket, []byte{}, 0600) assert.NoError(t, err) tests := []struct { name string ccachePath string envKRB5CCNAME string expected string expectError bool }{ { name: "FILE: prefix from env", ccachePath: "", envKRB5CCNAME: "FILE:" + fileCcache, expected: fileCcache, }, { name: "DIR: prefix from env", ccachePath: "", envKRB5CCNAME: "DIR:" + dirCcache, expected: dirCcacheTicket, }, { name: "Unsupported prefix", ccachePath: "", envKRB5CCNAME: "MEMORY:/bad/path", expectError: true, }, { name: "Direct file path (no prefix)", ccachePath: "/tmp/myccache", expected: "/tmp/myccache", }, { name: "Default to /tmp/krb5cc_<uid>", ccachePath: "", envKRB5CCNAME: "", expected: "/tmp/krb5cc_", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Setenv("KRB5CCNAME", tt.envKRB5CCNAME) result, err := resolveCcachePath(tt.ccachePath) if tt.expectError { assert.Error(t, err) } else { assert.NoError(t, err) assert.Contains(t, result, tt.expected) } }) } } func TestKerberosFactory_GetClient_ReloadOnCcacheChange(t *testing.T) { // Create temp ccache file tmpFile, err := os.CreateTemp("", "krb5cc_test") assert.NoError(t, err) defer func() { if err := os.Remove(tmpFile.Name()); err != nil { t.Logf("Failed to remove temp file %s: %v", tmpFile.Name(), err) 
} }() unixPath := filepath.ToSlash(tmpFile.Name()) ccachePath := "FILE:" + unixPath initialContent := []byte("CCACHE_VERSION 4\n") _, err = tmpFile.Write(initialContent) assert.NoError(t, err) assert.NoError(t, tmpFile.Close()) // Setup mocks loadCallCount := 0 mockLoadCCache := func(path string) (*credentials.CCache, error) { loadCallCount++ return &credentials.CCache{}, nil } mockNewClient := func(cc *credentials.CCache, cfg *config.Config, opts ...func(*client.Settings)) (*client.Client, error) { return &client.Client{}, nil } mockLoadConfig := func() (*config.Config, error) { return &config.Config{}, nil } factory := &KerberosFactory{ loadCCache: mockLoadCCache, newClient: mockNewClient, loadConfig: mockLoadConfig, } // First call — triggers loading _, err = factory.GetClient(ccachePath) assert.NoError(t, err) assert.Equal(t, 1, loadCallCount, "expected 1 load call") // Second call — should reuse cache, no additional load _, err = factory.GetClient(ccachePath) assert.NoError(t, err) assert.Equal(t, 1, loadCallCount, "expected cached reuse, no new load") // Simulate file update time.Sleep(1 * time.Second) // ensure mtime changes err = os.WriteFile(tmpFile.Name(), []byte("CCACHE_VERSION 4\n#updated"), 0600) assert.NoError(t, err) // Third call — should detect change, reload _, err = factory.GetClient(ccachePath) assert.NoError(t, err) assert.Equal(t, 2, loadCallCount, "expected reload on changed ccache") }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/smb/filepool.go
backend/smb/filepool.go
package smb import ( "context" "fmt" "os" "sync" "github.com/cloudsoda/go-smb2" "golang.org/x/sync/errgroup" ) // FsInterface defines the methods that filePool needs from Fs type FsInterface interface { getConnection(ctx context.Context, share string) (*conn, error) putConnection(pc **conn, err error) removeSession() } type file struct { *smb2.File c *conn } type filePool struct { ctx context.Context fs FsInterface share string path string mu sync.Mutex pool []*file } func newFilePool(ctx context.Context, fs FsInterface, share, path string) *filePool { return &filePool{ ctx: ctx, fs: fs, share: share, path: path, } } func (p *filePool) get() (*file, error) { p.mu.Lock() if len(p.pool) > 0 { f := p.pool[len(p.pool)-1] p.pool = p.pool[:len(p.pool)-1] p.mu.Unlock() return f, nil } p.mu.Unlock() c, err := p.fs.getConnection(p.ctx, p.share) if err != nil { return nil, err } fl, err := c.smbShare.OpenFile(p.path, os.O_WRONLY, 0o644) if err != nil { p.fs.putConnection(&c, err) return nil, fmt.Errorf("failed to open: %w", err) } return &file{File: fl, c: c}, nil } func (p *filePool) put(f *file, err error) { if f == nil { return } if err != nil { _ = f.Close() p.fs.putConnection(&f.c, err) return } p.mu.Lock() p.pool = append(p.pool, f) p.mu.Unlock() } func (p *filePool) drain() error { p.mu.Lock() files := p.pool p.pool = nil p.mu.Unlock() g, _ := errgroup.WithContext(p.ctx) for _, f := range files { g.Go(func() error { err := f.Close() p.fs.putConnection(&f.c, err) return err }) } return g.Wait() }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false