| repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated |
|---|---|---|---|---|---|---|---|---|
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/metadata_unix.go | backend/local/metadata_unix.go | //go:build openbsd || solaris
package local
import (
"fmt"
"os"
"syscall"
"time"
"github.com/rclone/rclone/fs"
)
// Read the time specified from the os.FileInfo
func readTime(t timeType, fi os.FileInfo) time.Time {
stat, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(nil, "didn't return Stat_t as expected")
return fi.ModTime()
}
switch t {
case aTime:
return time.Unix(stat.Atim.Unix())
case cTime:
return time.Unix(stat.Ctim.Unix())
}
return fi.ModTime()
}
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)
if err != nil {
return err
}
stat, ok := info.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(o, "didn't return Stat_t as expected")
return nil
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev != 0 {
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
}
setTime := func(key string, t syscall.Timespec) {
m.Set(key, time.Unix(t.Unix()).Format(metadataTimeFormat))
}
setTime("atime", stat.Atim)
setTime("mtime", stat.Mtim)
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
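A minimal standalone sketch of the conversion idiom readTime relies on above: syscall.Timespec.Unix() returns (sec, nsec), and Go expands a two-value call directly into time.Unix's two parameters. The unixPair helper below is a hypothetical stand-in for that method.
package main

import (
    "fmt"
    "time"
)

// unixPair stands in for syscall.Timespec.Unix(), which returns the
// timestamp as whole seconds plus nanoseconds.
func unixPair() (sec int64, nsec int64) {
    return 1700000000, 500000000
}

func main() {
    // The two return values expand into time.Unix's two parameters,
    // which is how time.Unix(stat.Atim.Unix()) works in readTime above.
    t := time.Unix(unixPair())
    fmt.Println(t.UTC())
}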
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/local_test.go | backend/local/local_test.go | // Test Local filesystem interface
package local_test
import (
"testing"
"github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "",
NilObject: (*local.Object)(nil),
QuickTestOK: true,
})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/metadata_other.go | backend/local/metadata_other.go | //go:build dragonfly || plan9 || js || aix
package local
import (
"fmt"
"os"
"time"
"github.com/rclone/rclone/fs"
)
// Read the time specified from the os.FileInfo
func readTime(t timeType, fi os.FileInfo) time.Time {
return fi.ModTime()
}
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)
if err != nil {
return err
}
m.Set("mode", fmt.Sprintf("%0o", info.Mode()))
m.Set("mtime", info.ModTime().Format(metadataTimeFormat))
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/setbtime_windows.go | backend/local/setbtime_windows.go | //go:build windows
package local
import (
"syscall"
"time"
)
const haveSetBTime = true
// setTimes sets any of atime, mtime or btime.
// If link is set it sets the times on the link rather than the target.
func setTimes(name string, atime, mtime, btime time.Time, link bool) (err error) {
pathp, err := syscall.UTF16PtrFromString(name)
if err != nil {
return err
}
fileFlag := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
if link {
fileFlag |= syscall.FILE_FLAG_OPEN_REPARSE_POINT
}
h, err := syscall.CreateFile(pathp,
syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
syscall.OPEN_EXISTING, fileFlag, 0)
if err != nil {
return err
}
defer func() {
closeErr := syscall.Close(h)
if err == nil {
err = closeErr
}
}()
var patime, pmtime, pbtime *syscall.Filetime
if !atime.IsZero() {
t := syscall.NsecToFiletime(atime.UnixNano())
patime = &t
}
if !mtime.IsZero() {
t := syscall.NsecToFiletime(mtime.UnixNano())
pmtime = &t
}
if !btime.IsZero() {
t := syscall.NsecToFiletime(btime.UnixNano())
pbtime = &t
}
return syscall.SetFileTime(h, pbtime, patime, pmtime)
}
// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
return setTimes(name, time.Time{}, time.Time{}, btime, false)
}
// lsetBTime changes the birth time of the link passed in
func lsetBTime(name string, btime time.Time) error {
return setTimes(name, time.Time{}, time.Time{}, btime, true)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
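A small sketch of the zero-value convention setTimes relies on above: callers pass time.Time{} for any timestamp they do not want to change, and values are only applied when IsZero() is false. The describe helper is purely illustrative.
package main

import (
    "fmt"
    "time"
)

// describe reports which timestamps would actually be applied, mirroring
// the "only non-zero times are set" convention in setTimes above.
func describe(atime, mtime, btime time.Time) {
    times := map[string]time.Time{"atime": atime, "mtime": mtime, "btime": btime}
    for name, t := range times {
        if t.IsZero() {
            fmt.Println(name, "left unchanged")
        } else {
            fmt.Println(name, "set to", t.UTC())
        }
    }
}

func main() {
    // Equivalent in spirit to setBTime: only the birth time is supplied.
    describe(time.Time{}, time.Time{}, time.Unix(1700000000, 0))
}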
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/remove_windows.go | backend/local/remove_windows.go | //go:build windows
package local
import (
"os"
"time"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/windows"
)
// Removes name, retrying on a sharing violation
func remove(name string) (err error) {
const maxTries = 10
var sleepTime = 1 * time.Millisecond
for i := 0; i < maxTries; i++ {
err = os.Remove(name)
if err == nil {
break
}
pathErr, ok := err.(*os.PathError)
if !ok {
break
}
if pathErr.Err != windows.ERROR_SHARING_VIOLATION {
break
}
fs.Logf(name, "Remove detected sharing violation - retry %d/%d sleeping %v", i+1, maxTries, sleepTime)
time.Sleep(sleepTime)
sleepTime <<= 1
}
return err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
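A platform-neutral sketch of the retry-with-exponential-backoff pattern remove applies above to sharing violations; errBusy and retryable are hypothetical stand-ins for the Windows-specific ERROR_SHARING_VIOLATION check.
package main

import (
    "errors"
    "fmt"
    "time"
)

var errBusy = errors.New("resource busy")

// retryable stands in for the ERROR_SHARING_VIOLATION test above.
func retryable(err error) bool { return errors.Is(err, errBusy) }

// withRetries retries op with doubling sleeps, like remove above.
func withRetries(op func() error) error {
    const maxTries = 10
    sleep := 1 * time.Millisecond
    var err error
    for i := 0; i < maxTries; i++ {
        err = op()
        if err == nil || !retryable(err) {
            break
        }
        fmt.Printf("retry %d/%d sleeping %v: %v\n", i+1, maxTries, sleep, err)
        time.Sleep(sleep)
        sleep <<= 1 // exponential backoff
    }
    return err
}

func main() {
    tries := 0
    err := withRetries(func() error {
        tries++
        if tries < 3 {
            return errBusy
        }
        return nil
    })
    fmt.Println("result:", err, "after", tries, "tries")
}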
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/symlink.go | backend/local/symlink.go | //go:build !windows && !plan9 && !js
package local
import (
"os"
"syscall"
)
// isCircularSymlinkError checks if the current error code is because of a circular symlink
func isCircularSymlinkError(err error) bool {
if err != nil {
if newerr, ok := err.(*os.PathError); ok {
if errcode, ok := newerr.Err.(syscall.Errno); ok {
if errcode == syscall.ELOOP {
return true
}
}
}
}
return false
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/lchmod.go | backend/local/lchmod.go | //go:build windows || plan9 || js || linux
package local
import "os"
const haveLChmod = false
// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
// Can't do this safely on this OS - chmoding a symlink always
// changes the destination.
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/about_unix.go | backend/local/about_unix.go | //go:build darwin || dragonfly || freebsd || linux
package local
import (
"context"
"fmt"
"os"
"syscall"
"github.com/rclone/rclone/fs"
)
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var s syscall.Statfs_t
err := syscall.Statfs(f.root, &s)
if err != nil {
if os.IsNotExist(err) {
return nil, fs.ErrorDirNotFound
}
return nil, fmt.Errorf("failed to read disk usage: %w", err)
}
bs := int64(s.Bsize) // nolint: unconvert
usage := &fs.Usage{
Total: fs.NewUsageValue(bs * int64(s.Blocks)), //nolint: unconvert // quota of bytes that can be used
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), //nolint: unconvert // bytes in use
Free: fs.NewUsageValue(bs * int64(s.Bavail)), //nolint: unconvert // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
// check interface
var _ fs.Abouter = &Fs{}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
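A Unix-only sketch (assuming Linux or macOS) of the Statfs arithmetic About uses above: total, used and free come from the same Bsize/Blocks/Bfree/Bavail calculation.
//go:build linux || darwin

package main

import (
    "fmt"
    "syscall"
)

func main() {
    var s syscall.Statfs_t
    if err := syscall.Statfs("/", &s); err != nil {
        fmt.Println("statfs failed:", err)
        return
    }
    bs := int64(s.Bsize) // filesystem block size in bytes
    total := bs * int64(s.Blocks)
    used := bs * int64(s.Blocks-s.Bfree)
    free := bs * int64(s.Bavail)
    fmt.Printf("total=%d used=%d free=%d\n", total, used, free)
}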
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/about_windows.go | backend/local/about_windows.go | //go:build windows
package local
import (
"context"
"fmt"
"unsafe"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/windows"
)
var getFreeDiskSpace = windows.NewLazySystemDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var available, total, free int64
root, e := windows.UTF16PtrFromString(f.root)
if e != nil {
return nil, fmt.Errorf("failed to read disk usage: %w", e)
}
_, _, e1 := getFreeDiskSpace.Call(
uintptr(unsafe.Pointer(root)),
uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
)
if e1 != windows.Errno(0) {
return nil, fmt.Errorf("failed to read disk usage: %w", e1)
}
usage := &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used
Used: fs.NewUsageValue(total - free), // bytes in use
Free: fs.NewUsageValue(available), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
// check interface
var _ fs.Abouter = &Fs{}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/lchtimes_unix.go | backend/local/lchtimes_unix.go | //go:build !windows && !plan9 && !js
package local
import (
"os"
"time"
"golang.org/x/sys/unix"
)
const haveLChtimes = true
// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
var utimes [2]unix.Timespec
utimes[0] = unix.NsecToTimespec(atime.UnixNano())
utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
if e := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); e != nil {
return &os.PathError{Op: "lchtimes", Path: name, Err: e}
}
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/metadata_linux.go | backend/local/metadata_linux.go | //go:build linux
package local
import (
"fmt"
"os"
"runtime"
"sync"
"syscall"
"time"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/unix"
)
var (
statxCheckOnce sync.Once
readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
)
// Read the time specified from the os.FileInfo
func readTime(t timeType, fi os.FileInfo) time.Time {
stat, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(nil, "didn't return Stat_t as expected")
return fi.ModTime()
}
switch t {
case aTime:
return time.Unix(stat.Atim.Unix())
case cTime:
return time.Unix(stat.Ctim.Unix())
}
return fi.ModTime()
}
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
statxCheckOnce.Do(func() {
// Check statx() is available as it was only introduced in kernel 4.11
// If not, fall back to fstatat(), which was introduced in kernel 2.6.16 and is available on all kernels Go supports
var stat unix.Statx_t
if runtime.GOOS != "android" && unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
readMetadataFromFileFn = readMetadataFromFileStatx
} else {
readMetadataFromFileFn = readMetadataFromFileFstatat
}
})
return readMetadataFromFileFn(o, m)
}
// Read the metadata from the file into metadata where possible
func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks {
flags = 0
}
var stat unix.Statx_t
// statx() was added to Linux in kernel 4.11
err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
unix.STATX_TYPE | // Want stx_mode & S_IFMT
unix.STATX_MODE | // Want stx_mode & ~S_IFMT
unix.STATX_UID | // Want stx_uid
unix.STATX_GID | // Want stx_gid
unix.STATX_ATIME | // Want stx_atime
unix.STATX_MTIME | // Want stx_mtime
unix.STATX_CTIME | // Want stx_ctime
unix.STATX_BTIME), // Want stx_btime
&stat)
if err != nil {
return err
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev_major != 0 || stat.Rdev_minor != 0 {
m.Set("rdev", fmt.Sprintf("%x", uint64(stat.Rdev_major)<<32|uint64(stat.Rdev_minor)))
}
setTime := func(key string, t unix.StatxTimestamp) {
m.Set(key, time.Unix(t.Sec, int64(t.Nsec)).Format(metadataTimeFormat))
}
setTime("atime", stat.Atime)
setTime("mtime", stat.Mtime)
setTime("btime", stat.Btime)
return nil
}
// Read the metadata from the file into metadata where possible
func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks {
flags = 0
}
var stat unix.Stat_t
// fstatat() was added to Linux in kernel 2.6.16
// Go only supports 2.6.32 or later
err = unix.Fstatat(unix.AT_FDCWD, o.path, &stat, flags)
if err != nil {
return err
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev != 0 {
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
}
setTime := func(key string, t unix.Timespec) {
// The types of t.Sec and t.Nsec vary from int32 to int64 on
// different Linux architectures so we need to cast them to
// int64 here and hence need to quiet the linter about
// unnecessary casts.
//
// nolint: unconvert
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
}
setTime("atime", stat.Atim)
setTime("mtime", stat.Mtim)
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
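A minimal sketch of the probe-once-then-dispatch pattern readMetadataFromFile uses above: the capability check (statx vs fstatat in the real code) runs once under sync.Once and the chosen implementation is cached in a function variable. The probe and both implementations here are hypothetical stand-ins.
package main

import (
    "fmt"
    "sync"
)

var (
    pickOnce sync.Once
    readFn   func(path string) string
)

// featureAvailable stands in for the unix.Statx ENOSYS probe above.
func featureAvailable() bool { return false }

func fastRead(path string) string { return "fast:" + path }
func slowRead(path string) string { return "slow:" + path }

func read(path string) string {
    pickOnce.Do(func() {
        if featureAvailable() {
            readFn = fastRead
        } else {
            readFn = slowRead
        }
    })
    return readFn(path)
}

func main() {
    fmt.Println(read("/etc/hostname"))
    fmt.Println(read("/etc/hosts")) // second call reuses the cached choice
}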
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/metadata_windows.go | backend/local/metadata_windows.go | //go:build windows
package local
import (
"fmt"
"os"
"syscall"
"time"
"github.com/rclone/rclone/fs"
)
// Read the time specified from the os.FileInfo
func readTime(t timeType, fi os.FileInfo) time.Time {
stat, ok := fi.Sys().(*syscall.Win32FileAttributeData)
if !ok {
fs.Debugf(nil, "didn't return Win32FileAttributeData as expected")
return fi.ModTime()
}
switch t {
case aTime:
return time.Unix(0, stat.LastAccessTime.Nanoseconds())
case bTime:
return time.Unix(0, stat.CreationTime.Nanoseconds())
}
return fi.ModTime()
}
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)
if err != nil {
return err
}
stat, ok := info.Sys().(*syscall.Win32FileAttributeData)
if !ok {
fs.Debugf(o, "didn't return Win32FileAttributeData as expected")
return nil
}
// FIXME do something with stat.FileAttributes ?
m.Set("mode", fmt.Sprintf("%0o", info.Mode()))
setTime := func(key string, t syscall.Filetime) {
m.Set(key, time.Unix(0, t.Nanoseconds()).Format(metadataTimeFormat))
}
setTime("atime", stat.LastAccessTime)
setTime("mtime", stat.LastWriteTime)
setTime("btime", stat.CreationTime)
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/koofr/koofr.go | backend/koofr/koofr.go | // Package koofr provides an interface to the Koofr storage system.
package koofr
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"path"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient"
)
// Register Fs with rclone
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr, Digi Storage and other Koofr-compatible storage providers",
NewFs: NewFs,
Options: []fs.Option{{
Name: fs.ConfigProvider,
Help: "Choose your storage provider.",
// NOTE if you add a new provider here, then add it in the
// setProviderDefaults() function and update options accordingly
Examples: []fs.OptionExample{{
Value: "koofr",
Help: "Koofr, https://app.koofr.net/",
}, {
Value: "digistorage",
Help: "Digi Storage, https://storage.rcs-rds.ro/",
}, {
Value: "other",
Help: "Any other Koofr API compatible storage service",
}},
}, {
Name: "endpoint",
Help: "The Koofr API endpoint to use.",
Provider: "other",
Required: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Advanced: true,
}, {
Name: "user",
Help: "Your user name.",
Required: true,
Sensitive: true,
}, {
Name: "password",
Help: "Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password.",
Provider: "koofr",
IsPassword: true,
Required: true,
}, {
Name: "password",
Help: "Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password.",
Provider: "digistorage",
IsPassword: true,
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at your service's settings page).",
Provider: "other",
IsPassword: true,
Required: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options represent the configuration of the Koofr backend
type Options struct {
Provider string `config:"provider"`
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
SetMTime bool `config:"setmtime"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// An Fs is a representation of a remote Koofr Fs
type Fs struct {
name string
mountID string
root string
opt Options
features *fs.Features
client *koofrclient.KoofrClient
}
// An Object on the remote Koofr Fs
type Object struct {
fs *Fs
remote string
info koofrclient.FileInfo
}
func base(pth string) string {
rv := path.Base(pth)
if rv == "" || rv == "." {
rv = "/"
}
return rv
}
func dir(pth string) string {
rv := path.Dir(pth)
if rv == "" || rv == "." {
rv = "/"
}
return rv
}
// String returns a string representation of the remote Object
func (o *Object) String() string {
return o.remote
}
// Remote returns the remote path of the Object, relative to Fs root
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification time of the Object
func (o *Object) ModTime(ctx context.Context) time.Time {
return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
}
// Size returns the size of the Object in bytes
func (o *Object) Size() int64 {
return o.info.Size
}
// Fs returns a reference to the Koofr Fs containing the Object
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns an MD5 hash of the Object
func (o *Object) Hash(ctx context.Context, typ hash.Type) (string, error) {
if typ == hash.MD5 {
return o.info.Hash, nil
}
return "", nil
}
// fullPath returns full path of the remote Object (including Fs root)
func (o *Object) fullPath() string {
return o.fs.fullPath(o.remote)
}
// Storable returns true if the Object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime is not supported
func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
return fs.ErrorCantSetModTimeWithoutDelete
}
// Open opens the Object for reading
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
var sOff, eOff int64 = 0, -1
fs.FixRangeOption(options, o.Size())
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
sOff = x.Offset
case *fs.RangeOption:
sOff = x.Start
eOff = x.End
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if sOff == 0 && eOff < 0 {
return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
}
span := &koofrclient.FileSpan{
Start: sOff,
End: eOff,
}
return o.fs.client.FilesGetRange(o.fs.mountID, o.fullPath(), span)
}
// Update updates the Object contents
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
putopts := &koofrclient.PutOptions{
ForceOverwrite: true,
NoRename: true,
OverwriteIgnoreNonExisting: true,
SetModified: &mtime,
}
fullPath := o.fullPath()
dirPath := dir(fullPath)
name := base(fullPath)
err := o.fs.mkdir(dirPath)
if err != nil {
return err
}
info, err := o.fs.client.FilesPutWithOptions(o.fs.mountID, dirPath, name, in, putopts)
if err != nil {
return err
}
o.info = *info
return nil
}
// Remove deletes the remote Object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
}
// Name returns the name of the Fs
func (f *Fs) Name() string {
return f.name
}
// Root returns the root path of the Fs
func (f *Fs) Root() string {
return f.root
}
// String returns a string representation of the Fs
func (f *Fs) String() string {
return "koofr:" + f.mountID + ":" + f.root
}
// Features returns the optional features supported by this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision returns the modification time precision, or fs.ModTimeNotSupported when setting modification times is disabled
func (f *Fs) Precision() time.Duration {
if !f.opt.SetMTime {
return fs.ModTimeNotSupported
}
return time.Millisecond
}
// Hashes returns the set of hashes provided by the Fs
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// fullPath constructs a full, absolute path from a path relative to the Fs root
func (f *Fs) fullPath(part string) string {
return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
}
func setProviderDefaults(opt *Options) {
// handle old, provider-less configs
if opt.Provider == "" {
if opt.Endpoint == "" || strings.HasPrefix(opt.Endpoint, "https://app.koofr.net") {
opt.Provider = "koofr"
} else if strings.HasPrefix(opt.Endpoint, "https://storage.rcs-rds.ro") {
opt.Provider = "digistorage"
} else {
opt.Provider = "other"
}
}
// now assign an endpoint
if opt.Provider == "koofr" {
opt.Endpoint = "https://app.koofr.net"
} else if opt.Provider == "digistorage" {
opt.Endpoint = "https://storage.rcs-rds.ro"
}
}
// NewFs constructs a new filesystem given a root path and rclone configuration options
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
setProviderDefaults(opt)
return NewFsFromOptions(ctx, name, root, opt)
}
// NewFsFromOptions constructs a new filesystem given a root path and internal configuration options
func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff fs.Fs, err error) {
pass, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, err
}
httpClient := httpclient.New()
httpClient.Client = fshttp.NewClient(ctx)
client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth)
mounts, err := client.Mounts()
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
client: client,
}
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
for _, m := range mounts {
if opt.MountID != "" {
if m.Id == opt.MountID {
f.mountID = m.Id
break
}
} else if m.IsPrimary {
f.mountID = m.Id
break
}
}
if f.mountID == "" {
if opt.MountID == "" {
return nil, errors.New("failed to find primary mount")
}
return nil, errors.New("failed to find mount " + opt.MountID)
}
rootFile, err := f.client.FilesInfo(f.mountID, f.opt.Enc.FromStandardPath("/"+f.root))
if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root)
err = fs.ErrorIsFile
} else {
err = nil
}
return f, err
}
// List returns a list of items in a directory
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return nil, translateErrorsDir(err)
}
entries = make([]fs.DirEntry, len(files))
for i, file := range files {
remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Time{})
} else {
entries[i] = &Object{
fs: f,
info: file,
remote: remote,
}
}
}
return entries, nil
}
// NewObject creates a new remote Object for a given remote path
func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err error) {
info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
if err != nil {
return nil, translateErrorsObject(err)
}
if info.Type == "dir" {
return nil, fs.ErrorIsDir
}
return &Object{
fs: f,
info: info,
remote: remote,
}, nil
}
// Put updates a remote Object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
putopts := &koofrclient.PutOptions{
ForceOverwrite: true,
NoRename: true,
OverwriteIgnoreNonExisting: true,
SetModified: &mtime,
}
fullPath := f.fullPath(src.Remote())
dirPath := dir(fullPath)
name := base(fullPath)
err = f.mkdir(dirPath)
if err != nil {
return nil, err
}
info, err := f.client.FilesPutWithOptions(f.mountID, dirPath, name, in, putopts)
if err != nil {
return nil, translateErrorsObject(err)
}
return &Object{
fs: f,
info: *info,
remote: src.Remote(),
}, nil
}
// PutStream updates a remote Object with a stream of unknown size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// isBadRequest is a predicate which holds true iff the error returned was
// HTTP status 400
func isBadRequest(err error) bool {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusBadRequest {
return true
}
}
return false
}
// translateErrorsDir translates koofr errors to rclone errors (for a dir
// operation)
func translateErrorsDir(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusNotFound {
return fs.ErrorDirNotFound
}
}
return err
}
// translateErrorsObject translates Koofr errors to rclone errors (for an object operation)
func translateErrorsObject(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
// mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) mkdir(fullPath string) error {
if fullPath == "/" {
return nil
}
info, err := f.client.FilesInfo(f.mountID, fullPath)
if err == nil && info.Type == "dir" {
return nil
}
err = translateErrorsDir(err)
if err != nil && err != fs.ErrorDirNotFound {
return err
}
dirs := strings.Split(fullPath, "/")
parent := "/"
for _, part := range dirs {
if part == "" {
continue
}
info, err = f.client.FilesInfo(f.mountID, path.Join(parent, part))
if err != nil || info.Type != "dir" {
err = translateErrorsDir(err)
if err != nil && err != fs.ErrorDirNotFound {
return err
}
err = f.client.FilesNewFolder(f.mountID, parent, part)
if err != nil && !isBadRequest(err) {
return err
}
}
parent = path.Join(parent, part)
}
return nil
}
// Mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
fullPath := f.fullPath(dir)
return f.mkdir(fullPath)
}
// Rmdir removes an (empty) directory at the given remote path
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
}
if len(files) > 0 {
return fs.ErrorDirectoryNotEmpty
}
err = f.client.FilesDelete(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
}
return nil
}
// Copy copies a remote Object to the given path
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantCopy
}
mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
err = f.client.FilesCopy((src.(*Object)).fs.mountID,
(src.(*Object)).fs.fullPath((src.(*Object)).remote),
f.mountID, dstFullPath, koofrclient.CopyOptions{SetModified: &mtime})
if err != nil {
return nil, fs.ErrorCantCopy
}
return f.NewObject(ctx, remote)
}
// Move moves a remote Object to the given path
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj := src.(*Object)
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantMove
}
err = f.client.FilesMove(srcObj.fs.mountID,
srcObj.fs.fullPath(srcObj.remote), f.mountID, dstFullPath)
if err != nil {
return nil, fs.ErrorCantMove
}
return f.NewObject(ctx, remote)
}
// DirMove moves a remote directory to the given path
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs := src.(*Fs)
srcFullPath := srcFs.fullPath(srcRemote)
dstFullPath := f.fullPath(dstRemote)
if srcFs.mountID == f.mountID && srcFullPath == dstFullPath {
return fs.ErrorDirExists
}
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return fs.ErrorCantDirMove
}
err = f.client.FilesMove(srcFs.mountID, srcFullPath, f.mountID, dstFullPath)
if err != nil {
return fs.ErrorCantDirMove
}
return nil
}
// About reports space usage (with MiB precision)
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
mount, err := f.client.MountsDetails(f.mountID)
if err != nil {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(mount.SpaceTotal * 1024 * 1024),
Used: fs.NewUsageValue(mount.SpaceUsed * 1024 * 1024),
Trashed: nil,
Other: nil,
Free: fs.NewUsageValue((mount.SpaceTotal - mount.SpaceUsed) * 1024 * 1024),
Objects: nil,
}, nil
}
// Purge purges the complete Fs
func (f *Fs) Purge(ctx context.Context) error {
err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
return err
}
// linkCreate is a Koofr API request for creating a public link
type linkCreate struct {
Path string `json:"path"`
}
// link is a Koofr API response to creating a public link
type link struct {
ID string `json:"id"`
Name string `json:"name"`
Path string `json:"path"`
Counter int64 `json:"counter"`
URL string `json:"url"`
ShortURL string `json:"shortUrl"`
Hash string `json:"hash"`
Host string `json:"host"`
HasPassword bool `json:"hasPassword"`
Password string `json:"password"`
ValidFrom int64 `json:"validFrom"`
ValidTo int64 `json:"validTo"`
PasswordRequired bool `json:"passwordRequired"`
}
// createLink makes a Koofr API call to create a public link
func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link, error) {
linkCreate := linkCreate{
Path: path,
}
linkData := link{}
request := httpclient.RequestData{
Method: "POST",
Path: "/api/v2/mounts/" + mountID + "/links",
ExpectedStatus: []int{http.StatusOK, http.StatusCreated},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: linkCreate,
RespEncoding: httpclient.EncodingJSON,
RespValue: &linkData,
}
_, err := c.Request(&request)
if err != nil {
return nil, err
}
return &linkData, nil
}
// PublicLink creates a public link to the remote path
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
if err != nil {
return "", translateErrorsDir(err)
}
// The URL returned by the API looks like the following:
//
// https://app.koofr.net/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6
//
// The direct URL looks like the following:
//
// https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
//
// I am not sure about the meaning of the "path" parameter; in my
// experiments it is always "%2F", and omitting it or putting any other
// value results in a 404.
//
// There is one more quirk: a direct link to a file in / returns that file,
// while a direct link to a file elsewhere in the hierarchy returns a zip
// archive with one member.
link := linkData.URL
link = strings.ReplaceAll(link, "/links", "/content/links")
link += "/files/get?path=%2F"
return link, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
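A small standalone sketch of the millisecond-epoch conversions the Koofr backend uses above: ModTime splits info.Modified (Unix milliseconds) into seconds and nanoseconds, while Put and Update compute the SetModified value by dividing UnixNano by one million.
package main

import (
    "fmt"
    "time"
)

// fromMillis mirrors the ModTime conversion above.
func fromMillis(ms int64) time.Time {
    return time.Unix(ms/1000, (ms%1000)*1000*1000)
}

// toMillis mirrors the SetModified value computed in Put and Update above.
func toMillis(t time.Time) int64 {
    return t.UnixNano() / 1000 / 1000
}

func main() {
    t := fromMillis(1700000123456)
    fmt.Println(t.UTC())     // 2023-11-14 22:15:23.456 +0000 UTC
    fmt.Println(toMillis(t)) // 1700000123456
}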
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/koofr/koofr_test.go | backend/koofr/koofr_test.go | package koofr_test
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestKoofr:",
})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/entry.go | backend/union/entry.go | package union
import (
"context"
"errors"
"fmt"
"io"
"sync"
"time"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
// Object describes a union Object
//
// This is a wrapped object which returns the Union Fs as its parent
type Object struct {
*upstream.Object
fs *Fs // what this object is part of
co []upstream.Entry
writebackMu sync.Mutex
}
// Directory describes a union Directory
//
// This is a wrapped directory which contains all candidates
type Directory struct {
*upstream.Directory
fs *Fs // what this directory is part of
cd []upstream.Entry
}
type entry interface {
upstream.Entry
candidates() []upstream.Entry
}
// Update o with the contents of newO excluding the lock
func (o *Object) update(newO *Object) {
o.Object = newO.Object
o.fs = newO.fs
o.co = newO.co
}
// UnWrapUpstream returns the upstream Object that this Object is wrapping
func (o *Object) UnWrapUpstream() *upstream.Object {
return o.Object
}
// Fs returns the union Fs as the parent
func (o *Object) Fs() fs.Info {
return o.fs
}
func (o *Object) candidates() []upstream.Entry {
return o.co
}
func (d *Directory) candidates() []upstream.Entry {
return d.cd
}
// Update in to the object with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
entries, err := o.fs.actionEntries(o.candidates()...)
if err == fs.ErrorPermissionDenied {
// There are no candidates in this object which can be written to
// So attempt to create a new object instead
newO, err := o.fs.put(ctx, in, src, false, options...)
if err != nil {
return err
}
// Update current object
o.update(newO.(*Object))
return nil
} else if err != nil {
return err
}
if len(entries) == 1 {
obj := entries[0].(*upstream.Object)
return obj.Update(ctx, in, src, options...)
}
// Multi-threading
readers, errChan := multiReader(len(entries), in)
errs := Errors(make([]error, len(entries)+1))
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.Update(ctx, readers[i], src, options...)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
if len(entries) > 1 {
// Drain the input buffer to allow other uploads to continue
_, _ = io.Copy(io.Discard, readers[i])
}
}
} else {
errs[i] = fs.ErrorNotAFile
}
})
errs[len(entries)] = <-errChan
return errs.Err()
}
// Remove candidate objects selected by ACTION policy
func (o *Object) Remove(ctx context.Context) error {
entries, err := o.fs.actionEntries(o.candidates()...)
if err != nil {
return err
}
errs := Errors(make([]error, len(entries)))
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.Remove(ctx)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
}
} else {
errs[i] = fs.ErrorNotAFile
}
})
return errs.Err()
}
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
entries, err := o.fs.actionEntries(o.candidates()...)
if err != nil {
return err
}
var wg sync.WaitGroup
errs := Errors(make([]error, len(entries)))
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.SetModTime(ctx, t)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
}
} else {
errs[i] = fs.ErrorNotAFile
}
})
wg.Wait()
return errs.Err()
}
// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
do, ok := o.Object.Object.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.Object.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// MimeType returns the content type of the Object if known
func (o *Object) MimeType(ctx context.Context) (mimeType string) {
if do, ok := o.Object.Object.(fs.MimeTyper); ok {
mimeType = do.MimeType(ctx)
}
return mimeType
}
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
do, ok := o.Object.Object.(fs.SetTierer)
if !ok {
return errors.New("underlying remote does not support SetTier")
}
return do.SetTier(tier)
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
// Need some sort of locking to prevent multiple downloads
o.writebackMu.Lock()
defer o.writebackMu.Unlock()
// FIXME what if correct object is already in o.co
newObj, err := o.Object.Writeback(ctx)
if err != nil {
return nil, err
}
if newObj != nil {
o.Object = newObj
o.co = append(o.co, newObj) // FIXME should this append or overwrite or update?
}
return o.Object.Object.Open(ctx, options...)
}
// ModTime returns the modification date of the directory
// It returns the latest ModTime of all candidates
func (d *Directory) ModTime(ctx context.Context) (t time.Time) {
entries := d.candidates()
times := make([]time.Time, len(entries))
multithread(len(entries), func(i int) {
times[i] = entries[i].ModTime(ctx)
})
for _, ti := range times {
if t.Before(ti) {
t = ti
}
}
return t
}
// Size returns the size of the directory
// It returns the sum of all candidates
func (d *Directory) Size() (s int64) {
for _, e := range d.candidates() {
s += e.Size()
}
return s
}
// SetMetadata sets metadata for a DirEntry
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (d *Directory) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
entries, err := d.fs.actionEntries(d.candidates()...)
if err != nil {
return err
}
var wg sync.WaitGroup
errs := Errors(make([]error, len(entries)))
multithread(len(entries), func(i int) {
if d, ok := entries[i].(*upstream.Directory); ok {
err := d.SetMetadata(ctx, metadata)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", d.UpstreamFs().Name(), err)
}
} else {
errs[i] = fs.ErrorIsFile
}
})
wg.Wait()
return errs.Err()
}
// SetModTime sets the metadata on the DirEntry to set the modification date
//
// If there is any other metadata it does not overwrite it.
func (d *Directory) SetModTime(ctx context.Context, t time.Time) error {
entries, err := d.fs.actionEntries(d.candidates()...)
if err != nil {
return err
}
var wg sync.WaitGroup
errs := Errors(make([]error, len(entries)))
multithread(len(entries), func(i int) {
if d, ok := entries[i].(*upstream.Directory); ok {
err := d.SetModTime(ctx, t)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", d.UpstreamFs().Name(), err)
}
} else {
errs[i] = fs.ErrorIsFile
}
})
wg.Wait()
return errs.Err()
}
// Check the interfaces are satisfied
var (
_ fs.FullObject = (*Object)(nil)
_ fs.FullDirectory = (*Directory)(nil)
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
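A self-contained sketch, under assumptions, of the fan-out idea behind the multi-upstream upload in Object.Update above: one source reader is duplicated to several concurrent consumers via io.Pipe and io.MultiWriter. The rclone multiReader/multithread helpers are not reproduced here; fanOut is a hypothetical illustration.
package main

import (
    "fmt"
    "io"
    "strings"
    "sync"
)

// fanOut returns n readers that will each see a copy of src, plus a
// function that drives the copy and closes the pipes when done.
func fanOut(src io.Reader, n int) ([]io.Reader, func() error) {
    readers := make([]io.Reader, n)
    writers := make([]io.Writer, n)
    pws := make([]*io.PipeWriter, n)
    for i := 0; i < n; i++ {
        pr, pw := io.Pipe()
        readers[i], writers[i], pws[i] = pr, pw, pw
    }
    run := func() error {
        _, err := io.Copy(io.MultiWriter(writers...), src)
        for _, pw := range pws {
            pw.CloseWithError(err) // err == nil closes with EOF
        }
        return err
    }
    return readers, run
}

func main() {
    readers, run := fanOut(strings.NewReader("hello union"), 2)
    var wg sync.WaitGroup
    for i, r := range readers {
        wg.Add(1)
        go func(i int, r io.Reader) {
            defer wg.Done()
            b, _ := io.ReadAll(r)
            fmt.Println("consumer", i, "got:", string(b))
        }(i, r)
    }
    err := run() // blocks until both consumers have read everything
    wg.Wait()
    fmt.Println("copy error:", err)
}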
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/errors_test.go | backend/union/errors_test.go | package union
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
)
var (
err1 = errors.New("Error 1")
err2 = errors.New("Error 2")
err3 = errors.New("Error 3")
)
func TestErrorsMap(t *testing.T) {
es := Errors{
nil,
err1,
err2,
}
want := Errors{
err2,
}
got := es.Map(func(e error) error {
if e == err1 {
return nil
}
return e
})
assert.Equal(t, want, got)
}
func TestErrorsFilterNil(t *testing.T) {
es := Errors{
nil,
err1,
nil,
err2,
nil,
}
want := Errors{
err1,
err2,
}
got := es.FilterNil()
assert.Equal(t, want, got)
}
func TestErrorsErr(t *testing.T) {
// Check not all nil case
es := Errors{
nil,
err1,
nil,
err2,
nil,
}
want := Errors{
err1,
err2,
}
got := es.Err()
assert.Equal(t, want, got)
// Check all nil case
es = Errors{
nil,
nil,
nil,
}
assert.Nil(t, es.Err())
}
func TestErrorsError(t *testing.T) {
assert.Equal(t, "no error", Errors{}.Error())
assert.Equal(t, "1 error: Error 1", Errors{err1}.Error())
assert.Equal(t, "1 error: nil error", Errors{nil}.Error())
assert.Equal(t, "2 errors: Error 1; Error 2", Errors{err1, err2}.Error())
}
func TestErrorsUnwrap(t *testing.T) {
es := Errors{
err1,
err2,
}
assert.Equal(t, []error{err1, err2}, es.Unwrap())
assert.True(t, errors.Is(es, err1))
assert.True(t, errors.Is(es, err2))
assert.False(t, errors.Is(es, err3))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/union_test.go | backend/union/union_test.go | // Test Union filesystem interface
package union_test
import (
"testing"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/memory"
"github.com/rclone/rclone/backend/union"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt", "OpenChunkWriter", "ListP"}
unimplementableObjectMethods = []string{}
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
name := "TestUnion"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "epall"},
{Name: name, Key: "create_policy", Value: "epmfs"},
{Name: name, Key: "search_policy", Value: "ff"},
},
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
QuickTestOK: true,
})
}
func TestRO(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
upstreams := dirs[0] + " " + dirs[1] + ":ro " + dirs[2] + ":ro"
name := "TestUnionRO"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "epall"},
{Name: name, Key: "create_policy", Value: "epmfs"},
{Name: name, Key: "search_policy", Value: "ff"},
},
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
QuickTestOK: true,
})
}
func TestNC(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
upstreams := dirs[0] + " " + dirs[1] + ":nc " + dirs[2] + ":nc"
name := "TestUnionNC"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "epall"},
{Name: name, Key: "create_policy", Value: "epmfs"},
{Name: name, Key: "search_policy", Value: "ff"},
},
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
QuickTestOK: true,
})
}
func TestPolicy1(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
name := "TestUnionPolicy1"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "all"},
{Name: name, Key: "create_policy", Value: "lus"},
{Name: name, Key: "search_policy", Value: "all"},
},
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
QuickTestOK: true,
})
}
func TestPolicy2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
name := "TestUnionPolicy2"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "all"},
{Name: name, Key: "create_policy", Value: "rand"},
{Name: name, Key: "search_policy", Value: "ff"},
},
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
QuickTestOK: true,
})
}
func TestPolicy3(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
name := "TestUnionPolicy3"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "all"},
{Name: name, Key: "create_policy", Value: "all"},
{Name: name, Key: "search_policy", Value: "all"},
},
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
QuickTestOK: true,
})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/errors.go | backend/union/errors.go | package union
import (
"bytes"
"fmt"
)
// The Errors type wraps a slice of errors
type Errors []error
// Map returns a copy of the error slice with all its errors modified
// according to the mapping function. If mapping returns nil,
// the error is dropped from the error slice with no replacement.
func (e Errors) Map(mapping func(error) error) Errors {
s := make([]error, len(e))
i := 0
for _, err := range e {
nerr := mapping(err)
if nerr == nil {
continue
}
s[i] = nerr
i++
}
return Errors(s[:i])
}
// FilterNil returns the Errors without nil
func (e Errors) FilterNil() Errors {
ne := e.Map(func(err error) error {
return err
})
return ne
}
// Err returns the Errors with nil entries filtered out,
// or nil if there are no non-nil errors.
func (e Errors) Err() error {
ne := e.FilterNil()
if len(ne) == 0 {
return nil
}
return ne
}
// Error returns a concatenated string of the contained errors
func (e Errors) Error() string {
var buf bytes.Buffer
if len(e) == 0 {
buf.WriteString("no error")
} else if len(e) == 1 {
buf.WriteString("1 error: ")
} else {
fmt.Fprintf(&buf, "%d errors: ", len(e))
}
for i, err := range e {
if i != 0 {
buf.WriteString("; ")
}
if err != nil {
buf.WriteString(err.Error())
} else {
buf.WriteString("nil error")
}
}
return buf.String()
}
// Unwrap returns the wrapped errors
func (e Errors) Unwrap() []error {
return e
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
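A minimal standalone sketch of what the Unwrap() []error method above buys: since Go 1.20, errors.Is follows multi-error Unwrap, so a wrapped slice can be matched against its members. multiErr is a hypothetical stand-in for the union Errors type.
package main

import (
    "errors"
    "fmt"
)

// multiErr is a hypothetical stand-in for the Errors type above.
type multiErr []error

func (m multiErr) Error() string   { return fmt.Sprintf("%d errors", len(m)) }
func (m multiErr) Unwrap() []error { return m }

var (
    errA = errors.New("a")
    errB = errors.New("b")
)

func main() {
    var err error = multiErr{errA, errB}
    fmt.Println(errors.Is(err, errA)) // true - found via Unwrap() []error
    fmt.Println(errors.Is(err, errB)) // true
}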
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/union_internal_test.go | backend/union/union_internal_test.go | package union
import (
"bytes"
"context"
"fmt"
"runtime"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// MakeTestDirs makes directories in /tmp for testing
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
for i := 1; i <= n; i++ {
dir := t.TempDir()
dirs = append(dirs, dir)
}
return dirs
}
func (f *Fs) TestInternalReadOnly(t *testing.T) {
if f.name != "TestUnionRO" {
t.Skip("Only on RO union")
}
dir := "TestInternalReadOnly"
ctx := context.Background()
rofs := f.upstreams[len(f.upstreams)-1]
assert.False(t, rofs.IsWritable())
// Put a file onto the read only fs
contents := random.String(50)
file1 := fstest.NewItem(dir+"/file.txt", contents, time.Now())
obj1 := fstests.PutTestContents(ctx, t, rofs, &file1, contents, true)
// Check read from readonly fs via union
o, err := f.NewObject(ctx, file1.Path)
require.NoError(t, err)
assert.Equal(t, int64(50), o.Size())
// Now call Update on the union Object with new data
contents2 := random.String(100)
file2 := fstest.NewItem(dir+"/file.txt", contents2, time.Now())
in := bytes.NewBufferString(contents2)
src := object.NewStaticObjectInfo(file2.Path, file2.ModTime, file2.Size, true, nil, nil)
err = o.Update(ctx, in, src)
require.NoError(t, err)
assert.Equal(t, int64(100), o.Size())
// Check we read the new object via the union
o, err = f.NewObject(ctx, file1.Path)
require.NoError(t, err)
assert.Equal(t, int64(100), o.Size())
// Remove the object
assert.NoError(t, o.Remove(ctx))
// Check we read the old object in the read only layer now
o, err = f.NewObject(ctx, file1.Path)
require.NoError(t, err)
assert.Equal(t, int64(50), o.Size())
// Remove file and dir from read only fs
assert.NoError(t, obj1.Remove(ctx))
assert.NoError(t, rofs.Rmdir(ctx, dir))
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("ReadOnly", f.TestInternalReadOnly)
}
var _ fstests.InternalTester = (*Fs)(nil)
// This specifically tests a union of local which can Move but not
// Copy and :memory: which can Copy but not Move to make sure that
// the resulting union can Move
func TestMoveCopy(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
ctx := context.Background()
dirs := MakeTestDirs(t, 1)
fsString := fmt.Sprintf(":union,upstreams='%s :memory:bucket':", dirs[0])
f, err := fs.NewFs(ctx, fsString)
require.NoError(t, err)
unionFs := f.(*Fs)
fLocal := unionFs.upstreams[0].Fs
fMemory := unionFs.upstreams[1].Fs
if runtime.GOOS == "darwin" {
// need to disable as this test specifically tests a local that can't Copy
f.Features().Disable("Copy")
fLocal.Features().Disable("Copy")
}
t.Run("Features", func(t *testing.T) {
assert.NotNil(t, f.Features().Move)
assert.Nil(t, f.Features().Copy)
// Check the underlying remotes are as we expect
assert.NotNil(t, fLocal.Features().Move)
assert.Nil(t, fLocal.Features().Copy)
assert.Nil(t, fMemory.Features().Move)
assert.NotNil(t, fMemory.Features().Copy)
})
// Put a file onto the local fs
contentsLocal := random.String(50)
fileLocal := fstest.NewItem("local.txt", contentsLocal, time.Now())
_ = fstests.PutTestContents(ctx, t, fLocal, &fileLocal, contentsLocal, true)
objLocal, err := f.NewObject(ctx, fileLocal.Path)
require.NoError(t, err)
// Put a file onto the memory fs
contentsMemory := random.String(60)
fileMemory := fstest.NewItem("memory.txt", contentsMemory, time.Now())
_ = fstests.PutTestContents(ctx, t, fMemory, &fileMemory, contentsMemory, true)
objMemory, err := f.NewObject(ctx, fileMemory.Path)
require.NoError(t, err)
fstest.CheckListing(t, f, []fstest.Item{fileLocal, fileMemory})
t.Run("MoveLocal", func(t *testing.T) {
fileLocal.Path = "local-renamed.txt"
_, err := operations.Move(ctx, f, nil, fileLocal.Path, objLocal)
require.NoError(t, err)
fstest.CheckListing(t, f, []fstest.Item{fileLocal, fileMemory})
// Check can retrieve object from union
obj, err := f.NewObject(ctx, fileLocal.Path)
require.NoError(t, err)
assert.Equal(t, fileLocal.Size, obj.Size())
// Check can retrieve object from underlying
obj, err = fLocal.NewObject(ctx, fileLocal.Path)
require.NoError(t, err)
assert.Equal(t, fileLocal.Size, obj.Size())
t.Run("MoveMemory", func(t *testing.T) {
fileMemory.Path = "memory-renamed.txt"
_, err := operations.Move(ctx, f, nil, fileMemory.Path, objMemory)
require.NoError(t, err)
fstest.CheckListing(t, f, []fstest.Item{fileLocal, fileMemory})
// Check can retrieve object from union
obj, err := f.NewObject(ctx, fileMemory.Path)
require.NoError(t, err)
assert.Equal(t, fileMemory.Size, obj.Size())
// Check can retrieve object from underlying
obj, err = fMemory.NewObject(ctx, fileMemory.Path)
require.NoError(t, err)
assert.Equal(t, fileMemory.Size, obj.Size())
})
})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/union.go | backend/union/union.go | // Package union implements a virtual provider to join existing remotes.
package union
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"path"
"path/filepath"
"strings"
"sync"
"time"
"github.com/rclone/rclone/backend/union/common"
"github.com/rclone/rclone/backend/union/policy"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "union",
Description: "Union merges the contents of several upstream fs",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "upstreams",
Help: "List of space separated upstreams.\n\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.",
Required: true,
}, {
Name: "action_policy",
Help: "Policy to choose upstream on ACTION category.",
Default: "epall",
}, {
Name: "create_policy",
Help: "Policy to choose upstream on CREATE category.",
Default: "epmfs",
}, {
Name: "search_policy",
Help: "Policy to choose upstream on SEARCH category.",
Default: "ff",
}, {
Name: "cache_time",
Help: "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
Default: 120,
}, {
Name: "min_free_space",
Help: `Minimum viable free space for lfs/eplfs policies.
If a remote has less than this much free space then it won't be
considered for use in lfs or eplfs policies.`,
Advanced: true,
Default: fs.Gibi,
}},
}
fs.Register(fsi)
}
// Fs represents a union of upstreams
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt common.Options // options for this Fs
root string // the path we are working on
upstreams []*upstream.Fs // slice of upstreams
hashSet hash.Set // intersection of hash types
actionPolicy policy.Policy // policy for ACTION
createPolicy policy.Policy // policy for CREATE
searchPolicy policy.Policy // policy for SEARCH
}
// Wrap candidate objects in to a union Object
func (f *Fs) wrapEntries(entries ...upstream.Entry) (entry, error) {
e, err := f.searchEntries(entries...)
if err != nil {
return nil, err
}
switch e := e.(type) {
case *upstream.Object:
return &Object{
Object: e,
fs: f,
co: entries,
}, nil
case *upstream.Directory:
return &Directory{
Directory: e,
fs: f,
cd: entries,
}, nil
default:
return nil, fmt.Errorf("unknown object type %T", e)
}
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("union root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
upstreams, err := f.action(ctx, dir)
if err != nil {
// If none of the backends can have empty directories then
// don't complain about directories not being found
if !f.features.CanHaveEmptyDirectories && err == fs.ErrorObjectNotFound {
return nil
}
return err
}
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
err := upstreams[i].Rmdir(ctx, dir)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
}
})
return errs.Err()
}
// Hashes returns the intersection of the hash types supported by all the upstreams
func (f *Fs) Hashes() hash.Set {
return f.hashSet
}
// mkdir makes the directory passed in and returns the upstreams used
func (f *Fs) mkdir(ctx context.Context, dir string) ([]*upstream.Fs, error) {
upstreams, err := f.create(ctx, dir)
if err == fs.ErrorObjectNotFound {
parent := parentDir(dir)
if dir != parent {
upstreams, err = f.mkdir(ctx, parent)
} else if dir == "" {
// If root dirs not created then create them
upstreams, err = f.upstreams, nil
}
}
if err != nil {
return nil, err
}
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
err := upstreams[i].Mkdir(ctx, dir)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
}
})
err = errs.Err()
if err != nil {
return nil, err
}
// If created roots then choose one
if dir == "" {
upstreams, err = f.create(ctx, dir)
}
return upstreams, err
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.mkdir(ctx, dir)
return err
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
upstreams, err := f.create(ctx, dir)
if err != nil {
return nil, err
}
errs := Errors(make([]error, len(upstreams)))
entries := make([]upstream.Entry, len(upstreams))
multithread(len(upstreams), func(i int) {
u := upstreams[i]
if do := u.Features().MkdirMetadata; do != nil {
newDir, err := do(ctx, dir, metadata)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
} else {
entries[i], err = u.WrapEntry(newDir)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
}
}
} else {
// Just do Mkdir on upstreams which don't support MkdirMetadata
err := u.Mkdir(ctx, dir)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
}
}
})
err = errs.Err()
if err != nil {
return nil, err
}
entry, err := f.wrapEntries(entries...)
if err != nil {
return nil, err
}
newDir, ok := entry.(fs.Directory)
if !ok {
return nil, fmt.Errorf("internal error: expecting %T to be an fs.Directory", entry)
}
return newDir, nil
}
// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
for _, r := range f.upstreams {
if r.Features().Purge == nil {
return fs.ErrorCantPurge
}
}
upstreams, err := f.action(ctx, "")
if err != nil {
return err
}
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
err := upstreams[i].Features().Purge(ctx, dir)
if errors.Is(err, fs.ErrorDirNotFound) {
err = nil
}
if err != nil {
errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
}
})
return errs.Err()
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
o := srcObj.UnWrapUpstream()
su := o.UpstreamFs()
if su.Features().Copy == nil {
return nil, fs.ErrorCantCopy
}
var du *upstream.Fs
for _, u := range f.upstreams {
if operations.Same(u.RootFs, su.RootFs) {
du = u
}
}
if du == nil {
return nil, fs.ErrorCantCopy
}
if !du.IsCreatable() {
return nil, fs.ErrorPermissionDenied
}
co, err := du.Features().Copy(ctx, o, remote)
if err != nil || co == nil {
return nil, err
}
wo, err := f.wrapEntries(du.WrapObject(co))
return wo.(*Object), err
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
o, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
entries, err := f.actionEntries(o.candidates()...)
if err != nil {
return nil, err
}
for _, e := range entries {
if !operations.CanServerSideMove(e.UpstreamFs()) {
return nil, fs.ErrorCantMove
}
}
objs := make([]*upstream.Object, len(entries))
errs := Errors(make([]error, len(entries)))
multithread(len(entries), func(i int) {
su := entries[i].UpstreamFs()
o, ok := entries[i].(*upstream.Object)
if !ok {
errs[i] = fmt.Errorf("%s: %w", su.Name(), fs.ErrorNotAFile)
return
}
var du *upstream.Fs
for _, u := range f.upstreams {
if operations.Same(u.RootFs, su.RootFs) {
du = u
}
}
if du == nil {
errs[i] = fmt.Errorf("%s: %s: %w", su.Name(), remote, fs.ErrorCantMove)
return
}
srcObj := o.UnWrap()
duFeatures := du.Features()
do := duFeatures.Move
if duFeatures.Move == nil {
do = duFeatures.Copy
}
// Do the Move or Copy
dstObj, err := do(ctx, srcObj, remote)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", su.Name(), err)
return
}
if dstObj == nil {
errs[i] = fmt.Errorf("%s: destination object not found", su.Name())
return
}
objs[i] = du.WrapObject(dstObj)
// Delete the source object if Copy
if duFeatures.Move == nil {
err = srcObj.Remove(ctx)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", su.Name(), err)
return
}
}
})
var en []upstream.Entry
for _, o := range objs {
if o != nil {
en = append(en, o)
}
}
e, err := f.wrapEntries(en...)
if err != nil {
return nil, err
}
return e.(*Object), errs.Err()
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
sfs, ok := src.(*Fs)
if !ok {
fs.Debugf(src, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
upstreams, err := sfs.action(ctx, srcRemote)
if err != nil {
return err
}
for _, u := range upstreams {
if u.Features().DirMove == nil {
return fs.ErrorCantDirMove
}
}
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
su := upstreams[i]
var du *upstream.Fs
for _, u := range f.upstreams {
if operations.Same(u.RootFs, su.RootFs) {
du = u
}
}
if du == nil {
errs[i] = fmt.Errorf("%s: %s: %w", su.Name(), su.Root(), fs.ErrorCantDirMove)
return
}
err := du.Features().DirMove(ctx, su.Fs, srcRemote, dstRemote)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", du.Name()+":"+du.Root(), err)
}
})
errs = errs.FilterNil()
if len(errs) == 0 {
return nil
}
for _, e := range errs {
if !errors.Is(e, fs.ErrorDirExists) {
return errs
}
}
return fs.ErrorDirExists
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
upstreams, err := f.action(ctx, dir)
if err != nil {
return err
}
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
u := upstreams[i]
// ignore DirSetModTime on upstreams which don't support it
if do := u.Features().DirSetModTime; do != nil {
err := do(ctx, dir, modTime)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
}
}
})
return errs.Err()
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, fn func(string, fs.EntryType), ch <-chan time.Duration) {
var uChans []chan time.Duration
for _, u := range f.upstreams {
if ChangeNotify := u.Features().ChangeNotify; ChangeNotify != nil {
ch := make(chan time.Duration)
uChans = append(uChans, ch)
ChangeNotify(ctx, fn, ch)
}
}
go func() {
for i := range ch {
for _, c := range uChans {
c <- i
}
}
for _, c := range uChans {
close(c)
}
}()
}
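// Caller sketch (editor's addition, interval value hypothetical): the channel
// supplies the poll interval for the upstreams and closing it stops polling.
//
//	pollChan := make(chan time.Duration)
//	f.ChangeNotify(ctx, func(path string, entryType fs.EntryType) {
//	    fs.Debugf(nil, "change at %q (%v)", path, entryType)
//	}, pollChan)
//	pollChan <- 30 * time.Second // poll upstreams every 30 seconds
//	// ... later ...
//	close(pollChan) // stop polling and release resources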
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
multithread(len(f.upstreams), func(i int) {
if do := f.upstreams[i].Features().DirCacheFlush; do != nil {
do()
}
})
}
// Tee in into n outputs
//
// When finished read the error from the channel
func multiReader(n int, in io.Reader) ([]io.Reader, <-chan error) {
readers := make([]io.Reader, n)
pipeWriters := make([]*io.PipeWriter, n)
writers := make([]io.Writer, n)
errChan := make(chan error, 1)
for i := range writers {
r, w := io.Pipe()
bw := bufio.NewWriter(w)
readers[i], pipeWriters[i], writers[i] = r, w, bw
}
go func() {
mw := io.MultiWriter(writers...)
es := make([]error, 2*n+1)
_, copyErr := io.Copy(mw, in)
es[2*n] = copyErr
// Flush the buffers
for i, bw := range writers {
es[i] = bw.(*bufio.Writer).Flush()
}
// Close the underlying pipes
for i, pw := range pipeWriters {
es[n+i] = pw.CloseWithError(copyErr)
}
errChan <- Errors(es).Err()
}()
return readers, errChan
}
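// Usage sketch (editor's addition): every returned reader must be drained,
// typically in its own goroutine, otherwise the io.Copy above blocks on the
// corresponding pipe; the aggregated error is read from the channel once all
// readers have been consumed.
//
//	readers, errChan := multiReader(2, strings.NewReader("payload"))
//	var wg sync.WaitGroup
//	for _, r := range readers {
//	    wg.Add(1)
//	    go func(r io.Reader) {
//	        defer wg.Done()
//	        _, _ = io.Copy(io.Discard, r)
//	    }(r)
//	}
//	wg.Wait()
//	err := <-errChan // combined copy/flush/close errors, nil on success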
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
upstreams, err := f.create(ctx, srcPath)
if err == fs.ErrorObjectNotFound {
upstreams, err = f.mkdir(ctx, parentDir(srcPath))
}
if err != nil {
return nil, err
}
if len(upstreams) == 1 {
u := upstreams[0]
var o fs.Object
var err error
if stream {
o, err = u.Features().PutStream(ctx, in, src, options...)
} else {
o, err = u.Put(ctx, in, src, options...)
}
if err != nil {
return nil, err
}
e, err := f.wrapEntries(u.WrapObject(o))
return e.(*Object), err
}
// Multi-threading
readers, errChan := multiReader(len(upstreams), in)
errs := Errors(make([]error, len(upstreams)+1))
objs := make([]upstream.Entry, len(upstreams))
multithread(len(upstreams), func(i int) {
u := upstreams[i]
var o fs.Object
var err error
if stream {
o, err = u.Features().PutStream(ctx, readers[i], src, options...)
} else {
o, err = u.Put(ctx, readers[i], src, options...)
}
if err != nil {
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
if len(upstreams) > 1 {
// Drain the input buffer to allow other uploads to continue
_, _ = io.Copy(io.Discard, readers[i])
}
return
}
objs[i] = u.WrapObject(o)
})
errs[len(upstreams)] = <-errChan
err = errs.Err()
if err != nil {
return nil, err
}
e, err := f.wrapEntries(objs...)
return e.(*Object), err
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, false, options...)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, true, options...)
default:
return nil, err
}
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
usage := &fs.Usage{
Total: new(int64),
Used: new(int64),
Trashed: new(int64),
Other: new(int64),
Free: new(int64),
Objects: new(int64),
}
for _, u := range f.upstreams {
usg, err := u.About(ctx)
if errors.Is(err, fs.ErrorDirNotFound) {
continue
}
if err != nil {
return nil, err
}
if usg.Total != nil && usage.Total != nil {
*usage.Total += *usg.Total
} else {
usage.Total = nil
}
if usg.Used != nil && usage.Used != nil {
*usage.Used += *usg.Used
} else {
usage.Used = nil
}
if usg.Trashed != nil && usage.Trashed != nil {
*usage.Trashed += *usg.Trashed
} else {
usage.Trashed = nil
}
if usg.Other != nil && usage.Other != nil {
*usage.Other += *usg.Other
} else {
usage.Other = nil
}
if usg.Free != nil && usage.Free != nil {
*usage.Free += *usg.Free
} else {
usage.Free = nil
}
if usg.Objects != nil && usage.Objects != nil {
*usage.Objects += *usg.Objects
} else {
usage.Objects = nil
}
}
return usage, nil
}
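// Aggregation sketch (editor's addition, values hypothetical): a usage field
// is summed across upstreams but collapses to nil (unknown) as soon as any
// upstream does not report it:
//
//	free := int64(10 << 30)
//	u1 := &fs.Usage{Free: &free} // upstream reporting 10 GiB free
//	u2 := &fs.Usage{}            // upstream not reporting Free at all
//	// merged as About does above, the union's Usage.Free ends up nil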
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entriesList := make([][]upstream.Entry, len(f.upstreams))
errs := Errors(make([]error, len(f.upstreams)))
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
entries, err := u.List(ctx, dir)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
return
}
uEntries := make([]upstream.Entry, len(entries))
for j, e := range entries {
uEntries[j], _ = u.WrapEntry(e)
}
entriesList[i] = uEntries
})
if len(errs) == len(errs.FilterNil()) {
errs = errs.Map(func(e error) error {
if errors.Is(e, fs.ErrorDirNotFound) {
return nil
}
return e
})
if len(errs) == 0 {
return nil, fs.ErrorDirNotFound
}
return nil, errs.Err()
}
return f.mergeDirEntries(entriesList)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
var entriesList [][]upstream.Entry
errs := Errors(make([]error, len(f.upstreams)))
var mutex sync.Mutex
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
var err error
callback := func(entries fs.DirEntries) error {
uEntries := make([]upstream.Entry, len(entries))
for j, e := range entries {
uEntries[j], _ = u.WrapEntry(e)
}
mutex.Lock()
entriesList = append(entriesList, uEntries)
mutex.Unlock()
return nil
}
do := u.Features().ListR
if do != nil {
err = do(ctx, dir, callback)
} else {
err = walk.ListR(ctx, u, dir, true, -1, walk.ListAll, callback)
}
if err != nil {
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
return
}
})
if len(errs) == len(errs.FilterNil()) {
errs = errs.Map(func(e error) error {
if errors.Is(e, fs.ErrorDirNotFound) {
return nil
}
return e
})
if len(errs) == 0 {
return fs.ErrorDirNotFound
}
return errs.Err()
}
entries, err := f.mergeDirEntries(entriesList)
if err != nil {
return err
}
return callback(entries)
}
// NewObject creates a new remote union file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
objs := make([]*upstream.Object, len(f.upstreams))
errs := Errors(make([]error, len(f.upstreams)))
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
o, err := u.NewObject(ctx, remote)
if err != nil && err != fs.ErrorObjectNotFound {
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
return
}
objs[i] = u.WrapObject(o)
})
var entries []upstream.Entry
for _, o := range objs {
if o != nil {
entries = append(entries, o)
}
}
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
e, err := f.wrapEntries(entries...)
if err != nil {
return nil, err
}
return e.(*Object), errs.Err()
}
// Precision is the greatest Precision of all upstreams
func (f *Fs) Precision() time.Duration {
var greatestPrecision time.Duration
for _, u := range f.upstreams {
if u.Precision() > greatestPrecision {
greatestPrecision = u.Precision()
}
}
return greatestPrecision
}
func (f *Fs) action(ctx context.Context, path string) ([]*upstream.Fs, error) {
return f.actionPolicy.Action(ctx, f.upstreams, path)
}
func (f *Fs) actionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
return f.actionPolicy.ActionEntries(entries...)
}
func (f *Fs) create(ctx context.Context, path string) ([]*upstream.Fs, error) {
return f.createPolicy.Create(ctx, f.upstreams, path)
}
func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
return f.searchPolicy.SearchEntries(entries...)
}
func (f *Fs) mergeDirEntries(entriesList [][]upstream.Entry) (fs.DirEntries, error) {
entryMap := make(map[string]([]upstream.Entry))
for _, en := range entriesList {
if en == nil {
continue
}
for _, entry := range en {
remote := entry.Remote()
if f.Features().CaseInsensitive {
remote = strings.ToLower(remote)
}
entryMap[remote] = append(entryMap[remote], entry)
}
}
var entries fs.DirEntries
for path := range entryMap {
e, err := f.wrapEntries(entryMap[path]...)
if err != nil {
return nil, err
}
entries = append(entries, e)
}
return entries, nil
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
errs := Errors(make([]error, len(f.upstreams)))
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
if do := u.Features().Shutdown; do != nil {
err := do(ctx)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
}
}
})
return errs.Err()
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
errs := Errors(make([]error, len(f.upstreams)))
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
if do := u.Features().CleanUp; do != nil {
err := do(ctx)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
}
}
})
return errs.Err()
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(common.Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Backward compatible to old config
if len(opt.Upstreams) == 0 && len(opt.Remotes) > 0 {
for i := range len(opt.Remotes) - 1 {
opt.Remotes[i] += ":ro"
}
opt.Upstreams = opt.Remotes
}
if len(opt.Upstreams) == 0 {
return nil, errors.New("union can't point to an empty upstream - check the value of the upstreams setting")
}
if len(opt.Upstreams) == 1 {
return nil, errors.New("union can't point to a single upstream - check the value of the upstreams setting")
}
for _, u := range opt.Upstreams {
if strings.HasPrefix(u, name+":") {
return nil, errors.New("can't point union remote at itself - check the value of the upstreams setting")
}
}
root = strings.Trim(root, "/")
upstreams := make([]*upstream.Fs, len(opt.Upstreams))
errs := Errors(make([]error, len(opt.Upstreams)))
multithread(len(opt.Upstreams), func(i int) {
u := opt.Upstreams[i]
upstreams[i], errs[i] = upstream.New(ctx, u, root, opt)
})
var usedUpstreams []*upstream.Fs
var fserr error
for i, err := range errs {
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
// Only the upstreams that return ErrorIsFile will be used, if any
if err == fs.ErrorIsFile {
usedUpstreams = append(usedUpstreams, upstreams[i])
fserr = fs.ErrorIsFile
}
}
if fserr == nil {
usedUpstreams = upstreams
}
f := &Fs{
name: name,
root: root,
opt: *opt,
upstreams: usedUpstreams,
}
// Correct root if definitely pointing to a file
if fserr == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
err = upstream.Prepare(f.upstreams)
if err != nil {
return nil, err
}
f.actionPolicy, err = policy.Get(opt.ActionPolicy)
if err != nil {
return nil, err
}
f.createPolicy, err = policy.Get(opt.CreatePolicy)
if err != nil {
return nil, err
}
f.searchPolicy, err = policy.Get(opt.SearchPolicy)
if err != nil {
return nil, err
}
fs.Debugf(f, "actionPolicy = %T, createPolicy = %T, searchPolicy = %T", f.actionPolicy, f.createPolicy, f.searchPolicy)
var features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}).Fill(ctx, f)
canMove, slowHash := true, false
for _, f := range upstreams {
features = features.Mask(ctx, f) // Mask all upstream fs
if !operations.CanServerSideMove(f) {
canMove = false
}
slowHash = slowHash || f.Features().SlowHash
}
// We can move if all remotes support Move or Copy
if canMove {
features.Move = f.Move
}
// If any of upstreams are SlowHash, propagate it
features.SlowHash = slowHash
// Enable ListR when the upstreams either support ListR or are local,
// but not when all upstreams are local
if features.ListR == nil {
for _, u := range upstreams {
if u.Features().ListR != nil {
features.ListR = f.ListR
} else if !u.Features().IsLocal {
features.ListR = nil
break
}
}
}
// Disable ListP always
features.ListP = nil
// show that we wrap other backends
features.Overlay = true
f.features = features
// Get common intersection of hashes
hashSet := f.upstreams[0].Hashes()
for _, u := range f.upstreams[1:] {
hashSet = hashSet.Overlap(u.Hashes())
}
f.hashSet = hashSet
return f, fserr
}
func parentDir(absPath string) string {
parent := path.Dir(strings.TrimRight(filepath.ToSlash(absPath), "/"))
if parent == "." {
parent = ""
}
return parent
}
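// Behaviour sketch (editor's addition):
//
//	parentDir("a/b/c") == "a/b"
//	parentDir("a/b/")  == "a"
//	parentDir("a")     == ""
//	parentDir("")      == ""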
func multithread(num int, fn func(int)) {
var wg sync.WaitGroup
for i := range num {
wg.Add(1)
i := i
go func() {
defer wg.Done()
fn(i)
}()
}
wg.Wait()
}
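// Usage sketch (editor's addition): fn is run once per index in its own
// goroutine and multithread returns when all of them have finished, e.g.
//
//	results := make([]error, len(f.upstreams))
//	multithread(len(f.upstreams), func(i int) {
//	    results[i] = f.upstreams[i].Mkdir(ctx, "dir")
//	})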
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/common/options.go | backend/union/common/options.go | // Package common defines code common to the union and the policies
//
// These need to be defined in a separate package to avoid import loops
package common //nolint:revive // Don't include revive when running golangci-lint because this triggers var-naming: avoid meaningless package names
import "github.com/rclone/rclone/fs"
// Options defines the configuration for this backend
type Options struct {
Upstreams fs.SpaceSepList `config:"upstreams"`
Remotes fs.SpaceSepList `config:"remotes"` // Deprecated
ActionPolicy string `config:"action_policy"`
CreatePolicy string `config:"create_policy"`
SearchPolicy string `config:"search_policy"`
CacheTime int `config:"cache_time"`
MinFreeSpace fs.SizeSuffix `config:"min_free_space"`
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/upstream/upstream.go | backend/union/upstream/upstream.go | // Package upstream provides utility functionality to union.
package upstream
import (
"context"
"errors"
"fmt"
"io"
"math"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/rclone/rclone/backend/union/common"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/operations"
)
var (
// ErrUsageFieldNotSupported states that the usage field is not supported by the backend
ErrUsageFieldNotSupported = errors.New("this usage field is not supported")
)
// Fs is a wrap of any fs and its configs
type Fs struct {
fs.Fs
RootFs fs.Fs
RootPath string
Opt *common.Options
writable bool
creatable bool
usage *fs.Usage // Cache the usage
cacheTime time.Duration // cache duration
cacheExpiry atomic.Int64 // usage cache expiry time
cacheMutex sync.RWMutex
cacheOnce sync.Once
cacheUpdate bool // if the cache is updating
writeback bool // writeback to this upstream
writebackFs *Fs // if non-nil, write back to this upstream
}
// Directory describes a wrapped Directory
//
// This is a wrapped Directory which contains the upstream Fs
type Directory struct {
fs.Directory
f *Fs
}
// Object describes a wrapped Object
//
// This is a wrapped Object which contains the upstream Fs
type Object struct {
fs.Object
f *Fs
}
// Entry describes a wrapped fs.DirEntry interface with the
// information of the upstream Fs
type Entry interface {
fs.DirEntry
UpstreamFs() *Fs
}
// New creates a new Fs based on the
// string formatted `type:root_path(:ro/:nc/:writeback)`
func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, error) {
configName, fsPath, err := fspath.SplitFs(remote)
if err != nil {
return nil, err
}
f := &Fs{
RootPath: strings.TrimRight(root, "/"),
Opt: opt,
writable: true,
creatable: true,
cacheTime: time.Duration(opt.CacheTime) * time.Second,
usage: &fs.Usage{},
}
f.cacheExpiry.Store(time.Now().Unix())
if strings.HasSuffix(fsPath, ":ro") {
f.writable = false
f.creatable = false
fsPath = fsPath[0 : len(fsPath)-3]
} else if strings.HasSuffix(fsPath, ":nc") {
f.writable = true
f.creatable = false
fsPath = fsPath[0 : len(fsPath)-3]
} else if strings.HasSuffix(fsPath, ":writeback") {
f.writeback = true
fsPath = fsPath[0 : len(fsPath)-len(":writeback")]
}
remote = configName + fsPath
rFs, err := cache.Get(ctx, remote)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.RootFs = rFs
rootString := fspath.JoinRootPath(remote, root)
myFs, err := cache.Get(ctx, rootString)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.Fs = myFs
cache.PinUntilFinalized(f.Fs, f)
return f, err
}
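// Illustrative upstream strings (editor's sketch, remote names hypothetical):
//
//	"remote:path"           // writable and creatable (the default)
//	"remote:path:ro"        // read only: neither writable nor creatable
//	"remote:path:nc"        // no create: writable but not creatable
//	"remote:path:writeback" // upstream that Object.Writeback copies objects to
//
// At most one upstream may carry the :writeback tag, see Prepare below.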
// Prepare the configured upstreams as a group
func Prepare(fses []*Fs) error {
writebacks := 0
var writebackFs *Fs
for _, f := range fses {
if f.writeback {
writebackFs = f
writebacks++
}
}
if writebacks == 0 {
return nil
} else if writebacks > 1 {
return fmt.Errorf("can only have 1 :writeback not %d", writebacks)
}
for _, f := range fses {
if !f.writeback {
f.writebackFs = writebackFs
}
}
return nil
}
// WrapDirectory wraps an fs.Directory to include the info
// of the upstream Fs
func (f *Fs) WrapDirectory(e fs.Directory) *Directory {
if e == nil {
return nil
}
return &Directory{
Directory: e,
f: f,
}
}
// WrapObject wraps an fs.Object to include the info
// of the upstream Fs
func (f *Fs) WrapObject(o fs.Object) *Object {
if o == nil {
return nil
}
return &Object{
Object: o,
f: f,
}
}
// WrapEntry wraps an fs.DirEntry to include the info
// of the upstream Fs
func (f *Fs) WrapEntry(e fs.DirEntry) (Entry, error) {
switch e := e.(type) {
case fs.Object:
return f.WrapObject(e), nil
case fs.Directory:
return f.WrapDirectory(e), nil
default:
return nil, fmt.Errorf("unknown object type %T", e)
}
}
// UpstreamFs gets the upstream Fs the entry is stored in
func (e *Directory) UpstreamFs() *Fs {
return e.f
}
// UpstreamFs gets the upstream Fs the entry is stored in
func (o *Object) UpstreamFs() *Fs {
return o.f
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
return o.Object
}
// IsCreatable returns whether the fs is allowed to create new objects
func (f *Fs) IsCreatable() bool {
return f.creatable
}
// IsWritable returns whether the fs is allowed to write
func (f *Fs) IsWritable() bool {
return f.writable
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.Fs.Put(ctx, in, src, options...)
if err != nil {
return o, err
}
f.cacheMutex.Lock()
defer f.cacheMutex.Unlock()
size := src.Size()
if f.usage.Used != nil {
*f.usage.Used += size
}
if f.usage.Free != nil {
*f.usage.Free -= size
}
if f.usage.Objects != nil {
*f.usage.Objects++
}
return o, nil
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Features().PutStream
if do == nil {
return nil, fs.ErrorNotImplemented
}
o, err := do(ctx, in, src, options...)
if err != nil {
return o, err
}
f.cacheMutex.Lock()
defer f.cacheMutex.Unlock()
size := o.Size()
if f.usage.Used != nil {
*f.usage.Used += size
}
if f.usage.Free != nil {
*f.usage.Free -= size
}
if f.usage.Objects != nil {
*f.usage.Objects++
}
return o, nil
}
// Update in to the object with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
size := o.Size()
err := o.Object.Update(ctx, in, src, options...)
if err != nil {
return err
}
o.f.cacheMutex.Lock()
defer o.f.cacheMutex.Unlock()
delta := o.Size() - size
if delta <= 0 {
return nil
}
if o.f.usage.Used != nil {
*o.f.usage.Used += delta
}
if o.f.usage.Free != nil {
*o.f.usage.Free -= delta
}
return nil
}
// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
do, ok := o.Object.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// MimeType returns the content type of the Object if known
func (o *Object) MimeType(ctx context.Context) (mimeType string) {
if do, ok := o.Object.(fs.MimeTyper); ok {
mimeType = do.MimeType(ctx)
}
return mimeType
}
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
do, ok := o.Object.(fs.SetTierer)
if !ok {
return errors.New("underlying remote does not support SetTier")
}
return do.SetTier(tier)
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.Object.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// Metadata returns metadata for a DirEntry
//
// It should return nil if there is no Metadata
func (e *Directory) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := e.Directory.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// SetMetadata sets metadata for a DirEntry
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (e *Directory) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := e.Directory.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// SetModTime sets the metadata on the DirEntry to set the modification date
//
// If there is any other metadata it does not overwrite it.
func (e *Directory) SetModTime(ctx context.Context, t time.Time) error {
do, ok := e.Directory.(fs.SetModTimer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetModTime(ctx, t)
}
// Writeback writes the object back and returns a new object
//
// If it returns nil, nil then the original object is OK
func (o *Object) Writeback(ctx context.Context) (*Object, error) {
if o.f.writebackFs == nil {
return nil, nil
}
newObj, err := operations.Copy(ctx, o.f.writebackFs.Fs, nil, o.Object.Remote(), o.Object)
if err != nil {
return nil, err
}
// newObj could be nil here
if newObj == nil {
fs.Errorf(o, "nil Object returned from operations.Copy")
return nil, nil
}
return &Object{
Object: newObj,
f: o.f,
}, err
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if f.cacheExpiry.Load() <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return nil, ErrUsageFieldNotSupported
}
}
f.cacheMutex.RLock()
defer f.cacheMutex.RUnlock()
return f.usage, nil
}
// GetFreeSpace gets the free space of the fs
//
// This is returned as 0..math.MaxInt64-1 leaving math.MaxInt64 as a sentinel
func (f *Fs) GetFreeSpace() (int64, error) {
if f.cacheExpiry.Load() <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return math.MaxInt64 - 1, ErrUsageFieldNotSupported
}
}
f.cacheMutex.RLock()
defer f.cacheMutex.RUnlock()
if f.usage.Free == nil {
return math.MaxInt64 - 1, ErrUsageFieldNotSupported
}
return *f.usage.Free, nil
}
// GetUsedSpace gets the used space of the fs
//
// This is returned as 0..math.MaxInt64-1 leaving math.MaxInt64 as a sentinel
func (f *Fs) GetUsedSpace() (int64, error) {
if f.cacheExpiry.Load() <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return 0, ErrUsageFieldNotSupported
}
}
f.cacheMutex.RLock()
defer f.cacheMutex.RUnlock()
if f.usage.Used == nil {
return 0, ErrUsageFieldNotSupported
}
return *f.usage.Used, nil
}
// GetNumObjects gets the number of objects of the fs
func (f *Fs) GetNumObjects() (int64, error) {
if f.cacheExpiry.Load() <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return 0, ErrUsageFieldNotSupported
}
}
f.cacheMutex.RLock()
defer f.cacheMutex.RUnlock()
if f.usage.Objects == nil {
return 0, ErrUsageFieldNotSupported
}
return *f.usage.Objects, nil
}
func (f *Fs) updateUsage() (err error) {
if do := f.RootFs.Features().About; do == nil {
return ErrUsageFieldNotSupported
}
done := false
f.cacheOnce.Do(func() {
f.cacheMutex.Lock()
err = f.updateUsageCore(false)
f.cacheMutex.Unlock()
done = true
})
if done {
return err
}
if !f.cacheUpdate {
f.cacheUpdate = true
go func() {
_ = f.updateUsageCore(true)
f.cacheUpdate = false
}()
}
return nil
}
func (f *Fs) updateUsageCore(lock bool) error {
// Run in background, should not be cancelled by user
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
usage, err := f.RootFs.Features().About(ctx)
if err != nil {
f.cacheUpdate = false
if errors.Is(err, fs.ErrorDirNotFound) {
err = nil
}
return err
}
if lock {
f.cacheMutex.Lock()
defer f.cacheMutex.Unlock()
}
// Store usage
f.cacheExpiry.Store(time.Now().Add(f.cacheTime).Unix())
f.usage = usage
return nil
}
// Check the interfaces are satisfied
var (
_ fs.FullObject = (*Object)(nil)
_ fs.FullDirectory = (*Directory)(nil)
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/policy.go | backend/union/policy/policy.go | package policy
import (
"context"
"fmt"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
var policies = make(map[string]Policy)
// Policy is the interface for a defined set of behaviours used to choose
// the upstream Fs to operate on
type Policy interface {
// Action category policy, governing the modification of files and directories
Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error)
// Create category policy, governing the creation of files and directories
Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error)
// Search category policy, governing the access to files and directories
Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error)
// ActionEntries is ACTION category policy but receiving a set of candidate entries
ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error)
// CreateEntries is CREATE category policy but receiving a set of candidate entries
CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error)
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
SearchEntries(entries ...upstream.Entry) (upstream.Entry, error)
}
func registerPolicy(name string, p Policy) {
policies[strings.ToLower(name)] = p
}
// Get a Policy from the list
func Get(name string) (Policy, error) {
p, ok := policies[strings.ToLower(name)]
if !ok {
return nil, fmt.Errorf("didn't find policy called %q", name)
}
return p, nil
}
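// Registration sketch (editor's addition, MyPolicy is hypothetical): a policy
// becomes available to the union backend by registering a Policy
// implementation under a name, usually from an init function in this package:
//
//	func init() {
//	    registerPolicy("mypolicy", &MyPolicy{})
//	}
//
// Get("MyPolicy") then resolves it; lookup is case-insensitive.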
func filterRO(ufs []*upstream.Fs) (wufs []*upstream.Fs) {
for _, u := range ufs {
if u.IsWritable() {
wufs = append(wufs, u)
}
}
return wufs
}
func filterROEntries(ue []upstream.Entry) (wue []upstream.Entry) {
for _, e := range ue {
if e.UpstreamFs().IsWritable() {
wue = append(wue, e)
}
}
return wue
}
func filterNC(ufs []*upstream.Fs) (wufs []*upstream.Fs) {
for _, u := range ufs {
if u.IsCreatable() {
wufs = append(wufs, u)
}
}
return wufs
}
func filterNCEntries(ue []upstream.Entry) (wue []upstream.Entry) {
for _, e := range ue {
if e.UpstreamFs().IsCreatable() {
wue = append(wue, e)
}
}
return wue
}
func parentDir(absPath string) string {
parent := path.Dir(strings.TrimRight(absPath, "/"))
if parent == "." {
parent = ""
}
return parent
}
func clean(absPath string) string {
cleanPath := path.Clean(absPath)
if cleanPath == "." {
cleanPath = ""
}
return cleanPath
}
func findEntry(ctx context.Context, f fs.Fs, remote string) fs.DirEntry {
remote = clean(remote)
dir := parentDir(remote)
entries, err := f.List(ctx, dir)
if remote == dir {
if err != nil {
return nil
}
return fs.NewDir("", time.Time{})
}
found := false
for _, e := range entries {
eRemote := e.Remote()
if f.Features().CaseInsensitive {
found = strings.EqualFold(remote, eRemote)
} else {
found = (remote == eRemote)
}
if found {
return e
}
}
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/lfs.go | backend/union/policy/lfs.go | package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("lfs", &Lfs{})
}
// Lfs stands for least free space
// Search category: same as eplfs.
// Action category: same as eplfs.
// Create category: Pick the drive with the least free space.
type Lfs struct {
EpLfs
}
// Create category policy, governing the creation of files and directories
func (p *Lfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.lfs(upstreams)
return []*upstream.Fs{u}, err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/eprand.go | backend/union/policy/eprand.go | package policy
import (
"context"
"math/rand"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("eprand", &EpRand{})
}
// EpRand stands for existing path, random
// Calls epall and then randomizes. Returns one candidate.
type EpRand struct {
EpAll
}
func (p *EpRand) rand(upstreams []*upstream.Fs) *upstream.Fs {
return upstreams[rand.Intn(len(upstreams))]
}
func (p *EpRand) randEntries(entries []upstream.Entry) upstream.Entry {
return entries[rand.Intn(len(entries))]
}
// Action category policy, governing the modification of files and directories
func (p *EpRand) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Action(ctx, upstreams, path)
if err != nil {
return nil, err
}
return []*upstream.Fs{p.rand(upstreams)}, nil
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpRand) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.ActionEntries(entries...)
if err != nil {
return nil, err
}
return []upstream.Entry{p.randEntries(entries)}, nil
}
// Create category policy, governing the creation of files and directories
func (p *EpRand) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Create(ctx, upstreams, path)
if err != nil {
return nil, err
}
return []*upstream.Fs{p.rand(upstreams)}, nil
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpRand) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.CreateEntries(entries...)
if err != nil {
return nil, err
}
return []upstream.Entry{p.randEntries(entries)}, nil
}
// Search category policy, governing the access to files and directories
func (p *EpRand) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams, err := p.epall(ctx, upstreams, path)
if err != nil {
return nil, err
}
return p.rand(upstreams), nil
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *EpRand) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.randEntries(entries), nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/epmfs.go | backend/union/policy/epmfs.go | package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("epmfs", &EpMfs{})
}
// EpMfs stands for existing path, most free space
// Of all the candidates on which the path exists choose the one with the most free space.
type EpMfs struct {
EpAll
}
func (p *EpMfs) mfs(upstreams []*upstream.Fs) (*upstream.Fs, error) {
var maxFreeSpace int64
var mfsupstream *upstream.Fs
for _, u := range upstreams {
space, err := u.GetFreeSpace()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Free Space is not supported for upstream %s, treating as infinite", u.Name())
}
if maxFreeSpace < space {
maxFreeSpace = space
mfsupstream = u
}
}
if mfsupstream == nil {
return nil, fs.ErrorObjectNotFound
}
return mfsupstream, nil
}
func (p *EpMfs) mfsEntries(entries []upstream.Entry) (upstream.Entry, error) {
var maxFreeSpace int64
var mfsEntry upstream.Entry
for _, e := range entries {
space, err := e.UpstreamFs().GetFreeSpace()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Free Space is not supported for upstream %s, treating as infinite", e.UpstreamFs().Name())
}
if maxFreeSpace < space {
maxFreeSpace = space
mfsEntry = e
}
}
return mfsEntry, nil
}
// Action category policy, governing the modification of files and directories
func (p *EpMfs) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Action(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.mfs(upstreams)
return []*upstream.Fs{u}, err
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpMfs) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.ActionEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.mfsEntries(entries)
return []upstream.Entry{e}, err
}
// Create category policy, governing the creation of files and directories
func (p *EpMfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Create(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.mfs(upstreams)
return []*upstream.Fs{u}, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpMfs) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.CreateEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.mfsEntries(entries)
return []upstream.Entry{e}, err
}
// Search category policy, governing the access to files and directories
func (p *EpMfs) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams, err := p.epall(ctx, upstreams, path)
if err != nil {
return nil, err
}
return p.mfs(upstreams)
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *EpMfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.mfsEntries(entries)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/lno.go | backend/union/policy/lno.go | package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("lno", &Lno{})
}
// Lno stands for least number of objects
// Search category: same as eplno.
// Action category: same as eplno.
// Create category: Pick the drive with the least number of objects.
type Lno struct {
EpLno
}
// Create category policy, governing the creation of files and directories
func (p *Lno) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.lno(upstreams)
return []*upstream.Fs{u}, err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/epff.go | backend/union/policy/epff.go | package policy
import (
"context"
"path"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("epff", &EpFF{})
}
// EpFF stands for existing path, first found
// Given the order of the candidates, act on the first one found where the relative path exists.
type EpFF struct{}
func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
ch := make(chan *upstream.Fs, len(upstreams))
ctx, cancel := context.WithCancel(ctx)
defer cancel()
for _, u := range upstreams {
go func() {
rfs := u.RootFs
remote := path.Join(u.RootPath, filePath)
if findEntry(ctx, rfs, remote) == nil {
u = nil
}
ch <- u
}()
}
var u *upstream.Fs
for range upstreams {
u = <-ch
if u != nil {
break
}
}
if u == nil {
return nil, fs.ErrorObjectNotFound
}
return u, nil
}
// Action category policy, governing the modification of files and directories
func (p *EpFF) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterRO(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.epff(ctx, upstreams, path)
return []*upstream.Fs{u}, err
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpFF) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterROEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries[:1], nil
}
// Create category policy, governing the creation of files and directories
func (p *EpFF) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.epff(ctx, upstreams, path+"/..")
return []*upstream.Fs{u}, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpFF) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterNCEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries[:1], nil
}
// Search category policy, governing the access to files and directories
func (p *EpFF) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.epff(ctx, upstreams, path)
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *EpFF) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return entries[0], nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/rand.go | backend/union/policy/rand.go | package policy
import (
"context"
"math/rand"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("rand", &Rand{})
}
// Rand stands for random
// Calls all and then randomizes. Returns one candidate.
type Rand struct {
All
}
func (p *Rand) rand(upstreams []*upstream.Fs) *upstream.Fs {
return upstreams[rand.Intn(len(upstreams))]
}
func (p *Rand) randEntries(entries []upstream.Entry) upstream.Entry {
return entries[rand.Intn(len(entries))]
}
// Action category policy, governing the modification of files and directories
func (p *Rand) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.All.Action(ctx, upstreams, path)
if err != nil {
return nil, err
}
return []*upstream.Fs{p.rand(upstreams)}, nil
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *Rand) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.All.ActionEntries(entries...)
if err != nil {
return nil, err
}
return []upstream.Entry{p.randEntries(entries)}, nil
}
// Create category policy, governing the creation of files and directories
func (p *Rand) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.All.Create(ctx, upstreams, path)
if err != nil {
return nil, err
}
return []*upstream.Fs{p.rand(upstreams)}, nil
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *Rand) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.All.CreateEntries(entries...)
if err != nil {
return nil, err
}
return []upstream.Entry{p.randEntries(entries)}, nil
}
// Search category policy, governing the access to files and directories
func (p *Rand) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams, err := p.epall(ctx, upstreams, path)
if err != nil {
return nil, err
}
return p.rand(upstreams), nil
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *Rand) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.randEntries(entries), nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/epall.go | backend/union/policy/epall.go | package policy
import (
"context"
"path"
"sync"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("epall", &EpAll{})
}
// EpAll stands for existing path, all
// Action category: apply to all found.
// Create category: apply to all found.
// Search category: same as epff.
type EpAll struct {
EpFF
}
func (p *EpAll) epall(ctx context.Context, upstreams []*upstream.Fs, filePath string) ([]*upstream.Fs, error) {
var wg sync.WaitGroup
ufs := make([]*upstream.Fs, len(upstreams))
for i, u := range upstreams {
wg.Add(1)
i, u := i, u // Closure
go func() {
rfs := u.RootFs
remote := path.Join(u.RootPath, filePath)
if findEntry(ctx, rfs, remote) != nil {
ufs[i] = u
}
wg.Done()
}()
}
wg.Wait()
var results []*upstream.Fs
for _, f := range ufs {
if f != nil {
results = append(results, f)
}
}
if len(results) == 0 {
return nil, fs.ErrorObjectNotFound
}
return results, nil
}
// Action category policy, governing the modification of files and directories
func (p *EpAll) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterRO(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
return p.epall(ctx, upstreams, path)
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpAll) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterROEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries, nil
}
// Create category policy, governing the creation of files and directories
func (p *EpAll) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
upstreams, err := p.epall(ctx, upstreams, path+"/..")
return upstreams, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpAll) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterNCEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries, nil
}
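// A minimal standalone sketch (not part of this file) of the fan-out
// pattern p.epall uses above: probe every candidate concurrently,
// record hits by index so the original order is preserved, then
// compact the slice. checkExists is a hypothetical stand-in for the
// findEntry call made against each upstream.
package main

import (
	"fmt"
	"sync"
)

func filterConcurrently(items []string, checkExists func(string) bool) []string {
	hits := make([]bool, len(items))
	var wg sync.WaitGroup
	for i, item := range items {
		wg.Add(1)
		go func(i int, item string) {
			defer wg.Done()
			hits[i] = checkExists(item)
		}(i, item)
	}
	wg.Wait()
	var results []string
	for i, item := range items {
		if hits[i] {
			results = append(results, item)
		}
	}
	return results
}

func main() {
	found := filterConcurrently([]string{"a:", "b:", "c:"}, func(s string) bool { return s != "b:" })
	fmt.Println(found) // [a: c:]
}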
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/ff.go | backend/union/policy/ff.go | package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("ff", &FF{})
}
// FF stands for first found
// Search category: same as epff.
// Action category: same as epff.
// Create category: Given the order of the candidates, act on the first one found.
type FF struct {
EpFF
}
// Create category policy, governing the creation of files and directories
func (p *FF) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return upstreams, fs.ErrorPermissionDenied
}
return upstreams[:1], nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/eplno.go | backend/union/policy/eplno.go | package policy
import (
"context"
"math"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("eplno", &EpLno{})
}
// EpLno stands for existing path, least number of objects
// Of all the candidates on which the path exists choose the one with the least number of objects
type EpLno struct {
EpAll
}
func (p *EpLno) lno(upstreams []*upstream.Fs) (*upstream.Fs, error) {
var minNumObj int64 = math.MaxInt64
var lnoUpstream *upstream.Fs
for _, u := range upstreams {
numObj, err := u.GetNumObjects()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Number of Objects is not supported for upstream %s, treating as 0", u.Name())
}
if minNumObj > numObj {
minNumObj = numObj
lnoUpstream = u
}
}
if lnoUpstream == nil {
return nil, fs.ErrorObjectNotFound
}
return lnoUpstream, nil
}
func (p *EpLno) lnoEntries(entries []upstream.Entry) (upstream.Entry, error) {
var minNumObj int64 = math.MaxInt64
var lnoEntry upstream.Entry
for _, e := range entries {
numObj, err := e.UpstreamFs().GetNumObjects()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Number of Objects is not supported for upstream %s, treating as 0", e.UpstreamFs().Name())
}
if minNumObj > numObj {
minNumObj = numObj
lnoEntry = e
}
}
return lnoEntry, nil
}
// Action category policy, governing the modification of files and directories
func (p *EpLno) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Action(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.lno(upstreams)
return []*upstream.Fs{u}, err
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpLno) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.ActionEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.lnoEntries(entries)
return []upstream.Entry{e}, err
}
// Create category policy, governing the creation of files and directories
func (p *EpLno) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Create(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.lno(upstreams)
return []*upstream.Fs{u}, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpLno) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.CreateEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.lnoEntries(entries)
return []upstream.Entry{e}, err
}
// Search category policy, governing the access to files and directories
func (p *EpLno) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams, err := p.epall(ctx, upstreams, path)
if err != nil {
return nil, err
}
return p.lno(upstreams)
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *EpLno) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.lnoEntries(entries)
}
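// A minimal standalone sketch (not part of this file) of the
// least-number-of-objects selection implemented by lno above: a failed
// metric lookup is treated as zero objects, which makes that candidate
// win any comparison against a positive count. countObjects is a
// hypothetical stand-in for upstream.Fs.GetNumObjects.
package main

import (
	"errors"
	"fmt"
	"math"
)

func pickLeastObjects(upstreams []string, countObjects func(string) (int64, error)) (string, error) {
	var best string
	var found bool
	var minCount int64 = math.MaxInt64
	for _, u := range upstreams {
		n, err := countObjects(u)
		if err != nil {
			n = 0 // unsupported upstreams are treated as holding zero objects
		}
		if n < minCount {
			minCount = n
			best = u
			found = true
		}
	}
	if !found {
		return "", errors.New("object not found on any upstream")
	}
	return best, nil
}

func main() {
	counts := map[string]int64{"a:": 10, "b:": 3, "c:": 7}
	u, _ := pickLeastObjects([]string{"a:", "b:", "c:"}, func(s string) (int64, error) {
		return counts[s], nil
	})
	fmt.Println("least objects on:", u) // b:
}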
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/lus.go | backend/union/policy/lus.go | package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("lus", &Lus{})
}
// Lus stands for least used space
// Search category: same as eplus.
// Action category: same as eplus.
// Create category: Pick the drive with the least used space.
type Lus struct {
EpLus
}
// Create category policy, governing the creation of files and directories
func (p *Lus) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.lus(upstreams)
return []*upstream.Fs{u}, err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/newest.go | backend/union/policy/newest.go | package policy
import (
"context"
"path"
"sync"
"time"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("newest", &Newest{})
}
// Newest policy picks the file / directory with the largest mtime
// It implies the existence of a path
type Newest struct {
EpAll
}
func (p *Newest) newest(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
var wg sync.WaitGroup
ufs := make([]*upstream.Fs, len(upstreams))
mtimes := make([]time.Time, len(upstreams))
for i, u := range upstreams {
wg.Add(1)
i, u := i, u // Closure
go func() {
defer wg.Done()
rfs := u.RootFs
remote := path.Join(u.RootPath, filePath)
if e := findEntry(ctx, rfs, remote); e != nil {
ufs[i] = u
mtimes[i] = e.ModTime(ctx)
}
}()
}
wg.Wait()
maxMtime := time.Time{}
var newestFs *upstream.Fs
for i, u := range ufs {
if u != nil && mtimes[i].After(maxMtime) {
maxMtime = mtimes[i]
newestFs = u
}
}
if newestFs == nil {
return nil, fs.ErrorObjectNotFound
}
return newestFs, nil
}
func (p *Newest) newestEntries(entries []upstream.Entry) (upstream.Entry, error) {
var wg sync.WaitGroup
mtimes := make([]time.Time, len(entries))
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
for i, e := range entries {
wg.Add(1)
i, e := i, e // Closure
go func() {
defer wg.Done()
mtimes[i] = e.ModTime(ctx)
}()
}
wg.Wait()
maxMtime := time.Time{}
var newestEntry upstream.Entry
for i, t := range mtimes {
if t.After(maxMtime) {
maxMtime = t
newestEntry = entries[i]
}
}
if newestEntry == nil {
return nil, fs.ErrorObjectNotFound
}
return newestEntry, nil
}
// Action category policy, governing the modification of files and directories
func (p *Newest) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterRO(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.newest(ctx, upstreams, path)
return []*upstream.Fs{u}, err
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *Newest) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterROEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
e, err := p.newestEntries(entries)
return []upstream.Entry{e}, err
}
// Create category policy, governing the creation of files and directories
func (p *Newest) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.newest(ctx, upstreams, path+"/..")
return []*upstream.Fs{u}, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *Newest) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterNCEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
e, err := p.newestEntries(entries)
return []upstream.Entry{e}, err
}
// Search category policy, governing the access to files and directories
func (p *Newest) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.newest(ctx, upstreams, path)
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *Newest) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.newestEntries(entries)
}
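// A minimal standalone sketch (not part of this file) of the selection
// step the Newest policy above performs once a modification time has
// been gathered per candidate: keep the entry with the largest mtime
// and report not-found when no candidate produced a usable time (the
// zero time never wins a comparison).
package main

import (
	"errors"
	"fmt"
	"time"
)

func pickNewest(names []string, mtimes []time.Time) (string, error) {
	latest := time.Time{}
	best := ""
	for i, t := range mtimes {
		if t.After(latest) {
			latest = t
			best = names[i]
		}
	}
	if best == "" {
		return "", errors.New("object not found on any candidate")
	}
	return best, nil
}

func main() {
	names := []string{"a:", "b:"}
	mtimes := []time.Time{
		time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
		time.Date(2021, 6, 1, 0, 0, 0, 0, time.UTC),
	}
	n, _ := pickNewest(names, mtimes)
	fmt.Println("newest copy on:", n) // b:
}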
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/eplfs.go | backend/union/policy/eplfs.go | package policy
import (
"context"
"errors"
"math"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("eplfs", &EpLfs{})
}
// EpLfs stands for existing path, least free space
// Of all the candidates on which the path exists choose the one with the least free space.
type EpLfs struct {
EpAll
}
var errNoUpstreamsFound = errors.New("no upstreams found with more than min_free_space space spare")
func (p *EpLfs) lfs(upstreams []*upstream.Fs) (*upstream.Fs, error) {
var minFreeSpace int64 = math.MaxInt64
var lfsupstream *upstream.Fs
for _, u := range upstreams {
space, err := u.GetFreeSpace()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Free Space is not supported for upstream %s, treating as infinite", u.Name())
}
if space < minFreeSpace && space > int64(u.Opt.MinFreeSpace) {
minFreeSpace = space
lfsupstream = u
}
}
if lfsupstream == nil {
return nil, errNoUpstreamsFound
}
return lfsupstream, nil
}
func (p *EpLfs) lfsEntries(entries []upstream.Entry) (upstream.Entry, error) {
var minFreeSpace int64 = math.MaxInt64
var lfsEntry upstream.Entry
for _, e := range entries {
u := e.UpstreamFs()
space, err := u.GetFreeSpace()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Free Space is not supported for upstream %s, treating as infinite", u.Name())
}
if space < minFreeSpace && space > int64(u.Opt.MinFreeSpace) {
minFreeSpace = space
lfsEntry = e
}
}
if lfsEntry == nil {
return nil, errNoUpstreamsFound
}
return lfsEntry, nil
}
// Action category policy, governing the modification of files and directories
func (p *EpLfs) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Action(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.lfs(upstreams)
return []*upstream.Fs{u}, err
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpLfs) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.ActionEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.lfsEntries(entries)
return []upstream.Entry{e}, err
}
// Create category policy, governing the creation of files and directories
func (p *EpLfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Create(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.lfs(upstreams)
return []*upstream.Fs{u}, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpLfs) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.CreateEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.lfsEntries(entries)
return []upstream.Entry{e}, err
}
// Search category policy, governing the access to files and directories
func (p *EpLfs) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams, err := p.epall(ctx, upstreams, path)
if err != nil {
return nil, err
}
return p.lfs(upstreams)
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *EpLfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.lfsEntries(entries)
}
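// A minimal standalone sketch (not part of this file) of the
// least-free-space selection implemented by lfs above. The notable
// detail is the min_free_space threshold: a candidate only qualifies
// while its free space stays above that reserve, so nearly-full
// upstreams are skipped. freeSpace and minFree are hypothetical
// stand-ins for GetFreeSpace and Opt.MinFreeSpace.
package main

import (
	"errors"
	"fmt"
	"math"
)

func pickLeastFree(upstreams []string, freeSpace map[string]int64, minFree int64) (string, error) {
	var best string
	var least int64 = math.MaxInt64
	for _, u := range upstreams {
		space := freeSpace[u]
		if space < least && space > minFree {
			least = space
			best = u
		}
	}
	if best == "" {
		return "", errors.New("no upstreams found with more than min_free_space space spare")
	}
	return best, nil
}

func main() {
	free := map[string]int64{"a:": 5 << 30, "b:": 1 << 30, "c:": 40 << 30}
	u, _ := pickLeastFree([]string{"a:", "b:", "c:"}, free, 2<<30)
	fmt.Println("fill up:", u) // a: (b: is below the 2 GiB reserve)
}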
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/eplus.go | backend/union/policy/eplus.go | package policy
import (
"context"
"math"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("eplus", &EpLus{})
}
// EpLus stands for existing path, least used space
// Of all the candidates on which the path exists choose the one with the least used space.
type EpLus struct {
EpAll
}
func (p *EpLus) lus(upstreams []*upstream.Fs) (*upstream.Fs, error) {
var minUsedSpace int64 = math.MaxInt64
var lusupstream *upstream.Fs
for _, u := range upstreams {
space, err := u.GetUsedSpace()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Used Space is not supported for upstream %s, treating as 0", u.Name())
}
if space < minUsedSpace {
minUsedSpace = space
lusupstream = u
}
}
if lusupstream == nil {
return nil, fs.ErrorObjectNotFound
}
return lusupstream, nil
}
func (p *EpLus) lusEntries(entries []upstream.Entry) (upstream.Entry, error) {
var minUsedSpace int64 = math.MaxInt64
var lusEntry upstream.Entry
for _, e := range entries {
space, err := e.UpstreamFs().GetUsedSpace()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Used Space is not supported for upstream %s, treating as 0", e.UpstreamFs().Name())
}
if space < minUsedSpace {
minUsedSpace = space
lusEntry = e
}
}
return lusEntry, nil
}
// Action category policy, governing the modification of files and directories
func (p *EpLus) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Action(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.lus(upstreams)
return []*upstream.Fs{u}, err
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpLus) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.ActionEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.lusEntries(entries)
return []upstream.Entry{e}, err
}
// Create category policy, governing the creation of files and directories
func (p *EpLus) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.EpAll.Create(ctx, upstreams, path)
if err != nil {
return nil, err
}
u, err := p.lus(upstreams)
return []*upstream.Fs{u}, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpLus) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.EpAll.CreateEntries(entries...)
if err != nil {
return nil, err
}
e, err := p.lusEntries(entries)
return []upstream.Entry{e}, err
}
// Search category policy, governing the access to files and directories
func (p *EpLus) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams, err := p.epall(ctx, upstreams, path)
if err != nil {
return nil, err
}
return p.lus(upstreams)
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *EpLus) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.lusEntries(entries)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/all.go | backend/union/policy/all.go | // Package policy provides utilities for the union implementation.
package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("all", &All{})
}
// All policy behaves the same as EpAll except for the CREATE category
// Action category: same as epall.
// Create category: apply to all branches.
// Search category: same as epall.
type All struct {
EpAll
}
// Create category policy, governing the creation of files and directories
func (p *All) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
return upstreams, nil
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *All) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterNCEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/union/policy/mfs.go | backend/union/policy/mfs.go | package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("mfs", &Mfs{})
}
// Mfs stands for most free space
// Search category: same as epmfs.
// Action category: same as epmfs.
// Create category: Pick the drive with the most free space.
type Mfs struct {
EpMfs
}
// Create category policy, governing the creation of files and directories
func (p *Mfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.mfs(upstreams)
return []*upstream.Fs{u}, err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/pixeldrain/pixeldrain.go | backend/pixeldrain/pixeldrain.go | // Package pixeldrain provides an interface to the Pixeldrain object storage
// system.
package pixeldrain
import (
"context"
"errors"
"fmt"
"io"
"path"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
timeFormat = time.RFC3339Nano
minSleep = pacer.MinSleep(10 * time.Millisecond)
maxSleep = pacer.MaxSleep(1 * time.Second)
decayConstant = pacer.DecayConstant(2) // bigger for slower decay, exponential
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "pixeldrain",
Description: "Pixeldrain Filesystem",
NewFs: NewFs,
Config: nil,
Options: []fs.Option{{
Name: "api_key",
Help: "API key for your pixeldrain account.\n" +
"Found on https://pixeldrain.com/user/api_keys.",
Sensitive: true,
}, {
Name: "root_folder_id",
Help: "Root of the filesystem to use.\n\n" +
"Set to 'me' to use your personal filesystem. " +
"Set to a shared directory ID to use a shared directory.",
Default: "me",
}, {
Name: "api_url",
Help: "The API endpoint to connect to. In the vast majority of cases it's fine to leave\n" +
"this at default. It is only intended to be changed for testing purposes.",
Default: "https://pixeldrain.com/api",
Advanced: true,
Required: true,
}},
MetadataInfo: &fs.MetadataInfo{
System: map[string]fs.MetadataHelp{
"mode": {
Help: "File mode",
Type: "octal, unix style",
Example: "755",
},
"mtime": {
Help: "Time of last modification",
Type: "RFC 3339",
Example: timeFormat,
},
"btime": {
Help: "Time of file birth (creation)",
Type: "RFC 3339",
Example: timeFormat,
},
},
Help: "Pixeldrain supports file modes and creation times.",
},
})
}
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
RootFolderID string `config:"root_folder_id"`
APIURL string `config:"api_url"`
}
// Fs represents a remote pixeldrain filesystem
type Fs struct {
name string // name of this remote, as given to NewFS
root string // the path we are working on, as given to NewFS
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
pacer *fs.Pacer
loggedIn bool // if the user is authenticated
// pathPrefix is the directory we're working in. The pathPrefix is stripped
// from every API response containing a path. The pathPrefix always begins
// and ends with a slash for concatenation convenience
pathPrefix string
}
// Object describes a pixeldrain file
type Object struct {
fs *Fs // what this object is part of
base FilesystemNode // the node this object references
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(apiErrorHandler),
pacer: fs.NewPacer(ctx, pacer.NewDefault(minSleep, maxSleep, decayConstant)),
}
f.features = (&fs.Features{
ReadMimeType: true,
CanHaveEmptyDirectories: true,
ReadMetadata: true,
WriteMetadata: true,
}).Fill(ctx, f)
// Set the path prefix. This is the path to the root directory on the
// server. We add it to each request and strip it from each response because
// rclone does not want to see it
f.pathPrefix = "/" + path.Join(opt.RootFolderID, f.root) + "/"
// The root URL equates to https://pixeldrain.com/api/filesystem during
// normal operation. API handlers need to manually add the pathPrefix to
// each request
f.srv.SetRoot(opt.APIURL + "/filesystem")
// If using an APIKey, set the Authorization header
if len(opt.APIKey) > 0 {
f.srv.SetUserPass("", opt.APIKey)
// Check if credentials are correct
user, err := f.userInfo(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get user data: %w", err)
}
f.loggedIn = true
fs.Infof(f,
"Logged in as '%s', subscription '%s', storage limit %d",
user.Username, user.Subscription.Name, user.Subscription.StorageSpace,
)
}
if !f.loggedIn && opt.RootFolderID == "me" {
return nil, errors.New("authentication required: the 'me' directory can only be accessed while logged in")
}
// Satisfy TestFsIsFile. This test expects that we throw an error if the
// filesystem root is a file
fsp, err := f.stat(ctx, "")
if err != errNotFound && err != nil {
// It doesn't matter if the root directory does not exist, as long as it
// is not a file. This is what the test dictates
return f, err
} else if err == nil && fsp.Base().Type == "file" {
// The filesystem root is a file, rclone wants us to set the root to the
// parent directory
f.root = path.Dir(f.root)
f.pathPrefix = "/" + path.Join(opt.RootFolderID, f.root) + "/"
return f, fs.ErrorIsFile
}
return f, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
fsp, err := f.stat(ctx, dir)
if err == errNotFound {
return nil, fs.ErrorDirNotFound
} else if err != nil {
return nil, err
} else if fsp.Base().Type == "file" {
return nil, fs.ErrorIsFile
}
entries = make(fs.DirEntries, len(fsp.Children))
for i := range fsp.Children {
if fsp.Children[i].Type == "dir" {
entries[i] = f.nodeToDirectory(fsp.Children[i])
} else {
entries[i] = f.nodeToObject(fsp.Children[i])
}
}
return entries, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fsp, err := f.stat(ctx, remote)
if err == errNotFound {
return nil, fs.ErrorObjectNotFound
} else if err != nil {
return nil, err
} else if fsp.Base().Type == "dir" {
return nil, fs.ErrorIsDir
}
return f.nodeToObject(fsp.Base()), nil
}
// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
if err != nil {
return nil, fmt.Errorf("failed to get object metadata")
}
// Default the mtime to the source's modification time if it was not already set in the metadata
if _, ok := meta["mtime"]; !ok {
if meta == nil {
meta = make(fs.Metadata)
}
meta["mtime"] = src.ModTime(ctx).Format(timeFormat)
}
node, err := f.put(ctx, src.Remote(), in, meta, options)
if err != nil {
return nil, fmt.Errorf("failed to put object: %w", err)
}
return f.nodeToObject(node), nil
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
err = f.mkdir(ctx, dir)
if err == errNotFound {
return fs.ErrorDirNotFound
} else if err == errExists {
// Spec says we do not return an error if the directory already exists
return nil
}
return err
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
err = f.delete(ctx, dir, false)
if err == errNotFound {
return fs.ErrorDirNotFound
}
return err
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }
// String converts this Fs to a string
func (f *Fs) String() string { return fmt.Sprintf("pixeldrain root '%s'", f.root) }
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration { return time.Millisecond }
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.SHA256) }
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }
// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
err = f.delete(ctx, dir, true)
if err == errNotFound {
return fs.ErrorDirNotFound
}
return err
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
// This is not a pixeldrain object. Can't move
return nil, fs.ErrorCantMove
}
node, err := f.rename(ctx, srcObj.fs, srcObj.base.Path, remote, fs.GetConfig(ctx).MetadataSet)
if err == errIncompatibleSourceFS {
return nil, fs.ErrorCantMove
} else if err == errNotFound {
return nil, fs.ErrorObjectNotFound
}
return f.nodeToObject(node), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
_, err = f.rename(ctx, src, srcRemote, dstRemote, nil)
if err == errIncompatibleSourceFS {
return fs.ErrorCantDirMove
} else if err == errNotFound {
return fs.ErrorDirNotFound
} else if err == errExists {
return fs.ErrorDirExists
}
return err
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), newInterval <-chan time.Duration) {
// If the root folder ID is not /me we need to explicitly enable change logging
// for this directory or file
if f.pathPrefix != "/me/" {
_, err := f.update(ctx, "", fs.Metadata{"logging_enabled": "true"})
if err != nil {
fs.Errorf(f, "Failed to set up change logging for path '%s': %s", f.pathPrefix, err)
}
}
go f.changeNotify(ctx, notify, newInterval)
}
func (f *Fs) changeNotify(ctx context.Context, notify func(string, fs.EntryType), newInterval <-chan time.Duration) {
var ticker = time.NewTicker(<-newInterval)
var lastPoll = time.Now()
for {
select {
case dur, ok := <-newInterval:
if !ok {
ticker.Stop()
return
}
fs.Debugf(f, "Polling changes at an interval of %s", dur)
ticker.Reset(dur)
case t := <-ticker.C:
clog, err := f.changeLog(ctx, lastPoll, t)
if err != nil {
fs.Errorf(f, "Failed to get change log for path '%s': %s", f.pathPrefix, err)
continue
}
for i := range clog {
fs.Debugf(f, "Path '%s' (%s) changed (%s) in directory '%s'",
clog[i].Path, clog[i].Type, clog[i].Action, f.pathPrefix)
if clog[i].Type == "dir" {
notify(strings.TrimPrefix(clog[i].Path, "/"), fs.EntryDirectory)
} else if clog[i].Type == "file" {
notify(strings.TrimPrefix(clog[i].Path, "/"), fs.EntryObject)
}
}
lastPoll = t
}
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Put already supports streaming so we just use that
return f.Put(ctx, in, src, options...)
}
// DirSetModTime sets the mtime metadata on a directory
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) (err error) {
_, err = f.update(ctx, dir, fs.Metadata{"mtime": modTime.Format(timeFormat)})
return err
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
fsn, err := f.update(ctx, remote, fs.Metadata{"shared": strconv.FormatBool(!unlink)})
if err != nil {
return "", err
}
if fsn.ID != "" {
return strings.Replace(f.opt.APIURL, "/api", "/d/", 1) + fsn.ID, nil
}
return "", nil
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
user, err := f.userInfo(ctx)
if err != nil {
return nil, fmt.Errorf("failed to read user info: %w", err)
}
usage = &fs.Usage{Used: fs.NewUsageValue(user.StorageSpaceUsed)}
if user.Subscription.StorageSpace > -1 {
usage.Total = fs.NewUsageValue(user.Subscription.StorageSpace)
}
return usage, nil
}
// SetModTime sets the modification time of the remote object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
_, err = o.fs.update(ctx, o.base.Path, fs.Metadata{"mtime": modTime.Format(timeFormat)})
if err == nil {
o.base.Modified = modTime
}
return err
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
return o.fs.read(ctx, o.base.Path, options)
}
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
// Copy the parameters and update the object
o.base.Modified = src.ModTime(ctx)
o.base.FileSize = src.Size()
o.base.SHA256Sum, _ = src.Hash(ctx, hash.SHA256)
_, err = o.fs.Put(ctx, in, o, options...)
return err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.delete(ctx, o.base.Path, false)
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns the SHA-256 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.SHA256 {
return "", hash.ErrUnsupported
}
return o.base.SHA256Sum, nil
}
// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.base.Path
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.base.Path
}
// ModTime returns the modification time of the object
//
// It returns the modification time stored in the filesystem node
// returned by the API
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.base.Modified
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.base.FileSize
}
// MimeType returns the content type of the Object if known, or "" if not
func (o *Object) MimeType(ctx context.Context) string {
return o.base.FileType
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return fs.Metadata{
"mode": o.base.ModeOctal,
"mtime": o.base.Modified.Format(timeFormat),
"btime": o.base.Created.Format(timeFormat),
}, nil
}
// Verify that all the interfaces are implemented correctly
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Info = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.DirEntry = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.Metadataer = (*Object)(nil)
)
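// A hypothetical rclone.conf entry wiring up the options registered in
// init() above (only the option names and defaults shown there are
// used; the key is a placeholder):
//
//	[pixeldrain]
//	type = pixeldrain
//	api_key = <key from https://pixeldrain.com/user/api_keys>
//	root_folder_id = me
//
// Below is a minimal standalone sketch (not part of this file) of how
// PublicLink above turns the configured API URL plus a shared node ID
// into a download link.
package main

import (
	"fmt"
	"strings"
)

// publicLink mirrors the replacement done in (*Fs).PublicLink: swap the
// /api path segment for /d/ and append the public ID of the shared node.
func publicLink(apiURL, nodeID string) string {
	return strings.Replace(apiURL, "/api", "/d/", 1) + nodeID
}

func main() {
	fmt.Println(publicLink("https://pixeldrain.com/api", "abc123"))
	// Output: https://pixeldrain.com/d/abc123
}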
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/pixeldrain/pixeldrain_test.go | backend/pixeldrain/pixeldrain_test.go | // Test pixeldrain filesystem interface
package pixeldrain_test
import (
"testing"
"github.com/rclone/rclone/backend/pixeldrain"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestPixeldrain:",
NilObject: (*pixeldrain.Object)(nil),
SkipInvalidUTF8: true, // Pixeldrain throws an error on invalid utf-8
})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/pixeldrain/api_client.go | backend/pixeldrain/api_client.go | package pixeldrain
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
// FilesystemPath is the object which is returned from the pixeldrain API when
// running the stat command on a path. It includes the node information for all
// the members of the path and for all the children of the requested directory.
type FilesystemPath struct {
Path []FilesystemNode `json:"path"`
BaseIndex int `json:"base_index"`
Children []FilesystemNode `json:"children"`
}
// Base returns the base node of the path; this is the node that the path points
// to
func (fsp *FilesystemPath) Base() FilesystemNode {
return fsp.Path[fsp.BaseIndex]
}
// FilesystemNode is a single node in the pixeldrain filesystem. Usually part of
// a Path or Children slice. The Node is also returned as a response from update
// commands, if requested
type FilesystemNode struct {
Type string `json:"type"`
Path string `json:"path"`
Name string `json:"name"`
Created time.Time `json:"created"`
Modified time.Time `json:"modified"`
ModeOctal string `json:"mode_octal"`
// File params
FileSize int64 `json:"file_size"`
FileType string `json:"file_type"`
SHA256Sum string `json:"sha256_sum"`
// ID is only filled in when the file/directory is publicly shared
ID string `json:"id,omitempty"`
}
// ChangeLog is a log of changes that happened in a filesystem. Changes returned
// from the API are in chronological order from old to new. A change log can be
// requested for any directory or file, but change logging needs to be enabled
// with the update API before any log entries will be made. Changes are logged
// for 24 hours after logging was enabled. Each time a change log is requested
// the timer is reset to 24 hours.
type ChangeLog []ChangeLogEntry
// ChangeLogEntry is a single entry in a directory's change log. It contains the
// time at which the change occurred, the path relative to the requested
// directory, and the action that was performed (update, move or delete). In
// case of a move operation the new path of the file is stored in the path_new
// field
type ChangeLogEntry struct {
Time time.Time `json:"time"`
Path string `json:"path"`
PathNew string `json:"path_new"`
Action string `json:"action"`
Type string `json:"type"`
}
// UserInfo contains information about the logged in user
type UserInfo struct {
Username string `json:"username"`
Subscription SubscriptionType `json:"subscription"`
StorageSpaceUsed int64 `json:"storage_space_used"`
}
// SubscriptionType contains information about a subscription type. It's not the
// active subscription itself, only the properties of the subscription, like the
// perks and cost.
type SubscriptionType struct {
Name string `json:"name"`
StorageSpace int64 `json:"storage_space"`
}
// APIError is the error type returned by the pixeldrain API
type APIError struct {
StatusCode string `json:"value"`
Message string `json:"message"`
}
func (e APIError) Error() string { return e.StatusCode }
// Generalized errors which are caught in our own handlers and translated to
// more specific errors from the fs package.
var (
errNotFound = errors.New("pd api: path not found")
errExists = errors.New("pd api: node already exists")
errAuthenticationFailed = errors.New("pd api: authentication failed")
)
func apiErrorHandler(resp *http.Response) (err error) {
var e APIError
if err = json.NewDecoder(resp.Body).Decode(&e); err != nil {
return fmt.Errorf("failed to parse error json: %w", err)
}
// We close the body here so that the API handlers can be sure that the
// response body is not still open when an error was returned
if err = resp.Body.Close(); err != nil {
return fmt.Errorf("failed to close resp body: %w", err)
}
if e.StatusCode == "path_not_found" {
return errNotFound
} else if e.StatusCode == "directory_not_empty" {
return fs.ErrorDirectoryNotEmpty
} else if e.StatusCode == "node_already_exists" {
return errExists
} else if e.StatusCode == "authentication_failed" {
return errAuthenticationFailed
} else if e.StatusCode == "permission_denied" {
return fs.ErrorPermissionDenied
}
return e
}
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
}
// shouldRetry returns a boolean as to whether this resp and err deserve to be
// retried. It returns the err as a convenience so it can be used as the return
// value in the pacer function
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// paramsFromMetadata turns the fs.Metadata into instructions the pixeldrain API
// can understand.
func paramsFromMetadata(meta fs.Metadata) (params url.Values) {
params = make(url.Values)
if modified, ok := meta["mtime"]; ok {
params.Set("modified", modified)
}
if created, ok := meta["btime"]; ok {
params.Set("created", created)
}
if mode, ok := meta["mode"]; ok {
params.Set("mode", mode)
}
if shared, ok := meta["shared"]; ok {
params.Set("shared", shared)
}
if loggingEnabled, ok := meta["logging_enabled"]; ok {
params.Set("logging_enabled", loggingEnabled)
}
return params
}
// nodeToObject converts a single FilesystemNode API response to an object. The
// node is usually a single element from a directory listing
func (f *Fs) nodeToObject(node FilesystemNode) (o *Object) {
// Trim the path prefix. The path prefix is hidden from rclone during all
// operations. Saving it here would confuse rclone a lot. So instead we
// strip it here and add it back for every API request we need to perform
node.Path = strings.TrimPrefix(node.Path, f.pathPrefix)
return &Object{fs: f, base: node}
}
func (f *Fs) nodeToDirectory(node FilesystemNode) fs.DirEntry {
return fs.NewDir(strings.TrimPrefix(node.Path, f.pathPrefix), node.Modified).SetID(node.ID)
}
func (f *Fs) escapePath(p string) (out string) {
// Add the path prefix, encode all the parts and combine them together
var parts = strings.Split(f.pathPrefix+p, "/")
for i := range parts {
parts[i] = url.PathEscape(parts[i])
}
return strings.Join(parts, "/")
}
func (f *Fs) put(
ctx context.Context,
path string,
body io.Reader,
meta fs.Metadata,
options []fs.OpenOption,
) (node FilesystemNode, err error) {
var params = paramsFromMetadata(meta)
// Tell the server to automatically create parent directories if they don't
// exist yet
params.Set("make_parents", "true")
return node, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "PUT",
Path: f.escapePath(path),
Body: body,
Parameters: params,
Options: options,
},
nil,
&node,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) read(ctx context.Context, path string, options []fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &rest.Opts{
Method: "GET",
Path: f.escapePath(path),
Options: options,
})
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
return resp.Body, err
}
func (f *Fs) stat(ctx context.Context, path string) (fsp FilesystemPath, err error) {
return fsp, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "GET",
Path: f.escapePath(path),
// To receive node info from the pixeldrain API you need to add the
// ?stat query. Without it pixeldrain will return the file contents
// if the URL points to a file
Parameters: url.Values{"stat": []string{""}},
},
nil,
&fsp,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) changeLog(ctx context.Context, start, end time.Time) (changeLog ChangeLog, err error) {
return changeLog, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "GET",
Path: f.escapePath(""),
Parameters: url.Values{
"change_log": []string{""},
"start": []string{start.Format(time.RFC3339Nano)},
"end": []string{end.Format(time.RFC3339Nano)},
},
},
nil,
&changeLog,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) update(ctx context.Context, path string, fields fs.Metadata) (node FilesystemNode, err error) {
var params = paramsFromMetadata(fields)
params.Set("action", "update")
return node, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "POST",
Path: f.escapePath(path),
MultipartParams: params,
},
nil,
&node,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) mkdir(ctx context.Context, dir string) (err error) {
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "POST",
Path: f.escapePath(dir),
MultipartParams: url.Values{"action": []string{"mkdirall"}},
NoResponse: true,
},
nil,
nil,
)
return shouldRetry(ctx, resp, err)
})
}
var errIncompatibleSourceFS = errors.New("source filesystem is not the same as target")
// Renames a file on the server side. Can be used for both directories and files
func (f *Fs) rename(ctx context.Context, src fs.Fs, from, to string, meta fs.Metadata) (node FilesystemNode, err error) {
srcFs, ok := src.(*Fs)
if !ok {
// This is not a pixeldrain FS, can't move
return node, errIncompatibleSourceFS
} else if srcFs.opt.RootFolderID != f.opt.RootFolderID {
// Path is not in the same root dir, can't move
return node, errIncompatibleSourceFS
}
var params = paramsFromMetadata(meta)
params.Set("action", "rename")
// The target is always in our own filesystem so here we use our
// own pathPrefix
params.Set("target", f.pathPrefix+to)
// Create parent directories if the parent directory of the file
// does not exist yet
params.Set("make_parents", "true")
return node, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "POST",
// Important: We use the source FS path prefix here
Path: srcFs.escapePath(from),
MultipartParams: params,
},
nil,
&node,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) delete(ctx context.Context, path string, recursive bool) (err error) {
var params url.Values
if recursive {
// Tell the server to recursively delete all child files
params = url.Values{"recursive": []string{"true"}}
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "DELETE",
Path: f.escapePath(path),
Parameters: params,
NoResponse: true,
},
nil, nil,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) userInfo(ctx context.Context) (user UserInfo, err error) {
return user, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "GET",
// The default RootURL points at the filesystem endpoint. We can't
// use that to request user information. So here we override it to
// the user endpoint
RootURL: f.opt.APIURL + "/user",
},
nil,
&user,
)
return shouldRetry(ctx, resp, err)
})
}
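// A minimal standalone sketch (not part of this file) of the path
// escaping strategy used by (*Fs).escapePath above: split on "/" so the
// separators survive, escape each component individually, then rejoin.
// The "/me/" prefix is a hypothetical pathPrefix.
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func escapePath(prefix, p string) string {
	parts := strings.Split(prefix+p, "/")
	for i := range parts {
		parts[i] = url.PathEscape(parts[i])
	}
	return strings.Join(parts, "/")
}

func main() {
	fmt.Println(escapePath("/me/", "docs/file name %.txt"))
	// Output: /me/docs/file%20name%20%25.txt
}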
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/http/http_internal_test.go | backend/http/http_internal_test.go | package http
import (
"context"
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/rest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
lineEndSize = 1
)
// prepareServer prepares the test server and shuts it down automatically
// when the test completes.
func prepareServer(t *testing.T) configmap.Simple {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// verify the file path is correct, and also check which line endings
// are used to get sizes right ("\n" except on Windows, but even there
// we may have "\n" or "\r\n" depending on git crlf setting)
fileList, err := os.ReadDir(filesPath)
require.NoError(t, err)
require.Greater(t, len(fileList), 0)
for _, file := range fileList {
if !file.IsDir() {
data, _ := os.ReadFile(filepath.Join(filesPath, file.Name()))
if strings.HasSuffix(string(data), "\r\n") {
lineEndSize = 2
}
break
}
}
// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
// Set the content disposition header for the fifth file
// later we will check if it is set using the metadata method
if r.URL.Path == "/five.txt.gz" {
w.Header().Set("Content-Disposition", "attachment; filename=\"five.txt.gz\"")
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Content-Language", "en-US")
w.Header().Set("Content-Encoding", "gzip")
}
fileServer.ServeHTTP(w, r)
})
// Make the test server
ts := httptest.NewServer(handler)
// Configure the remote
configfile.Install()
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true
// config.FileSet(remoteName, "type", "http")
// config.FileSet(remoteName, "url", ts.URL)
m := configmap.Simple{
"type": "http",
"url": ts.URL,
"headers": strings.Join(headers, ","),
}
t.Cleanup(ts.Close)
return m
}
// prepare prepares the test server and shuts it down automatically
// when the test completes.
func prepare(t *testing.T) fs.Fs {
m := prepareServer(t)
// Instantiate it
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
return f
}
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
entries, err := f.List(context.Background(), "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, 5, len(entries))
e := entries[0]
assert.Equal(t, "five.txt.gz", e.Remote())
assert.Equal(t, int64(-1), e.Size())
_, ok := e.(fs.Object)
assert.True(t, ok)
e = entries[1]
assert.Equal(t, "four", e.Remote())
assert.Equal(t, int64(-1), e.Size())
_, ok = e.(fs.Directory)
assert.True(t, ok)
e = entries[2]
assert.Equal(t, "one%.txt", e.Remote())
assert.Equal(t, int64(5+lineEndSize), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
e = entries[3]
assert.Equal(t, "three", e.Remote())
assert.Equal(t, int64(-1), e.Size())
_, ok = e.(fs.Directory)
assert.True(t, ok)
e = entries[4]
assert.Equal(t, "two.html", e.Remote())
if noSlash {
assert.Equal(t, int64(-1), e.Size())
_, ok = e.(fs.Directory)
assert.True(t, ok)
} else {
assert.Equal(t, int64(40+lineEndSize), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
}
func TestListRoot(t *testing.T) {
f := prepare(t)
testListRoot(t, f, false)
}
func TestListRootNoSlash(t *testing.T) {
f := prepare(t)
f.(*Fs).opt.NoSlash = true
testListRoot(t, f, true)
}
func TestListSubDir(t *testing.T) {
f := prepare(t)
entries, err := f.List(context.Background(), "three")
require.NoError(t, err)
sort.Sort(entries)
assert.Equal(t, 1, len(entries))
e := entries[0]
assert.Equal(t, "three/underthree.txt", e.Remote())
assert.Equal(t, int64(8+lineEndSize), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
func TestNewObject(t *testing.T) {
f := prepare(t)
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
assert.Equal(t, "four/under four.txt", o.Remote())
assert.Equal(t, int64(8+lineEndSize), o.Size())
_, ok := o.(*Object)
assert.True(t, ok)
// Test the time is correct on the object
tObj := o.ModTime(context.Background())
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
tFile := fi.ModTime()
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
// check object not found
o, err = f.NewObject(context.Background(), "not found.txt")
assert.Nil(t, o)
assert.Equal(t, fs.ErrorObjectNotFound, err)
}
func TestNewObjectWithLeadingSlash(t *testing.T) {
f := prepare(t)
o, err := f.NewObject(context.Background(), "/four/under four.txt")
require.NoError(t, err)
assert.Equal(t, "/four/under four.txt", o.Remote())
assert.Equal(t, int64(8+lineEndSize), o.Size())
_, ok := o.(*Object)
assert.True(t, ok)
// Test the time is correct on the object
tObj := o.ModTime(context.Background())
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
tFile := fi.ModTime()
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
// check object not found
o, err = f.NewObject(context.Background(), "/not found.txt")
assert.Nil(t, o)
assert.Equal(t, fs.ErrorObjectNotFound, err)
}
func TestNewObjectWithMetadata(t *testing.T) {
f := prepare(t)
o, err := f.NewObject(context.Background(), "/five.txt.gz")
require.NoError(t, err)
assert.Equal(t, "five.txt.gz", o.Remote())
ho, ok := o.(*Object)
assert.True(t, ok)
metadata, err := ho.Metadata(context.Background())
require.NoError(t, err)
assert.Equal(t, "text/plain; charset=utf-8", metadata["content-type"])
assert.Equal(t, "attachment; filename=\"five.txt.gz\"", metadata["content-disposition"])
assert.Equal(t, "five.txt.gz", metadata["content-disposition-filename"])
assert.Equal(t, "no-cache", metadata["cache-control"])
assert.Equal(t, "en-US", metadata["content-language"])
assert.Equal(t, "gzip", metadata["content-encoding"])
}
func TestOpen(t *testing.T) {
m := prepareServer(t)
for _, head := range []bool{false, true} {
if !head {
m.Set("no_head", "true")
}
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
for _, rangeRead := range []bool{false, true} {
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
if !head {
// Test mod time is still indeterminate
tObj := o.ModTime(context.Background())
assert.Equal(t, time.Duration(0), time.Unix(0, 0).Sub(tObj))
// Test file size is still indeterminate
assert.Equal(t, int64(-1), o.Size())
}
var data []byte
if !rangeRead {
// Test normal read
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}
} else {
// Test with range request
fd, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
tFile := fi.ModTime()
// Test the time is always correct on the object after file open
tObj := o.ModTime(context.Background())
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
if !rangeRead {
// Test the file size
assert.Equal(t, int64(len(data)), o.Size())
}
}
}
}
func TestMimeType(t *testing.T) {
f := prepare(t)
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
do, ok := o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
}
func TestIsAFileRoot(t *testing.T) {
m := prepareServer(t)
f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
testListRoot(t, f, false)
}
func TestIsAFileSubDir(t *testing.T) {
m := prepareServer(t)
f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
entries, err := f.List(context.Background(), "")
require.NoError(t, err)
sort.Sort(entries)
assert.Equal(t, 1, len(entries))
e := entries[0]
assert.Equal(t, "underthree.txt", e.Remote())
assert.Equal(t, int64(8+lineEndSize), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
func TestParseName(t *testing.T) {
for i, test := range []struct {
base string
val string
wantErr error
want string
}{
{"http://example.com/", "potato", nil, "potato"},
{"http://example.com/dir/", "potato", nil, "potato"},
{"http://example.com/dir/", "potato?download=true", errFoundQuestionMark, ""},
{"http://example.com/dir/", "../dir/potato", nil, "potato"},
{"http://example.com/dir/", "..", errNotUnderRoot, ""},
{"http://example.com/dir/", "http://example.com/", errNotUnderRoot, ""},
{"http://example.com/dir/", "http://example.com/dir/", errNameIsEmpty, ""},
{"http://example.com/dir/", "http://example.com/dir/potato", nil, "potato"},
{"http://example.com/dir/", "https://example.com/dir/potato", errSchemeMismatch, ""},
{"http://example.com/dir/", "http://notexample.com/dir/potato", errHostMismatch, ""},
{"http://example.com/dir/", "/dir/", errNameIsEmpty, ""},
{"http://example.com/dir/", "/dir/potato", nil, "potato"},
{"http://example.com/dir/", "subdir/potato", errNameContainsSlash, ""},
{"http://example.com/dir/", "With percent %25.txt", nil, "With percent %.txt"},
{"http://example.com/dir/", "With colon :", errURLJoinFailed, ""},
{"http://example.com/dir/", rest.URLPathEscape("With colon :"), nil, "With colon :"},
{"http://example.com/Dungeons%20%26%20Dragons/", "/Dungeons%20&%20Dragons/D%26D%20Basic%20%28Holmes%2C%20B%2C%20X%2C%20BECMI%29/", nil, "D&D Basic (Holmes, B, X, BECMI)/"},
} {
u, err := url.Parse(test.base)
require.NoError(t, err)
got, gotErr := parseName(u, test.val)
what := fmt.Sprintf("test %d base=%q, val=%q", i, test.base, test.val)
assert.Equal(t, test.wantErr, gotErr, what)
assert.Equal(t, test.want, got, what)
}
}
// Load HTML from the file given and parse it, checking it against the entries passed in
func parseHTML(t *testing.T, name string, base string, want []string) {
in, err := os.Open(filepath.Join(testPath, "index_files", name))
require.NoError(t, err)
defer func() {
require.NoError(t, in.Close())
}()
if base == "" {
base = "http://example.com/"
}
u, err := url.Parse(base)
require.NoError(t, err)
entries, err := parse(u, in)
require.NoError(t, err)
assert.Equal(t, want, entries)
}
func TestParseEmpty(t *testing.T) {
parseHTML(t, "empty.html", "", []string(nil))
}
func TestParseApache(t *testing.T) {
parseHTML(t, "apache.html", "http://example.com/nick/pub/", []string{
"SWIG-embed.tar.gz",
"avi2dvd.pl",
"cambert.exe",
"cambert.gz",
"fedora_demo.gz",
"gchq-challenge/",
"mandelterm/",
"pgp-key.txt",
"pymath/",
"rclone",
"readdir.exe",
"rush_hour_solver_cut_down.py",
"snake-puzzle/",
"stressdisk/",
"timer-test",
"words-to-regexp.pl",
"Now 100% better.mp3",
"Now better.mp3",
})
}
func TestParseMemstore(t *testing.T) {
parseHTML(t, "memstore.html", "", []string{
"test/",
"v1.35/",
"v1.36-01-g503cd84/",
"rclone-beta-latest-freebsd-386.zip",
"rclone-beta-latest-freebsd-amd64.zip",
"rclone-beta-latest-windows-amd64.zip",
})
}
func TestParseNginx(t *testing.T) {
parseHTML(t, "nginx.html", "", []string{
"deltas/",
"objects/",
"refs/",
"state/",
"config",
"summary",
})
}
func TestParseCaddy(t *testing.T) {
parseHTML(t, "caddy.html", "", []string{
"mimetype.zip",
"rclone-delete-empty-dirs.py",
"rclone-show-empty-dirs.py",
"stat-windows-386.zip",
"v1.36-155-gcf29ee8b-team-driveβ/",
"v1.36-156-gca76b3fb-team-driveβ/",
"v1.36-156-ge1f0e0f5-team-driveβ/",
"v1.36-22-g06ea13a-ssh-agentβ/",
})
}
func TestFsNoSlashRoots(t *testing.T) {
// Test Fs with roots that do not end with '/', exercising the logic
// that decides if a url is to be considered a file or directory,
// based on the result of a HEAD request.
// Handler for faking HEAD responses with different status codes
headCount := 0
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "HEAD" {
headCount++
responseCode, err := strconv.Atoi(path.Base(r.URL.String()))
require.NoError(t, err)
if strings.HasPrefix(r.URL.String(), "/redirect/") {
var redir string
if strings.HasPrefix(r.URL.String(), "/redirect/file/") {
redir = "/redirected"
} else if strings.HasPrefix(r.URL.String(), "/redirect/dir/") {
redir = "/redirected/"
} else {
require.Fail(t, "Redirect test requests must start with '/redirect/file/' or '/redirect/dir/'")
}
http.Redirect(w, r, redir, responseCode)
} else {
http.Error(w, http.StatusText(responseCode), responseCode)
}
}
})
// Make the test server
ts := httptest.NewServer(handler)
defer ts.Close()
// Configure the remote
configfile.Install()
m := configmap.Simple{
"type": "http",
"url": ts.URL,
}
// Test
for i, test := range []struct {
root string
isFile bool
}{
// 2xx success
{"parent/200", true},
{"parent/204", true},
// 3xx redirection: status 301, 302, 303, 307, 308
{"redirect/file/301", true}, // Request is redirected to "/redirected"
{"redirect/dir/301", false}, // Request is redirected to "/redirected/"
{"redirect/file/302", true}, // Request is redirected to "/redirected"
{"redirect/dir/302", false}, // Request is redirected to "/redirected/"
{"redirect/file/303", true}, // Request is redirected to "/redirected"
{"redirect/dir/303", false}, // Request is redirected to "/redirected/"
{"redirect/file/304", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/305", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/306", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/307", true}, // Request is redirected to "/redirected"
{"redirect/dir/307", false}, // Request is redirected to "/redirected/"
{"redirect/file/308", true}, // Request is redirected to "/redirected"
{"redirect/dir/308", false}, // Request is redirected to "/redirected/"
// 4xx client errors
{"parent/403", true}, // Forbidden status (head request blocked)
{"parent/404", false}, // Not found status
} {
for _, noHead := range []bool{false, true} {
var isFile bool
if noHead {
m.Set("no_head", "true")
isFile = true
} else {
m.Set("no_head", "false")
isFile = test.isFile
}
headCount = 0
f, err := NewFs(context.Background(), remoteName, test.root, m)
if noHead {
assert.Equal(t, 0, headCount)
} else {
assert.Equal(t, 1, headCount)
}
if isFile {
assert.ErrorIs(t, err, fs.ErrorIsFile)
} else {
assert.NoError(t, err)
}
var endpoint string
if isFile {
parent, _ := path.Split(test.root)
endpoint = "/" + parent
} else {
endpoint = "/" + test.root + "/"
}
what := fmt.Sprintf("i=%d, root=%q, isFile=%v, noHead=%v", i, test.root, isFile, noHead)
assert.Equal(t, ts.URL+endpoint, f.String(), what)
}
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/http/http.go | backend/http/http.go | // Package http provides a filesystem interface using golang.org/net/http
//
// It treats HTML pages served from the endpoint as directory
// listings, and includes any links found as files.
package http
import (
"context"
"errors"
"fmt"
"io"
"mime"
"net/http"
"net/textproto"
"net/url"
"path"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/net/html"
)
var (
errorReadOnly = errors.New("http remotes are read only")
timeUnset = time.Unix(0, 0)
)
func init() {
fsi := &fs.RegInfo{
Name: "http",
Description: "HTTP",
NewFs: NewFs,
CommandHelp: commandHelp,
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `HTTP metadata keys are case insensitive and are always returned in lower case.`,
},
Options: []fs.Option{{
Name: "url",
Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
Required: true,
}, {
Name: "headers",
Help: `Set HTTP headers for all transactions.
Use this to set additional HTTP headers for all transactions.
The input format is comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.
For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.`,
Default: fs.CommaSepList{},
Advanced: true,
}, {
Name: "no_slash",
Help: `Set this if the site doesn't end directories with /.
Use this if your target website does not use / on the end of
directories.
A / on the end of a path is how rclone normally tells the difference
between files and directories. If this flag is set, then rclone will
treat all files with Content-Type: text/html as directories and read
URLs from them rather than downloading them.
Note that this may cause rclone to confuse genuine HTML files with
directories.`,
Default: false,
Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests.
HEAD requests are mainly used to find file sizes in dir listing.
If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:
- find its size
- check it really exists
- check to see if it is a directory
If you set this option, rclone will not do the HEAD request. This will mean
that directory listings are much quicker, but rclone won't have the times or
sizes of any files, and some files that don't exist may be in the listing.`,
Default: false,
Advanced: true,
}, {
Name: "no_escape",
Help: "Do not escape URL metacharacters in path names.",
Default: false,
}},
}
fs.Register(fsi)
}
// system metadata keys which this backend owns
var systemMetadataInfo = map[string]fs.MetadataHelp{
"cache-control": {
Help: "Cache-Control header",
Type: "string",
Example: "no-cache",
},
"content-disposition": {
Help: "Content-Disposition header",
Type: "string",
Example: "inline",
},
"content-disposition-filename": {
Help: "Filename retrieved from Content-Disposition header",
Type: "string",
Example: "file.txt",
},
"content-encoding": {
Help: "Content-Encoding header",
Type: "string",
Example: "gzip",
},
"content-language": {
Help: "Content-Language header",
Type: "string",
Example: "en-US",
},
"content-type": {
Help: "Content-Type header",
Type: "string",
Example: "text/plain",
},
}
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
NoHead bool `config:"no_head"`
Headers fs.CommaSepList `config:"headers"`
NoEscape bool `config:"no_escape"`
}
// Fs stores the interface to the remote HTTP files
type Fs struct {
name string
root string
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
endpoint *url.URL
endpointURL string // endpoint as a string
httpClient *http.Client
}
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
contentType string
// Metadata as pointers to strings as they often won't be present
contentDisposition *string // Content-Disposition: header
contentDispositionFilename *string // Filename retrieved from Content-Disposition: header
cacheControl *string // Cache-Control: header
contentEncoding *string // Content-Encoding: header
contentLanguage *string // Content-Language: header
}
// statusError returns an error if the res contained an error
func statusError(res *http.Response, err error) error {
if err != nil {
return err
}
if res.StatusCode < 200 || res.StatusCode > 299 {
_ = res.Body.Close()
return fmt.Errorf("HTTP Error: %s", res.Status)
}
return nil
}
// getFsEndpoint decides if url is to be considered a file or directory,
// and returns a proper endpoint url to use for the fs.
func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Options) (string, bool) {
// If url ends with '/' it is already a proper url, and is always assumed to be a directory.
if url[len(url)-1] == '/' {
return url, false
}
// If url does not end with '/' we send a HEAD request to decide
// whether it is a directory or a file; if it is a directory we append
// the missing '/', and if it is a file we return the url of the parent directory instead.
createFileResult := func() (string, bool) {
fs.Debugf(nil, "If path is a directory you must add a trailing '/'")
parent, _ := path.Split(url)
return parent, true
}
createDirResult := func() (string, bool) {
fs.Debugf(nil, "To avoid the initial HEAD request add a trailing '/' to the path")
return url + "/", false
}
// If HEAD requests are not allowed we just have to assume it is a file.
if opt.NoHead {
fs.Debugf(nil, "Assuming path is a file as --http-no-head is set")
return createFileResult()
}
// Use a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be created: %v", err)
return createFileResult()
}
addHeaders(req, opt)
res, err := noRedir.Do(req)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
return createFileResult()
}
if res.StatusCode == http.StatusNotFound {
fs.Debugf(nil, "Assuming path is a directory as HEAD response indicates it does not exist as a file (%s)", res.Status)
return createDirResult()
}
if res.StatusCode == http.StatusMovedPermanently ||
res.StatusCode == http.StatusFound ||
res.StatusCode == http.StatusSeeOther ||
res.StatusCode == http.StatusTemporaryRedirect ||
res.StatusCode == http.StatusPermanentRedirect {
redir := res.Header.Get("Location")
if redir != "" {
if redir[len(redir)-1] == '/' {
fs.Debugf(nil, "Assuming path is a directory as HEAD response is redirect (%s) to a path that ends with '/': %s", res.Status, redir)
return createDirResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) to a path that does not end with '/': %s", res.Status, redir)
return createFileResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) but no location header", res.Status)
return createFileResult()
}
if res.StatusCode < 200 || res.StatusCode > 299 {
// Example is 403 (http.StatusForbidden) for servers not allowing HEAD requests.
fs.Debugf(nil, "Assuming path is a file as HEAD response is an error (%s)", res.Status)
return createFileResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is success (%s)", res.Status)
return createFileResult()
}
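// The mapping below (a hedged sketch based on getFsEndpoint above and on the
// backend's own tests, not part of the original source) summarises how the
// HEAD response translates into the file/directory decision:
//
//	url already ending in '/'                 -> directory, no HEAD request sent
//	no_head set                               -> file: parent url, true
//	HEAD 404                                  -> directory: url + "/", false
//	HEAD 3xx with Location ending in '/'      -> directory: url + "/", false
//	HEAD 3xx with Location not ending in '/'  -> file: parent url, true
//	HEAD 2xx, 403 or any other error          -> file: parent url, true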
// Make the http connection with opt
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
if len(opt.Headers)%2 != 0 {
return false, errors.New("odd number of headers supplied")
}
if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
}
// Parse the endpoint and stick the root onto it
base, err := url.Parse(opt.Endpoint)
if err != nil {
return false, err
}
u, err := rest.URLJoin(base, rest.URLPathEscape(f.root))
if err != nil {
return false, err
}
client := fshttp.NewClient(ctx)
endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
fs.Debugf(nil, "Root: %s", endpoint)
u, err = url.Parse(endpoint)
if err != nil {
return false, err
}
// Update f with the new parameters
f.httpClient = client
f.endpoint = u
f.endpointURL = u.String()
if isFile {
// Correct root if definitely pointing to a file
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
return isFile, nil
}
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
}
f.features = (&fs.Features{
ReadMetadata: true,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
// Make the http connection
isFile, err := f.httpConnection(ctx, opt)
if err != nil {
return nil, err
}
if isFile {
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
if !strings.HasSuffix(f.endpointURL, "/") {
return nil, errors.New("internal error: url doesn't end with /")
}
return f, nil
}
// Name returns the configured name of the file system
func (f *Fs) Name() string {
return f.name
}
// Root returns the root for the filesystem
func (f *Fs) Root() string {
return f.root
}
// String returns the URL for the filesystem
func (f *Fs) String() string {
return f.endpointURL
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
return time.Second
}
// NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
err := o.head(ctx)
if err != nil {
return nil, err
}
return o, nil
}
// Joins the remote onto the base URL
func (f *Fs) url(remote string) string {
trimmedRemote := strings.TrimLeft(remote, "/") // remove leading "/" since we always have it in f.endpointURL
if f.opt.NoEscape {
// Directly concatenate without escaping, no_escape behavior
return f.endpointURL + trimmedRemote
}
// Default behavior
return f.endpointURL + rest.URLPathEscape(trimmedRemote)
}
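// For illustration only (a sketch, not part of the original source, using a
// hypothetical endpoint): with endpointURL "http://example.com/dir/" the
// default behaviour escapes the remote per URL path rules (so a space would
// typically become %20), while no_escape appends it verbatim:
//
//	f.url("four/under four.txt") // "http://example.com/dir/four/under%20four.txt" (default)
//	f.url("four/under four.txt") // "http://example.com/dir/four/under four.txt"   (no_escape set)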
// Errors returned by parseName
var (
errURLJoinFailed = errors.New("URLJoin failed")
errFoundQuestionMark = errors.New("found ? in URL")
errHostMismatch = errors.New("host mismatch")
errSchemeMismatch = errors.New("scheme mismatch")
errNotUnderRoot = errors.New("not under root")
errNameIsEmpty = errors.New("name is empty")
errNameContainsSlash = errors.New("name contains /")
)
// parseName turns a name as found in the page into a remote path or returns an error
func parseName(base *url.URL, name string) (string, error) {
// make URL absolute
u, err := rest.URLJoin(base, name)
if err != nil {
return "", errURLJoinFailed
}
// check it doesn't have URL parameters
uStr := u.String()
if strings.Contains(uStr, "?") {
return "", errFoundQuestionMark
}
// check that this is going back to the same host and scheme
if base.Host != u.Host {
return "", errHostMismatch
}
if base.Scheme != u.Scheme {
return "", errSchemeMismatch
}
// check has path prefix
if !strings.HasPrefix(u.Path, base.Path) {
return "", errNotUnderRoot
}
// calculate the name relative to the base
name = u.Path[len(base.Path):]
// mustn't be empty
if name == "" {
return "", errNameIsEmpty
}
// mustn't contain a / - we are looking for a single level directory
slash := strings.Index(name, "/")
if slash >= 0 && slash != len(name)-1 {
return "", errNameContainsSlash
}
return name, nil
}
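// A few illustrative cases for parseName above (taken from the behaviour
// exercised in the backend's tests, not part of the original source),
// assuming base is "http://example.com/dir/":
//
//	parseName(base, "potato")                            // "potato", nil
//	parseName(base, "potato?download=true")              // "", errFoundQuestionMark
//	parseName(base, "subdir/potato")                     // "", errNameContainsSlash
//	parseName(base, "http://notexample.com/dir/potato")  // "", errHostMismatch
//	parseName(base, "..")                                // "", errNotUnderRoot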
// Parse turns HTML for a directory into names
// base should be the base URL to resolve any relative names from
func parse(base *url.URL, in io.Reader) (names []string, err error) {
doc, err := html.Parse(in)
if err != nil {
return nil, err
}
var (
walk func(*html.Node)
seen = make(map[string]struct{})
)
walk = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
name, err := parseName(base, a.Val)
if err == nil {
if _, found := seen[name]; !found {
names = append(names, name)
seen[name] = struct{}{}
}
}
break
}
}
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
walk(c)
}
}
walk(doc)
return names, nil
}
// parseFilename extracts the filename from a Content-Disposition header
func parseFilename(contentDisposition string) (string, error) {
// Normalize the contentDisposition to canonical MIME format
mediaType, params, err := mime.ParseMediaType(contentDisposition)
if err != nil {
return "", fmt.Errorf("failed to parse contentDisposition: %v", err)
}
// Check if the contentDisposition is an attachment
if strings.ToLower(mediaType) != "attachment" {
return "", fmt.Errorf("not an attachment: %s", mediaType)
}
// Extract the filename from the parameters
filename, ok := params["filename"]
if !ok {
return "", fmt.Errorf("filename not found in contentDisposition")
}
// Decode filename if it contains special encoding
return textproto.TrimString(filename), nil
}
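// For example (a sketch, not part of the original source):
//
//	parseFilename(`attachment; filename="five.txt.gz"`) // "five.txt.gz", nil
//	parseFilename(`inline`)                             // "", error: not an attachment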
// Adds the configured headers to the request if any
func addHeaders(req *http.Request, opt *Options) {
for i := 0; i < len(opt.Headers); i += 2 {
key := opt.Headers[i]
value := opt.Headers[i+1]
req.Header.Add(key, value)
}
}
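// As a sketch of how the headers option is consumed (illustrative values only,
// not part of the original source): the configured fs.CommaSepList is read as
// key,value pairs, so
//
//	opt.Headers = fs.CommaSepList{"Cookie", "name=value", "Authorization", "xxx"}
//
// makes addHeaders add "Cookie: name=value" and "Authorization: xxx" to each request.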
// Adds the configured headers to the request if any
func (f *Fs) addHeaders(req *http.Request) {
addHeaders(req, &f.opt)
}
// Read the directory passed in
func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error) {
URL := f.url(dir)
u, err := url.Parse(URL)
if err != nil {
return nil, fmt.Errorf("failed to readDir: %w", err)
}
if !strings.HasSuffix(URL, "/") {
return nil, fmt.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
// Do the request
req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
if err != nil {
return nil, fmt.Errorf("readDir failed: %w", err)
}
f.addHeaders(req)
res, err := f.httpClient.Do(req)
if err == nil {
defer fs.CheckClose(res.Body, &err)
if res.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
}
err = statusError(res, err)
if err != nil {
return nil, fmt.Errorf("failed to readDir: %w", err)
}
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
switch contentType {
case "text/html":
names, err = parse(u, res.Body)
if err != nil {
return nil, fmt.Errorf("readDir: %w", err)
}
default:
return nil, fmt.Errorf("can't parse content type %q", contentType)
}
return names, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if !strings.HasSuffix(dir, "/") && dir != "" {
dir += "/"
}
names, err := f.readDir(ctx, dir)
if err != nil {
return nil, fmt.Errorf("error listing %q: %w", dir, err)
}
var (
entriesMu sync.Mutex // to protect entries
wg sync.WaitGroup
checkers = f.ci.Checkers
in = make(chan string, checkers)
)
add := func(entry fs.DirEntry) {
entriesMu.Lock()
entries = append(entries, entry)
entriesMu.Unlock()
}
for range checkers {
wg.Add(1)
go func() {
defer wg.Done()
for remote := range in {
file := &Object{
fs: f,
remote: remote,
}
switch err := file.head(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
// ...found a directory not a file
add(fs.NewDir(remote, time.Time{}))
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
}
}()
}
for _, name := range names {
isDir := name[len(name)-1] == '/'
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
add(fs.NewDir(remote, time.Time{}))
} else {
in <- remote
}
}
close(in)
wg.Wait()
return entries, nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
return o.fs
}
// String returns the URL to the remote HTTP file
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
if o.contentDispositionFilename != nil {
return *o.contentDispositionFilename
}
return o.remote
}
// Hash returns "" since HTTP doesn't support remote calculation of hashes
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// url returns the native url of the object
func (o *Object) url() string {
return o.fs.url(o.remote)
}
// head sends a HEAD request to update info fields in the Object
func (o *Object) head(ctx context.Context) error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
o.contentType = fs.MimeType(ctx, o)
return nil
}
url := o.url()
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
return fmt.Errorf("stat failed: %w", err)
}
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
if err == nil && res.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
err = statusError(res, err)
if err != nil {
return fmt.Errorf("failed to stat: %w", err)
}
return o.decodeMetadata(ctx, res)
}
// decodeMetadata updates info fields in the Object according to HTTP response headers
func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
t = timeUnset
}
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
o.size = rest.ParseSizeFromHeaders(res.Header)
contentDisposition := res.Header.Get("Content-Disposition")
if contentDisposition != "" {
o.contentDisposition = &contentDisposition
}
if o.contentDisposition != nil {
var filename string
filename, err = parseFilename(*o.contentDisposition)
if err == nil && filename != "" {
o.contentDispositionFilename = &filename
}
}
cacheControl := res.Header.Get("Cache-Control")
if cacheControl != "" {
o.cacheControl = &cacheControl
}
contentEncoding := res.Header.Get("Content-Encoding")
if contentEncoding != "" {
o.contentEncoding = &contentEncoding
}
contentLanguage := res.Header.Get("Content-Language")
if contentLanguage != "" {
o.contentLanguage = &contentLanguage
}
// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
if err != nil {
return fmt.Errorf("failed to parse Content-Type: %q: %w", o.contentType, err)
}
if mediaType == "text/html" {
return fs.ErrorNotAFile
}
}
return nil
}
// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return errorReadOnly
}
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
return true
}
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.url()
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
// Add optional headers
for k, v := range fs.OpenOptionHeaders(options) {
req.Header.Add(k, v)
}
o.fs.addHeaders(req)
// Do the request
res, err := o.fs.httpClient.Do(req)
err = statusError(res, err)
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
if err = o.decodeMetadata(ctx, res); err != nil {
return nil, fmt.Errorf("decodeMetadata failed: %w", err)
}
return res.Body, nil
}
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return errorReadOnly
}
// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
return errorReadOnly
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return errorReadOnly
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errorReadOnly
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.contentType
}
var commandHelp = []fs.CommandHelp{{
Name: "set",
Short: "Set command for updating the config parameters.",
Long: `This set command can be used to update the config parameters
for a running http backend.
Usage examples:
` + "```console" + `
rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=remote: -o url=https://example.com
` + "```" + `
The option keys are named as they are in the config file.
This rebuilds the connection to the http backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.
It doesn't return anything.`,
}}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "set":
newOpt := f.opt
err := configstruct.Set(configmap.Simple(opt), &newOpt)
if err != nil {
return nil, fmt.Errorf("reading config: %w", err)
}
_, err = f.httpConnection(ctx, &newOpt)
if err != nil {
return nil, fmt.Errorf("updating session: %w", err)
}
f.opt = newOpt
keys := []string{}
for k := range opt {
keys = append(keys, k)
}
fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
metadata = make(fs.Metadata, 6)
if o.contentType != "" {
metadata["content-type"] = o.contentType
}
// Set system metadata
setMetadata := func(k string, v *string) {
if v == nil || *v == "" {
return
}
metadata[k] = *v
}
setMetadata("content-disposition", o.contentDisposition)
setMetadata("content-disposition-filename", o.contentDispositionFilename)
setMetadata("cache-control", o.cacheControl)
setMetadata("content-language", o.contentLanguage)
setMetadata("content-encoding", o.contentEncoding)
return metadata, nil
}
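// As a sketch (header values mirror those used in the backend's own tests, not
// part of the original source), an object whose HEAD response carried the usual
// headers would produce a map along the lines of:
//
//	fs.Metadata{
//		"content-type":                 "text/plain; charset=utf-8",
//		"content-encoding":             "gzip",
//		"content-language":             "en-US",
//		"cache-control":                "no-cache",
//		"content-disposition":          `attachment; filename="five.txt.gz"`,
//		"content-disposition-filename": "five.txt.gz",
//	}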
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.Commander = &Fs{}
_ fs.Metadataer = &Object{}
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/netstorage/netstorage_test.go | backend/netstorage/netstorage_test.go | package netstorage_test
import (
"testing"
"github.com/rclone/rclone/backend/netstorage"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestnStorage:",
NilObject: (*netstorage.Object)(nil),
})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/netstorage/netstorage.go | backend/netstorage/netstorage.go | // Package netstorage provides an interface to Akamai NetStorage API
package netstorage
import (
"context"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"errors"
"fmt"
gohash "hash"
"io"
"math/rand"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
// Constants
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
func init() {
fsi := &fs.RegInfo{
Name: "netstorage",
Description: "Akamai NetStorage",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "protocol",
Help: `Select between HTTP or HTTPS protocol.
Most users should choose HTTPS, which is the default.
HTTP is provided primarily for debugging purposes.`,
Examples: []fs.OptionExample{{
Value: "http",
Help: "HTTP protocol",
}, {
Value: "https",
Help: "HTTPS protocol",
}},
Default: "https",
Advanced: true,
}, {
Name: "host",
Help: `Domain+path of NetStorage host to connect to.
Format should be ` + "`<domain>/<internal folders>`",
Required: true,
Sensitive: true,
}, {
Name: "account",
Help: "Set the NetStorage account name",
Required: true,
Sensitive: true,
}, {
Name: "secret",
Help: `Set the NetStorage account secret/G2O key for authentication.
Please choose the 'y' option to set your own password then enter your secret.`,
IsPassword: true,
Required: true,
}},
}
fs.Register(fsi)
}
var commandHelp = []fs.CommandHelp{{
Name: "du",
Short: "Return disk usage information for a specified directory.",
Long: `The usage information returned includes the targeted directory as well as all
files stored in any sub-directories that may exist.`,
}, {
Name: "symlink",
Short: "You can create a symbolic link in ObjectStore with the symlink action.",
Long: `Pass the desired path location (including applicable sub-directories) ending in
the object that will be the target of the symlink (for example, /links/mylink).
Include the file extension for the object, if applicable.
Usage example:
` + "```console" + `
rclone backend symlink <src> <path>
` + "```",
},
}
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"host"`
Account string `config:"account"`
Secret string `config:"secret"`
Protocol string `config:"protocol"`
}
// Fs stores the interface to the remote HTTP files
type Fs struct {
name string
root string
features *fs.Features // optional features
opt Options // options for this backend
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the Netstorage server
pacer *fs.Pacer // to pace the API calls
filetype string // dir, file or symlink
dirscreated map[string]bool // if implicit dir has been created already
dirscreatedMutex sync.Mutex // mutex to protect dirscreated
statcache map[string][]File // cache successful stat requests
statcacheMutex sync.RWMutex // RWMutex to protect statcache
}
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
fs *Fs
filetype string // dir, file or symlink
remote string // remote path
size int64 // size of the object in bytes
modTime int64 // modification time of the object
md5sum string // md5sum of the object
fullURL string // full path URL
target string // symlink target when filetype is symlink
}
//------------------------------------------------------------------------------
// Stat is an object which holds the information of the stat element of the response xml
type Stat struct {
XMLName xml.Name `xml:"stat"`
Files []File `xml:"file"`
Directory string `xml:"directory,attr"`
}
// File is an object which holds the information of the file element of the response xml
type File struct {
XMLName xml.Name `xml:"file"`
Type string `xml:"type,attr"`
Name string `xml:"name,attr"`
NameBase64 string `xml:"name_base64,attr"`
Size int64 `xml:"size,attr"`
Md5 string `xml:"md5,attr"`
Mtime int64 `xml:"mtime,attr"`
Bytes int64 `xml:"bytes,attr"`
Files int64 `xml:"files,attr"`
Target string `xml:"target,attr"`
}
// List is an object which holds the information of the list element of the response xml
type List struct {
XMLName xml.Name `xml:"list"`
Files []File `xml:"file"`
Resume ListResume `xml:"resume"`
}
// ListResume represents the resume xml element of the list
type ListResume struct {
XMLName xml.Name `xml:"resume"`
Start string `xml:"start,attr"`
}
// Du represents the du xml element of the response
type Du struct {
XMLName xml.Name `xml:"du"`
Directory string `xml:"directory,attr"`
Duinfo DuInfo `xml:"du-info"`
}
// DuInfo represents the du-info xml element of the response
type DuInfo struct {
XMLName xml.Name `xml:"du-info"`
Files int64 `xml:"files,attr"`
Bytes int64 `xml:"bytes,attr"`
}
// GetName returns a normalized name of the Stat item
func (s Stat) GetName() xml.Name {
return s.XMLName
}
// GetName returns a normalized name of the List item
func (l List) GetName() xml.Name {
return l.XMLName
}
// GetName returns a normalized name of the Du item
func (d Du) GetName() xml.Name {
return d.XMLName
}
//------------------------------------------------------------------------------
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
//
// If root refers to an existing object, then it should return an Fs which
// points to the parent of that object and ErrorIsFile.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// The base URL (endpoint) is protocol + "://" + domain/internal folder
opt.Endpoint = opt.Protocol + "://" + opt.Endpoint
fs.Debugf(nil, "NetStorage NewFS endpoint %q", opt.Endpoint)
if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
}
// Decrypt the obscured credentials; even though the hex string is hard to eavesdrop, obscuring it adds an extra peace of mind
opt.Secret = obscure.MustReveal(opt.Secret)
// Parse the endpoint and stick the root onto it
base, err := url.Parse(opt.Endpoint)
if err != nil {
return nil, fmt.Errorf("couldn't parse URL %q: %w", opt.Endpoint, err)
}
u, err := rest.URLJoin(base, rest.URLPathEscape(root))
if err != nil {
return nil, fmt.Errorf("couldn't join URL %q and %q: %w", base.String(), root, err)
}
client := fshttp.NewClient(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
endpointURL: u.String(),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
dirscreated: make(map[string]bool),
statcache: make(map[string][]File),
}
f.srv = rest.NewClient(client)
f.srv.SetSigner(f.getAuth)
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
err = f.initFs(ctx, "")
switch err {
case nil:
// Object is the directory
return f, nil
case fs.ErrorObjectNotFound:
return f, nil
case fs.ErrorIsFile:
// Correct root if definitely pointing to a file
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
// Fs points to the parent directory
return f, err
default:
return nil, err
}
}
// Command the backend to run a named command: du or symlink
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "du":
// No arg parsing needed, the path is passed in the fs
return f.netStorageDuRequest(ctx)
case "symlink":
dst := ""
if len(arg) > 0 {
dst = arg[0]
} else {
return nil, errors.New("NetStorage symlink command: need argument for target")
}
// Strip off the trailing slash added by NewFs on object not found
URL := strings.TrimSuffix(f.url(""), "/")
return f.netStorageSymlinkRequest(ctx, URL, dst, nil)
default:
return nil, fs.ErrorCommandNotFound
}
}
// Name returns the configured name of the file system
func (f *Fs) Name() string {
return f.name
}
// Root returns the root for the filesystem
func (f *Fs) Root() string {
return f.root
}
// String returns the URL for the filesystem
func (f *Fs) String() string {
return f.endpointURL
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// NewObject finds the Object at remote.
// If it can't be found it returns fs.ErrorObjectNotFound
// If it isn't a file, it returns fs.ErrorIsDir
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
URL := f.url(remote)
files, err := f.netStorageStatRequest(ctx, URL, false)
if err != nil {
return nil, err
}
if files == nil {
fs.Errorf(nil, "Stat for %q has empty files", URL)
return nil, fs.ErrorObjectNotFound
}
file := files[0]
switch file.Type {
case
"file",
"symlink":
return f.newObjectWithInfo(remote, &file)
case "dir":
return nil, fs.ErrorIsDir
default:
return nil, fmt.Errorf("object of an unsupported type %s for %q: %w", file.Type, URL, err)
}
}
// initFs initializes Fs based on the stat reply
func (f *Fs) initFs(ctx context.Context, dir string) error {
// Path must end with the slash, so the join later will work correctly
defer func() {
if !strings.HasSuffix(f.endpointURL, "/") {
f.endpointURL += "/"
}
}()
URL := f.url(dir)
files, err := f.netStorageStatRequest(ctx, URL, true)
if err == fs.ErrorObjectNotFound || files == nil {
return fs.ErrorObjectNotFound
}
if err != nil {
return err
}
f.filetype = files[0].Type
switch f.filetype {
case "dir":
// This directory is known to exist, adding to explicit directories
f.setDirscreated(URL)
return nil
case
"file",
"symlink":
// Fs should point to the parent of that object and return ErrorIsFile
lastindex := strings.LastIndex(f.endpointURL, "/")
if lastindex != -1 {
f.endpointURL = f.endpointURL[0 : lastindex+1]
} else {
fs.Errorf(nil, "Remote URL %q unexpectedly does not include the slash", f.endpointURL)
}
return fs.ErrorIsFile
default:
err = fmt.Errorf("unsupported object type %s for %q: %w", f.filetype, URL, err)
f.filetype = ""
return err
}
}
// url joins the remote onto the endpoint URL
func (f *Fs) url(remote string) string {
if remote == "" {
return f.endpointURL
}
pathescapeURL := rest.URLPathEscape(remote)
// Strip off the initial "./" from the path, which the path escape function can add following RFC 3986 4.2
// (a segment containing a colon must be preceded by a dot-segment, e.g. "./this:that", to make a relative-path reference).
pathescapeURL = strings.TrimPrefix(pathescapeURL, "./")
// Cannot use rest.URLJoin() here because NetStorage is an object store and allows a "."
// directory name, which would be eliminated by the join function.
return f.endpointURL + pathescapeURL
}
// getFileName returns the file name if present, otherwise decoded name_base64
// if present, otherwise an empty string
func (f *Fs) getFileName(file *File) string {
if file.Name != "" {
return file.Name
}
if file.NameBase64 != "" {
decoded, err := base64.StdEncoding.DecodeString(file.NameBase64)
if err == nil {
return string(decoded)
}
fs.Errorf(nil, "Failed to base64 decode object %s: %v", file.NameBase64, err)
}
return ""
}
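// Illustrative behaviour of getFileName above (a sketch, not part of the
// original source):
//
//	f.getFileName(&File{Name: "foo.txt"})            // "foo.txt"
//	f.getFileName(&File{NameBase64: "Zm9vLnR4dA=="}) // "foo.txt" (decoded from base64)
//	f.getFileName(&File{})                           // ""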
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.filetype == "" {
// This happens in two scenarios.
// 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
// 2. List/ListR is called from the context of test_all and not the regular rclone binary.
err := f.initFs(ctx, dir)
if err != nil {
if err == fs.ErrorObjectNotFound {
return nil, fs.ErrorDirNotFound
}
return nil, err
}
}
URL := f.url(dir)
files, err := f.netStorageDirRequest(ctx, URL)
if err != nil {
return nil, err
}
if dir != "" && !strings.HasSuffix(dir, "/") {
dir += "/"
}
for _, item := range files {
name := dir + f.getFileName(&item)
switch item.Type {
case "dir":
when := time.Unix(item.Mtime, 0)
entry := fs.NewDir(name, when).SetSize(item.Bytes).SetItems(item.Files)
entries = append(entries, entry)
case "file":
if entry, _ := f.newObjectWithInfo(name, &item); entry != nil {
entries = append(entries, entry)
}
case "symlink":
var entry fs.Object
// Add .rclonelink suffix to allow local backend code to convert to a symlink.
// In case both a .rclonelink file AND a symlink file exist, the first will be used.
if entry, _ = f.newObjectWithInfo(name+".rclonelink", &item); entry != nil {
fs.Infof(nil, "Converting a symlink to the rclonelink %s target %s", entry.Remote(), item.Target)
entries = append(entries, entry)
}
default:
fs.Logf(nil, "Ignoring unsupported object type %s for %q path", item.Type, name)
}
}
return entries, nil
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.filetype == "" {
// This happens in two scenarios.
// 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
// 2. List/ListR is called from the context of test_all and not the regular rclone binary.
err := f.initFs(ctx, dir)
if err != nil {
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
return err
}
}
if !strings.HasSuffix(dir, "/") && dir != "" {
dir += "/"
}
URL := f.url(dir)
u, err := url.Parse(URL)
if err != nil {
fs.Errorf(nil, "Unable to parse URL %q: %v", URL, err)
return fs.ErrorDirNotFound
}
list := list.NewHelper(callback)
for resumeStart := u.Path; resumeStart != ""; {
var files []File
files, resumeStart, err = f.netStorageListRequest(ctx, URL, u.Path)
if err != nil {
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
return err
}
for _, item := range files {
name := f.getFileName(&item)
// List output includes full paths starting from [CP Code]/
path := strings.TrimPrefix("/"+name, u.Path)
if path == "" {
// Skip the starting directory itself
continue
}
switch item.Type {
case "dir":
when := time.Unix(item.Mtime, 0)
entry := fs.NewDir(dir+strings.TrimSuffix(path, "/"), when)
if err := list.Add(entry); err != nil {
return err
}
case "file":
if entry, _ := f.newObjectWithInfo(dir+path, &item); entry != nil {
if err := list.Add(entry); err != nil {
return err
}
}
case "symlink":
// Add .rclonelink suffix to allow local backend code to convert to a symlink.
// In case both a .rclonelink file AND a symlink file exist, the first will be used.
if entry, _ := f.newObjectWithInfo(dir+path+".rclonelink", &item); entry != nil {
fs.Infof(nil, "Converting a symlink to the rclonelink %s for target %s", entry.Remote(), item.Target)
if err := list.Add(entry); err != nil {
return err
}
}
default:
fs.Logf(nil, "Ignoring unsupported object type %s for %s path", item.Type, name)
}
}
if resumeStart != "" {
// Perform subsequent list action call, construct the
// URL where the previous request finished
u, err := url.Parse(f.endpointURL)
if err != nil {
fs.Errorf(nil, "Unable to parse URL %q: %v", f.endpointURL, err)
return fs.ErrorDirNotFound
}
resumeURL, err := rest.URLJoin(u, rest.URLPathEscape(resumeStart))
if err != nil {
fs.Errorf(nil, "Unable to join URL %q for resumeStart %s: %v", f.endpointURL, resumeStart, err)
return fs.ErrorDirNotFound
}
URL = resumeURL.String()
}
}
return list.Flush()
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
err := f.implicitCheck(ctx, src.Remote(), true)
if err != nil {
return nil, err
}
// Barebones object will get filled in Update
o := &Object{
fs: f,
remote: src.Remote(),
fullURL: f.url(src.Remote()),
}
// We pass through the Update's error
err = o.Update(ctx, in, src, options...)
if err != nil {
return nil, err
}
return o, nil
}
// implicitCheck prevents implicit dir creation by doing mkdir from the base up to the current dir.
// It does NOT check whether the dirs it creates conflict with existing dirs/files, so it can result in duplicates.
func (f *Fs) implicitCheck(ctx context.Context, remote string, isfile bool) error {
// Find base (URL including the CPCODE path) and root (what follows after that)
URL := f.url(remote)
u, err := url.Parse(URL)
if err != nil {
fs.Errorf(nil, "Unable to parse URL %q while implicit checking directory: %v", URL, err)
return err
}
startPos := 0
if strings.HasPrefix(u.Path, "/") {
startPos = 1
}
pos := strings.Index(u.Path[startPos:], "/")
if pos == -1 {
fs.Errorf(nil, "URL %q unexpectedly does not include the slash in the CPCODE path", URL)
return nil
}
root := rest.URLPathEscape(u.Path[startPos+pos+1:])
u.Path = u.Path[:startPos+pos]
base := u.String()
if !strings.HasSuffix(base, "/") {
base += "/"
}
if isfile {
// Get the base name of root
lastindex := strings.LastIndex(root, "/")
if lastindex == -1 {
// We are at the level of CPCODE path
return nil
}
root = root[0 : lastindex+1]
}
// We make sure root always has "/" at the end
if !strings.HasSuffix(root, "/") {
root += "/"
}
for root != "" {
frontindex := strings.Index(root, "/")
if frontindex == -1 {
return nil
}
frontdir := root[0 : frontindex+1]
root = root[frontindex+1:]
base += frontdir
if !f.testAndSetDirscreated(base) {
fs.Infof(nil, "Implicitly create directory %s", base)
err := f.netStorageMkdirRequest(ctx, base)
if err != nil {
fs.Errorf(nil, "Mkdir request in implicit check failed for base %s: %v", base, err)
return err
}
}
}
return nil
}
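// As a hedged illustration of the walk above (hypothetical paths, not part of
// the original source): a remote resolving to
// "https://host/cpcode/a/b/c.txt" with isfile=true trims the file name, then
// issues mkdir requests (skipping any already recorded in dirscreated) for
//
//	https://host/cpcode/a/
//	https://host/cpcode/a/b/
//
// before the file itself is uploaded.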
// Purge all files in the directory specified.
// NetStorage quick-delete is disabled by default AND not instantaneous.
// Returns fs.ErrorCantPurge when quick-delete fails.
func (f *Fs) Purge(ctx context.Context, dir string) error {
URL := f.url(dir)
const actionHeader = "version=1&action=quick-delete&quick-delete=imreallyreallysure"
if _, err := f.callBackend(ctx, URL, "POST", actionHeader, true, nil, nil); err != nil {
fs.Logf(nil, "Purge using quick-delete failed, fallback on recursive delete: %v", err)
return fs.ErrorCantPurge
}
fs.Logf(nil, "Purge using quick-delete has been queued, you may not see immediate changes")
return nil
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Pass through error from Put
return f.Put(ctx, in, src, options...)
}
// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
return o.fs
}
// String returns the URL to the remote HTTP file
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
return o.size
}
// Md5Sum returns the md5 of the object
func (o *Object) Md5Sum() string {
return o.md5sum
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return time.Unix(o.modTime, 0)
}
// SetModTime sets the modification and access time to the specified time
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
URL := o.fullURL
when := strconv.FormatInt(modTime.Unix(), 10)
actionHeader := "version=1&action=mtime&mtime=" + when
if _, err := o.fs.callBackend(ctx, URL, "POST", actionHeader, true, nil, nil); err != nil {
fs.Debugf(nil, "NetStorage action mtime failed for %q: %v", URL, err)
return err
}
o.fs.deleteStatCache(URL)
o.modTime = modTime.Unix()
return nil
}
// Storable returns whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
return o.netStorageDownloadRequest(ctx, options)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Mkdir makes the root directory of the Fs object
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// ImplicitCheck will mkdir from base up to dir, if not already in dirscreated
return f.implicitCheck(ctx, dir, false)
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.netStorageDeleteRequest(ctx)
}
// Rmdir removes the root directory of the Fs object
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.netStorageRmdirRequest(ctx, dir)
}
// Update netstorage with the object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
o.size = src.Size()
o.modTime = src.ModTime(ctx).Unix()
// Don't do md5 check because that's done by server
o.md5sum = ""
err := o.netStorageUploadRequest(ctx, in, src)
// We return an object updated with the source stats;
// we don't refetch the object after upload
if err != nil {
return err
}
return nil
}
// newObjectWithInfo creates an fs.Object for any netstorage.File or symlink.
// If it can't be found it returns the error fs.ErrorObjectNotFound.
// It returns fs.ErrorIsDir error for directory objects, but still fills the
// fs.Object structure (for directory operations).
func (f *Fs) newObjectWithInfo(remote string, info *File) (fs.Object, error) {
if info == nil {
return nil, fs.ErrorObjectNotFound
}
URL := f.url(remote)
size := info.Size
if info.Type == "symlink" {
// File size for symlinks is absent but for .rclonelink to work
// the size should be the length of the target name
size = int64(len(info.Target))
}
o := &Object{
fs: f,
filetype: info.Type,
remote: remote,
size: size,
modTime: info.Mtime,
md5sum: info.Md5,
fullURL: URL,
target: info.Target,
}
if info.Type == "dir" {
return o, fs.ErrorIsDir
}
return o, nil
}
// getAuth is the signing hook to get the NetStorage auth
func (f *Fs) getAuth(req *http.Request) error {
// Set Authorization header
dataHeader := generateDataHeader(f)
path := req.URL.RequestURI()
//lint:ignore SA1008 false positive when running staticcheck, the header name is according to docs even if not canonical
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1008
actionHeader := req.Header["X-Akamai-ACS-Action"][0]
fs.Debugf(nil, "NetStorage API %s call %s for path %q", req.Method, actionHeader, path)
req.Header.Set("X-Akamai-ACS-Auth-Data", dataHeader)
req.Header.Set("X-Akamai-ACS-Auth-Sign", generateSignHeader(f, dataHeader, path, actionHeader))
return nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
423, // Locked
429, // Too Many Requests
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// callBackend calls NetStorage API using either rest.Call or rest.CallXML function,
// depending on whether the response is required
func (f *Fs) callBackend(ctx context.Context, URL, method, actionHeader string, noResponse bool, response any, options []fs.OpenOption) (io.ReadCloser, error) {
opts := rest.Opts{
Method: method,
RootURL: URL,
NoResponse: noResponse,
ExtraHeaders: map[string]string{
"*X-Akamai-ACS-Action": actionHeader,
},
}
if options != nil {
opts.Options = options
}
var resp *http.Response
err := f.pacer.Call(func() (bool, error) {
var err error
if response != nil {
resp, err = f.srv.CallXML(ctx, &opts, nil, response)
} else {
resp, err = f.srv.Call(ctx, &opts)
}
return shouldRetry(ctx, resp, err)
})
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
// 404 HTTP code translates into Object not found
return nil, fs.ErrorObjectNotFound
}
return nil, fmt.Errorf("failed to call NetStorage API: %w", err)
}
if noResponse {
return nil, nil
}
return resp.Body, nil
}
// netStorageStatRequest performs a NetStorage stat request
func (f *Fs) netStorageStatRequest(ctx context.Context, URL string, directory bool) ([]File, error) {
if strings.HasSuffix(URL, ".rclonelink") {
fs.Infof(nil, "Converting rclonelink to a symlink on the stat request %q", URL)
URL = strings.TrimSuffix(URL, ".rclonelink")
}
URL = strings.TrimSuffix(URL, "/")
files := f.getStatCache(URL)
if files == nil {
const actionHeader = "version=1&action=stat&implicit=yes&format=xml&encoding=utf-8&slash=both"
statResp := &Stat{}
if _, err := f.callBackend(ctx, URL, "GET", actionHeader, false, statResp, nil); err != nil {
fs.Debugf(nil, "NetStorage action stat failed for %q: %v", URL, err)
return nil, err
}
files = statResp.Files
f.setStatCache(URL, files)
}
// Multiple objects can be returned with the "slash=both" option,
// when file/symlink/directory has the same name
for i := range files {
if files[i].Type == "symlink" {
// Add .rclonelink suffix to allow local backend code to convert to a symlink.
files[i].Name += ".rclonelink"
fs.Infof(nil, "Converting a symlink to the rclonelink on the stat request %s", files[i].Name)
}
entrywanted := (directory && files[i].Type == "dir") ||
(!directory && files[i].Type != "dir")
if entrywanted {
files[0], files[i] = files[i], files[0]
}
}
return files, nil
}
// netStorageDirRequest performs a NetStorage dir request
func (f *Fs) netStorageDirRequest(ctx context.Context, URL string) ([]File, error) {
const actionHeader = "version=1&action=dir&format=xml&encoding=utf-8"
statResp := &Stat{}
if _, err := f.callBackend(ctx, URL, "GET", actionHeader, false, statResp, nil); err != nil {
if err == fs.ErrorObjectNotFound {
return nil, fs.ErrorDirNotFound
}
fs.Debugf(nil, "NetStorage action dir failed for %q: %v", URL, err)
return nil, err
}
return statResp.Files, nil
}
// netStorageListRequest performs a NetStorage list request
// The second return value is the resumeStart string; if it is not empty, the function should be called again with the adjusted URL to continue the listing.
func (f *Fs) netStorageListRequest(ctx context.Context, URL, endPath string) ([]File, string, error) {
actionHeader := "version=1&action=list&mtime_all=yes&format=xml&encoding=utf-8"
if !pathIsOneLevelDeep(endPath) {
// Add end= to limit the depth to endPath
escapeEndPath := url.QueryEscape(strings.TrimSuffix(endPath, "/"))
// The "0" character exists in place of the trailing slash to
// accommodate ObjectStore directory logic
end := "&end=" + strings.TrimSuffix(escapeEndPath, "/") + "0"
actionHeader += end
}
listResp := &List{}
if _, err := f.callBackend(ctx, URL, "GET", actionHeader, false, listResp, nil); err != nil {
if err == fs.ErrorObjectNotFound {
// List action is known to return 404 for a valid [CP Code] path with no objects inside.
// Call stat to find out whether it is an empty directory or path does not exist.
fs.Debugf(nil, "NetStorage action list returned 404, call stat for %q", URL)
files, err := f.netStorageStatRequest(ctx, URL, true)
if err == nil && len(files) > 0 && files[0].Type == "dir" {
return []File{}, "", nil
}
}
fs.Debugf(nil, "NetStorage action list failed for %q: %v", URL, err)
return nil, "", err
}
return listResp.Files, listResp.Resume.Start, nil
}
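// Note: the customReader type used in netStorageUploadRequest below is defined
// elsewhere in this file. As an assumption-based sketch, such a func adapter
// implementing io.Reader would look like:
//
//	type customReader func(p []byte) (n int, err error)
//
//	func (c customReader) Read(p []byte) (n int, err error) { return c(p) }
//
// Wrapping the hash reader this way lets the upload append the signed trailer
// headers at the moment the request body reaches EOF.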
// netStorageUploadRequest performs a NetStorage upload request
func (o *Object) netStorageUploadRequest(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
URL := o.fullURL
if URL == "" {
URL = o.fs.url(src.Remote())
}
if strings.HasSuffix(URL, ".rclonelink") {
bits, err := io.ReadAll(in)
if err != nil {
return err
}
targ := string(bits)
symlinkloc := strings.TrimSuffix(URL, ".rclonelink")
fs.Infof(nil, "Converting rclonelink to a symlink on upload %s target %s", symlinkloc, targ)
_, err = o.fs.netStorageSymlinkRequest(ctx, symlinkloc, targ, &o.modTime)
return err
}
u, err := url.Parse(URL)
if err != nil {
return fmt.Errorf("unable to parse URL %q while uploading: %w", URL, err)
}
path := u.RequestURI()
const actionHeader = "version=1&action=upload&sha256=atend&mtime=atend"
trailers := &http.Header{}
hr := newHashReader(in, sha256.New())
reader := customReader(
func(p []byte) (n int, err error) {
if n, err = hr.Read(p); err != nil && err == io.EOF {
// Send the "chunked trailer" after upload of the object
digest := hex.EncodeToString(hr.Sum(nil))
actionHeader := "version=1&action=upload&sha256=" + digest +
"&mtime=" + strconv.FormatInt(src.ModTime(ctx).Unix(), 10)
trailers.Add("X-Akamai-ACS-Action", actionHeader)
dataHeader := generateDataHeader(o.fs)
trailers.Add("X-Akamai-ACS-Auth-Data", dataHeader)
signHeader := generateSignHeader(o.fs, dataHeader, path, actionHeader)
trailers.Add("X-Akamai-ACS-Auth-Sign", signHeader)
}
return
},
)
var resp *http.Response
opts := rest.Opts{
Method: "PUT",
RootURL: URL,
NoResponse: true,
Options: options,
Body: reader,
Trailer: trailers,
ExtraHeaders: map[string]string{
"*X-Akamai-ACS-Action": actionHeader,
},
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
})
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | true |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/bin/cross-compile.go | bin/cross-compile.go | //go:build ignore
// Cross compile rclone - in go because I hate bash ;-)
package main
import (
"flag"
"fmt"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"sync"
"text/template"
"time"
)
var (
// Flags
debug = flag.Bool("d", false, "Print commands instead of running them")
parallel = flag.Int("parallel", runtime.NumCPU(), "Number of commands to run in parallel")
copyAs = flag.String("release", "", "Make copies of the releases with this name")
gitLog = flag.String("git-log", "", "git log to include as well")
include = flag.String("include", "^.*$", "os/arch regexp to include")
exclude = flag.String("exclude", "^$", "os/arch regexp to exclude")
cgo = flag.Bool("cgo", false, "Use cgo for the build")
noClean = flag.Bool("no-clean", false, "Don't clean the build directory before running")
tags = flag.String("tags", "", "Space separated list of build tags")
buildmode = flag.String("buildmode", "", "Passed to go build -buildmode flag")
compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip")
extraEnv = flag.String("env", "", "comma separated list of VAR=VALUE env vars to set")
macOSSDK = flag.String("macos-sdk", "", "macOS SDK to use")
macOSArch = flag.String("macos-arch", "", "macOS arch to use")
extraCgoCFlags = flag.String("cgo-cflags", "", "extra CGO_CFLAGS")
extraCgoLdFlags = flag.String("cgo-ldflags", "", "extra CGO_LDFLAGS")
)
// GOOS/GOARCH pairs we build for
//
// If the GOARCH contains a - it is a synthetic arch with more parameters
var osarches = []string{
"windows/386",
"windows/amd64",
"windows/arm64",
"darwin/amd64",
"darwin/arm64",
"linux/386",
"linux/amd64",
"linux/arm",
"linux/arm-v6",
"linux/arm-v7",
"linux/arm64",
"linux/mips",
"linux/mipsle",
"freebsd/386",
"freebsd/amd64",
"freebsd/arm",
"freebsd/arm-v6",
"freebsd/arm-v7",
"netbsd/386",
"netbsd/amd64",
"netbsd/arm",
"netbsd/arm-v6",
"netbsd/arm-v7",
"openbsd/386",
"openbsd/amd64",
"plan9/386",
"plan9/amd64",
"solaris/amd64",
// "js/wasm", // Rclone is too big for js/wasm until https://github.com/golang/go/issues/64856 is fixed
"aix/ppc64",
}
// Special environment flags for a given arch
var archFlags = map[string][]string{
"386": {"GO386=softfloat"},
"mips": {"GOMIPS=softfloat"},
"mipsle": {"GOMIPS=softfloat"},
"arm": {"GOARM=5"},
"arm-v6": {"GOARM=6"},
"arm-v7": {"GOARM=7"},
}
// Map Go architectures to NFPM architectures
// Any missing are passed straight through
var goarchToNfpm = map[string]string{
"arm": "arm5",
"arm-v6": "arm6",
"arm-v7": "arm7",
}
// runEnv - run a shell command with env
func runEnv(args, env []string) error {
if *debug {
args = append([]string{"echo"}, args...)
}
cmd := exec.Command(args[0], args[1:]...)
if env != nil {
cmd.Env = append(os.Environ(), env...)
}
if *debug {
log.Printf("args = %v, env = %v\n", args, cmd.Env)
}
out, err := cmd.CombinedOutput()
if err != nil {
log.Print("----------------------------")
log.Printf("Failed to run %v: %v", args, err)
log.Printf("Command output was:\n%s", out)
log.Print("----------------------------")
}
return err
}
// run a shell command
func run(args ...string) {
err := runEnv(args, nil)
if err != nil {
log.Fatalf("Exiting after error: %v", err)
}
}
// chdir or die
func chdir(dir string) {
err := os.Chdir(dir)
if err != nil {
log.Fatalf("Couldn't cd into %q: %v", dir, err)
}
}
// substitute data from go template file in to file out
func substitute(inFile, outFile string, data interface{}) {
t, err := template.ParseFiles(inFile)
if err != nil {
log.Fatalf("Failed to read template file %q: %v", inFile, err)
}
out, err := os.Create(outFile)
if err != nil {
log.Fatalf("Failed to create output file %q: %v", outFile, err)
}
defer func() {
err := out.Close()
if err != nil {
log.Fatalf("Failed to close output file %q: %v", outFile, err)
}
}()
err = t.Execute(out, data)
if err != nil {
log.Fatalf("Failed to substitute template file %q: %v", inFile, err)
}
}
// build the zip package return its name
func buildZip(dir string) string {
// Now build the zip
run("cp", "-a", "../MANUAL.txt", filepath.Join(dir, "README.txt"))
run("cp", "-a", "../MANUAL.html", filepath.Join(dir, "README.html"))
run("cp", "-a", "../rclone.1", dir)
if *gitLog != "" {
run("cp", "-a", *gitLog, dir)
}
zip := dir + ".zip"
run("zip", "-r9", zip, dir)
return zip
}
// Build .deb and .rpm packages
//
// It returns a list of artifacts it has made
func buildDebAndRpm(dir, version, goarch string) []string {
// Make internal version number acceptable to .deb and .rpm
pkgVersion := version[1:]
pkgVersion = strings.ReplaceAll(pkgVersion, "β", "-beta")
pkgVersion = strings.ReplaceAll(pkgVersion, "-", ".")
nfpmArch, ok := goarchToNfpm[goarch]
if !ok {
nfpmArch = goarch
}
// Make nfpm.yaml from the template
substitute("../bin/nfpm.yaml", path.Join(dir, "nfpm.yaml"), map[string]string{
"Version": pkgVersion,
"Arch": nfpmArch,
})
// build them
var artifacts []string
for _, pkg := range []string{".deb", ".rpm"} {
artifact := dir + pkg
run("bash", "-c", "cd "+dir+" && nfpm -f nfpm.yaml pkg -t ../"+artifact)
artifacts = append(artifacts, artifact)
}
return artifacts
}
// Strip a version suffix off the arch if present
func stripVersion(goarch string) string {
i := strings.Index(goarch, "-")
if i < 0 {
return goarch
}
return goarch[:i]
}
// run the command returning trimmed output
func runOut(command ...string) string {
out, err := exec.Command(command[0], command[1:]...).Output()
if err != nil {
log.Fatalf("Failed to run %q: %v", command, err)
}
return strings.TrimSpace(string(out))
}
// Generate Windows resource system object file (.syso), which can be picked
// up by the following go build for embedding version information and icon
// resources into the executable.
func generateResourceWindows(version, arch string) func() {
sysoPath := fmt.Sprintf("../resource_windows_%s.syso", arch) // Use explicit destination filename, even though it should be same as default, so that we are sure we have the correct reference to it
if err := os.Remove(sysoPath); !os.IsNotExist(err) {
// Note: This one we choose to treat as fatal, to avoid any risk of picking up an old .syso file without noticing.
log.Fatalf("Failed to remove existing Windows %s resource system object file %s: %v", arch, sysoPath, err)
}
args := []string{"go", "run", "../bin/resource_windows.go", "-arch", arch, "-version", version, "-syso", sysoPath}
if err := runEnv(args, nil); err != nil {
log.Printf("Warning: Couldn't generate Windows %s resource system object file, binaries will not have version information or icon embedded", arch)
return nil
}
if _, err := os.Stat(sysoPath); err != nil {
log.Printf("Warning: Couldn't find generated Windows %s resource system object file, binaries will not have version information or icon embedded", arch)
return nil
}
return func() {
if err := os.Remove(sysoPath); err != nil && !os.IsNotExist(err) {
log.Printf("Warning: Couldn't remove generated Windows %s resource system object file %s: %v. Please remove it manually.", arch, sysoPath, err)
}
}
}
// build the binary in dir returning success or failure
func compileArch(version, goos, goarch, dir string) bool {
log.Printf("Compiling %s/%s into %s", goos, goarch, dir)
goarchBase := stripVersion(goarch)
output := filepath.Join(dir, "rclone")
if goos == "windows" {
output += ".exe"
if cleanupFn := generateResourceWindows(version, goarchBase); cleanupFn != nil {
defer cleanupFn()
}
}
err := os.MkdirAll(dir, 0777)
if err != nil {
log.Fatalf("Failed to mkdir: %v", err)
}
args := []string{
"go", "build",
"--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version,
"-trimpath",
"-o", output,
"-tags", *tags,
}
if *buildmode != "" {
args = append(args,
"-buildmode", *buildmode,
)
}
args = append(args,
"..",
)
env := []string{
"GOOS=" + goos,
"GOARCH=" + goarchBase,
}
if *extraEnv != "" {
env = append(env, strings.Split(*extraEnv, ",")...)
}
var (
cgoCFlags []string
cgoLdFlags []string
)
if *macOSSDK != "" {
flag := "-isysroot " + runOut("xcrun", "--sdk", *macOSSDK, "--show-sdk-path")
cgoCFlags = append(cgoCFlags, flag)
cgoLdFlags = append(cgoLdFlags, flag)
}
if *macOSArch != "" {
flag := "-arch " + *macOSArch
cgoCFlags = append(cgoCFlags, flag)
cgoLdFlags = append(cgoLdFlags, flag)
}
if *extraCgoCFlags != "" {
cgoCFlags = append(cgoCFlags, *extraCgoCFlags)
}
if *extraCgoLdFlags != "" {
cgoLdFlags = append(cgoLdFlags, *extraCgoLdFlags)
}
if len(cgoCFlags) > 0 {
env = append(env, "CGO_CFLAGS="+strings.Join(cgoCFlags, " "))
}
if len(cgoLdFlags) > 0 {
env = append(env, "CGO_LDFLAGS="+strings.Join(cgoLdFlags, " "))
}
if !*cgo {
env = append(env, "CGO_ENABLED=0")
} else {
env = append(env, "CGO_ENABLED=1")
}
if flags, ok := archFlags[goarch]; ok {
env = append(env, flags...)
}
err = runEnv(args, env)
if err != nil {
log.Printf("Error compiling %s/%s: %v", goos, goarch, err)
return false
}
if !*compileOnly {
if goos != "js" {
artifacts := []string{buildZip(dir)}
// build a .deb and .rpm if appropriate
if goos == "linux" {
artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
}
if *copyAs != "" {
for _, artifact := range artifacts {
run("ln", artifact, strings.Replace(artifact, "-"+version, "-"+*copyAs, 1))
}
}
}
// tidy up
run("rm", "-rf", dir)
}
log.Printf("Done compiling %s/%s", goos, goarch)
return true
}
func compile(version string) {
start := time.Now()
wg := new(sync.WaitGroup)
run := make(chan func(), *parallel)
for i := 0; i < *parallel; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for f := range run {
f()
}
}()
}
includeRe, err := regexp.Compile(*include)
if err != nil {
log.Fatalf("Bad -include regexp: %v", err)
}
excludeRe, err := regexp.Compile(*exclude)
if err != nil {
log.Fatalf("Bad -exclude regexp: %v", err)
}
compiled := 0
var failuresMu sync.Mutex
var failures []string
for _, osarch := range osarches {
if excludeRe.MatchString(osarch) || !includeRe.MatchString(osarch) {
continue
}
parts := strings.Split(osarch, "/")
if len(parts) != 2 {
log.Fatalf("Bad osarch %q", osarch)
}
goos, goarch := parts[0], parts[1]
userGoos := goos
if goos == "darwin" {
userGoos = "osx"
}
dir := filepath.Join("rclone-" + version + "-" + userGoos + "-" + goarch)
run <- func() {
if !compileArch(version, goos, goarch, dir) {
failuresMu.Lock()
failures = append(failures, goos+"/"+goarch)
failuresMu.Unlock()
}
}
compiled++
}
close(run)
wg.Wait()
log.Printf("Compiled %d arches in %v", compiled, time.Since(start))
if len(failures) > 0 {
sort.Strings(failures)
log.Printf("%d compile failures:\n %s\n", len(failures), strings.Join(failures, "\n "))
os.Exit(1)
}
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 1 {
log.Fatalf("Syntax: %s <version>", os.Args[0])
}
version := args[0]
if !*noClean {
run("rm", "-rf", "build")
run("mkdir", "build")
}
chdir("build")
err := os.WriteFile("version.txt", []byte(fmt.Sprintf("rclone %s\n", version)), 0666)
if err != nil {
log.Fatalf("Couldn't write version.txt: %v", err)
}
compile(version)
}
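// Example invocation (illustrative flags and version, run from the repo root):
//
//	go run bin/cross-compile.go -include '^linux/' -parallel 4 v1.70.0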
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/bin/rules.go | bin/rules.go | // Ruleguard file implementing custom linting rules.
//
// Note that when used from golangci-lint (using the gocritic linter configured
// with the ruleguard check), because rule files are not handled by
// golangci-lint itself, changes will not invalidate the golangci-lint cache,
// and you must manually clean the cache (golangci-lint cache clean) for them to
// be considered, as explained here:
// https://www.quasilyte.dev/blog/post/ruleguard/#using-from-the-golangci-lint
//
// Note that this file is excluded from the build with a build constraint, but
// using a tag different from "ignore" to avoid go mod tidy making dsl an indirect
// dependency, as explained here:
// https://github.com/quasilyte/go-ruleguard?tab=readme-ov-file#troubleshooting
//go:build ruleguard
// Package gorules implementing custom linting rules using ruleguard
package gorules
import "github.com/quasilyte/go-ruleguard/dsl"
// Suggest rewriting "log.(Print|Fatal|Panic)(f|ln)?" to
// "fs.(Printf|Fatalf|Panicf)", and do it if running golangci-lint with
// argument --fix. The suggestion wraps a single non-string argument or
// variadic arguments in fmt.Sprint to be compatible with the format string
// argument of the fs functions.
//
// Caveats:
// - After applying the suggestions, imports may have to be fixed manually,
// removing unused "log", adding "github.com/rclone/rclone/fs" and "fmt",
// and if there was a variable named "fs" or "fmt" in the scope the name
// clash must be fixed.
// - Suggested code is incorrect when within the fs package itself, due to the
// "fs." prefix. Could handle it using the condition
// ".Where(m.File().PkgPath.Matches(`github.com/rclone/rclone/fs`))"
// but not sure how to avoid duplicating all checks with and without this
// condition so haven't bothered yet.
func useFsLog(m dsl.Matcher) {
m.Match(`log.Print($x)`, `log.Println($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Log(nil, $x)`)
m.Match(`log.Print($*args)`, `log.Println($*args)`).Suggest(`fs.Log(nil, fmt.Sprint($args))`)
m.Match(`log.Printf($*args)`).Suggest(`fs.Logf(nil, $args)`)
m.Match(`log.Fatal($x)`, `log.Fatalln($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Fatal(nil, $x)`)
m.Match(`log.Fatal($*args)`, `log.Fatalln($*args)`).Suggest(`fs.Fatal(nil, fmt.Sprint($args))`)
m.Match(`log.Fatalf($*args)`).Suggest(`fs.Fatalf(nil, $args)`)
m.Match(`log.Panic($x)`, `log.Panicln($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Panic(nil, $x)`)
m.Match(`log.Panic($*args)`, `log.Panicln($*args)`).Suggest(`fs.Panic(nil, fmt.Sprint($args))`)
m.Match(`log.Panicf($*args)`).Suggest(`fs.Panicf(nil, $args)`)
}
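// For illustration, the kind of rewrite these rules suggest (example code, not
// taken from the rclone source):
//
//	log.Printf("copied %d files", n)   // before
//	fs.Logf(nil, "copied %d files", n) // after --fix
//
//	log.Fatal(err)                     // before
//	fs.Fatal(nil, fmt.Sprint(err))     // after --fix (non-string argument wrapped)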
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/bin/not-in-stable.go | bin/not-in-stable.go | // This shows the commits not yet in the stable branch
package main
import (
"bytes"
"flag"
"fmt"
"log"
"os"
"os/exec"
"regexp"
"github.com/coreos/go-semver/semver"
)
// version=$(sed <VERSION -e 's/\.[0-9]+*$//g')
// echo "Checking version ${version}"
// echo
//
// git log --oneline ${version}.0..${version}-stable | cut -c11- | sort > /tmp/in-stable
// git log --oneline ${version}.0..master | cut -c11- | sort > /tmp/in-master
//
// comm -23 /tmp/in-master /tmp/in-stable
var logRe = regexp.MustCompile(`^([0-9a-f]{4,}) (.*)$`)
// run the test passed in with the -run passed in
func readCommits(from, to string) (logMap map[string]string, logs []string) {
cmd := exec.Command("git", "log", "--oneline", from+".."+to)
out, err := cmd.Output()
if err != nil {
log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
}
logMap = map[string]string{}
logs = []string{}
for line := range bytes.SplitSeq(out, []byte{'\n'}) {
if len(line) == 0 {
continue
}
match := logRe.FindSubmatch(line)
if match == nil {
log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
}
var hash, logMessage = string(match[1]), string(match[2])
logMap[logMessage] = hash
logs = append(logs, logMessage)
}
return logMap, logs
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 0 {
log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
}
// v1.54.0
versionBytes, err := os.ReadFile("VERSION")
if err != nil {
log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
}
if versionBytes[0] == 'v' {
versionBytes = versionBytes[1:]
}
versionBytes = bytes.TrimSpace(versionBytes)
semver := semver.New(string(versionBytes))
stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
masterMap, masterLogs := readCommits(stable+".0", "master")
stableMap, _ := readCommits(stable+".0", stable+"-stable")
for _, logMessage := range masterLogs {
// Commit found in stable already
if _, found := stableMap[logMessage]; found {
continue
}
hash := masterMap[logMessage]
fmt.Printf("%s %s\n", hash, logMessage)
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/bin/check-merged.go | bin/check-merged.go | //go:build ignore
// Attempt to work out if branches have already been merged
package main
import (
"bufio"
"errors"
"flag"
"fmt"
"log"
"os"
"os/exec"
"regexp"
)
var (
// Flags
master = flag.String("master", "master", "Branch to work out if merged into")
version = "development version" // overridden by goreleaser
)
func usage() {
fmt.Fprintf(os.Stderr, `Usage: %s [options]
Version: %s
Attempt to work out if in the current git repo branches have been
merged into master.
Example usage:
%s
Full options:
`, os.Args[0], version, os.Args[0])
flag.PrintDefaults()
}
var (
printedSep = false
)
const (
sep1 = "============================================================"
sep2 = "------------------------------------------------------------"
)
// Show the diff between two git revisions
func gitDiffDiff(rev1, rev2 string) {
fmt.Printf("Diff of diffs of %q and %q\n", rev1, rev2)
cmd := exec.Command("bash", "-c", fmt.Sprintf(`diff <(git show "%s") <(git show "%s")`, rev1, rev2))
out, err := cmd.Output()
if err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) && exitErr.ExitCode() == 1 {
// OK just different
} else {
log.Fatalf("git diff failed: %#v", err)
}
}
_, _ = os.Stdout.Write(out)
}
var reCommit = regexp.MustCompile(`commit ([0-9a-f]{32,})`)
// Grep the git log for logLine
func gitLogGrep(branch, rev, logLine string) {
cmd := exec.Command("git", "log", "--grep", regexp.QuoteMeta(logLine), *master)
out, err := cmd.Output()
if err != nil {
log.Fatalf("git log grep failed: %v", err)
}
if len(out) > 0 {
if !printedSep {
fmt.Println(sep1)
printedSep = true
}
fmt.Printf("Branch: %s - MAY BE MERGED to %s\nLog: %s\n\n", branch, *master, logLine)
_, _ = os.Stdout.Write(out)
match := reCommit.FindSubmatch(out)
if len(match) != 0 {
commit := string(match[1])
fmt.Println(sep2)
gitDiffDiff(rev, commit)
}
fmt.Println(sep1)
}
}
// * b2-fix-download-url 4209c768a [gone] b2: fix transfers when using download_url
var reLine = regexp.MustCompile(`^[ *] (\S+)\s+([0-9a-f]+)\s+(?:\[[^]]+\] )?(.*)$`)
// Run git branch -v, parse the output and check it in the log
func gitBranch() {
cmd := exec.Command("git", "branch", "-v")
cmd.Stderr = os.Stderr
out, err := cmd.StdoutPipe()
if err != nil {
log.Fatalf("git branch pipe failed: %v", err)
}
if err := cmd.Start(); err != nil {
log.Fatalf("git branch failed: %v", err)
}
scanner := bufio.NewScanner(out)
for scanner.Scan() {
line := scanner.Text()
match := reLine.FindStringSubmatch(line)
if len(match) != 4 {
log.Printf("Invalid line %q", line)
continue
}
branch, rev, logLine := match[1], match[2], match[3]
if branch == *master {
continue
}
//fmt.Printf("branch = %q, rev = %q, logLine = %q\n", branch, rev, logLine)
gitLogGrep(branch, rev, logLine)
}
if err := scanner.Err(); err != nil {
log.Fatalf("failed reading git branch: %v", err)
}
if err := cmd.Wait(); err != nil {
log.Fatalf("git branch wait failed: %v", err)
}
}
func main() {
flag.Usage = usage
flag.Parse()
args := flag.Args()
if len(args) != 0 {
usage()
log.Fatal("Wrong number of arguments")
}
gitBranch()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/bin/resource_windows.go | bin/resource_windows.go | // Utility program to generate Rclone-specific Windows resource system object
// file (.syso), that can be picked up by a following go build for embedding
// version information and icon resources into a rclone binary.
//
// Run it with "go generate", or "go run" to be able to customize with
// command-line flags. Note that this program is intended to be run directly
// from its original location in the source tree: Default paths are absolute
// within the current source tree, which is convenient because it makes it
// oblivious to the working directory, and it gives identical result whether
// run by "go generate" or "go run", but it will not make sense if this
// program's source is moved out from the source tree.
//
// Can be used for rclone.exe (default), and other binaries such as
// librclone.dll (must be specified with flag -binary).
//
//go:generate go run resource_windows.go
//go:build tools
package main
import (
"flag"
"fmt"
"log"
"path"
"runtime"
"strings"
"github.com/coreos/go-semver/semver"
"github.com/josephspurrier/goversioninfo"
"github.com/rclone/rclone/fs"
)
func main() {
// Get path of directory containing the current source file to use for absolute path references within the code tree (as described above)
projectDir := ""
_, sourceFile, _, ok := runtime.Caller(0)
if ok {
projectDir = path.Dir(path.Dir(sourceFile)) // Root of the current project working directory
}
// Define flags
binary := flag.String("binary", "rclone.exe", `The name of the binary to generate resource for, e.g. "rclone.exe" or "librclone.dll"`)
arch := flag.String("arch", runtime.GOARCH, `Architecture of resource file, or the target GOARCH, "386", "amd64", "arm", or "arm64"`)
version := flag.String("version", fs.Version, "Version number or tag name")
icon := flag.String("icon", path.Join(projectDir, "graphics/logo/ico/logo_symbol_color.ico"), "Path to icon file to embed in an .exe binary")
dir := flag.String("dir", projectDir, "Path to output directory where to write the resulting system object file (.syso), with a default name according to -arch (resource_windows_<arch>.syso), only considered if not -syso is specified")
syso := flag.String("syso", "", "Path to output resource system object file (.syso) to be created/overwritten, ignores -dir")
// Parse command-line flags
flag.Parse()
// Handle default value for -file which depends on optional -dir and -arch
if *syso == "" {
// Use default filename, which includes target GOOS (hardcoded "windows")
// and GOARCH (from argument -arch) as suffix, to avoid any race conditions,
// and also this will be recognized by go build when it is consuming the
// .syso file and will only be used for builds with matching os/arch.
*syso = path.Join(*dir, fmt.Sprintf("resource_windows_%s.syso", *arch))
}
// Parse version/tag string argument as a SemVer
stringVersion := strings.TrimPrefix(*version, "v")
semanticVersion, err := semver.NewVersion(stringVersion)
if err != nil {
log.Fatalf("Invalid version number: %v", err)
}
// Extract binary extension
binaryExt := path.Ext(*binary)
// Create the version info configuration container
vi := &goversioninfo.VersionInfo{}
// FixedFileInfo
vi.FixedFileInfo.FileOS = "040004" // VOS_NT_WINDOWS32
if strings.EqualFold(binaryExt, ".exe") {
vi.FixedFileInfo.FileType = "01" // VFT_APP
} else if strings.EqualFold(binaryExt, ".dll") {
vi.FixedFileInfo.FileType = "02" // VFT_DLL
} else {
log.Fatalf("Specified binary must have extension .exe or .dll")
}
// FixedFileInfo.FileVersion
vi.FixedFileInfo.FileVersion.Major = int(semanticVersion.Major)
vi.FixedFileInfo.FileVersion.Minor = int(semanticVersion.Minor)
vi.FixedFileInfo.FileVersion.Patch = int(semanticVersion.Patch)
vi.FixedFileInfo.FileVersion.Build = 0
// FixedFileInfo.ProductVersion
vi.FixedFileInfo.ProductVersion.Major = int(semanticVersion.Major)
vi.FixedFileInfo.ProductVersion.Minor = int(semanticVersion.Minor)
vi.FixedFileInfo.ProductVersion.Patch = int(semanticVersion.Patch)
vi.FixedFileInfo.ProductVersion.Build = 0
// StringFileInfo
vi.StringFileInfo.CompanyName = "https://rclone.org"
vi.StringFileInfo.ProductName = "Rclone"
vi.StringFileInfo.FileDescription = "Rclone"
vi.StringFileInfo.InternalName = (*binary)[:len(*binary)-len(binaryExt)]
vi.StringFileInfo.OriginalFilename = *binary
vi.StringFileInfo.LegalCopyright = "The Rclone Authors"
vi.StringFileInfo.FileVersion = stringVersion
vi.StringFileInfo.ProductVersion = stringVersion
// Icon (only relevant for .exe, not .dll)
if *icon != "" && strings.EqualFold(binaryExt, ".exe") {
vi.IconPath = *icon
}
// Build native structures from the configuration data
vi.Build()
// Write the native structures as binary data to a buffer
vi.Walk()
// Write the binary data buffer to file
if err := vi.WriteSyso(*syso, *arch); err != nil {
log.Fatalf(`Failed to generate Windows %s resource system object file for %v with path "%v": %v`, *arch, *binary, *syso, err)
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/bin/test_independence.go | bin/test_independence.go | //go:build ignore
// Test that the tests in the suite passed in are independent
package main
import (
"flag"
"log"
"os"
"os/exec"
"regexp"
)
var matchLine = regexp.MustCompile(`(?m)^=== RUN\s*(TestIntegration/\S*)\s*$`)
// run the test pass in and grep out the test names
func findTests(packageToTest string) (tests []string) {
cmd := exec.Command("go", "test", "-v", packageToTest)
out, err := cmd.CombinedOutput()
if err != nil {
_, _ = os.Stderr.Write(out)
log.Fatal(err)
}
results := matchLine.FindAllSubmatch(out, -1)
if results == nil {
log.Fatal("No tests found")
}
for _, line := range results {
tests = append(tests, string(line[1]))
}
return tests
}
// run the test passed in with the -run passed in
func runTest(packageToTest string, testName string) {
cmd := exec.Command("go", "test", "-v", packageToTest, "-run", "^"+testName+"$")
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("%s FAILED ------------------", testName)
_, _ = os.Stderr.Write(out)
log.Printf("%s FAILED ------------------", testName)
} else {
log.Printf("%s OK", testName)
}
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 1 {
log.Fatalf("Syntax: %s <test_to_run>", os.Args[0])
}
packageToTest := args[0]
testNames := findTests(packageToTest)
// fmt.Printf("%s\n", testNames)
for _, testName := range testNames {
runTest(packageToTest, testName)
}
}
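// Example invocation (illustrative package path):
//
//	go run bin/test_independence.go ./backend/local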
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/bin/make_bisync_docs.go | bin/make_bisync_docs.go | //go:build ignore
package main
import (
"bytes"
"cmp"
"context"
"encoding/json"
"flag"
"fmt"
"os"
"path/filepath"
"slices"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest/runs"
"github.com/stretchr/testify/assert/yaml"
)
var path = flag.String("path", "./docs/content/", "root path")
const (
configFile = "fstest/test_all/config.yaml"
startListIgnores = "<!--- start list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
endListIgnores = "<!--- end list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
startListFailures = "<!--- start list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
endListFailures = "<!--- end list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
integrationTestsJSONURL = "https://pub.rclone.org/integration-tests/current/index.json"
integrationTestsHTMLURL = "https://pub.rclone.org/integration-tests/current/"
)
func main() {
err := replaceBetween(*path, startListIgnores, endListIgnores, getIgnores)
if err != nil {
fs.Errorf(*path, "error replacing ignores: %v", err)
}
err = replaceBetween(*path, startListFailures, endListFailures, getFailures)
if err != nil {
fs.Errorf(*path, "error replacing failures: %v", err)
}
}
// replaceBetween replaces the text between startSep and endSep with fn()
func replaceBetween(path, startSep, endSep string, fn func() (string, error)) error {
b, err := os.ReadFile(filepath.Join(path, "bisync.md"))
if err != nil {
return err
}
doc := string(b)
before, after, found := strings.Cut(doc, startSep)
if !found {
return fmt.Errorf("could not find: %v", startSep)
}
_, after, found = strings.Cut(after, endSep)
if !found {
return fmt.Errorf("could not find: %v", endSep)
}
replaceSection, err := fn()
if err != nil {
return err
}
newDoc := before + startSep + "\n" + strings.TrimSpace(replaceSection) + "\n" + endSep + after
err = os.WriteFile(filepath.Join(path, "bisync.md"), []byte(newDoc), 0777)
if err != nil {
return err
}
return nil
}
// getIgnores updates the list of ignores from config.yaml
func getIgnores() (string, error) {
config, err := parseConfig()
if err != nil {
return "", fmt.Errorf("failed to parse config: %v", err)
}
s := ""
slices.SortFunc(config.Backends, func(a, b runs.Backend) int {
return cmp.Compare(a.Remote, b.Remote)
})
for _, backend := range config.Backends {
include := false
if slices.Contains(backend.IgnoreTests, "cmd/bisync") {
include = true
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
}
for _, ignore := range backend.Ignore {
if strings.Contains(strings.ToLower(ignore), "bisync") {
if !include { // don't have header row yet
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
}
include = true
s += fmt.Sprintf(" - `%s`\n", ignore)
// TODO: might be neat to add a "reason" param displaying the reason the test is ignored
}
}
}
return s, nil
}
// getFailures updates the list of currently failing tests from the integration tests server
func getFailures() (string, error) {
var buf bytes.Buffer
err := operations.CopyURLToWriter(context.Background(), integrationTestsJSONURL, &buf)
if err != nil {
return "", err
}
r := runs.Report{}
err = json.Unmarshal(buf.Bytes(), &r)
if err != nil {
return "", fmt.Errorf("failed to unmarshal json: %v", err)
}
s := ""
for _, run := range r.Failed {
for i, t := range run.FailedTests {
if strings.Contains(strings.ToLower(t), "bisync") {
if i == 0 { // don't have header row yet
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(run.Remote, ":"), run.Backend)
}
url := integrationTestsHTMLURL + run.TrialName
url = url[:len(url)-5] + "1.txt" // numbers higher than 1 could change from night to night
s += fmt.Sprintf(" - [`%s`](%v)\n", t, url)
if i == 4 && len(run.FailedTests) > 5 { // stop after 5
s += fmt.Sprintf(" - [%v more](%v)\n", len(run.FailedTests)-5, integrationTestsHTMLURL)
break
}
}
}
}
s += fmt.Sprintf("- Updated: %v", r.DateTime)
return s, nil
}
// parseConfig reads and parses the config.yaml file
func parseConfig() (*runs.Config, error) {
d, err := os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("failed to read config file: %w", err)
}
config := &runs.Config{}
err = yaml.Unmarshal(d, &config)
if err != nil {
return nil, fmt.Errorf("failed to parse config file: %w", err)
}
return config, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/bin/get-github-release.go | bin/get-github-release.go | //go:build ignore
// Get the latest release from a github project
//
// If GITHUB_USER and GITHUB_TOKEN are set then these will be used to
// authenticate the request which is useful to avoid rate limits.
package main
import (
"archive/tar"
"compress/bzip2"
"compress/gzip"
"encoding/json"
"flag"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"time"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/net/html"
"golang.org/x/sys/unix"
)
var (
// Flags
install = flag.Bool("install", false, "Install the downloaded package using sudo dpkg -i.")
extract = flag.String("extract", "", "Extract the named executable from the .tar.gz and install into bindir.")
bindir = flag.String("bindir", defaultBinDir(), "Directory to install files downloaded with -extract.")
useAPI = flag.Bool("use-api", false, "Use the API for finding the release instead of scraping the page.")
// Globals
matchProject = regexp.MustCompile(`^([\w-]+)/([\w-]+)$`)
osAliases = map[string][]string{
"darwin": {"macos", "osx"},
}
archAliases = map[string][]string{
"amd64": {"x86_64"},
}
)
// A github release
//
// Made by pasting the JSON into https://mholt.github.io/json-to-go/
type Release struct {
URL string `json:"url"`
AssetsURL string `json:"assets_url"`
UploadURL string `json:"upload_url"`
HTMLURL string `json:"html_url"`
ID int `json:"id"`
TagName string `json:"tag_name"`
TargetCommitish string `json:"target_commitish"`
Name string `json:"name"`
Draft bool `json:"draft"`
Author struct {
Login string `json:"login"`
ID int `json:"id"`
AvatarURL string `json:"avatar_url"`
GravatarID string `json:"gravatar_id"`
URL string `json:"url"`
HTMLURL string `json:"html_url"`
FollowersURL string `json:"followers_url"`
FollowingURL string `json:"following_url"`
GistsURL string `json:"gists_url"`
StarredURL string `json:"starred_url"`
SubscriptionsURL string `json:"subscriptions_url"`
OrganizationsURL string `json:"organizations_url"`
ReposURL string `json:"repos_url"`
EventsURL string `json:"events_url"`
ReceivedEventsURL string `json:"received_events_url"`
Type string `json:"type"`
SiteAdmin bool `json:"site_admin"`
} `json:"author"`
Prerelease bool `json:"prerelease"`
CreatedAt time.Time `json:"created_at"`
PublishedAt time.Time `json:"published_at"`
Assets []struct {
URL string `json:"url"`
ID int `json:"id"`
Name string `json:"name"`
Label string `json:"label"`
Uploader struct {
Login string `json:"login"`
ID int `json:"id"`
AvatarURL string `json:"avatar_url"`
GravatarID string `json:"gravatar_id"`
URL string `json:"url"`
HTMLURL string `json:"html_url"`
FollowersURL string `json:"followers_url"`
FollowingURL string `json:"following_url"`
GistsURL string `json:"gists_url"`
StarredURL string `json:"starred_url"`
SubscriptionsURL string `json:"subscriptions_url"`
OrganizationsURL string `json:"organizations_url"`
ReposURL string `json:"repos_url"`
EventsURL string `json:"events_url"`
ReceivedEventsURL string `json:"received_events_url"`
Type string `json:"type"`
SiteAdmin bool `json:"site_admin"`
} `json:"uploader"`
ContentType string `json:"content_type"`
State string `json:"state"`
Size int `json:"size"`
DownloadCount int `json:"download_count"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
BrowserDownloadURL string `json:"browser_download_url"`
} `json:"assets"`
TarballURL string `json:"tarball_url"`
ZipballURL string `json:"zipball_url"`
Body string `json:"body"`
}
// checks if a path has write access
func writable(path string) bool {
return unix.Access(path, unix.W_OK) == nil
}
// Directory to install releases in by default
//
// Find writable directories on $PATH. Use $GOPATH/bin if that is on
// the path and writable, or use the first writable directory which is
// in $HOME, or failing that the first writable directory.
//
// Returns "" if none of the above were found
func defaultBinDir() string {
home := os.Getenv("HOME")
var (
bin string
homeBin string
goHomeBin string
gopath = os.Getenv("GOPATH")
)
for _, dir := range strings.Split(os.Getenv("PATH"), ":") {
if writable(dir) {
if strings.HasPrefix(dir, home) {
if homeBin != "" {
homeBin = dir
}
if gopath != "" && strings.HasPrefix(dir, gopath) && goHomeBin == "" {
goHomeBin = dir
}
}
if bin == "" {
bin = dir
}
}
}
if goHomeBin != "" {
return goHomeBin
}
if homeBin != "" {
return homeBin
}
return bin
}
// read the body or an error message
func readBody(in io.Reader) string {
data, err := io.ReadAll(in)
if err != nil {
return fmt.Sprintf("Error reading body: %v", err.Error())
}
return string(data)
}
// Get an asset URL and name
func getAsset(project string, matchName *regexp.Regexp) (string, string) {
url := "https://api.github.com/repos/" + project + "/releases/latest"
log.Printf("Fetching asset info for %q from %q", project, url)
user, pass := os.Getenv("GITHUB_USER"), os.Getenv("GITHUB_TOKEN")
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatalf("Failed to make http request %q: %v", url, err)
}
if user != "" && pass != "" {
log.Printf("Fetching using GITHUB_USER and GITHUB_TOKEN")
req.SetBasicAuth(user, pass)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
log.Fatalf("Failed to fetch release info %q: %v", url, err)
}
if resp.StatusCode != http.StatusOK {
log.Printf("Error: %s", readBody(resp.Body))
log.Fatalf("Bad status %d when fetching %q release info: %s", resp.StatusCode, url, resp.Status)
}
var release Release
err = json.NewDecoder(resp.Body).Decode(&release)
if err != nil {
log.Fatalf("Failed to decode release info: %v", err)
}
err = resp.Body.Close()
if err != nil {
log.Fatalf("Failed to close body: %v", err)
}
for _, asset := range release.Assets {
//log.Printf("Finding %s", asset.Name)
if matchName.MatchString(asset.Name) && isOurOsArch(asset.Name) {
return asset.BrowserDownloadURL, asset.Name
}
}
log.Fatalf("Didn't find asset in info")
return "", ""
}
// Get an asset URL and name by scraping the downloads page
//
// This doesn't use the API so isn't rate limited when not using GITHUB login details
func getAssetFromReleasesPage(project string, matchName *regexp.Regexp) (assetURL string, assetName string) {
baseURL := "https://github.com/" + project + "/releases"
log.Printf("Fetching asset info for %q from %q", project, baseURL)
base, err := url.Parse(baseURL)
if err != nil {
log.Fatalf("URL Parse failed: %v", err)
}
resp, err := http.Get(baseURL)
if err != nil {
log.Fatalf("Failed to fetch release info %q: %v", baseURL, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
log.Printf("Error: %s", readBody(resp.Body))
log.Fatalf("Bad status %d when fetching %q release info: %s", resp.StatusCode, baseURL, resp.Status)
}
doc, err := html.Parse(resp.Body)
if err != nil {
log.Fatalf("Failed to parse web page: %v", err)
}
var walk func(*html.Node)
walk = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
if name := path.Base(a.Val); matchName.MatchString(name) && isOurOsArch(name) {
if u, err := rest.URLJoin(base, a.Val); err == nil {
if assetName == "" {
assetName = name
assetURL = u.String()
}
}
}
break
}
}
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
walk(c)
}
}
walk(doc)
if assetName == "" || assetURL == "" {
log.Fatalf("Didn't find URL in page")
}
return assetURL, assetName
}
// isOurOsArch returns true if s contains our OS and our Arch
func isOurOsArch(s string) bool {
s = strings.ToLower(s)
check := func(base string, aliases map[string][]string) bool {
names := []string{base}
names = append(names, aliases[base]...)
for _, name := range names {
if strings.Contains(s, name) {
return true
}
}
return false
}
return check(runtime.GOARCH, archAliases) && check(runtime.GOOS, osAliases)
}
// get a file for download
func getFile(url, fileName string) {
log.Printf("Downloading %q from %q", fileName, url)
out, err := os.Create(fileName)
if err != nil {
log.Fatalf("Failed to open %q: %v", fileName, err)
}
resp, err := http.Get(url)
if err != nil {
log.Fatalf("Failed to fetch asset %q: %v", url, err)
}
if resp.StatusCode != http.StatusOK {
log.Printf("Error: %s", readBody(resp.Body))
log.Fatalf("Bad status %d when fetching %q asset: %s", resp.StatusCode, url, resp.Status)
}
n, err := io.Copy(out, resp.Body)
if err != nil {
log.Fatalf("Error while downloading: %v", err)
}
err = resp.Body.Close()
if err != nil {
log.Fatalf("Failed to close body: %v", err)
}
err = out.Close()
if err != nil {
log.Fatalf("Failed to close output file: %v", err)
}
log.Printf("Downloaded %q (%d bytes)", fileName, n)
}
// run a shell command
func run(args ...string) {
cmd := exec.Command(args[0], args[1:]...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
log.Fatalf("Failed to run %v: %v", args, err)
}
}
// Untars fileName from srcFile
func untar(srcFile, fileName, extractDir string) {
f, err := os.Open(srcFile)
if err != nil {
log.Fatalf("Couldn't open tar: %v", err)
}
defer func() {
err := f.Close()
if err != nil {
log.Fatalf("Couldn't close tar: %v", err)
}
}()
var in io.Reader = f
srcExt := filepath.Ext(srcFile)
if srcExt == ".gz" || srcExt == ".tgz" {
gzf, err := gzip.NewReader(f)
if err != nil {
log.Fatalf("Couldn't open gzip: %v", err)
}
in = gzf
} else if srcExt == ".bz2" {
in = bzip2.NewReader(f)
}
tarReader := tar.NewReader(in)
for {
header, err := tarReader.Next()
if err == io.EOF {
break
}
if err != nil {
log.Fatalf("Trouble reading tar file: %v", err)
}
name := header.Name
switch header.Typeflag {
case tar.TypeReg:
baseName := filepath.Base(name)
if baseName == fileName {
outPath := filepath.Join(extractDir, fileName)
out, err := os.OpenFile(outPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
if err != nil {
log.Fatalf("Couldn't open output file: %v", err)
}
n, err := io.Copy(out, tarReader)
if err != nil {
log.Fatalf("Couldn't write output file: %v", err)
}
if err = out.Close(); err != nil {
log.Fatalf("Couldn't close output: %v", err)
}
log.Printf("Wrote %s (%d bytes) as %q", fileName, n, outPath)
}
}
}
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 2 {
log.Fatalf("Syntax: %s <user/project> <name reg exp>", os.Args[0])
}
project, nameRe := args[0], args[1]
if !matchProject.MatchString(project) {
log.Fatalf("Project %q must be in form user/project", project)
}
matchName, err := regexp.Compile(nameRe)
if err != nil {
log.Fatalf("Invalid regexp for name %q: %v", nameRe, err)
}
var assetURL, assetName string
if *useAPI {
assetURL, assetName = getAsset(project, matchName)
} else {
assetURL, assetName = getAssetFromReleasesPage(project, matchName)
}
fileName := filepath.Join(os.TempDir(), assetName)
getFile(assetURL, fileName)
if *install {
log.Printf("Installing %s", fileName)
run("sudo", "dpkg", "--force-bad-version", "-i", fileName)
log.Printf("Installed %s", fileName)
} else if *extract != "" {
if *bindir == "" {
log.Fatalf("Need to set -bindir")
}
log.Printf("Unpacking %s from %s and installing into %s", *extract, fileName, *bindir)
untar(fileName, *extract, *bindir+"/")
}
}
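// Example invocation (illustrative project and asset pattern):
//
//	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar\.gz'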
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/systemd/notify.go | lib/systemd/notify.go | package systemd
import (
"fmt"
"sync"
"github.com/coreos/go-systemd/v22/daemon"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"
)
// Notify systemd that the service is ready. This returns a
// function which should be called to notify that the service is
// stopping. This function will be called on exit if the service exits
// on a signal.
// NOTE: this function should only be called once, and so it
// should generally only be used directly in a command's Run handler.
// It should not be called as a result of rc commands. See #7540.
func Notify() func() {
if _, err := daemon.SdNotify(false, daemon.SdNotifyReady); err != nil {
fs.Logf(nil, "failed to notify ready to systemd: %v", err)
}
var finaliseOnce sync.Once
finalise := func() {
finaliseOnce.Do(func() {
if _, err := daemon.SdNotify(false, daemon.SdNotifyStopping); err != nil {
fs.Logf(nil, "failed to notify stopping to systemd: %v", err)
}
})
}
finaliseHandle := atexit.Register(finalise)
return func() {
atexit.Unregister(finaliseHandle)
finalise()
}
}
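// Illustrative usage from a long-running command's Run handler (sketch, not
// taken from a specific command):
//
//	func run() {
//		defer systemd.Notify()()
//		// ... serve until shutdown ...
//	}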
// UpdateStatus updates the systemd status
func UpdateStatus(status string) error {
systemdStatus := fmt.Sprintf("STATUS=%s", status)
_, err := daemon.SdNotify(false, systemdStatus)
return err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/systemd/doc.go | lib/systemd/doc.go | // Package systemd contains utilities for communication with the systemd service manager.
package systemd
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/plugin/plugin.go | lib/plugin/plugin.go | //go:build (darwin || linux) && !gccgo
package plugin
import (
"fmt"
"os"
"path/filepath"
"plugin"
"strings"
)
func init() {
dir := os.Getenv("RCLONE_PLUGIN_PATH")
if dir == "" {
return
}
// Get file names of plugin dir
listing, err := os.ReadDir(dir)
if err != nil {
fmt.Fprintln(os.Stderr, "Failed to open plugin directory:", err)
}
// Enumerate file names, load valid plugins
for _, file := range listing {
// Match name
fileName := file.Name()
if !strings.HasPrefix(fileName, "librcloneplugin_") {
continue
}
if !strings.HasSuffix(fileName, ".so") {
continue
}
// Try to load plugin
_, err := plugin.Open(filepath.Join(dir, fileName))
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to load plugin %s: %s\n",
fileName, err)
}
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/plugin/package.go | lib/plugin/package.go | // Package plugin implements loading out-of-tree storage backends
// using https://golang.org/pkg/plugin/ on Linux and macOS.
//
// If the $RCLONE_PLUGIN_PATH is present, any Go plugins in that dir
// named like librcloneplugin_NAME.so will be loaded.
//
// To create a plugin, write the backend package like it was in-tree
// but set the package name to "main". Then, build the plugin with
//
// go build -buildmode=plugin -o librcloneplugin_NAME.so
//
// where NAME equals the plugin's fs.RegInfo.Name.
package plugin
// Build for plugin for unsupported platforms to stop go complaining
// about "no buildable Go source files".
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/env/env_test.go | lib/env/env_test.go | package env
import (
"os"
"path/filepath"
"testing"
homedir "github.com/mitchellh/go-homedir"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestShellExpand(t *testing.T) {
home, err := homedir.Dir()
require.NoError(t, err)
require.NoError(t, os.Setenv("EXPAND_TEST", "potato"))
defer func() {
require.NoError(t, os.Unsetenv("EXPAND_TEST"))
}()
for _, test := range []struct {
in, want string
}{
{"", ""},
{"~", filepath.FromSlash(home)},
{filepath.FromSlash("~/dir/file.txt"), filepath.FromSlash(home + "/dir/file.txt")},
{filepath.FromSlash("/dir/~/file.txt"), filepath.FromSlash("/dir/~/file.txt")},
{filepath.FromSlash("~/${EXPAND_TEST}"), filepath.FromSlash(home + "/potato")},
} {
got := ShellExpand(test.in)
assert.Equal(t, test.want, got, test.in)
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/env/env.go | lib/env/env.go | // Package env contains functions for dealing with environment variables
package env
import (
"os"
"os/user"
homedir "github.com/mitchellh/go-homedir"
)
// ShellExpandHelp describes what ShellExpand does for inclusion into help
const ShellExpandHelp = "\n\nLeading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`."
// ShellExpand replaces a leading "~" with the home directory and
// expands all environment variables afterwards.
func ShellExpand(s string) string {
if s != "" {
if s[0] == '~' {
newS, err := homedir.Expand(s)
if err == nil {
s = newS
}
}
s = os.ExpandEnv(s)
}
return s
}
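// For example (illustrative values, assuming HOME=/home/alice and
// RCLONE_CONFIG_DIR=/home/alice/.config/rclone):
//
//	ShellExpand("~/backups")                        // "/home/alice/backups"
//	ShellExpand("${RCLONE_CONFIG_DIR}/rclone.conf") // "/home/alice/.config/rclone/rclone.conf"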
// CurrentUser finds the current user name or "" if not found
func CurrentUser() (userName string) {
userName = os.Getenv("USER")
// If we are making docs just use $USER
if userName == "$USER" {
return userName
}
// Try reading using the OS
usr, err := user.Current()
if err == nil {
return usr.Username
}
// Fall back to reading $USER then $LOGNAME
if userName != "" {
return userName
}
return os.Getenv("LOGNAME")
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/oauthutil/oauthutil.go | lib/oauthutil/oauthutil.go | // Package oauthutil provides OAuth utilities.
package oauthutil
import (
"context"
"encoding/json"
"errors"
"fmt"
"html/template"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/random"
"github.com/skratchdot/open-golang/open"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
)
var (
// templateString is the template used in the authorization webserver
templateString string
)
const (
// TitleBarRedirectURL is the OAuth2 redirect URL to use when the authorization
// code should be returned in the title bar of the browser, with the page text
// prompting the user to copy the code and paste it in the application.
TitleBarRedirectURL = "urn:ietf:wg:oauth:2.0:oob"
// bindPort is the port that we bind the local webserver to
bindPort = "53682"
// bindAddress is binding for local webserver when active
bindAddress = "127.0.0.1:" + bindPort
// RedirectURL is redirect to local webserver when active
RedirectURL = "http://" + bindAddress + "/"
// RedirectPublicURL is redirect to local webserver when active with public name
RedirectPublicURL = "http://localhost.rclone.org:" + bindPort + "/"
// RedirectLocalhostURL is redirect to local webserver when active with localhost
RedirectLocalhostURL = "http://localhost:" + bindPort + "/"
// RedirectPublicSecureURL is a public https URL which
// redirects to the local webserver
RedirectPublicSecureURL = "https://oauth.rclone.org/"
// DefaultAuthResponseTemplate is the default template used in the authorization webserver
DefaultAuthResponseTemplate = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ if .OK }}Success!{{ else }}Failure!{{ end }}</title>
</head>
<body>
<h1>{{ if .OK }}Success!{{ else }}Failure!{{ end }}</h1>
<hr>
<pre style="width: 750px; white-space: pre-wrap;">
{{ if eq .OK false }}
Error: {{ .Name }}<br>
{{ if .Description }}Description: {{ .Description }}<br>{{ end }}
{{ if .Code }}Code: {{ .Code }}<br>{{ end }}
{{ if .HelpURL }}Look here for help: <a href="{{ .HelpURL }}">{{ .HelpURL }}</a><br>{{ end }}
{{ else }}
All done. Please go back to rclone.
{{ end }}
</pre>
</body>
</html>
`
)
// OpenURL is used when rclone wants to open a browser window
// for user authentication. It defaults to something which
// should work for most uses, but may be overridden.
var OpenURL = open.Start
// Config - structure that we will use to store the OAuth configuration
// settings. This is based on the union of the configuration structures for the two
// OAuth modules that we are using (oauth2 and oauth2/clientcredentials), along with a
// flag indicating if we are going to use the client credential flow
type Config struct {
ClientID string
ClientSecret string
TokenURL string
AuthURL string
Scopes []string
EndpointParams url.Values
RedirectURL string
ClientCredentialFlow bool
AuthStyle oauth2.AuthStyle
}
// MakeOauth2Config makes an oauth2.Config from our config
func (conf *Config) MakeOauth2Config() *oauth2.Config {
return &oauth2.Config{
ClientID: conf.ClientID,
ClientSecret: conf.ClientSecret,
RedirectURL: conf.RedirectURL,
Scopes: conf.Scopes,
Endpoint: oauth2.Endpoint{
AuthURL: conf.AuthURL,
TokenURL: conf.TokenURL,
AuthStyle: conf.AuthStyle,
},
}
}
// MakeClientCredentialsConfig makes a clientcredentials.Config from our config
func (conf *Config) MakeClientCredentialsConfig() *clientcredentials.Config {
return &clientcredentials.Config{
ClientID: conf.ClientID,
ClientSecret: conf.ClientSecret,
Scopes: conf.Scopes,
TokenURL: conf.TokenURL,
AuthStyle: conf.AuthStyle,
// EndpointParams url.Values
}
}
// SharedOptions are shared between backends that utilize an OAuth flow
var SharedOptions = []fs.Option{{
Name: config.ConfigClientID,
Help: "OAuth Client Id.\n\nLeave blank normally.",
Sensitive: true,
}, {
Name: config.ConfigClientSecret,
Help: "OAuth Client Secret.\n\nLeave blank normally.",
Sensitive: true,
}, {
Name: config.ConfigToken,
Help: "OAuth Access Token as a JSON blob.",
Advanced: true,
Sensitive: true,
}, {
Name: config.ConfigAuthURL,
Help: "Auth server URL.\n\nLeave blank to use the provider defaults.",
Advanced: true,
}, {
Name: config.ConfigTokenURL,
Help: "Token server url.\n\nLeave blank to use the provider defaults.",
Advanced: true,
}, {
Name: config.ConfigClientCredentials,
Default: false,
Help: "Use client credentials OAuth flow.\n\nThis will use the OAUTH2 client Credentials Flow as described in RFC 6749.\n\nNote that this option is NOT supported by all backends.",
Advanced: true,
}}
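// Illustrative sketch (editor's addition, not rclone code): a backend that
// uses the OAuth flow would typically append these shared options when it
// registers itself, roughly like the following (the backend name and extra
// options here are hypothetical):
//
//	fs.Register(&fs.RegInfo{
//		Name:    "example",
//		Options: append(oauthutil.SharedOptions, []fs.Option{ /* backend specific */ }...),
//	})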
// oldToken contains an end-user's tokens.
// This is the data you must store to persist authentication.
//
// From the original code.google.com/p/goauth2/oauth package - used
// for backwards compatibility in the rclone config file
type oldToken struct {
AccessToken string
RefreshToken string
Expiry time.Time
}
// GetToken returns the token saved in the config file under
// section name.
func GetToken(name string, m configmap.Mapper) (*oauth2.Token, error) {
tokenString, ok := m.Get(config.ConfigToken)
if !ok || tokenString == "" {
return nil, fmt.Errorf("empty token found - please run \"rclone config reconnect %s:\"", name)
}
token := new(oauth2.Token)
err := json.Unmarshal([]byte(tokenString), token)
if err != nil {
return nil, err
}
// if has data then return it
if token.AccessToken != "" {
return token, nil
}
// otherwise try parsing as oldToken
oldtoken := new(oldToken)
err = json.Unmarshal([]byte(tokenString), oldtoken)
if err != nil {
return nil, err
}
// Fill in result into new token
token.AccessToken = oldtoken.AccessToken
token.RefreshToken = oldtoken.RefreshToken
token.Expiry = oldtoken.Expiry
// Save new format in config file
err = PutToken(name, m, token, false)
if err != nil {
return nil, err
}
return token, nil
}
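// Illustrative sketch (editor's addition): the two token layouts GetToken
// accepts, with made-up values. The current format is a marshalled
// oauth2.Token; the legacy goauth2 layout is converted and re-saved via
// PutToken:
//
//	{"access_token":"xxx","token_type":"Bearer","refresh_token":"yyy","expiry":"2026-01-01T00:00:00Z"}
//	{"AccessToken":"xxx","RefreshToken":"yyy","Expiry":"2026-01-01T00:00:00Z"}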
// PutToken stores the token in the config file
//
// This saves the config file if it changes
func PutToken(name string, m configmap.Mapper, token *oauth2.Token, newSection bool) error {
tokenBytes, err := json.Marshal(token)
if err != nil {
return err
}
tokenString := string(tokenBytes)
old, ok := m.Get(config.ConfigToken)
if !ok || tokenString != old {
m.Set(config.ConfigToken, tokenString)
fs.Debugf(name, "Saved new token in config file")
}
return nil
}
// TokenSource stores updated tokens in the config file
type TokenSource struct {
mu sync.Mutex
name string
m configmap.Mapper
tokenSource oauth2.TokenSource
token *oauth2.Token
config *Config
ctx context.Context
expiryTimer *time.Timer // signals whenever the token expires
}
// If token has expired then first try re-reading it (and its refresh token)
// from the config file in case a concurrently running rclone has updated them
// already.
// Returns whether either of the two tokens has been reread.
func (ts *TokenSource) reReadToken() (changed bool) {
tokenString, found := ts.m.Get(config.ConfigToken)
if !found || tokenString == "" {
fs.Debugf(ts.name, "Failed to read token out of config file")
return false
}
newToken := new(oauth2.Token)
err := json.Unmarshal([]byte(tokenString), newToken)
if err != nil {
fs.Debugf(ts.name, "Failed to parse token out of config file: %v", err)
return false
}
if newToken.Valid() {
fs.Debugf(ts.name, "Loaded fresh token from config file")
changed = true
}
if newToken.RefreshToken != "" && newToken.RefreshToken != ts.token.RefreshToken {
fs.Debugf(ts.name, "Loaded new refresh token from config file")
changed = true
}
if changed {
ts.token = newToken
ts.tokenSource = nil // invalidate since we changed the token
} else {
fs.Debugf(ts.name, "No updated token found in the config file")
}
return changed
}
type retrieveErrResponse struct {
Error string `json:"error"`
}
// If err is nil or an error other than fatal OAuth errors, returns err itself.
// Otherwise returns a more user-friendly error.
func maybeWrapOAuthError(err error, remoteName string) (newErr error) {
newErr = err
if rErr, ok := err.(*oauth2.RetrieveError); ok {
if rErr.Response.StatusCode == 400 || rErr.Response.StatusCode == 401 {
fs.Debugf(remoteName, "got fatal oauth error: %v", rErr)
var resp retrieveErrResponse
if err = json.Unmarshal(rErr.Body, &resp); err != nil {
newErr = fmt.Errorf("(can't decode error info) - try refreshing token with \"rclone config reconnect %s:\"", remoteName)
return
}
var suggestion string
switch resp.Error {
case "invalid_client", "unauthorized_client", "unsupported_grant_type", "invalid_scope":
suggestion = "if you're using your own client id/secret, make sure they're properly set up following the docs"
case "invalid_grant":
fallthrough
default:
suggestion = fmt.Sprintf("maybe token expired? - try refreshing with \"rclone config reconnect %s:\"", remoteName)
}
newErr = fmt.Errorf("%s: %s", resp.Error, suggestion)
}
}
return
}
// Token returns a token or an error.
// Token must be safe for concurrent use by multiple goroutines.
// The returned Token must not be modified.
//
// This saves the token in the config file if it has changed
func (ts *TokenSource) Token() (*oauth2.Token, error) {
ts.mu.Lock()
defer ts.mu.Unlock()
var (
token *oauth2.Token
err error
changed = false
)
const maxTries = 5
// If we have a cached valid token, use that
if ts.token.Valid() {
return ts.token, nil
}
fs.Debug(ts.name, "Token expired")
// Try getting the token a few times
for i := 1; i <= maxTries; i++ {
// Try reading the token from the config file in case it has
// been updated by a concurrent rclone process
if !ts.token.Valid() {
if ts.reReadToken() {
changed = true
} else if !ts.config.ClientCredentialFlow && ts.token.RefreshToken == "" {
return nil, fserrors.FatalError(
fmt.Errorf("token expired and there's no refresh token - manually refresh with \"rclone config reconnect %s:\"", ts.name),
)
}
}
// Make a new token source if required
if ts.tokenSource == nil {
if ts.config.ClientCredentialFlow {
ts.tokenSource = ts.config.MakeClientCredentialsConfig().TokenSource(ts.ctx)
} else {
ts.tokenSource = ts.config.MakeOauth2Config().TokenSource(ts.ctx, ts.token)
}
}
token, err = ts.tokenSource.Token()
if err == nil {
fs.Debug(ts.name, "Token refresh successful")
break
}
if newErr := maybeWrapOAuthError(err, ts.name); newErr != err {
err = newErr // Fatal OAuth error
break
}
fs.Debugf(ts.name, "Token refresh failed try %d/%d: %v", i, maxTries, err)
time.Sleep(1 * time.Second)
}
if err != nil {
return nil, fmt.Errorf("couldn't fetch token: %w", err)
}
changed = changed || ts.token == nil || token.AccessToken != ts.token.AccessToken || token.RefreshToken != ts.token.RefreshToken || token.Expiry != ts.token.Expiry
ts.token = token
if changed {
// Bump on the expiry timer if it is set
if ts.expiryTimer != nil {
ts.expiryTimer.Reset(ts.timeToExpiry())
}
err = PutToken(ts.name, ts.m, token, false)
if err != nil {
return nil, fmt.Errorf("couldn't store token: %w", err)
}
}
return token, nil
}
// Invalidate invalidates the token
func (ts *TokenSource) Invalidate() {
ts.mu.Lock()
ts.token.AccessToken = ""
ts.mu.Unlock()
}
// Expire marks the token as expired
//
// This also marks the token in the config file as expired, if it is the same one
func (ts *TokenSource) Expire() error {
ts.mu.Lock()
defer ts.mu.Unlock()
ts.token.Expiry = time.Now().Add(time.Hour * (-1)) // expire token
t, err := GetToken(ts.name, ts.m)
if err != nil {
return err
}
if t.AccessToken == ts.token.AccessToken {
err = PutToken(ts.name, ts.m, ts.token, false)
}
return err
}
// timeToExpiry returns how long until the token expires
//
// Call with the lock held
func (ts *TokenSource) timeToExpiry() time.Duration {
t := ts.token
if t == nil {
return 0
}
if t.Expiry.IsZero() {
return 3e9 * time.Second // ~95 years
}
return time.Until(t.Expiry)
}
// OnExpiry returns a channel which has the time written to it when
// the token expires. Note that there is only one channel so if
// attaching multiple goroutines it will only signal to one of them.
func (ts *TokenSource) OnExpiry() <-chan time.Time {
ts.mu.Lock()
defer ts.mu.Unlock()
if ts.expiryTimer == nil {
ts.expiryTimer = time.NewTimer(ts.timeToExpiry())
}
return ts.expiryTimer.C
}
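// Illustrative sketch (editor's addition): since OnExpiry returns a single
// shared channel, only one goroutine should consume it, for example:
//
//	go func() {
//		for range ts.OnExpiry() {
//			// token expired - refresh it, e.g. by calling ts.Token()
//		}
//	}()
//
// Renew in lib/oauthutil/renew.go implements this pattern.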
// Check interface satisfied
var _ oauth2.TokenSource = (*TokenSource)(nil)
// Context returns a context with our HTTP Client baked in for oauth2
func Context(ctx context.Context, client *http.Client) context.Context {
return context.WithValue(ctx, oauth2.HTTPClient, client)
}
// OverrideCredentials sets the ClientID and ClientSecret from the
// config file if they are not blank.
// If any value is overridden, true is returned.
// The origConfig is copied.
func OverrideCredentials(name string, m configmap.Mapper, origConfig *Config) (newConfig *Config, changed bool) {
newConfig = new(Config)
*newConfig = *origConfig
changed = false
ClientID, ok := m.Get(config.ConfigClientID)
if ok && ClientID != "" {
newConfig.ClientID = ClientID
// Clear out any existing client secret since the ID changed.
// (otherwise it's impossible for a config to clear the secret)
newConfig.ClientSecret = ""
changed = true
}
ClientSecret, ok := m.Get(config.ConfigClientSecret)
if ok && ClientSecret != "" {
newConfig.ClientSecret = ClientSecret
changed = true
}
AuthURL, ok := m.Get(config.ConfigAuthURL)
if ok && AuthURL != "" {
newConfig.AuthURL = AuthURL
changed = true
}
TokenURL, ok := m.Get(config.ConfigTokenURL)
if ok && TokenURL != "" {
newConfig.TokenURL = TokenURL
changed = true
}
ClientCredentialStr, ok := m.Get(config.ConfigClientCredentials)
if ok && ClientCredentialStr != "" {
ClientCredential, err := strconv.ParseBool(ClientCredentialStr)
if err != nil {
fs.Errorf(nil, "Invalid setting for %q: %v", config.ConfigClientCredentials, err)
} else {
newConfig.ClientCredentialFlow = ClientCredential
}
changed = true
}
return newConfig, changed
}
// NewClientWithBaseClient gets a token from the config file and
// configures a Client with it. It returns the client and a
// TokenSource which Invalidate may need to be called on. It uses the
// httpClient passed in as the base client.
func NewClientWithBaseClient(ctx context.Context, name string, m configmap.Mapper, config *Config, baseClient *http.Client) (*http.Client, *TokenSource, error) {
config, _ = OverrideCredentials(name, m, config)
token, err := GetToken(name, m)
if err != nil && !config.ClientCredentialFlow {
return nil, nil, err
}
// Set our own http client in the context
ctx = Context(ctx, baseClient)
// Wrap the TokenSource in our TokenSource which saves changed
// tokens in the config file
ts := &TokenSource{
name: name,
m: m,
token: token,
config: config,
ctx: ctx,
}
return oauth2.NewClient(ctx, ts), ts, nil
}
// NewClientCredentialsClient creates a new OAuth module using the
// ClientCredential flow
func NewClientCredentialsClient(ctx context.Context, name string, m configmap.Mapper, oauthConfig *Config, baseClient *http.Client) (*http.Client, *TokenSource, error) {
oauthConfig, _ = OverrideCredentials(name, m, oauthConfig)
token, _ := GetToken(name, m)
// If the token doesn't exist then we will fetch one in the next step as we don't need a refresh token
// Set our own http client in the context
ctx = Context(ctx, baseClient)
// Wrap the TokenSource in our TokenSource which saves changed
// tokens in the config file
ts := &TokenSource{
name: name,
m: m,
token: token,
config: oauthConfig,
ctx: ctx,
}
return oauth2.NewClient(ctx, ts), ts, nil
}
// NewClient gets a token from the config file and configures a Client
// with it. It returns the client and a TokenSource which Invalidate
// may need to be called on
func NewClient(ctx context.Context, name string, m configmap.Mapper, oauthConfig *Config) (*http.Client, *TokenSource, error) {
// Check whether we are using the client credentials flow
if oauthConfig.ClientCredentialFlow {
return NewClientCredentialsClient(ctx, name, m, oauthConfig, fshttp.NewClient(ctx))
}
return NewClientWithBaseClient(ctx, name, m, oauthConfig, fshttp.NewClient(ctx))
}
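// Illustrative sketch (editor's addition, hypothetical variable names): a
// backend would normally wire this up as:
//
//	oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
//	if err != nil {
//		return nil, fmt.Errorf("failed to configure OAuth client: %w", err)
//	}
//	// use oAuthClient for API calls; keep ts for Invalidate/Expire/OnExpiry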
// AuthResult is returned from the web server after authorization
// success or failure
type AuthResult struct {
OK bool // Failure or Success?
Name string
Description string
Code string
HelpURL string
Form url.Values // the complete contents of the form
Err error // any underlying error to report
}
// Error satisfies the error interface so AuthResult can be used as an error
func (ar *AuthResult) Error() string {
status := "Error"
if ar.OK {
status = "OK"
}
return fmt.Sprintf("%s: %s\nCode: %q\nDescription: %s\nHelp: %s",
status, ar.Name, ar.Code, ar.Description, ar.HelpURL)
}
// CheckAuthFn is called when a good Auth has been received
type CheckAuthFn func(*Config, *AuthResult) error
// Options for the oauth config
type Options struct {
OAuth2Config *Config // Basic config for oauth2
NoOffline bool // If set then "access_type=offline" parameter is not passed
CheckAuth CheckAuthFn // When the AuthResult is known the checkAuth function is called if set
OAuth2Opts []oauth2.AuthCodeOption // extra oauth2 options
StateBlankOK bool // If set, state returned as "" is deemed to be OK
}
// ConfigOut returns a config item suitable for the backend config
//
// state is the place to return the config to
// oAuth is the config to run the oauth with
func ConfigOut(state string, oAuth *Options) (*fs.ConfigOut, error) {
return &fs.ConfigOut{
State: state,
OAuth: oAuth,
}, nil
}
// ConfigOAuth does the oauth config specified in the config block
//
// This is called with a state which has pushed on it
//
// state prefixed with "*oauth"
// state for oauth to return to
// state that returned the OAuth when we wish to recall it
// value that returned the OAuth
func ConfigOAuth(ctx context.Context, name string, m configmap.Mapper, ri *fs.RegInfo, in fs.ConfigIn) (*fs.ConfigOut, error) {
stateParams, state := fs.StatePop(in.State)
// Make the next state
newState := func(state string) string {
return fs.StatePush(stateParams, state)
}
// Recall the Oauth state again by calling the Config with the same input again
getOAuth := func() (opt *Options, err error) {
tmpState, _ := fs.StatePop(stateParams)
tmpState, State := fs.StatePop(tmpState)
_, Result := fs.StatePop(tmpState)
out, err := ri.Config(ctx, name, m, fs.ConfigIn{State: State, Result: Result})
if err != nil {
return nil, err
}
if out.OAuth == nil {
return nil, errors.New("failed to recall OAuth state")
}
opt, ok := out.OAuth.(*Options)
if !ok {
return nil, fmt.Errorf("internal error: oauth failed: wrong type in config: %T", out.OAuth)
}
if opt.OAuth2Config == nil {
return nil, errors.New("internal error: oauth failed: OAuth2Config not set")
}
return opt, nil
}
switch state {
case "*oauth":
// See if already have a token
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
return fs.ConfigConfirm(newState("*oauth-confirm"), true, "config_refresh_token", "Already have a token - refresh?")
}
return fs.ConfigGoto(newState("*oauth-confirm"))
case "*oauth-confirm":
if in.Result == "false" {
return fs.ConfigGoto(newState("*oauth-done"))
}
opt, err := getOAuth()
if err != nil {
return nil, err
}
oauthConfig, _ := OverrideCredentials(name, m, opt.OAuth2Config)
if oauthConfig.ClientCredentialFlow {
// If using client credential flow, skip straight to getting the token since we don't need a browser
return fs.ConfigGoto(newState("*oauth-do"))
}
return fs.ConfigConfirm(newState("*oauth-islocal"), true, "config_is_local", "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n")
case "*oauth-islocal":
if in.Result == "true" {
return fs.ConfigGoto(newState("*oauth-do"))
}
return fs.ConfigGoto(newState("*oauth-remote"))
case "*oauth-remote":
opt, err := getOAuth()
if err != nil {
return nil, err
}
if noWebserverNeeded(opt.OAuth2Config) {
authURL, _, err := getAuthURL(name, m, opt.OAuth2Config, opt)
if err != nil {
return nil, err
}
return fs.ConfigInput(newState("*oauth-do"), "config_verification_code", fmt.Sprintf("Verification code\n\nGo to this URL, authenticate then paste the code here.\n\n%s\n", authURL))
}
var out strings.Builder
fmt.Fprintf(&out, `For this to work, you will need rclone available on a machine that has
a web browser.
For more help and alternate methods see: https://rclone.org/remote_setup/
Execute the following on the machine with the web browser (same rclone
version recommended):
`)
// Find the overridden options
inM := ri.Options.NonDefault(m)
delete(inM, fs.ConfigToken) // delete token as we are refreshing it
for k, v := range inM {
fs.Debugf(nil, "sending %s = %q", k, v)
}
// Encode them into a string
mCopyString, err := inM.Encode()
if err != nil {
return nil, fmt.Errorf("oauthutil authorize encode: %w", err)
}
// Write what the user has to do
if len(mCopyString) > 0 {
fmt.Fprintf(&out, "\trclone authorize %q %q\n", ri.Name, mCopyString)
} else {
fmt.Fprintf(&out, "\trclone authorize %q\n", ri.Name)
}
fmt.Fprintln(&out, "\nThen paste the result.")
return fs.ConfigInput(newState("*oauth-authorize"), "config_token", out.String())
case "*oauth-authorize":
// Read the updates to the config
outM := configmap.Simple{}
token := oauth2.Token{}
code := in.Result
newFormat := true
err := outM.Decode(code)
if err != nil {
newFormat = false
err = json.Unmarshal([]byte(code), &token)
}
if err != nil {
return fs.ConfigError(newState("*oauth-authorize"), fmt.Sprintf("Couldn't decode response - try again (make sure you are using a matching version of rclone on both sides): %v\n", err))
}
// Save the config updates
if newFormat {
for k, v := range outM {
m.Set(k, v)
fs.Debugf(nil, "received %s = %q", k, v)
}
} else {
m.Set(fs.ConfigToken, code)
}
return fs.ConfigGoto(newState("*oauth-done"))
case "*oauth-do":
// Make sure we can read the HTML template file if it was specified.
configTemplateFile, _ := m.Get("config_template_file")
configTemplateString, _ := m.Get("config_template")
if configTemplateFile != "" {
dat, err := os.ReadFile(configTemplateFile)
if err != nil {
return nil, fmt.Errorf("failed to read template file: %w", err)
}
templateString = string(dat)
} else if configTemplateString != "" {
templateString = configTemplateString
} else {
templateString = DefaultAuthResponseTemplate
}
code := in.Result
opt, err := getOAuth()
if err != nil {
return nil, err
}
oauthConfig, changed := OverrideCredentials(name, m, opt.OAuth2Config)
if changed {
fs.Logf(nil, "Make sure your Redirect URL is set to %q in your custom config.\n", oauthConfig.RedirectURL)
}
if oauthConfig.ClientCredentialFlow {
err = clientCredentialsFlowGetToken(ctx, name, m, oauthConfig, opt)
if err != nil {
return nil, err
}
} else {
if code == "" {
oauthConfig = fixRedirect(oauthConfig)
code, err = configSetup(ctx, ri.Name, name, m, oauthConfig, opt)
if err != nil {
return nil, fmt.Errorf("config failed to refresh token: %w", err)
}
}
err = configExchange(ctx, name, m, oauthConfig, code)
if err != nil {
return nil, err
}
}
return fs.ConfigGoto(newState("*oauth-done"))
case "*oauth-done":
// Return to the state indicated in the State stack
_, returnState := fs.StatePop(stateParams)
return fs.ConfigGoto(returnState)
}
return nil, fmt.Errorf("unknown internal oauth state %q", state)
}
func init() {
// Set the function to avoid circular import
fs.ConfigOAuth = ConfigOAuth
}
// Return true if can run without a webserver and just entering a code
func noWebserverNeeded(oauthConfig *Config) bool {
return oauthConfig.RedirectURL == TitleBarRedirectURL
}
// get the URL we need to send the user to
func getAuthURL(name string, m configmap.Mapper, oauthConfig *Config, opt *Options) (authURL string, state string, err error) {
oauthConfig, _ = OverrideCredentials(name, m, oauthConfig)
// Make random state
state, err = random.Password(128)
if err != nil {
return "", "", err
}
// Create the configuration required for the OAuth flow
oauth2Conf := oauthConfig.MakeOauth2Config()
// Generate oauth URL
opts := opt.OAuth2Opts
if !opt.NoOffline {
opts = append(opts, oauth2.AccessTypeOffline)
}
authURL = oauth2Conf.AuthCodeURL(state, opts...)
return authURL, state, nil
}
// If TitleBarRedirectURL is set but we are doing a real oauth, then
// override our redirect URL
func fixRedirect(oauthConfig *Config) *Config {
switch oauthConfig.RedirectURL {
case TitleBarRedirectURL:
// copy the config and set to use the internal webserver
configCopy := *oauthConfig
oauthConfig = &configCopy
oauthConfig.RedirectURL = RedirectURL
}
return oauthConfig
}
// clientCredentialsFlowGetToken does the initial creation of the token for the client credentials flow
//
// If opt is nil it will use the default Options.
func clientCredentialsFlowGetToken(ctx context.Context, name string, m configmap.Mapper, oauthConfig *Config, opt *Options) error {
if opt == nil {
opt = &Options{}
}
_ = opt // not currently using the Options
fs.Debugf(nil, "Getting token for client credentials flow")
_, tokenSource, err := NewClientCredentialsClient(ctx, name, m, oauthConfig, fshttp.NewClient(ctx))
if err != nil {
return fmt.Errorf("client credentials flow: failed to make client: %w", err)
}
// Get the token and save it in the config file
_, err = tokenSource.Token()
if err != nil {
return fmt.Errorf("client credentials flow: failed to get token: %w", err)
}
return nil
}
// configSetup does the initial creation of the token
//
// If opt is nil it will use the default Options.
//
// It will run an internal webserver to receive the results
func configSetup(ctx context.Context, id, name string, m configmap.Mapper, oauthConfig *Config, opt *Options) (string, error) {
if opt == nil {
opt = &Options{}
}
authorizeNoAutoBrowserValue, ok := m.Get(config.ConfigAuthNoBrowser)
authorizeNoAutoBrowser := ok && authorizeNoAutoBrowserValue != ""
authURL, state, err := getAuthURL(name, m, oauthConfig, opt)
if err != nil {
return "", err
}
// Prepare webserver
server := newAuthServer(opt, bindAddress, state, authURL)
err = server.Init()
if err != nil {
return "", fmt.Errorf("failed to start auth webserver: %w", err)
}
go server.Serve()
defer server.Stop()
authURL = "http://" + bindAddress + "/auth?state=" + state
if !authorizeNoAutoBrowser {
// Open the URL for the user to visit
err := OpenURL(authURL)
if err != nil {
fs.Errorf(nil, "Failed to open browser automatically (%v) - please go to the following link: %s\n", err, authURL)
} else {
fs.Logf(nil, "If your browser doesn't open automatically go to the following link: %s\n", authURL)
}
} else {
fs.Logf(nil, "Please go to the following link: %s\n", authURL)
}
fs.Logf(nil, "Log in and authorize rclone for access\n")
// Read the code via the webserver
fs.Logf(nil, "Waiting for code...\n")
auth := <-server.result
if !auth.OK || auth.Code == "" {
return "", auth
}
fs.Logf(nil, "Got code\n")
if opt.CheckAuth != nil {
err = opt.CheckAuth(oauthConfig, auth)
if err != nil {
return "", err
}
}
return auth.Code, nil
}
// Exchange the code for a token
func configExchange(ctx context.Context, name string, m configmap.Mapper, oauthConfig *Config, code string) error {
ctx = Context(ctx, fshttp.NewClient(ctx))
// Create the configuration required for the OAuth flow
oauth2Conf := oauthConfig.MakeOauth2Config()
token, err := oauth2Conf.Exchange(ctx, code)
if err != nil {
return fmt.Errorf("failed to get token: %w", err)
}
return PutToken(name, m, token, true)
}
// Local web server for collecting auth
type authServer struct {
opt *Options
state string
listener net.Listener
bindAddress string
authURL string
server *http.Server
result chan *AuthResult
}
// newAuthServer makes the webserver for collecting auth
func newAuthServer(opt *Options, bindAddress, state, authURL string) *authServer {
return &authServer{
opt: opt,
state: state,
bindAddress: bindAddress,
authURL: authURL, // http://host/auth redirects to here
result: make(chan *AuthResult, 1),
}
}
// Receive the auth request
func (s *authServer) handleAuth(w http.ResponseWriter, req *http.Request) {
if req.URL.Path != "/" {
fs.Debugf(nil, "Ignoring %s request on auth server to %q", req.Method, req.URL.Path)
http.NotFound(w, req)
return
}
fs.Debugf(nil, "Received %s request on auth server to %q", req.Method, req.URL.Path)
// Reply with the response to the user and to the channel
reply := func(status int, res *AuthResult) {
// Headers must be set before WriteHeader or they are ignored
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(status)
var t = template.Must(template.New("authResponse").Parse(templateString))
if err := t.Execute(w, res); err != nil {
fs.Debugf(nil, "Could not execute template for web response.")
}
s.result <- res
}
// Parse the form parameters and save them
err := req.ParseForm()
if err != nil {
reply(http.StatusBadRequest, &AuthResult{
Name: "Parse form error",
Description: err.Error(),
})
return
}
// get code, error if empty
code := req.Form.Get("code")
if code == "" {
err := &AuthResult{
Name: "Auth Error",
Description: "No code returned by remote server",
}
if errorCode := req.Form.Get("error"); errorCode != "" {
err.Description += ": " + errorCode
}
if errorMessage := req.Form.Get("error_description"); errorMessage != "" {
err.Description += ": " + errorMessage
}
reply(http.StatusBadRequest, err)
return
}
// check state
state := req.Form.Get("state")
if state != s.state && !(state == "" && s.opt.StateBlankOK) {
reply(http.StatusBadRequest, &AuthResult{
Name: "Auth state doesn't match",
Description: fmt.Sprintf("Expecting %q got %q", s.state, state),
})
return
}
// code OK
reply(http.StatusOK, &AuthResult{
OK: true,
Code: code,
Form: req.Form,
})
}
// Init gets the internal web server ready to receive config details
func (s *authServer) Init() error {
fs.Debugf(nil, "Starting auth server on %s", s.bindAddress)
mux := http.NewServeMux()
s.server = &http.Server{
Addr: s.bindAddress,
Handler: mux,
}
s.server.SetKeepAlivesEnabled(false)
mux.HandleFunc("/auth", func(w http.ResponseWriter, req *http.Request) {
state := req.FormValue("state")
if state != s.state {
fs.Debugf(nil, "State did not match: want %q got %q", s.state, state)
http.Error(w, "State did not match - please try again", http.StatusForbidden)
return
}
fs.Debugf(nil, "Redirecting browser to: %s", s.authURL)
http.Redirect(w, req, s.authURL, http.StatusTemporaryRedirect)
})
mux.HandleFunc("/", s.handleAuth)
var err error
s.listener, err = net.Listen("tcp", s.bindAddress)
if err != nil {
return err
}
return nil
}
// Serve the auth server, doesn't return
func (s *authServer) Serve() {
err := s.server.Serve(s.listener)
fs.Debugf(nil, "Closed auth server with error: %v", err)
}
// Stop the auth server by closing its socket
func (s *authServer) Stop() {
fs.Debugf(nil, "Closing auth server")
close(s.result)
_ = s.listener.Close()
// close the server
_ = s.server.Close()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/oauthutil/renew.go | lib/oauthutil/renew.go | package oauthutil
import (
"sync"
"sync/atomic"
"github.com/rclone/rclone/fs"
)
// Renew allows tokens to be renewed on expiry if uploads are in progress.
type Renew struct {
name string // name to use in logs
ts *TokenSource // token source that needs renewing
uploads atomic.Int32 // number of uploads in progress
run func() error // a transaction to run to renew the token on
done chan any // channel to end the go routine
shutdown sync.Once
}
// NewRenew creates a new Renew struct and starts a background process
// which renews the token whenever it expires. It uses the run() call
// to run a transaction to do this.
//
// It will only renew the token if the number of uploads > 0
func NewRenew(name string, ts *TokenSource, run func() error) *Renew {
r := &Renew{
name: name,
ts: ts,
run: run,
done: make(chan any),
}
go r.renewOnExpiry()
return r
}
// renewOnExpiry renews the token whenever it expires. Useful when there
// are lots of uploads in progress and the token doesn't get renewed.
// Amazon seem to cancel your uploads if you don't renew your token
// for 2hrs.
func (r *Renew) renewOnExpiry() {
expiry := r.ts.OnExpiry()
for {
select {
case <-expiry:
case <-r.done:
return
}
uploads := r.uploads.Load()
if uploads != 0 {
fs.Debugf(r.name, "Background refresher detected expired token - %d uploads in progress - refreshing", uploads)
// Do a transaction
err := r.run()
if err != nil {
fs.Errorf(r.name, "Background token refresher failed: %v", err)
}
} else {
fs.Debugf(r.name, "Background refresher detected expired token but no uploads in progress - doing nothing")
}
}
}
// Start should be called before starting an upload
func (r *Renew) Start() {
r.uploads.Add(1)
}
// Stop should be called after finishing an upload
func (r *Renew) Stop() {
r.uploads.Add(-1)
}
// Invalidate invalidates the token source
func (r *Renew) Invalidate() {
r.ts.Invalidate()
}
// Expire expires the token source
func (r *Renew) Expire() error {
return r.ts.Expire()
}
// Shutdown stops the timer and no more renewal will take place.
func (r *Renew) Shutdown() {
if r == nil {
return
}
// closing a channel can only be done once
r.shutdown.Do(func() {
if r.ts != nil {
r.ts.expiryTimer.Stop()
}
close(r.done)
})
}
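// Illustrative usage sketch (editor's addition, hypothetical names): wrapping
// uploads with Renew so the token stays fresh while transfers are in flight:
//
//	renew := oauthutil.NewRenew("myremote", ts, func() error {
//		// any cheap authenticated call refreshes the token as a side effect
//		return pingRemote()
//	})
//	renew.Start()      // before each upload
//	defer renew.Stop() // after each upload
//	...
//	renew.Shutdown()   // when the backend shuts down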
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/buildinfo/arch.go | lib/buildinfo/arch.go | package buildinfo
import (
"runtime"
"golang.org/x/sys/cpu"
)
// GetSupportedGOARM returns the ARM compatibility level of the current CPU.
//
// Returns the integer value that can be set for the GOARM variable to
// build with this level as target, a value which normally corresponds to the
// ARM architecture version number, although it is the floating point hardware
// support which is the decisive factor.
//
// Only relevant for 32-bit ARM architectures, where GOARCH=arm, which means
// ARMv7 and lower (ARMv8 is GOARCH=arm64 and GOARM is not considered).
// Highest possible value is therefore 7, while other possible values are
// 6 (for ARMv6) and 5 (for ARMv5, which is the lowest currently supported
// in go). Returns value 0 for anything else.
//
// See also:
//
// https://go.dev/src/runtime/os_linux_arm.go
// https://github.com/golang/go/wiki/GoArm
func GetSupportedGOARM() int {
if runtime.GOARCH == "arm" && cpu.Initialized {
// This CPU is an ARM (32-bit), and cpu.Initialized true means its
// features could be retrieved on current GOOS so that we can check
// for floating point hardware support.
if cpu.ARM.HasVFPv3 {
// This CPU has VFPv3 floating point hardware, which means it can
// run programs built with any GOARM value, 7 and lower.
return 7
} else if cpu.ARM.HasVFP {
// This CPU has VFP floating point hardware, but not VFPv3, which
// means it can run programs built with GOARM value 6 and lower,
// but not 7.
return 6
}
// This CPU has no VFP floating point hardware, which means it can
// only run programs built with GOARM value 5, which is minimum supported.
// Note that the CPU can still in reality be based on e.g. ARMv7
// architecture, but simply lack hardfloat support.
return 5
}
return 0
}
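// Illustrative summary (editor's addition) of the mapping applied above when
// GOARCH=arm and the CPU features could be read:
//
//	VFPv3 present          -> 7
//	VFP only (no VFPv3)    -> 6
//	no VFP hardware        -> 5
//	otherwise / not 32-bit -> 0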
// GetArch tells the rclone executable's architecture target.
func GetArch() string {
// Get the running program's architecture target.
arch := runtime.GOARCH
// For ARM architectures there are several variants, with different
// inconsistent and ambiguous naming.
//
// The most interesting thing here is which compatibility level of go is
// used, as controlled by GOARM build variable. We cannot in runtime get
// the actual value of GOARM used for building this program, but we can
// check the value supported by the current CPU by calling GetSupportedGOARM.
// This means we return information about the compatibility level (GOARM
// value) supported, when the current rclone executable may in reality be
// built with a lower level.
//
// Note that the kernel architecture, as returned by "uname -m", is not
// considered or included in results here, but it is included in the output
// from function GetOSVersion. It can have values such as armv6l, armv7l,
// armv8l, arm64 and aarch64, which may give relevant information. But it
// can also simply have value "arm", or it can have value "armv7l" for a
// processor based on ARMv7 but without floating point hardware - which
// means it in go needs to be built in ARMv5 compatibility mode (GOARM=5).
if arch == "arm64" {
// 64-bit ARM architecture, known as AArch64, was introduced with ARMv8.
// In go this architecture is a specific one, separate from other ARMs.
arch += " (ARMv8 compatible)"
} else if arch == "arm" {
// 32-bit ARM architecture, which is ARMv7 and lower.
// In go there are different compatibility levels represented by ARM
// architecture version number (like 5, 6 or 7).
switch GetSupportedGOARM() {
case 7:
arch += " (ARMv7 compatible)"
case 6:
arch += " (ARMv6 compatible)"
case 5:
arch += " (ARMv5 compatible, no hardfloat)"
}
}
return arch
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/buildinfo/snap.go | lib/buildinfo/snap.go | //go:build snap
package buildinfo
func init() {
Tags = append(Tags, "snap")
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/buildinfo/tags.go | lib/buildinfo/tags.go | // Package buildinfo provides build information.
package buildinfo
import (
"sort"
"strings"
)
// Tags contains slice of build tags.
// The `cmount` tag is added by cmd/cmount/mount.go only if build is static.
// The `noselfupdate` tag is added by cmd/selfupdate/noselfupdate.go
// Other tags including `cgo` are detected in this package.
var Tags []string
// GetLinkingAndTags tells how the rclone executable was linked
// and returns space separated build tags or the string "none".
func GetLinkingAndTags() (linking, tagString string) {
linking = "static"
tagList := []string{}
for _, tag := range Tags {
if tag == "cgo" {
linking = "dynamic"
} else {
tagList = append(tagList, tag)
}
}
if len(tagList) > 0 {
sort.Strings(tagList)
tagString = strings.Join(tagList, " ")
} else {
tagString = "none"
}
return
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/buildinfo/osversion.go | lib/buildinfo/osversion.go | //go:build !windows
package buildinfo
import (
"strings"
"github.com/shirou/gopsutil/v4/host"
)
// GetOSVersion returns OS version, kernel and bitness
func GetOSVersion() (osVersion, osKernel string) {
if platform, _, version, err := host.PlatformInformation(); err == nil && platform != "" {
osVersion = platform
if version != "" {
osVersion += " " + version
}
}
if version, err := host.KernelVersion(); err == nil && version != "" {
osKernel = version
}
if arch, err := host.KernelArch(); err == nil && arch != "" {
if strings.HasSuffix(arch, "64") && osVersion != "" {
osVersion += " (64 bit)"
}
if osKernel != "" {
osKernel += " (" + arch + ")"
}
}
return
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/buildinfo/cgo.go | lib/buildinfo/cgo.go | //go:build cgo
package buildinfo
func init() {
Tags = append(Tags, "cgo")
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/buildinfo/osversion_windows.go | lib/buildinfo/osversion_windows.go | package buildinfo
import (
"regexp"
"strings"
"unsafe"
"github.com/shirou/gopsutil/v4/host"
"golang.org/x/sys/windows"
)
// GetOSVersion returns OS version, kernel and bitness
// On Windows it performs additional output enhancements.
func GetOSVersion() (osVersion, osKernel string) {
if platform, _, version, err := host.PlatformInformation(); err == nil && platform != "" {
osVersion = platform
if version != "" {
osVersion += " " + version
}
}
if version, err := host.KernelVersion(); err == nil && version != "" {
osKernel = version
// Prevent duplication of output on Windows
if strings.Contains(osVersion, osKernel) {
deduped := strings.TrimSpace(strings.Replace(osVersion, osKernel, "", 1))
if deduped != "" {
osVersion = deduped
}
}
// Simplify kernel output: `MAJOR.MINOR.BUILD.REVISION Build BUILD.REVISION` -> `MAJOR.MINOR.BUILD.REVISION`
match := regexp.MustCompile(`^(\d+\.\d+\.(\d+\.\d+)) Build (\d+\.\d+)$`).FindStringSubmatch(osKernel)
if len(match) == 4 && match[2] == match[3] {
osKernel = match[1]
}
}
if osVersion != "" {
// Include the friendly-name of the version, which is typically what is referred to.
// Until Windows 10 version 2004 (May 2020) this can be found from registry entry
// ReleaseID, after that we must use entry DisplayVersion (ReleaseId is stuck at 2009).
// Source: https://ss64.com/nt/ver.html
friendlyName := getRegistryVersionString("DisplayVersion")
if friendlyName == "" {
friendlyName = getRegistryVersionString("ReleaseId")
}
if friendlyName != "" {
osVersion += " " + friendlyName
}
}
if arch, err := host.KernelArch(); err == nil && arch != "" {
if strings.HasSuffix(arch, "64") && osVersion != "" {
osVersion += " (64 bit)"
}
if osKernel != "" {
osKernel += " (" + arch + ")"
}
}
return
}
var regVersionKeyUTF16 = windows.StringToUTF16Ptr(`SOFTWARE\Microsoft\Windows NT\CurrentVersion`)
func getRegistryVersionString(name string) string {
var (
err error
handle windows.Handle
bufLen uint32
valType uint32
)
err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, regVersionKeyUTF16, 0, windows.KEY_READ|windows.KEY_WOW64_64KEY, &handle)
if err != nil {
return ""
}
defer func() {
_ = windows.RegCloseKey(handle)
}()
nameUTF16 := windows.StringToUTF16Ptr(name)
err = windows.RegQueryValueEx(handle, nameUTF16, nil, &valType, nil, &bufLen)
if err != nil {
return ""
}
regBuf := make([]uint16, bufLen/2+1)
err = windows.RegQueryValueEx(handle, nameUTF16, nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen)
if err != nil {
return ""
}
return windows.UTF16ToString(regBuf)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/file/preallocate_windows.go | lib/file/preallocate_windows.go | //go:build windows
package file
import (
"fmt"
"os"
"sync"
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var (
ntdll = windows.NewLazySystemDLL("ntdll.dll")
ntQueryVolumeInformationFile = ntdll.NewProc("NtQueryVolumeInformationFile")
ntSetInformationFile = ntdll.NewProc("NtSetInformationFile")
preAllocateMu sync.Mutex
)
type fileAllocationInformation struct {
AllocationSize uint64
}
type fileFsSizeInformation struct {
TotalAllocationUnits uint64
AvailableAllocationUnits uint64
SectorsPerAllocationUnit uint32
BytesPerSector uint32
}
type ioStatusBlock struct {
Status, Information uintptr
}
// PreallocateImplemented is a constant indicating whether the
// implementation of Preallocate actually does anything.
const PreallocateImplemented = true
// PreAllocate the file for performance reasons
func PreAllocate(size int64, out *os.File) error {
if size <= 0 {
return nil
}
preAllocateMu.Lock()
defer preAllocateMu.Unlock()
var (
iosb ioStatusBlock
fsSizeInfo fileFsSizeInformation
allocInfo fileAllocationInformation
)
// Query info about the block sizes on the file system
_, _, e1 := ntQueryVolumeInformationFile.Call(
out.Fd(),
uintptr(unsafe.Pointer(&iosb)),
uintptr(unsafe.Pointer(&fsSizeInfo)),
unsafe.Sizeof(fsSizeInfo),
uintptr(3), // FileFsSizeInformation
)
if e1 != nil && e1 != syscall.Errno(0) {
return fmt.Errorf("preAllocate NtQueryVolumeInformationFile failed: %w", e1)
}
// Calculate the allocation size
clusterSize := uint64(fsSizeInfo.BytesPerSector) * uint64(fsSizeInfo.SectorsPerAllocationUnit)
if clusterSize <= 0 {
return fmt.Errorf("preAllocate clusterSize %d <= 0", clusterSize)
}
allocInfo.AllocationSize = (1 + uint64(size-1)/clusterSize) * clusterSize
// Ask for the allocation
_, _, e1 = ntSetInformationFile.Call(
out.Fd(),
uintptr(unsafe.Pointer(&iosb)),
uintptr(unsafe.Pointer(&allocInfo)),
unsafe.Sizeof(allocInfo),
uintptr(19), // FileAllocationInformation
)
if e1 != nil && e1 != syscall.Errno(0) {
if e1 == windows.ERROR_DISK_FULL || e1 == windows.ERROR_HANDLE_DISK_FULL {
return ErrDiskFull
}
return fmt.Errorf("preAllocate NtSetInformationFile failed: %w", e1)
}
return nil
}
// SetSparseImplemented is a constant indicating whether the
// implementation of SetSparse actually does anything.
const SetSparseImplemented = true
// SetSparse makes the file be a sparse file
func SetSparse(out *os.File) error {
var bytesReturned uint32
err := syscall.DeviceIoControl(syscall.Handle(out.Fd()), windows.FSCTL_SET_SPARSE, nil, 0, nil, 0, &bytesReturned, nil)
if err != nil {
return fmt.Errorf("DeviceIoControl FSCTL_SET_SPARSE: %w", err)
}
return nil
}
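// Illustrative usage sketch (editor's addition, made-up names): preallocating
// before a large sequential write and handling a full disk early:
//
//	out, err := file.Create(`C:\tmp\big.bin`)
//	if err != nil {
//		return err
//	}
//	if err := file.PreAllocate(expectedSize, out); err != nil {
//		if err == file.ErrDiskFull {
//			// not enough space - abort before writing any data
//		}
//		return err
//	}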
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/file/file.go | lib/file/file.go | // Package file provides a version of os.OpenFile, the handles of
// which can be renamed and deleted under Windows.
package file
import "os"
// Open opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
// If there is an error, it will be of type *PathError.
func Open(name string) (*os.File, error) {
return OpenFile(name, os.O_RDONLY, 0)
}
// Create creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned
// File can be used for I/O; the associated file descriptor has mode
// O_RDWR.
// If there is an error, it will be of type *PathError.
func Create(name string) (*os.File, error) {
return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/file/unc.go | lib/file/unc.go | //go:build !windows
package file
// UNCPath converts an absolute Windows path to a UNC long path.
//
// It does nothing on non windows platforms
func UNCPath(l string) string {
return l
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/file/preallocate.go | lib/file/preallocate.go | package file
import "errors"
// ErrDiskFull is returned from PreAllocate when it detects disk full
var ErrDiskFull = errors.New("preallocate: file too big for remaining disk space")
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/file/unc_windows.go | lib/file/unc_windows.go | //go:build windows
package file
import (
"regexp"
"strings"
)
// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
// UNCPath converts an absolute Windows path to a UNC long path.
//
// It does nothing on non windows platforms
func UNCPath(l string) string {
// If prefix is "\\", we already have a UNC path or server.
if strings.HasPrefix(l, `\\`) {
// If already long path, just keep it
if strings.HasPrefix(l, `\\?\`) {
return l
}
// Trim "\\" from path and add UNC prefix.
return `\\?\UNC\` + strings.TrimPrefix(l, `\\`)
}
if isAbsWinDrive.MatchString(l) {
return `\\?\` + l
}
return l
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/file/unc_test.go | lib/file/unc_test.go | //go:build windows
package file
import "testing"
var uncTestPaths = []string{
`C:\Ba*d\P|a?t<h>\Windows\Folder`,
`C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
`\\?\unC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`C:\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`C:\AbsoluteToRoot\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\server\share\Desktop`,
`\\?\UNC\\share\folder\Desktop`,
`\\server\share`,
}
var uncTestPathsResults = []string{
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
`\\?\unC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\UNC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\C:\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\C:\AbsoluteToRoot\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\UNC\server\share\Desktop`,
`\\?\UNC\\share\folder\Desktop`,
`\\?\UNC\server\share`,
}
// Test that UNC paths are converted.
func TestUncPaths(t *testing.T) {
for i, p := range uncTestPaths {
unc := UNCPath(p)
if unc != uncTestPathsResults[i] {
t.Fatalf("UNC test path\nInput:%s\nOutput:%s\nExpected:%s", p, unc, uncTestPathsResults[i])
}
// Test we don't add more.
unc = UNCPath(unc)
if unc != uncTestPathsResults[i] {
t.Fatalf("UNC test path\nInput:%s\nOutput:%s\nExpected:%s", p, unc, uncTestPathsResults[i])
}
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/file/file_test.go | lib/file/file_test.go | package file
import (
"fmt"
"io"
"os"
"path"
"runtime"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// This lists dir and checks the listing is as expected without checking the size
func checkListingNoSize(t *testing.T, dir string, want []string) {
var got []string
nodes, err := os.ReadDir(dir)
require.NoError(t, err)
for _, node := range nodes {
got = append(got, fmt.Sprintf("%s,%v", node.Name(), node.IsDir()))
}
assert.Equal(t, want, got)
}
// This lists dir and checks the listing is as expected
func checkListing(t *testing.T, dir string, want []string) {
var got []string
nodes, err := os.ReadDir(dir)
require.NoError(t, err)
for _, node := range nodes {
info, err := node.Info()
assert.NoError(t, err)
got = append(got, fmt.Sprintf("%s,%d,%v", node.Name(), info.Size(), node.IsDir()))
}
assert.Equal(t, want, got)
}
// Test we can delete an open file
func TestOpenFileDelete(t *testing.T) {
dir := t.TempDir()
filepath := path.Join(dir, "file1")
f, err := Create(filepath)
require.NoError(t, err)
_, err = f.Write([]byte("hello"))
assert.NoError(t, err)
checkListingNoSize(t, dir, []string{
"file1,false",
})
// Delete the file first
assert.NoError(t, os.Remove(filepath))
// .. then close it
assert.NoError(t, f.Close())
checkListing(t, dir, nil)
}
// Test we can rename an open file
func TestOpenFileRename(t *testing.T) {
dir := t.TempDir()
filepath := path.Join(dir, "file1")
f, err := Create(filepath)
require.NoError(t, err)
_, err = f.Write([]byte("hello"))
assert.NoError(t, err)
checkListingNoSize(t, dir, []string{
"file1,false",
})
// Rename the file while open
filepath2 := path.Join(dir, "file2")
assert.NoError(t, os.Rename(filepath, filepath2))
checkListingNoSize(t, dir, []string{
"file2,false",
})
// .. then close it
assert.NoError(t, f.Close())
checkListing(t, dir, []string{
"file2,5,false",
})
}
// Smoke test the Open, OpenFile and Create functions
func TestOpenFileOperations(t *testing.T) {
dir := t.TempDir()
filepath := path.Join(dir, "file1")
// Create the file
f, err := Create(filepath)
require.NoError(t, err)
_, err = f.Write([]byte("hello"))
assert.NoError(t, err)
assert.NoError(t, f.Close())
checkListing(t, dir, []string{
"file1,5,false",
})
// Append onto the file
f, err = OpenFile(filepath, os.O_RDWR|os.O_APPEND, 0666)
require.NoError(t, err)
_, err = f.Write([]byte("HI"))
assert.NoError(t, err)
assert.NoError(t, f.Close())
checkListing(t, dir, []string{
"file1,7,false",
})
// Read it back in
f, err = Open(filepath)
require.NoError(t, err)
var b = make([]byte, 10)
n, err := f.Read(b)
assert.True(t, err == io.EOF || err == nil)
assert.Equal(t, 7, n)
assert.Equal(t, "helloHI", string(b[:n]))
assert.NoError(t, f.Close())
checkListing(t, dir, []string{
"file1,7,false",
})
}
// Smoke test the IsReserved function
func TestIsReserved(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skip("Skipping test on !windows")
}
// Regular name
require.NoError(t, IsReserved("readme.txt"))
require.NoError(t, IsReserved("some/path/readme.txt"))
// Empty
require.Error(t, IsReserved(""))
// Separators only
require.Error(t, IsReserved("/"))
require.Error(t, IsReserved("////"))
require.Error(t, IsReserved("./././././"))
// Legacy device name
require.Error(t, IsReserved("NUL"))
require.Error(t, IsReserved("nul"))
require.Error(t, IsReserved("Nul"))
require.Error(t, IsReserved("NUL.txt"))
require.Error(t, IsReserved("some/path/to/nul.txt"))
require.NoError(t, IsReserved("NULL"))
// Name end with a space or a period
require.Error(t, IsReserved("test."))
require.Error(t, IsReserved("test "))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/file/preallocate_other.go | lib/file/preallocate_other.go | //go:build !windows && !linux
package file
import "os"
// PreallocateImplemented is a constant indicating whether the
// implementation of Preallocate actually does anything.
const PreallocateImplemented = false
// PreAllocate the file for performance reasons
func PreAllocate(size int64, out *os.File) error {
return nil
}
// SetSparseImplemented is a constant indicating whether the
// implementation of SetSparse actually does anything.
const SetSparseImplemented = false
// SetSparse makes the file be a sparse file
func SetSparse(out *os.File) error {
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/file/file_windows.go | lib/file/file_windows.go | //go:build windows
package file
import (
"errors"
"os"
"path/filepath"
"regexp"
"syscall"
)
// OpenFile is the generalized open call; most users will use Open or Create
// instead. It opens the named file with specified flag (O_RDONLY etc.) and
// perm (before umask), if applicable. If successful, methods on the returned
// File can be used for I/O. If there is an error, it will be of type
// *PathError.
//
// Under both Unix and Windows this will allow open files to be
// renamed and or deleted.
func OpenFile(path string, mode int, perm os.FileMode) (*os.File, error) {
// This code copied from syscall_windows.go in the go source and then
// modified to support renaming and deleting open files by adding
// FILE_SHARE_DELETE.
//
// https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-createfilea#file_share_delete
if len(path) == 0 {
return nil, syscall.ERROR_FILE_NOT_FOUND
}
pathp, err := syscall.UTF16PtrFromString(path)
if err != nil {
return nil, err
}
var access uint32
switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
case syscall.O_RDONLY:
access = syscall.GENERIC_READ
case syscall.O_WRONLY:
access = syscall.GENERIC_WRITE
case syscall.O_RDWR:
access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
}
if mode&syscall.O_CREAT != 0 {
access |= syscall.GENERIC_WRITE
}
if mode&syscall.O_APPEND != 0 {
access &^= syscall.GENERIC_WRITE
access |= syscall.FILE_APPEND_DATA
}
sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
var createmode uint32
switch {
case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
createmode = syscall.CREATE_NEW
case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
createmode = syscall.CREATE_ALWAYS
case mode&syscall.O_CREAT == syscall.O_CREAT:
createmode = syscall.OPEN_ALWAYS
case mode&syscall.O_TRUNC == syscall.O_TRUNC:
createmode = syscall.TRUNCATE_EXISTING
default:
createmode = syscall.OPEN_EXISTING
}
h, e := syscall.CreateFile(pathp, access, sharemode, nil, createmode, syscall.FILE_ATTRIBUTE_NORMAL|syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
if e != nil {
return nil, e
}
return os.NewFile(uintptr(h), path), nil
}
// IsReserved checks if path contains a reserved name
func IsReserved(path string) error {
if path == "" {
return errors.New("path is empty")
}
base := filepath.Base(path)
// If the path is empty or reduces to ".", Base returns ".".
if base == "." {
return errors.New("path is '.'")
}
// If the path consists entirely of separators, Base returns a single separator.
if base == string(filepath.Separator) {
return errors.New("path consists entirely of separators")
}
// Do not end a file or directory name with a space or a period. Although the underlying
// file system may support such names, the Windows shell and user interface does not.
// (https://docs.microsoft.com/en-gb/windows/win32/fileio/naming-a-file)
suffix := base[len(base)-1]
switch suffix {
case ' ':
return errors.New("base file name ends with a space")
case '.':
return errors.New("base file name ends with a period")
}
// Do not use names of legacy (DOS) devices, not even as basename without extension,
// as this will refer to the actual device.
// (https://docs.microsoft.com/en-gb/windows/win32/fileio/naming-a-file)
if reserved, _ := regexp.MatchString(`^(?i:con|prn|aux|nul|com[1-9]|lpt[1-9])(?:\.|$)`, base); reserved {
return errors.New("base file name is reserved windows device name (CON, PRN, AUX, NUL, COM[1-9], LPT[1-9])")
}
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/file/file_other.go | lib/file/file_other.go | //go:build !windows
package file
import "os"
// OpenFile is the generalized open call; most users will use Open or Create
// instead. It opens the named file with specified flag (O_RDONLY etc.) and
// perm (before umask), if applicable. If successful, methods on the returned
// File can be used for I/O. If there is an error, it will be of type
// *PathError.
//
// Under both Unix and Windows this will allow open files to be
// renamed and or deleted.
var OpenFile = os.OpenFile
// IsReserved checks if path contains a reserved name
func IsReserved(path string) error {
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/file/driveletter_other.go | lib/file/driveletter_other.go | //go:build !windows
package file
// FindUnusedDriveLetter does nothing except on Windows.
func FindUnusedDriveLetter() (driveLetter uint8) {
return 0
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/file/driveletter_windows.go | lib/file/driveletter_windows.go | //go:build windows
package file
import (
"os"
)
// FindUnusedDriveLetter searches the mounted drive list on the system
// (starting from Z: and ending at D:) for an unused drive letter.
// Returns the letter found (like 'Z') or the zero value.
func FindUnusedDriveLetter() (driveLetter uint8) {
// Do not use A: and B:, because they are reserved for floppy drive.
// Do not use C:, because it is normally used for main drive.
for l := uint8('Z'); l >= uint8('D'); l-- {
_, err := os.Stat(string(l) + ":" + string(os.PathSeparator))
if os.IsNotExist(err) {
return l
}
}
return 0
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/file/preallocate_unix.go | lib/file/preallocate_unix.go | //go:build linux
package file
import (
"os"
"sync"
"sync/atomic"
"syscall"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/unix"
)
var (
fallocFlags = [...]uint32{
unix.FALLOC_FL_KEEP_SIZE, // Default
unix.FALLOC_FL_KEEP_SIZE | unix.FALLOC_FL_PUNCH_HOLE, // for ZFS #3066
}
fallocFlagsIndex atomic.Int32
preAllocateMu sync.Mutex
)
// PreallocateImplemented is a constant indicating whether the
// implementation of Preallocate actually does anything.
const PreallocateImplemented = true
// PreAllocate the file for performance reasons
func PreAllocate(size int64, out *os.File) (err error) {
if size <= 0 {
return nil
}
preAllocateMu.Lock()
defer preAllocateMu.Unlock()
for {
index := fallocFlagsIndex.Load()
again:
if index >= int32(len(fallocFlags)) {
return nil // Fallocate is disabled
}
flags := fallocFlags[index]
err = unix.Fallocate(int(out.Fd()), flags, 0, size)
if err == unix.ENOTSUP {
// Try the next flags combination
index++
fallocFlagsIndex.Store(index)
fs.Debugf(nil, "preAllocate: got error on fallocate, trying combination %d/%d: %v", index, len(fallocFlags), err)
goto again
}
// Wrap important errors
if err == unix.ENOSPC {
return ErrDiskFull
}
if err != syscall.EINTR {
break
}
}
return err
}
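// Illustrative usage sketch (added for clarity, not part of the original
// file): pre-allocating space before writing a large file. The path and
// size below are hypothetical.
//
//	out, err := os.Create("/tmp/payload.bin")
//	if err != nil {
//		return err
//	}
//	defer out.Close()
//	if err := PreAllocate(64*1024*1024, out); err != nil {
//		// ErrDiskFull is returned when the disk has run out of space
//		return err
//	}
//	// ... write up to 64 MiB into out ...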
// SetSparseImplemented is a constant indicating whether the
// implementation of SetSparse actually does anything.
const SetSparseImplemented = false
// SetSparse makes the file be a sparse file
func SetSparse(out *os.File) error {
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/file/mkdir.go | lib/file/mkdir.go | package file
import "os"
// MkdirAll now just calls os.MkdirAll
func MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/proxy/socks.go | lib/proxy/socks.go | // Package proxy enables SOCKS5 proxy dialling
package proxy
import (
"fmt"
"net"
"strings"
"golang.org/x/net/proxy"
)
// SOCKS5Dial dials a net.Conn using a SOCKS5 proxy server.
// The socks5Proxy address can be in the form of [user:password@]host:port, [user@]host:port or just host:port if no auth is required.
// It will optionally take a proxyDialer to dial the SOCKS5 proxy server. If nil is passed, it will use the default net.Dialer.
func SOCKS5Dial(network, addr, socks5Proxy string, proxyDialer proxy.Dialer) (net.Conn, error) {
if proxyDialer == nil {
proxyDialer = &net.Dialer{}
}
var (
proxyAddress string
proxyAuth *proxy.Auth
)
if credsAndHost := strings.SplitN(socks5Proxy, "@", 2); len(credsAndHost) == 2 {
proxyCreds := strings.SplitN(credsAndHost[0], ":", 2)
proxyAuth = &proxy.Auth{
User: proxyCreds[0],
}
if len(proxyCreds) == 2 {
proxyAuth.Password = proxyCreds[1]
}
proxyAddress = credsAndHost[1]
} else {
proxyAddress = credsAndHost[0]
}
proxyDialer, err := proxy.SOCKS5("tcp", proxyAddress, proxyAuth, proxyDialer)
if err != nil {
return nil, fmt.Errorf("failed to create proxy dialer: %w", err)
}
return proxyDialer.Dial(network, addr)
}
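// Illustrative usage sketch (added for clarity, not part of the original
// file): dialling through a SOCKS5 proxy with username/password auth. The
// proxy address and target below are hypothetical.
//
//	conn, err := SOCKS5Dial("tcp", "example.com:443", "alice:secret@127.0.0.1:1080", nil)
//	if err != nil {
//		return err
//	}
//	defer conn.Close()
//	// conn is now a net.Conn tunnelled through the SOCKS5 proxy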
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/proxy/http.go | lib/proxy/http.go | package proxy
import (
"bufio"
"crypto/tls"
"fmt"
"net"
"net/http"
"net/url"
"strings"
"golang.org/x/net/proxy"
)
// HTTPConnectDial connects using HTTP CONNECT via proxyDialer
//
// The proxy to use is given by proxyURL. If proxyURL is nil the
// connection is made directly without a proxy.
//
// It optionally takes a proxyDialer to dial the HTTP proxy server.
// If nil is passed, it will use the default net.Dialer.
func HTTPConnectDial(network, addr string, proxyURL *url.URL, proxyDialer proxy.Dialer) (net.Conn, error) {
if proxyDialer == nil {
proxyDialer = &net.Dialer{}
}
if proxyURL == nil {
return proxyDialer.Dial(network, addr)
}
// prepare proxy host with default ports
host := proxyURL.Host
if !strings.Contains(host, ":") {
if strings.EqualFold(proxyURL.Scheme, "https") {
host += ":443"
} else {
host += ":80"
}
}
// connect to proxy
conn, err := proxyDialer.Dial(network, host)
if err != nil {
return nil, fmt.Errorf("HTTP CONNECT proxy failed to Dial: %q", err)
}
// wrap TLS if HTTPS proxy
if strings.EqualFold(proxyURL.Scheme, "https") {
tlsConfig := &tls.Config{ServerName: proxyURL.Hostname()}
tlsConn := tls.Client(conn, tlsConfig)
if err := tlsConn.Handshake(); err != nil {
_ = conn.Close()
return nil, fmt.Errorf("HTTP CONNECT proxy failed to make TLS connection: %q", err)
}
conn = tlsConn
}
// send CONNECT
_, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, addr)
if err != nil {
_ = conn.Close()
return nil, fmt.Errorf("HTTP CONNECT proxy failed to send CONNECT: %q", err)
}
br := bufio.NewReader(conn)
req := &http.Request{URL: &url.URL{Scheme: "http", Host: addr}}
resp, err := http.ReadResponse(br, req)
if err != nil {
_ = conn.Close()
return nil, fmt.Errorf("HTTP CONNECT proxy failed to read response: %q", err)
}
if resp.StatusCode != http.StatusOK {
_ = conn.Close()
return nil, fmt.Errorf("HTTP CONNECT proxy failed: %s", resp.Status)
}
return conn, nil
}
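// Illustrative usage sketch (added for clarity, not part of the original
// file): tunnelling a TCP connection through an HTTP CONNECT proxy. The
// proxy URL and target address below are hypothetical.
//
//	proxyURL, _ := url.Parse("http://proxy.example.com:3128")
//	conn, err := HTTPConnectDial("tcp", "example.com:443", proxyURL, nil)
//	if err != nil {
//		return err
//	}
//	defer conn.Close()
//	// conn now carries raw bytes to example.com:443 via the proxy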
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/cache/cache.go | lib/cache/cache.go | // Package cache implements a simple cache where the entries are
// expired after a given time (5 minutes of disuse by default).
package cache
import (
"strings"
"sync"
"time"
)
// Cache holds values indexed by string, but expired after a given time
// (5 minutes by default).
type Cache struct {
mu sync.Mutex
cache map[string]*cacheEntry
expireRunning bool
expireDuration time.Duration // expire the cache entry when it is older than this
expireInterval time.Duration // interval to run the cache expire
finalize func(value any)
}
// New creates a new cache with the default expire duration and interval
func New() *Cache {
return &Cache{
cache: map[string]*cacheEntry{},
expireRunning: false,
expireDuration: 300 * time.Second,
expireInterval: 60 * time.Second,
finalize: func(_ any) {},
}
}
// SetExpireDuration sets how long an entry stays in the cache after it was last used
//
// If it is less than or equal to 0 then things are never cached
func (c *Cache) SetExpireDuration(d time.Duration) *Cache {
c.expireDuration = d
return c
}
// returns true if we aren't to cache anything
func (c *Cache) noCache() bool {
return c.expireDuration <= 0
}
// SetExpireInterval sets the interval at which the cache expiry runs
//
// Set to 0 or a -ve number to disable
func (c *Cache) SetExpireInterval(d time.Duration) *Cache {
if d <= 0 {
d = 100 * 365 * 24 * time.Hour
}
c.expireInterval = d
return c
}
// cacheEntry is stored in the cache
type cacheEntry struct {
value any // cached item
err error // creation error
key string // key
lastUsed time.Time // time used for expiry
pinCount int // non zero if the entry should not be removed
}
// CreateFunc is called to create new values. If the create function
// returns an error it will be cached if ok is true, otherwise the
// error will just be returned, allowing negative caching if required.
type CreateFunc func(key string) (value any, ok bool, error error)
// used marks an entry as accessed now and kicks the expire timer off
// should be called with the lock held
func (c *Cache) used(entry *cacheEntry) {
entry.lastUsed = time.Now()
if !c.expireRunning {
time.AfterFunc(c.expireInterval, c.cacheExpire)
c.expireRunning = true
}
}
// Get gets a value named key either from the cache or creates it
// afresh with the create function.
func (c *Cache) Get(key string, create CreateFunc) (value any, err error) {
c.mu.Lock()
entry, ok := c.cache[key]
if !ok {
c.mu.Unlock() // Unlock in case Get is called recursively
value, ok, err = create(key)
if err != nil && !ok {
return value, err
}
entry = &cacheEntry{
value: value,
key: key,
err: err,
}
c.mu.Lock()
if !c.noCache() {
c.cache[key] = entry
}
}
defer c.mu.Unlock()
c.used(entry)
return entry.value, entry.err
}
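// Illustrative usage sketch (added for clarity, not part of the original
// file): the create function builds the value on a cache miss. Returning
// ok == false with an error means the error is returned but not cached;
// returning ok == true caches the error too (negative caching). The key
// and constructor below are hypothetical.
//
//	c := New()
//	v, err := c.Get("remote:path", func(key string) (any, bool, error) {
//		backend, err := connect(key) // hypothetical expensive constructor
//		if err != nil {
//			return nil, false, err // don't cache the failure
//		}
//		return backend, true, nil
//	})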
func (c *Cache) addPin(key string, count int) {
c.mu.Lock()
entry, ok := c.cache[key]
if ok {
entry.pinCount += count
c.used(entry)
}
c.mu.Unlock()
}
// Pin a value in the cache if it exists
func (c *Cache) Pin(key string) {
c.addPin(key, 1)
}
// Unpin a value in the cache if it exists
func (c *Cache) Unpin(key string) {
c.addPin(key, -1)
}
// PutErr puts a value named key with err into the cache
func (c *Cache) PutErr(key string, value any, err error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.noCache() {
return
}
entry := &cacheEntry{
value: value,
key: key,
err: err,
}
c.used(entry)
c.cache[key] = entry
}
// Put puts a value named key into the cache
func (c *Cache) Put(key string, value any) {
c.PutErr(key, value, nil)
}
// GetMaybe returns the key and true if found, nil and false if not
func (c *Cache) GetMaybe(key string) (value any, found bool) {
c.mu.Lock()
defer c.mu.Unlock()
entry, found := c.cache[key]
if !found {
return nil, found
}
c.used(entry)
return entry.value, found
}
// Delete the entry passed in
//
// Returns true if the entry was found
func (c *Cache) Delete(key string) bool {
c.mu.Lock()
entry, found := c.cache[key]
if found {
c.finalize(entry.value)
}
delete(c.cache, key)
c.mu.Unlock()
return found
}
// DeletePrefix deletes all entries with the given prefix
//
// Returns number of entries deleted
func (c *Cache) DeletePrefix(prefix string) (deleted int) {
c.mu.Lock()
for key, entry := range c.cache {
if !strings.HasPrefix(key, prefix) {
continue
}
c.finalize(entry.value)
delete(c.cache, key)
deleted++
}
c.mu.Unlock()
return deleted
}
// Rename renames the item at oldKey to newKey.
//
// If there was an existing item at newKey then it takes precedence
// and is returned otherwise the item (if any) at oldKey is returned.
func (c *Cache) Rename(oldKey, newKey string) (value any, found bool) {
c.mu.Lock()
if newEntry, newFound := c.cache[newKey]; newFound {
// If new entry is found use that
if oldEntry, oldFound := c.cache[oldKey]; oldFound {
// If there's an old entry that is different we must finalize it
if newEntry.value != oldEntry.value {
c.finalize(c.cache[oldKey].value)
}
}
delete(c.cache, oldKey)
value, found = newEntry.value, newFound
c.used(newEntry)
} else if oldEntry, oldFound := c.cache[oldKey]; oldFound {
// If old entry is found rename it to new and use that
c.cache[newKey] = oldEntry
// No need to shutdown here, as value lives on under newKey
delete(c.cache, oldKey)
c.used(oldEntry)
value, found = oldEntry.value, oldFound
}
c.mu.Unlock()
return value, found
}
// cacheExpire expires any entries that haven't been used recently
func (c *Cache) cacheExpire() {
c.mu.Lock()
defer c.mu.Unlock()
now := time.Now()
for key, entry := range c.cache {
if entry.pinCount <= 0 && now.Sub(entry.lastUsed) > c.expireDuration {
c.finalize(entry.value)
delete(c.cache, key)
}
}
if len(c.cache) != 0 {
time.AfterFunc(c.expireInterval, c.cacheExpire)
c.expireRunning = true
} else {
c.expireRunning = false
}
}
// Clear removes everything from the cache
func (c *Cache) Clear() {
c.mu.Lock()
for key, entry := range c.cache {
c.finalize(entry.value)
delete(c.cache, key)
}
c.mu.Unlock()
}
// Entries returns the number of entries in the cache
func (c *Cache) Entries() int {
c.mu.Lock()
entries := len(c.cache)
c.mu.Unlock()
return entries
}
// SetFinalizer sets a function to be called when a value drops out of the cache
func (c *Cache) SetFinalizer(finalize func(any)) {
c.mu.Lock()
c.finalize = finalize
c.mu.Unlock()
}
// EntriesWithPinCount returns the number of pinned and unpinned entries in the cache
//
// Each entry is counted only once, regardless of entry.pinCount
func (c *Cache) EntriesWithPinCount() (pinned, unpinned int) {
c.mu.Lock()
for _, entry := range c.cache {
if entry.pinCount <= 0 {
unpinned++
} else {
pinned++
}
}
c.mu.Unlock()
return pinned, unpinned
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/cache/cache_test.go | lib/cache/cache_test.go | package cache
import (
"errors"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
called = 0
errSentinel = errors.New("an error")
errCached = errors.New("a cached error")
)
func setup(t *testing.T) (*Cache, CreateFunc) {
called = 0
create := func(path string) (any, bool, error) {
assert.Equal(t, 0, called)
called++
switch path {
case "/":
return "/", true, nil
case "/file.txt":
return "/file.txt", true, errCached
case "/error":
return nil, false, errSentinel
case "/err":
return nil, false, errSentinel
}
panic(fmt.Sprintf("Unknown path %q", path))
}
c := New()
return c, create
}
func TestGet(t *testing.T) {
c, create := setup(t)
assert.Equal(t, 0, len(c.cache))
f, err := c.Get("/", create)
require.NoError(t, err)
assert.Equal(t, 1, len(c.cache))
f2, err := c.Get("/", create)
require.NoError(t, err)
assert.Equal(t, f, f2)
}
func TestGetFile(t *testing.T) {
c, create := setup(t)
assert.Equal(t, 0, len(c.cache))
f, err := c.Get("/file.txt", create)
require.Equal(t, errCached, err)
assert.Equal(t, 1, len(c.cache))
f2, err := c.Get("/file.txt", create)
require.Equal(t, errCached, err)
assert.Equal(t, f, f2)
}
func TestGetError(t *testing.T) {
c, create := setup(t)
assert.Equal(t, 0, len(c.cache))
f, err := c.Get("/error", create)
require.Equal(t, errSentinel, err)
require.Equal(t, nil, f)
assert.Equal(t, 0, len(c.cache))
}
func TestPutErr(t *testing.T) {
c, create := setup(t)
assert.Equal(t, 0, len(c.cache))
c.PutErr("/alien", "slime", errSentinel)
assert.Equal(t, 1, len(c.cache))
fNew, err := c.Get("/alien", create)
require.Equal(t, errSentinel, err)
require.Equal(t, "slime", fNew)
assert.Equal(t, 1, len(c.cache))
}
func TestPut(t *testing.T) {
c, create := setup(t)
assert.Equal(t, 0, len(c.cache))
c.Put("/alien", "slime")
assert.Equal(t, 1, len(c.cache))
fNew, err := c.Get("/alien", create)
require.NoError(t, err)
require.Equal(t, "slime", fNew)
assert.Equal(t, 1, len(c.cache))
}
func TestCacheExpire(t *testing.T) {
c, create := setup(t)
c.SetExpireInterval(time.Millisecond)
assert.Equal(t, false, c.expireRunning)
_, err := c.Get("/", create)
require.NoError(t, err)
c.mu.Lock()
entry := c.cache["/"]
assert.Equal(t, 1, len(c.cache))
c.mu.Unlock()
c.cacheExpire()
c.mu.Lock()
assert.Equal(t, 1, len(c.cache))
entry.lastUsed = time.Now().Add(-c.expireDuration - 60*time.Second)
assert.Equal(t, true, c.expireRunning)
c.mu.Unlock()
time.Sleep(250 * time.Millisecond)
c.mu.Lock()
assert.Equal(t, false, c.expireRunning)
assert.Equal(t, 0, len(c.cache))
c.mu.Unlock()
}
func TestCacheNoExpire(t *testing.T) {
c, create := setup(t)
assert.False(t, c.noCache())
c.SetExpireDuration(0)
assert.Equal(t, false, c.expireRunning)
assert.True(t, c.noCache())
f, err := c.Get("/", create)
require.NoError(t, err)
require.NotNil(t, f)
c.mu.Lock()
assert.Equal(t, 0, len(c.cache))
c.mu.Unlock()
c.Put("/alien", "slime")
c.mu.Lock()
assert.Equal(t, 0, len(c.cache))
c.mu.Unlock()
}
func TestCachePin(t *testing.T) {
c, create := setup(t)
_, err := c.Get("/", create)
require.NoError(t, err)
// Pin a nonexistent item to show nothing happens
c.Pin("notfound")
c.mu.Lock()
entry := c.cache["/"]
assert.Equal(t, 1, len(c.cache))
c.mu.Unlock()
c.cacheExpire()
c.mu.Lock()
assert.Equal(t, 1, len(c.cache))
c.mu.Unlock()
// Pin the entry and check it does not get expired
c.Pin("/")
// Reset last used to make the item expirable
c.mu.Lock()
entry.lastUsed = time.Now().Add(-c.expireDuration - 60*time.Second)
c.mu.Unlock()
c.cacheExpire()
c.mu.Lock()
assert.Equal(t, 1, len(c.cache))
c.mu.Unlock()
// Unpin the entry and check it does get expired now
c.Unpin("/")
// Reset last used
c.mu.Lock()
entry.lastUsed = time.Now().Add(-c.expireDuration - 60*time.Second)
c.mu.Unlock()
c.cacheExpire()
c.mu.Lock()
assert.Equal(t, 0, len(c.cache))
c.mu.Unlock()
}
func TestClear(t *testing.T) {
c, create := setup(t)
assert.Equal(t, 0, len(c.cache))
_, err := c.Get("/", create)
require.NoError(t, err)
assert.Equal(t, 1, len(c.cache))
c.Clear()
assert.Equal(t, 0, len(c.cache))
}
func TestEntries(t *testing.T) {
c, create := setup(t)
assert.Equal(t, 0, c.Entries())
_, err := c.Get("/", create)
require.NoError(t, err)
assert.Equal(t, 1, c.Entries())
c.Clear()
assert.Equal(t, 0, c.Entries())
}
func TestGetMaybe(t *testing.T) {
c, create := setup(t)
value, found := c.GetMaybe("/")
assert.Equal(t, false, found)
assert.Nil(t, value)
f, err := c.Get("/", create)
require.NoError(t, err)
value, found = c.GetMaybe("/")
assert.Equal(t, true, found)
assert.Equal(t, f, value)
c.Clear()
value, found = c.GetMaybe("/")
assert.Equal(t, false, found)
assert.Nil(t, value)
}
func TestDelete(t *testing.T) {
c, create := setup(t)
assert.Equal(t, 0, len(c.cache))
_, err := c.Get("/", create)
require.NoError(t, err)
assert.Equal(t, 1, len(c.cache))
assert.Equal(t, false, c.Delete("notfound"))
assert.Equal(t, 1, len(c.cache))
assert.Equal(t, true, c.Delete("/"))
assert.Equal(t, 0, len(c.cache))
assert.Equal(t, false, c.Delete("/"))
assert.Equal(t, 0, len(c.cache))
}
func TestDeletePrefix(t *testing.T) {
create := func(path string) (any, bool, error) {
return path, true, nil
}
c := New()
_, err := c.Get("remote:path", create)
require.NoError(t, err)
_, err = c.Get("remote:path2", create)
require.NoError(t, err)
_, err = c.Get("remote:", create)
require.NoError(t, err)
_, err = c.Get("remote", create)
require.NoError(t, err)
assert.Equal(t, 4, len(c.cache))
assert.Equal(t, 3, c.DeletePrefix("remote:"))
assert.Equal(t, 1, len(c.cache))
assert.Equal(t, 1, c.DeletePrefix(""))
assert.Equal(t, 0, len(c.cache))
assert.Equal(t, 0, c.DeletePrefix(""))
assert.Equal(t, 0, len(c.cache))
}
func TestCacheRename(t *testing.T) {
c := New()
create := func(path string) (any, bool, error) {
return path, true, nil
}
existing1, err := c.Get("existing1", create)
require.NoError(t, err)
_, err = c.Get("existing2", create)
require.NoError(t, err)
assert.Equal(t, 2, c.Entries())
// rename to nonexistent
value, found := c.Rename("existing1", "EXISTING1")
assert.Equal(t, true, found)
assert.Equal(t, existing1, value)
assert.Equal(t, 2, c.Entries())
// rename to existent and check existing value is returned
value, found = c.Rename("existing2", "EXISTING1")
assert.Equal(t, true, found)
assert.Equal(t, existing1, value)
assert.Equal(t, 1, c.Entries())
// rename nonexistent
value, found = c.Rename("notfound", "NOTFOUND")
assert.Equal(t, false, found)
assert.Nil(t, value)
assert.Equal(t, 1, c.Entries())
}
func TestCacheFinalize(t *testing.T) {
c := New()
numCalled := 0
c.SetFinalizer(func(v any) {
numCalled++
})
create := func(path string) (any, bool, error) {
return path, true, nil
}
_, _ = c.Get("ok", create)
assert.Equal(t, 0, numCalled)
c.Clear()
assert.Equal(t, 1, numCalled)
_, _ = c.Get("ok", create)
c.Delete("ok")
assert.Equal(t, 2, numCalled)
_, _ = c.Get("ok", create)
c.DeletePrefix("ok")
assert.Equal(t, 3, numCalled)
_, _ = c.Get("old", create)
_, _ = c.Get("new", create)
c.Rename("old", "new")
assert.Equal(t, 4, numCalled)
c.expireDuration = 1 * time.Millisecond
_, _ = c.Get("ok", create)
time.Sleep(2 * time.Millisecond)
c.cacheExpire() // "ok" and "new" fall out of cache
assert.Equal(t, 6, numCalled)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/pool/pool_test.go | lib/pool/pool_test.go | package pool
import (
"context"
"errors"
"fmt"
"math/rand"
"sync"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/testy"
"github.com/stretchr/testify/assert"
)
// makeUnreliable makes the allocations unreliable
func makeUnreliable(bp *Pool) {
var allocCount int
tests := rand.Intn(4) + 1
bp.alloc = func(size int) ([]byte, error) {
allocCount++
if allocCount%tests != 0 {
return nil, errors.New("failed to allocate memory")
}
return make([]byte, size), nil
}
var freeCount int
bp.free = func(b []byte) error {
freeCount++
if freeCount%tests != 0 {
return errors.New("failed to free memory")
}
return nil
}
}
func testGetPut(t *testing.T, useMmap bool, unreliable bool) {
bp := New(60*time.Second, 4096, 2, useMmap)
if unreliable {
makeUnreliable(bp)
}
assert.Equal(t, 0, bp.InUse())
b1 := bp.Get()
assert.Equal(t, 1, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 1, bp.Alloced())
b2 := bp.Get()
assert.Equal(t, 2, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
b3 := bp.Get()
assert.Equal(t, 3, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 3, bp.Alloced())
bs := bp.GetN(3)
assert.Equal(t, 6, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 6, bp.Alloced())
bp.Put(b1)
assert.Equal(t, 5, bp.InUse())
assert.Equal(t, 1, bp.InPool())
assert.Equal(t, 6, bp.Alloced())
bp.Put(b2)
assert.Equal(t, 4, bp.InUse())
assert.Equal(t, 2, bp.InPool())
assert.Equal(t, 6, bp.Alloced())
bp.Put(b3)
assert.Equal(t, 3, bp.InUse())
assert.Equal(t, 2, bp.InPool())
assert.Equal(t, 5, bp.Alloced())
bp.PutN(bs)
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 2, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
addr := func(b []byte) string {
return fmt.Sprintf("%p", &b[0])
}
b1a := bp.Get()
assert.Equal(t, addr(b2), addr(b1a))
assert.Equal(t, 1, bp.InUse())
assert.Equal(t, 1, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
b2a := bp.Get()
assert.Equal(t, addr(b1), addr(b2a))
assert.Equal(t, 2, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
bp.Put(b1a)
bp.Put(b2a)
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 2, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
bsa := bp.GetN(3)
assert.Equal(t, addr(b1), addr(bsa[1]))
assert.Equal(t, addr(b2), addr(bsa[0]))
assert.Equal(t, 3, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 3, bp.Alloced())
bp.PutN(bsa)
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 2, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
assert.Panics(t, func() {
bp.Put(make([]byte, 1))
})
bp.Flush()
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 0, bp.Alloced())
}
func testFlusher(t *testing.T, useMmap bool, unreliable bool) {
bp := New(50*time.Millisecond, 4096, 2, useMmap)
if unreliable {
makeUnreliable(bp)
}
b1 := bp.Get()
b2 := bp.Get()
b3 := bp.Get()
bp.Put(b1)
bp.Put(b2)
bp.Put(b3)
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 2, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
bp.mu.Lock()
assert.Equal(t, 0, bp.minFill)
assert.Equal(t, true, bp.flushPending)
bp.mu.Unlock()
checkFlushHasHappened := func(desired int) {
var n int
for range 10 {
time.Sleep(100 * time.Millisecond)
n = bp.InPool()
if n <= desired {
break
}
}
assert.Equal(t, desired, n)
}
checkFlushHasHappened(0)
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 0, bp.Alloced())
bp.mu.Lock()
assert.Equal(t, 0, bp.minFill)
assert.Equal(t, false, bp.flushPending)
bp.mu.Unlock()
// Now do manual aging to check it is working properly
bp = New(100*time.Second, 4096, 2, useMmap)
// Check the new one doesn't get flushed
b1 = bp.Get()
b2 = bp.Get()
bp.Put(b1)
bp.Put(b2)
bp.mu.Lock()
assert.Equal(t, 0, bp.minFill)
assert.Equal(t, true, bp.flushPending)
bp.mu.Unlock()
bp.flushAged()
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 2, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
bp.mu.Lock()
assert.Equal(t, 2, bp.minFill)
assert.Equal(t, true, bp.flushPending)
bp.mu.Unlock()
bp.Put(bp.Get())
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 2, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
bp.mu.Lock()
assert.Equal(t, 1, bp.minFill)
assert.Equal(t, true, bp.flushPending)
bp.mu.Unlock()
bp.flushAged()
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 1, bp.InPool())
assert.Equal(t, 1, bp.Alloced())
bp.mu.Lock()
assert.Equal(t, 1, bp.minFill)
assert.Equal(t, true, bp.flushPending)
bp.mu.Unlock()
bp.flushAged()
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 0, bp.Alloced())
bp.mu.Lock()
assert.Equal(t, 0, bp.minFill)
assert.Equal(t, false, bp.flushPending)
bp.mu.Unlock()
}
func TestPool(t *testing.T) {
for _, test := range []struct {
name string
useMmap bool
unreliable bool
}{
{
name: "make",
useMmap: false,
unreliable: false,
},
{
name: "mmap",
useMmap: true,
unreliable: false,
},
{
name: "canFail",
useMmap: false,
unreliable: true,
},
} {
t.Run(test.name, func(t *testing.T) {
t.Run("GetPut", func(t *testing.T) { testGetPut(t, test.useMmap, test.unreliable) })
t.Run("Flusher", func(t *testing.T) {
if test.name == "canFail" {
testy.SkipUnreliable(t) // fails regularly on macOS
}
testFlusher(t, test.useMmap, test.unreliable)
})
})
}
}
func TestPoolMaxBufferMemory(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
ci.MaxBufferMemory = 4 * 4096
defer func() {
ci.MaxBufferMemory = 0
totalMemory = nil
}()
totalMemoryInit = sync.Once{} // reset the sync.Once as it likely has been used
totalMemory = nil
bp := New(60*time.Second, 4096, 2, true)
assert.NotNil(t, totalMemory)
assert.Equal(t, bp.alloced, 0)
buf := bp.Get()
bp.Put(buf)
assert.Equal(t, bp.alloced, 1)
var (
wg sync.WaitGroup
mu sync.Mutex
bufs int
maxBufs int
countBuf = func(i int) {
mu.Lock()
defer mu.Unlock()
bufs += i
if bufs > maxBufs {
maxBufs = bufs
}
}
)
const trials = 50
for i := range trials {
wg.Add(1)
go func() {
defer wg.Done()
if i < trials/2 {
n := i%4 + 1
buf := bp.GetN(n)
countBuf(n)
time.Sleep(1 * time.Millisecond)
countBuf(-n)
bp.PutN(buf)
} else {
buf := bp.Get()
countBuf(1)
time.Sleep(1 * time.Millisecond)
countBuf(-1)
bp.Put(buf)
}
}()
}
wg.Wait()
assert.Equal(t, bufs, 0)
assert.Equal(t, maxBufs, 4)
assert.Equal(t, bp.alloced, 2)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/pool/pool.go | lib/pool/pool.go | // Package pool implements a memory pool similar in concept to
// sync.Pool but with more determinism.
package pool
import (
"context"
"fmt"
"slices"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/mmap"
"golang.org/x/sync/semaphore"
)
const (
// BufferSize is the page size of the Global() pool
BufferSize = 1024 * 1024
// BufferCacheSize is the max number of buffers to keep in the cache for the Global() pool
BufferCacheSize = 64
// BufferCacheFlushTime is the max time to keep buffers in the Global() pool
BufferCacheFlushTime = 5 * time.Second
)
// Pool of internal buffers
//
// We hold buffers in cache. Every time we Get or Put we update
// minFill which is the minimum len(cache) seen.
//
// Every flushTime we remove minFill buffers from the cache as they
// were not used in the previous flushTime interval.
type Pool struct {
mu sync.Mutex
cache [][]byte
minFill int // the minimum fill of the cache
bufferSize int
poolSize int
timer *time.Timer
inUse int
alloced int
flushTime time.Duration
flushPending bool
alloc func(int) ([]byte, error)
free func([]byte) error
}
// totalMemory is a semaphore used to control total buffer usage of
// all Pools. It may be nil, in which case the total buffer usage
// will not be controlled. It counts memory in active use, it does not
// count memory cached in the pool.
var totalMemory *semaphore.Weighted
// Make sure we initialise the totalMemory semaphore once
var totalMemoryInit sync.Once
// New makes a buffer pool
//
// flushTime is the interval at which the buffer pool is flushed
// bufferSize is the size of the allocations
// poolSize is the maximum number of free buffers in the pool
// useMmap should be set to use mmap allocations
func New(flushTime time.Duration, bufferSize, poolSize int, useMmap bool) *Pool {
bp := &Pool{
cache: make([][]byte, 0, poolSize),
poolSize: poolSize,
flushTime: flushTime,
bufferSize: bufferSize,
}
if useMmap {
bp.alloc = mmap.Alloc
bp.free = mmap.Free
} else {
bp.alloc = func(size int) ([]byte, error) {
return make([]byte, size), nil
}
bp.free = func([]byte) error {
return nil
}
}
// Initialise total memory limit if required
totalMemoryInit.Do(func() {
ci := fs.GetConfig(context.Background())
// Set max buffer memory limiter
if ci.MaxBufferMemory > 0 {
totalMemory = semaphore.NewWeighted(int64(ci.MaxBufferMemory))
}
})
bp.timer = time.AfterFunc(flushTime, bp.flushAged)
return bp
}
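// Illustrative usage sketch (added for clarity, not part of the original
// file): a small pool of 4 KiB buffers flushed after a minute of disuse.
// The sizes below are hypothetical.
//
//	bp := New(60*time.Second, 4096, 16, false)
//	buf := bp.Get()    // one bufferSize-byte buffer
//	bufs := bp.GetN(3) // three buffers acquired atomically
//	// ... use the buffers ...
//	bp.Put(buf)        // return buffers when done so they can be reused
//	bp.PutN(bufs)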
// get gets the last buffer in bp.cache
//
// Call with mu held
func (bp *Pool) get() []byte {
n := len(bp.cache) - 1
buf := bp.cache[n]
bp.cache[n] = nil // clear buffer pointer from bp.cache
bp.cache = bp.cache[:n]
return buf
}
// getN gets the last n buffers in bp.cache
//
// will panic if you ask for too many buffers
//
// Call with mu held
func (bp *Pool) getN(n int) [][]byte {
i := len(bp.cache) - n
bufs := slices.Clone(bp.cache[i:])
bp.cache = slices.Delete(bp.cache, i, len(bp.cache))
return bufs
}
// put puts the buffer on the end of bp.cache
//
// Call with mu held
func (bp *Pool) put(buf []byte) {
bp.cache = append(bp.cache, buf)
}
// putN puts the bufs on the end of bp.cache
//
// Call with mu held
func (bp *Pool) putN(bufs [][]byte) {
bp.cache = append(bp.cache, bufs...)
}
// buffers returns the number of buffers in bp.cache
//
// Call with mu held
func (bp *Pool) buffers() int {
return len(bp.cache)
}
// flush n entries from the entire buffer pool
// Call with mu held
func (bp *Pool) flush(n int) {
for range n {
bp.freeBuffer(bp.get())
}
bp.minFill = len(bp.cache)
}
// Flush the entire buffer pool
func (bp *Pool) Flush() {
bp.mu.Lock()
bp.flush(len(bp.cache))
bp.mu.Unlock()
}
// Remove bp.minFill buffers
func (bp *Pool) flushAged() {
bp.mu.Lock()
bp.flushPending = false
bp.flush(bp.minFill)
// If there are still items in the cache, schedule another flush
if len(bp.cache) != 0 {
bp.kickFlusher()
}
bp.mu.Unlock()
}
// InUse returns the number of buffers in use which haven't been
// returned to the pool
func (bp *Pool) InUse() int {
bp.mu.Lock()
defer bp.mu.Unlock()
return bp.inUse
}
// InPool returns the number of buffers in the pool
func (bp *Pool) InPool() int {
bp.mu.Lock()
defer bp.mu.Unlock()
return len(bp.cache)
}
// Alloced returns the number of buffers allocated and not yet freed
func (bp *Pool) Alloced() int {
bp.mu.Lock()
defer bp.mu.Unlock()
return bp.alloced
}
// starts or resets the buffer flusher timer - call with mu held
func (bp *Pool) kickFlusher() {
if bp.flushPending {
return
}
bp.flushPending = true
bp.timer.Reset(bp.flushTime)
}
// Make sure minFill is correct - call with mu held
func (bp *Pool) updateMinFill() {
if len(bp.cache) < bp.minFill {
bp.minFill = len(bp.cache)
}
}
// acquire mem bytes of memory for the user
func (bp *Pool) acquire(mem int64) error {
if totalMemory == nil {
return nil
}
ctx := context.Background()
return totalMemory.Acquire(ctx, mem)
}
// release mem bytes of memory from the user
func (bp *Pool) release(mem int64) {
if totalMemory == nil {
return
}
totalMemory.Release(mem)
}
// Get a buffer from the pool or allocate one
func (bp *Pool) Get() []byte {
return bp.GetN(1)[0]
}
// GetN gets n buffers atomically from the pool or allocates them
func (bp *Pool) GetN(n int) [][]byte {
bp.mu.Lock()
var (
waitTime = time.Millisecond // retry time if allocation failed
err error // allocation error
buf []byte // allocated buffer
bufs [][]byte // bufs so far
have int // have this many buffers in bp.cache
want int // want this many extra buffers
acquired bool // whether we have acquired the memory or not
)
for {
acquired = false
bp.mu.Unlock()
err = bp.acquire(int64(bp.bufferSize) * int64(n))
bp.mu.Lock()
if err != nil {
goto FAIL
}
acquired = true
have = min(bp.buffers(), n)
want = n - have
bufs = bp.getN(have) // get as many buffers as we have from the cache
for range want {
buf, err = bp.alloc(bp.bufferSize)
if err != nil {
goto FAIL
}
bp.alloced++
bufs = append(bufs, buf)
}
break
FAIL:
// Release the buffers and the allocation if it succeeded
bp.putN(bufs)
if acquired {
bp.release(int64(bp.bufferSize) * int64(n))
}
fs.Logf(nil, "Failed to get memory for buffer, waiting for %v: %v", waitTime, err)
bp.mu.Unlock()
time.Sleep(waitTime)
bp.mu.Lock()
waitTime *= 2
clear(bufs)
bufs = nil
}
bp.inUse += n
bp.updateMinFill()
bp.mu.Unlock()
return bufs
}
// freeBuffer returns mem to the os if required - call with lock held
func (bp *Pool) freeBuffer(mem []byte) {
err := bp.free(mem)
if err != nil {
fs.Logf(nil, "Failed to free memory: %v", err)
}
bp.alloced--
}
// _put returns the buffer to the buffer cache or frees it
//
// call with lock held
//
// Note that if you try to return a buffer of the wrong size it will
// panic.
func (bp *Pool) _put(buf []byte) {
buf = buf[0:cap(buf)]
if len(buf) != bp.bufferSize {
panic(fmt.Sprintf("Returning buffer sized %d but expecting %d", len(buf), bp.bufferSize))
}
if len(bp.cache) < bp.poolSize {
bp.put(buf)
} else {
bp.freeBuffer(buf)
}
bp.release(int64(bp.bufferSize))
}
// Put returns the buffer to the buffer cache or frees it
//
// Note that if you try to return a buffer of the wrong size to Put it
// will panic.
func (bp *Pool) Put(buf []byte) {
bp.mu.Lock()
defer bp.mu.Unlock()
bp._put(buf)
bp.inUse--
bp.updateMinFill()
bp.kickFlusher()
}
// PutN returns the buffers to the buffer cache or frees them.
//
// Note that if you try to return a buffer of the wrong size to PutN it
// will panic.
func (bp *Pool) PutN(bufs [][]byte) {
bp.mu.Lock()
defer bp.mu.Unlock()
for _, buf := range bufs {
bp._put(buf)
}
bp.inUse -= len(bufs)
bp.updateMinFill()
bp.kickFlusher()
}
// bufferPool is a global pool of buffers
var bufferPool *Pool
var bufferPoolOnce sync.Once
// Global gets a global pool of BufferSize, BufferCacheSize, BufferCacheFlushTime.
func Global() *Pool {
bufferPoolOnce.Do(func() {
// Initialise the buffer pool when used
ci := fs.GetConfig(context.Background())
bufferPool = New(BufferCacheFlushTime, BufferSize, BufferCacheSize, ci.UseMmap)
})
return bufferPool
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/pool/reader_writer.go | lib/pool/reader_writer.go | package pool
import (
"context"
"errors"
"io"
"sync"
"time"
)
// RWAccount is a function which will be called after every read
// from the RW.
//
// It may return an error which will be passed back to the user.
type RWAccount func(n int) error
// RW contains the state for the read/writer
//
// It can be used as a FIFO to read data from a source and write it out again.
type RW struct {
// Written once variables in initialization
pool *Pool // pool to get pages from
account RWAccount // account for a read
accountOn int // only account on or after this read
// Shared variables between Read and Write
// Write updates these but Read reads from them
// They must all stay in sync together
mu sync.Mutex // protect the shared variables
pages [][]byte // backing store
size int // size written
lastOffset int // size in last page
written chan struct{} // signalled when a write happens
// Read side Variables
out int // offset we are reading from
reads int // count how many times the data has been read
reserved [][]byte // reserved buffers
}
var (
errInvalidWhence = errors.New("pool.RW Seek: invalid whence")
errNegativeSeek = errors.New("pool.RW Seek: negative position")
errSeekPastEnd = errors.New("pool.RW Seek: attempt to seek past end of data")
)
// NewRW returns a reader / writer which is backed from pages from the
// pool passed in.
//
// Data can be stored in it by calling Write and read from it by
// calling Read.
//
// When writing it only appends data. Seek only applies to reading.
func NewRW(pool *Pool) *RW {
rw := &RW{
pool: pool,
pages: make([][]byte, 0, 16),
written: make(chan struct{}, 1),
}
return rw
}
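// Illustrative usage sketch (added for clarity, not part of the original
// file): using RW as a FIFO - fill it from a reader, then read the same
// data back out, possibly more than once by seeking back to the start.
// The src reader and the hashIt/uploadIt helpers below are hypothetical.
//
//	rw := NewRW(Global())
//	defer rw.Close()
//	if _, err := rw.ReadFrom(src); err != nil { // buffer everything from src
//		return err
//	}
//	hashIt(rw)                      // first pass over the data
//	_, _ = rw.Seek(0, io.SeekStart) // rewind - Seek only affects reading
//	uploadIt(rw)                    // second pass over the same data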
// Reserve bytes of memory.
//
// This allocates n bytes of memory for later use.
//
// This is rounded up to the nearest buffer page size.
//
// Only safe to call once.
func (rw *RW) Reserve(n int64) *RW {
rw.mu.Lock()
defer rw.mu.Unlock()
buffers := int((n + int64(rw.pool.bufferSize) - 1) / int64(rw.pool.bufferSize))
rw.reserved = rw.pool.GetN(buffers)
return rw
}
// SetAccounting should be provided with a function which will be
// called after every read from the RW.
//
// It may return an error which will be passed back to the user.
//
// Not thread safe - call in initialization only.
func (rw *RW) SetAccounting(account RWAccount) *RW {
rw.account = account
return rw
}
// DelayAccountinger enables an accounting delay
type DelayAccountinger interface {
// DelayAccounting makes sure the accounting function only
// gets called on the i-th or later read of the data from this
// point (counting from 1).
//
// This is useful so that we don't account initial reads of
// the data e.g. when calculating hashes.
//
// Set this to 0 to account everything.
DelayAccounting(i int)
}
// DelayAccounting makes sure the accounting function only gets called
// on the i-th or later read of the data from this point (counting
// from 1).
//
// This is useful so that we don't account initial reads of the data
// e.g. when calculating hashes.
//
// Set this to 0 to account everything.
//
// Not thread safe - call in initialization only.
func (rw *RW) DelayAccounting(i int) {
rw.accountOn = i
rw.reads = 0
}
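// Illustrative usage sketch (added for clarity, not part of the original
// file): skip accounting for the first read-through (e.g. a local hash
// pass) and only account from the second read onwards. The account
// function and the transferred counter below are hypothetical.
//
//	rw.SetAccounting(func(n int) error {
//		transferred += int64(n) // count bytes actually sent
//		return nil
//	})
//	rw.DelayAccounting(2) // reads are accounted from the 2nd pass onwards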
// Returns the page containing offset i, sliced to start at i, for reading.
//
// Ensure there are pages before calling this.
func (rw *RW) readPage(i int) (page []byte) {
rw.mu.Lock()
defer rw.mu.Unlock()
// Count a read of the data if we read the first page
if i == 0 {
rw.reads++
}
pageNumber := i / rw.pool.bufferSize
offset := i % rw.pool.bufferSize
page = rw.pages[pageNumber]
// Clip the last page to the amount written
if pageNumber == len(rw.pages)-1 {
page = page[:rw.lastOffset]
}
return page[offset:]
}
// account for n bytes being read
func (rw *RW) accountRead(n int) error {
if rw.account == nil {
return nil
}
// Don't start accounting until we've reached this many reads
//
// rw.reads will be 1 the first time this is called
// rw.accountOn 2 means start accounting on the 2nd read through
if rw.reads >= rw.accountOn {
return rw.account(n)
}
return nil
}
// Returns true if we have read to EOF
func (rw *RW) eof() bool {
rw.mu.Lock()
defer rw.mu.Unlock()
return rw.out >= rw.size
}
// Read reads up to len(p) bytes into p. It returns the number of
// bytes read (0 <= n <= len(p)) and any error encountered. If some
// data is available but not len(p) bytes, Read returns what is
// available instead of waiting for more.
func (rw *RW) Read(p []byte) (n int, err error) {
var (
nn int
page []byte
)
for len(p) > 0 {
if rw.eof() {
return n, io.EOF
}
page = rw.readPage(rw.out)
nn = copy(p, page)
p = p[nn:]
n += nn
rw.out += nn
err = rw.accountRead(nn)
if err != nil {
return n, err
}
}
return n, nil
}
// WriteTo writes data to w until there's no more data to write or
// when an error occurs. The return value n is the number of bytes
// written. Any error encountered during the write is also returned.
//
// The Copy function uses WriteTo if available. This avoids an
// allocation and a copy.
func (rw *RW) WriteTo(w io.Writer) (n int64, err error) {
var (
nn int
page []byte
)
for !rw.eof() {
page = rw.readPage(rw.out)
nn, err = w.Write(page)
n += int64(nn)
rw.out += nn
if err != nil {
return n, err
}
err = rw.accountRead(nn)
if err != nil {
return n, err
}
}
return n, nil
}
// Get the page we are writing to
func (rw *RW) writePage() (page []byte) {
rw.mu.Lock()
defer rw.mu.Unlock()
if len(rw.pages) > 0 && rw.lastOffset < rw.pool.bufferSize {
return rw.pages[len(rw.pages)-1][rw.lastOffset:]
}
if len(rw.reserved) > 0 {
// Get reserved pages if available
i := len(rw.reserved) - 1
page = rw.reserved[i]
rw.reserved[i] = nil
rw.reserved = rw.reserved[:i]
} else {
page = rw.pool.Get()
}
rw.pages = append(rw.pages, page)
rw.lastOffset = 0
return page
}
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written, which is always len(p). It cannot return an error.
func (rw *RW) Write(p []byte) (n int, err error) {
var (
nn int
page []byte
)
for len(p) > 0 {
page = rw.writePage()
nn = copy(page, p)
p = p[nn:]
n += nn
rw.mu.Lock()
rw.size += nn
rw.lastOffset += nn
rw.mu.Unlock()
rw.signalWrite() // signal more data available
}
return n, nil
}
// ReadFrom reads data from r until EOF or error. The return value n is the
// number of bytes read. Any error except EOF encountered during the read is
// also returned.
//
// The Copy function uses ReadFrom if available. This avoids an
// allocation and a copy.
func (rw *RW) ReadFrom(r io.Reader) (n int64, err error) {
var (
nn int
page []byte
)
for err == nil {
page = rw.writePage()
nn, err = r.Read(page)
n += int64(nn)
rw.mu.Lock()
rw.size += nn
rw.lastOffset += nn
rw.mu.Unlock()
rw.signalWrite() // signal more data available
}
if err == io.EOF {
err = nil
}
return n, err
}
// signal that a write has happened
func (rw *RW) signalWrite() {
select {
case rw.written <- struct{}{}:
default:
}
}
// WaitWrite sleeps until data is written to the RW, Close is called,
// the context is cancelled, or a maximum of 1 second has passed, then
// returns.
//
// This can be used when calling Read while the buffer is filling up.
func (rw *RW) WaitWrite(ctx context.Context) {
timer := time.NewTimer(time.Second)
select {
case <-timer.C:
case <-ctx.Done():
case <-rw.written:
}
timer.Stop()
}
// Seek sets the offset for the next Read (not Write - this is always
// appended) to offset, interpreted according to whence: SeekStart
// means relative to the start of the file, SeekCurrent means relative
// to the current offset, and SeekEnd means relative to the end (for
// example, offset = -2 specifies the penultimate byte of the file).
// Seek returns the new offset relative to the start of the file or an
// error, if any.
//
// Seeking to an offset before the start of the file is an error. Seeking
// beyond the end of the written data is an error.
func (rw *RW) Seek(offset int64, whence int) (int64, error) {
var abs int64
rw.mu.Lock()
size := int64(rw.size)
rw.mu.Unlock()
switch whence {
case io.SeekStart:
abs = offset
case io.SeekCurrent:
abs = int64(rw.out) + offset
case io.SeekEnd:
abs = size + offset
default:
return 0, errInvalidWhence
}
if abs < 0 {
return 0, errNegativeSeek
}
if abs > size {
return offset - (abs - size), errSeekPastEnd
}
rw.out = int(abs)
return abs, nil
}
// Close the buffer returning memory to the pool
func (rw *RW) Close() error {
rw.mu.Lock()
defer rw.mu.Unlock()
rw.signalWrite() // signal more data available
rw.pool.PutN(rw.pages)
clear(rw.pages)
rw.pages = nil
rw.pool.PutN(rw.reserved)
clear(rw.reserved)
rw.reserved = nil
return nil
}
// Size returns the number of bytes in the buffer
func (rw *RW) Size() int64 {
rw.mu.Lock()
defer rw.mu.Unlock()
return int64(rw.size)
}
// Check interfaces
var (
_ io.Reader = (*RW)(nil)
_ io.ReaderFrom = (*RW)(nil)
_ io.Writer = (*RW)(nil)
_ io.WriterTo = (*RW)(nil)
_ io.Seeker = (*RW)(nil)
_ io.Closer = (*RW)(nil)
_ DelayAccountinger = (*RW)(nil)
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/pool/reader_writer_test.go | lib/pool/reader_writer_test.go | package pool
import (
"bytes"
"context"
"errors"
"io"
"sync"
"testing"
"time"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const blockSize = 4096
var rwPool = New(60*time.Second, blockSize, 2, false)
// A writer that always returns an error
type testWriterError struct{}
var errWriteError = errors.New("write error")
func (testWriterError) Write(p []byte) (n int, err error) {
return 0, errWriteError
}
func TestRW(t *testing.T) {
var dst []byte
var pos int64
var err error
var n int
testData := []byte("Goodness!!") // 10 bytes long
newRW := func() *RW {
rw := NewRW(rwPool)
buf := bytes.NewBuffer(testData)
nn, err := rw.ReadFrom(buf) // fill up with goodness
assert.NoError(t, err)
assert.Equal(t, int64(10), nn)
assert.Equal(t, int64(10), rw.Size())
return rw
}
close := func(rw *RW) {
assert.NoError(t, rw.Close())
}
t.Run("Empty", func(t *testing.T) {
// Test empty read
rw := NewRW(rwPool)
defer close(rw)
assert.Equal(t, int64(0), rw.Size())
dst = make([]byte, 10)
n, err = rw.Read(dst)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 0, n)
assert.Equal(t, int64(0), rw.Size())
})
t.Run("Full", func(t *testing.T) {
rw := newRW()
defer close(rw)
// Test full read
dst = make([]byte, 100)
n, err = rw.Read(dst)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 10, n)
assert.Equal(t, testData, dst[0:10])
// Test read EOF
n, err = rw.Read(dst)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 0, n)
// Test Seek Back to start
dst = make([]byte, 10)
pos, err = rw.Seek(0, io.SeekStart)
assert.Nil(t, err)
assert.Equal(t, 0, int(pos))
// Now full read
n, err = rw.Read(dst)
assert.Nil(t, err)
assert.Equal(t, 10, n)
assert.Equal(t, testData, dst)
})
t.Run("WriteTo", func(t *testing.T) {
rw := newRW()
defer close(rw)
var b bytes.Buffer
n, err := rw.WriteTo(&b)
assert.NoError(t, err)
assert.Equal(t, int64(10), n)
assert.Equal(t, testData, b.Bytes())
})
t.Run("WriteToError", func(t *testing.T) {
rw := newRW()
defer close(rw)
w := testWriterError{}
n, err := rw.WriteTo(w)
assert.Equal(t, errWriteError, err)
assert.Equal(t, int64(0), n)
})
t.Run("Partial", func(t *testing.T) {
// Test partial read
rw := newRW()
defer close(rw)
dst = make([]byte, 5)
n, err = rw.Read(dst)
assert.Nil(t, err)
assert.Equal(t, 5, n)
assert.Equal(t, testData[0:5], dst)
n, err = rw.Read(dst)
assert.Nil(t, err)
assert.Equal(t, 5, n)
assert.Equal(t, testData[5:], dst)
})
t.Run("Seek", func(t *testing.T) {
// Test Seek
rw := newRW()
defer close(rw)
// Seek to end
pos, err = rw.Seek(10, io.SeekStart)
assert.NoError(t, err)
assert.Equal(t, int64(10), pos)
// Seek to start
pos, err = rw.Seek(0, io.SeekStart)
assert.NoError(t, err)
assert.Equal(t, int64(0), pos)
// Should not allow seek past cache index
pos, err = rw.Seek(11, io.SeekCurrent)
assert.Equal(t, errSeekPastEnd, err)
assert.Equal(t, 10, int(pos))
// Should not allow seek to negative position start
pos, err = rw.Seek(-1, io.SeekCurrent)
assert.Equal(t, errNegativeSeek, err)
assert.Equal(t, 0, int(pos))
// Should not allow seek with invalid whence
pos, err = rw.Seek(0, 3)
assert.Equal(t, errInvalidWhence, err)
assert.Equal(t, 0, int(pos))
// Should seek from index with io.SeekCurrent(1) whence
dst = make([]byte, 5)
_, _ = rw.Read(dst)
pos, err = rw.Seek(-3, io.SeekCurrent)
assert.Nil(t, err)
assert.Equal(t, 2, int(pos))
pos, err = rw.Seek(1, io.SeekCurrent)
assert.Nil(t, err)
assert.Equal(t, 3, int(pos))
// Should seek from cache end with io.SeekEnd(2) whence
pos, err = rw.Seek(-3, io.SeekEnd)
assert.Nil(t, err)
assert.Equal(t, 7, int(pos))
// Should read from seek position and past it
dst = make([]byte, 3)
n, err = io.ReadFull(rw, dst)
assert.Nil(t, err)
assert.Equal(t, 3, n)
assert.Equal(t, testData[7:10], dst)
})
t.Run("Account", func(t *testing.T) {
errBoom := errors.New("accounting error")
t.Run("Read", func(t *testing.T) {
rw := newRW()
defer close(rw)
var total int
rw.SetAccounting(func(n int) error {
total += n
return nil
})
dst = make([]byte, 3)
n, err = rw.Read(dst)
assert.Equal(t, 3, n)
assert.NoError(t, err)
assert.Equal(t, 3, total)
})
t.Run("WriteTo", func(t *testing.T) {
rw := newRW()
defer close(rw)
var b bytes.Buffer
var total int
rw.SetAccounting(func(n int) error {
total += n
return nil
})
n, err := rw.WriteTo(&b)
assert.NoError(t, err)
assert.Equal(t, 10, total)
assert.Equal(t, int64(10), n)
assert.Equal(t, testData, b.Bytes())
})
t.Run("ReadDelay", func(t *testing.T) {
rw := newRW()
defer close(rw)
var total int
rw.SetAccounting(func(n int) error {
total += n
return nil
})
rewind := func() {
_, err := rw.Seek(0, io.SeekStart)
require.NoError(t, err)
}
rw.DelayAccounting(3)
dst = make([]byte, 16)
n, err = rw.Read(dst)
assert.Equal(t, 10, n)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 0, total)
rewind()
n, err = rw.Read(dst)
assert.Equal(t, 10, n)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 0, total)
rewind()
n, err = rw.Read(dst)
assert.Equal(t, 10, n)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 10, total)
rewind()
n, err = rw.Read(dst)
assert.Equal(t, 10, n)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 20, total)
rewind()
})
t.Run("WriteToDelay", func(t *testing.T) {
rw := newRW()
defer close(rw)
var b bytes.Buffer
var total int
rw.SetAccounting(func(n int) error {
total += n
return nil
})
rw.DelayAccounting(3)
rewind := func() {
_, err := rw.Seek(0, io.SeekStart)
require.NoError(t, err)
b.Reset()
}
n, err := rw.WriteTo(&b)
assert.NoError(t, err)
assert.Equal(t, 0, total)
assert.Equal(t, int64(10), n)
assert.Equal(t, testData, b.Bytes())
rewind()
n, err = rw.WriteTo(&b)
assert.NoError(t, err)
assert.Equal(t, 0, total)
assert.Equal(t, int64(10), n)
assert.Equal(t, testData, b.Bytes())
rewind()
n, err = rw.WriteTo(&b)
assert.NoError(t, err)
assert.Equal(t, 10, total)
assert.Equal(t, int64(10), n)
assert.Equal(t, testData, b.Bytes())
rewind()
n, err = rw.WriteTo(&b)
assert.NoError(t, err)
assert.Equal(t, 20, total)
assert.Equal(t, int64(10), n)
assert.Equal(t, testData, b.Bytes())
rewind()
})
t.Run("ReadError", func(t *testing.T) {
// Test accounting errors
rw := newRW()
defer close(rw)
rw.SetAccounting(func(n int) error {
return errBoom
})
dst = make([]byte, 3)
n, err = rw.Read(dst)
assert.Equal(t, 3, n)
assert.Equal(t, errBoom, err)
})
t.Run("WriteToError", func(t *testing.T) {
rw := newRW()
defer close(rw)
rw.SetAccounting(func(n int) error {
return errBoom
})
var b bytes.Buffer
n, err := rw.WriteTo(&b)
assert.Equal(t, errBoom, err)
assert.Equal(t, int64(10), n)
assert.Equal(t, testData, b.Bytes())
})
})
}
// A reader to read in chunkSize chunks
type testReader struct {
data []byte
chunkSize int
}
// Read in chunkSize chunks
func (r *testReader) Read(p []byte) (n int, err error) {
if len(r.data) == 0 {
return 0, io.EOF
}
chunkSize := min(r.chunkSize, len(r.data))
n = copy(p, r.data[:chunkSize])
r.data = r.data[n:]
return n, nil
}
// A writer to write in chunkSize chunks
type testWriter struct {
t *testing.T
data []byte
chunkSize int
buf []byte
offset int
}
// Write in chunkSize chunks
func (w *testWriter) Write(p []byte) (n int, err error) {
if w.buf == nil {
w.buf = make([]byte, w.chunkSize)
}
n = copy(w.buf, p)
assert.Equal(w.t, w.data[w.offset:w.offset+n], w.buf[:n])
w.offset += n
return n, nil
}
func TestRWBoundaryConditions(t *testing.T) {
var accounted int
account := func(n int) error {
accounted += n
return nil
}
maxSize := 3 * blockSize
buf := []byte(random.String(maxSize))
sizes := []int{
1, 2, 3,
blockSize - 2, blockSize - 1, blockSize, blockSize + 1, blockSize + 2,
2*blockSize - 2, 2*blockSize - 1, 2 * blockSize, 2*blockSize + 1, 2*blockSize + 2,
3*blockSize - 2, 3*blockSize - 1, 3 * blockSize,
}
// Write the data in chunkSize chunks
write := func(rw *RW, data []byte, chunkSize int) {
writeData := data
for len(writeData) > 0 {
i := min(chunkSize, len(writeData))
nn, err := rw.Write(writeData[:i])
assert.NoError(t, err)
assert.Equal(t, len(writeData[:i]), nn)
writeData = writeData[nn:]
}
}
// Write the data in chunkSize chunks using ReadFrom
readFrom := func(rw *RW, data []byte, chunkSize int) {
nn, err := rw.ReadFrom(&testReader{
data: data,
chunkSize: chunkSize,
})
assert.NoError(t, err)
assert.Equal(t, int64(len(data)), nn)
}
// Read the data back and check it is OK in chunkSize chunks
read := func(rw *RW, data []byte, chunkSize int) {
size := len(data)
buf := make([]byte, chunkSize)
offset := 0
for {
nn, err := rw.Read(buf)
expectedRead := len(buf)
if offset+chunkSize > size {
expectedRead = size - offset
assert.Equal(t, err, io.EOF)
} else {
assert.NoError(t, err)
}
assert.Equal(t, expectedRead, nn)
assert.Equal(t, data[offset:offset+nn], buf[:nn])
offset += nn
if err == io.EOF {
break
}
}
}
// Read the data back and check it is OK in chunkSize chunks using WriteTo
writeTo := func(rw *RW, data []byte, chunkSize int) {
nn, err := rw.WriteTo(&testWriter{
t: t,
data: data,
chunkSize: chunkSize,
})
assert.NoError(t, err)
assert.Equal(t, int64(len(data)), nn)
}
type test struct {
name string
fn func(*RW, []byte, int)
}
// Read and Write the data with a range of block sizes and functions
for _, write := range []test{{"Write", write}, {"ReadFrom", readFrom}} {
t.Run(write.name, func(t *testing.T) {
for _, read := range []test{{"Read", read}, {"WriteTo", writeTo}} {
t.Run(read.name, func(t *testing.T) {
for _, size := range sizes {
data := buf[:size]
for _, chunkSize := range sizes {
//t.Logf("Testing size=%d chunkSize=%d", useWrite, size, chunkSize)
rw := NewRW(rwPool)
assert.Equal(t, int64(0), rw.Size())
accounted = 0
rw.SetAccounting(account)
assert.Equal(t, 0, accounted)
write.fn(rw, data, chunkSize)
assert.Equal(t, int64(size), rw.Size())
assert.Equal(t, 0, accounted)
read.fn(rw, data, chunkSize)
assert.NoError(t, rw.Close())
assert.Equal(t, size, accounted)
}
}
})
}
})
}
}
// The RW should be thread safe for reading and writing concurrently
func TestRWConcurrency(t *testing.T) {
const bufSize = 1024
// Write data of size using Write
write := func(rw *RW, size int64) {
in := readers.NewPatternReader(size)
buf := make([]byte, bufSize)
nn := int64(0)
for {
nr, inErr := in.Read(buf)
if inErr != nil && inErr != io.EOF {
require.NoError(t, inErr)
}
nw, rwErr := rw.Write(buf[:nr])
require.NoError(t, rwErr)
assert.Equal(t, nr, nw)
nn += int64(nw)
if inErr == io.EOF {
break
}
}
assert.Equal(t, size, nn)
}
// Write the data using ReadFrom
readFrom := func(rw *RW, size int64) {
in := readers.NewPatternReader(size)
nn, err := rw.ReadFrom(in)
assert.NoError(t, err)
assert.Equal(t, size, nn)
}
// Read the data back from in and check it is OK
check := func(in io.Reader, size int64, rw *RW) {
ck := readers.NewPatternReader(size)
ckBuf := make([]byte, bufSize)
rwBuf := make([]byte, bufSize)
nn := int64(0)
for {
nck, ckErr := ck.Read(ckBuf)
if ckErr != io.EOF {
require.NoError(t, ckErr)
}
var nin int
var inErr error
for {
var nnin int
nnin, inErr = in.Read(rwBuf[nin:])
if inErr != io.EOF {
require.NoError(t, inErr)
}
nin += nnin
nn += int64(nnin)
if nin >= len(rwBuf) || nn >= size || inErr != io.EOF {
break
}
rw.WaitWrite(context.Background())
}
require.Equal(t, ckBuf[:nck], rwBuf[:nin])
if ckErr == io.EOF && inErr == io.EOF {
break
}
}
assert.Equal(t, size, nn)
}
// Read the data back and check it is OK
read := func(rw *RW, size int64) {
check(rw, size, rw)
}
// Read the data back and check it is OK using WriteTo
writeTo := func(rw *RW, size int64) {
in, out := io.Pipe()
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
check(in, size, rw)
}()
var n int64
for n < size {
nn, err := rw.WriteTo(out)
assert.NoError(t, err)
n += nn
}
assert.Equal(t, size, n)
require.NoError(t, out.Close())
wg.Wait()
}
type test struct {
name string
fn func(*RW, int64)
}
const size = blockSize*255 + 255
// Read and Write the data with a range of block sizes and functions
for _, write := range []test{{"Write", write}, {"ReadFrom", readFrom}} {
t.Run(write.name, func(t *testing.T) {
for _, read := range []test{{"Read", read}, {"WriteTo", writeTo}} {
t.Run(read.name, func(t *testing.T) {
var wg sync.WaitGroup
wg.Add(2)
rw := NewRW(rwPool)
go func() {
defer wg.Done()
read.fn(rw, size)
}()
go func() {
defer wg.Done()
write.fn(rw, size)
}()
wg.Wait()
})
}
})
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/version/version_test.go | lib/version/version_test.go | package version_test
import (
"testing"
"time"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/version"
"github.com/stretchr/testify/assert"
)
var (
emptyT time.Time
t0 = fstest.Time("1970-01-01T01:01:01.123456789Z")
t0r = fstest.Time("1970-01-01T01:01:01.123000000Z")
t1 = fstest.Time("2001-02-03T04:05:06.123000000Z")
)
func TestVersionAdd(t *testing.T) {
for _, test := range []struct {
t time.Time
in string
expected string
}{
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
{t0, "potato-v2001-02-03-040506-123.txt", "potato-v2001-02-03-040506-123-v1970-01-01-010101-123.txt"},
{t0, "123.!!lipps", "123-v1970-01-01-010101-123.!!lipps"},
{t1, "potato", "potato-v2001-02-03-040506-123"},
{t1, ".potato", ".potato-v2001-02-03-040506-123"},
{t1, ".potato.conf", ".potato-v2001-02-03-040506-123.conf"},
{t1, "", "-v2001-02-03-040506-123"},
} {
actual := version.Add(test.in, test.t)
assert.Equal(t, test.expected, actual, test.in)
}
}
func TestVersionRemove(t *testing.T) {
for _, test := range []struct {
in string
expectedT time.Time
expectedRemote string
}{
{"potato.txt", emptyT, "potato.txt"},
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
{"potato-v2001-02-03-040506-123-v1970-01-01-010101-123.txt", t0r, "potato-v2001-02-03-040506-123.txt"},
{"potato-v2001-02-03-040506-123", t1, "potato"},
{".potato-v2001-02-03-040506-123", t1, ".potato"},
{".potato-v2001-02-03-040506-123.conf", t1, ".potato.conf"},
{"-v2001-02-03-040506-123", t1, ""},
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
} {
actualT, actualRemote := version.Remove(test.in)
assert.Equal(t, test.expectedT, actualT, test.in)
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
}
}
func TestVersionMatch(t *testing.T) {
for _, test := range []struct {
in string
expected bool
}{
{"potato.txt", false},
{"potato", false},
{"", false},
{"potato-v1970-01-01-010101-123.txt", true},
{"potato-v2001-02-03-040506-123-v1970-01-01-010101-123.txt", true},
{"potato-v2001-02-03-040506-123", true},
{"-v2001-02-03-040506-123", true},
{"-v9999-99-99-999999-999", true},
} {
actual := version.Match(test.in)
assert.Equal(t, test.expected, actual, test.in)
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/version/version.go | lib/version/version.go | // Package version provides machinery for versioning file names
// with a timestamp-based version string
package version
import (
"path"
"regexp"
"strings"
"time"
)
const versionFormat = "-v2006-01-02-150405.000"
var versionRegexp = regexp.MustCompile(`-v\d{4}-\d{2}-\d{2}-\d{6}-\d{3}`)
// Split fileName into base and extension so that base + ext == fileName
func splitExt(fileName string) (base, ext string) {
ext = path.Ext(fileName)
base = fileName[:len(fileName)-len(ext)]
// .file splits to base == "", ext == ".file"
// so swap ext and base in this case
if ext != "" && base == "" {
base, ext = ext, base
}
return base, ext
}
// Add returns fileName modified to include t as the version
func Add(fileName string, t time.Time) string {
base, ext := splitExt(fileName)
s := t.Format(versionFormat)
// Replace the '.' with a '-'
s = strings.ReplaceAll(s, ".", "-")
return base + s + ext
}
// Remove returns a modified fileName without the version string and the time it represented
// If the fileName did not have a version then time.Time{} is returned along with an unmodified fileName
func Remove(fileName string) (t time.Time, fileNameWithoutVersion string) {
fileNameWithoutVersion = fileName
base, ext := splitExt(fileName)
if len(base) < len(versionFormat) {
return
}
versionStart := len(base) - len(versionFormat)
// Check it ends in -xxx
if base[len(base)-4] != '-' {
return
}
// Replace with .xxx for parsing
base = base[:len(base)-4] + "." + base[len(base)-3:]
newT, err := time.Parse(versionFormat, base[versionStart:])
if err != nil {
return
}
return newT, base[:versionStart] + ext
}
// Match returns true if the fileName has a version string
func Match(fileName string) bool {
return versionRegexp.MatchString(fileName)
}
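// Illustrative usage sketch (not part of the original file): Add and Remove
// round-trip a file name; note that the version format truncates times to
// millisecond precision.
//
//	t := time.Date(2001, 2, 3, 4, 5, 6, 123000000, time.UTC)
//	v := Add("potato.txt", t) // "potato-v2001-02-03-040506-123.txt"
//	tv, name := Remove(v)     // tv == t, name == "potato.txt"
//	_ = Match(v)              // true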
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/errors/errors_test.go | lib/errors/errors_test.go | package errors
import (
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestWalk(t *testing.T) {
var (
e1 = errors.New("e1")
e2 = errors.New("e2")
e3 = errors.New("e3")
)
for _, test := range []struct {
err error
want []error
}{
{
causerError{nil}, []error{
causerError{nil},
},
}, {
wrapperError{nil}, []error{
wrapperError{nil},
},
}, {
reflectError{nil}, []error{
reflectError{nil},
},
}, {
causerError{e1}, []error{
causerError{e1}, e1,
},
}, {
wrapperError{e1}, []error{
wrapperError{e1}, e1,
},
}, {
reflectError{e1}, []error{
reflectError{e1}, e1,
},
}, {
causerError{reflectError{e1}}, []error{
causerError{reflectError{e1}},
reflectError{e1},
e1,
},
}, {
wrapperError{causerError{e1}}, []error{
wrapperError{causerError{e1}},
causerError{e1},
e1,
},
}, {
reflectError{wrapperError{e1}}, []error{
reflectError{wrapperError{e1}},
wrapperError{e1},
e1,
},
}, {
causerError{reflectError{causerError{e1}}}, []error{
causerError{reflectError{causerError{e1}}},
reflectError{causerError{e1}},
causerError{e1},
e1,
},
}, {
wrapperError{causerError{wrapperError{e1}}}, []error{
wrapperError{causerError{wrapperError{e1}}},
causerError{wrapperError{e1}},
wrapperError{e1},
e1,
},
}, {
reflectError{wrapperError{reflectError{e1}}}, []error{
reflectError{wrapperError{reflectError{e1}}},
wrapperError{reflectError{e1}},
reflectError{e1},
e1,
},
}, {
stopError{nil}, []error{
stopError{nil},
},
}, {
stopError{causerError{nil}}, []error{
stopError{causerError{nil}},
},
}, {
stopError{wrapperError{nil}}, []error{
stopError{wrapperError{nil}},
},
}, {
stopError{reflectError{nil}}, []error{
stopError{reflectError{nil}},
},
}, {
causerError{stopError{e1}}, []error{
causerError{stopError{e1}},
stopError{e1},
},
}, {
wrapperError{stopError{e1}}, []error{
wrapperError{stopError{e1}},
stopError{e1},
},
}, {
reflectError{stopError{e1}}, []error{
reflectError{stopError{e1}},
stopError{e1},
},
}, {
causerError{reflectError{stopError{nil}}}, []error{
causerError{reflectError{stopError{nil}}},
reflectError{stopError{nil}},
stopError{nil},
},
}, {
wrapperError{causerError{stopError{nil}}}, []error{
wrapperError{causerError{stopError{nil}}},
causerError{stopError{nil}},
stopError{nil},
},
}, {
reflectError{wrapperError{stopError{nil}}}, []error{
reflectError{wrapperError{stopError{nil}}},
wrapperError{stopError{nil}},
stopError{nil},
},
}, {
multiWrapperError{[]error{e1}}, []error{
multiWrapperError{[]error{e1}},
e1,
},
}, {
multiWrapperError{[]error{}}, []error{
multiWrapperError{[]error{}},
},
}, {
multiWrapperError{[]error{e1, e2, e3}}, []error{
multiWrapperError{[]error{e1, e2, e3}},
e1,
e2,
e3,
},
}, {
multiWrapperError{[]error{reflectError{e1}, wrapperError{e2}, stopError{e3}}}, []error{
multiWrapperError{[]error{reflectError{e1}, wrapperError{e2}, stopError{e3}}},
reflectError{e1},
e1,
wrapperError{e2},
e2,
stopError{e3},
},
},
} {
var got []error
Walk(test.err, func(err error) bool {
got = append(got, err)
_, stop := err.(stopError)
return stop
})
assert.Equal(t, test.want, got, test.err)
}
}
type causerError struct {
err error
}
func (e causerError) Error() string {
return fmt.Sprintf("causerError(%s)", e.err)
}
func (e causerError) Cause() error {
return e.err
}
var (
_ error = causerError{nil}
_ causer = causerError{nil}
)
type wrapperError struct {
err error
}
func (e wrapperError) Unwrap() error {
return e.err
}
func (e wrapperError) Error() string {
return fmt.Sprintf("wrapperError(%s)", e.err)
}
var (
_ error = wrapperError{nil}
_ wrapper = wrapperError{nil}
)
type multiWrapperError struct {
errs []error
}
func (e multiWrapperError) Unwrap() []error {
return e.errs
}
func (e multiWrapperError) Error() string {
return fmt.Sprintf("multiWrapperError(%s)", e.errs)
}
var (
_ error = multiWrapperError{nil}
_ multiWrapper = multiWrapperError{nil}
)
type reflectError struct {
Err error
}
func (e reflectError) Error() string {
return fmt.Sprintf("reflectError(%s)", e.Err)
}
var (
_ error = reflectError{nil}
)
type stopError struct {
err error
}
func (e stopError) Error() string {
return fmt.Sprintf("stopError(%s)", e.err)
}
func (e stopError) Cause() error {
return e.err
}
var (
_ error = stopError{nil}
_ causer = stopError{nil}
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/errors/errors.go | lib/errors/errors.go | // Package errors provides error handling utilities.
package errors
import (
"reflect"
)
// WalkFunc is the signature of the Walk callback function. The function gets the
// current error in the chain and should return true if the chain processing
// should be aborted.
type WalkFunc func(error) bool
// Walk invokes the given function for each error in the chain. If the
// provided function returns true or no further cause can be found, the process
// is stopped and no further calls will be made.
//
// The next error in the chain is determined by the following rules:
//
// - If the current error has a `Cause() error` method
//   the return value of this method is used.
// - If the current error has an `Unwrap() error` method
//   the return value of this method is used.
// - If the current error has an `Unwrap() []error` method
//   the return values of this method are used.
// - Common errors in the Go runtime that contain an Err field will use this value.
func Walk(err error, f WalkFunc) {
for prev := err; err != nil; prev = err {
if f(err) {
return
}
switch e := err.(type) {
case multiWrapper:
for _, err = range e.Unwrap() {
Walk(err, f)
}
return
case causer:
err = e.Cause()
case wrapper:
err = e.Unwrap()
default:
// Unpack any struct or *struct with a field of name Err which satisfies
// the error interface. This includes *url.Error, *net.OpError,
// *os.SyscallError and many others in the stdlib.
errType := reflect.TypeOf(err)
errValue := reflect.ValueOf(err)
if errValue.IsValid() && errType.Kind() == reflect.Ptr {
errType = errType.Elem()
errValue = errValue.Elem()
}
if errValue.IsValid() && errType.Kind() == reflect.Struct {
if errField := errValue.FieldByName("Err"); errField.IsValid() {
errFieldValue := errField.Interface()
if newErr, ok := errFieldValue.(error); ok {
err = newErr
}
}
}
}
if reflect.DeepEqual(err, prev) { //nolint:govet // deepequalerrors
break
}
}
}
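// Illustrative usage sketch (not part of the original file): Walk can be used
// to search an error chain for a sentinel error. The names errNotFound and
// IsNotFound below are hypothetical, and stderrors stands for an import of the
// standard library errors package.
//
//	var errNotFound = stderrors.New("not found")
//
//	func IsNotFound(err error) bool {
//		found := false
//		Walk(err, func(e error) bool {
//			if e == errNotFound {
//				found = true
//			}
//			return found // returning true stops the walk
//		})
//		return found
//	}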
type causer interface {
Cause() error
}
type wrapper interface {
Unwrap() error
}
type multiWrapper interface {
Unwrap() []error
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/jwtutil/jwtutil.go | lib/jwtutil/jwtutil.go | // Package jwtutil provides JWT utilities.
package jwtutil
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/golang-jwt/jwt/v4"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/lib/oauthutil"
"maps"
"golang.org/x/oauth2"
)
// RandomHex creates a random string of the given length
func RandomHex(n int) (string, error) {
bytes := make([]byte, n)
if _, err := rand.Read(bytes); err != nil {
return "", err
}
return hex.EncodeToString(bytes), nil
}
// Config configures rclone using JWT
func Config(id, name, url string, claims jwt.Claims, headerParams map[string]any, queryParams map[string]string, privateKey *rsa.PrivateKey, m configmap.Mapper, client *http.Client) (err error) {
jwtToken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
maps.Copy(jwtToken.Header, headerParams)
payload, err := jwtToken.SignedString(privateKey)
if err != nil {
return fmt.Errorf("jwtutil: failed to encode payload: %w", err)
}
req, err := http.NewRequest("POST", url, nil)
if err != nil {
return fmt.Errorf("jwtutil: failed to create new request: %w", err)
}
q := req.URL.Query()
q.Add("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer")
q.Add("assertion", payload)
for key, value := range queryParams {
q.Add(key, value)
}
queryString := q.Encode()
req, err = http.NewRequest("POST", url, bytes.NewBuffer([]byte(queryString)))
if err != nil {
return fmt.Errorf("jwtutil: failed to create new request: %w", err)
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("jwtutil: failed making auth request: %w", err)
}
s, err := bodyToString(resp.Body)
if err != nil {
fs.Debugf(nil, "jwtutil: failed to get response body")
}
if resp.StatusCode != 200 {
err = errors.New(resp.Status)
return fmt.Errorf("jwtutil: failed making auth request: %w", err)
}
defer func() {
deferredErr := resp.Body.Close()
if deferredErr != nil {
err = fmt.Errorf("jwtutil: failed to close resp.Body: %w", err)
}
}()
result := &response{}
err = json.NewDecoder(strings.NewReader(s)).Decode(result)
if result.AccessToken == "" && err == nil {
err = errors.New("no AccessToken in Response")
}
if err != nil {
return fmt.Errorf("jwtutil: failed to get token: %w", err)
}
token := &oauth2.Token{
AccessToken: result.AccessToken,
TokenType: result.TokenType,
}
e := result.ExpiresIn
if e != 0 {
token.Expiry = time.Now().Add(time.Duration(e) * time.Second)
}
return oauthutil.PutToken(name, m, token, true)
}
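// Illustrative usage sketch (not part of the original file): a caller would
// typically pass its own claims and token endpoint. clientID, tokenURL, key
// and m below are hypothetical values supplied by the backend.
//
//	claims := &jwt.RegisteredClaims{
//		Issuer:    clientID,
//		Audience:  jwt.ClaimStrings{tokenURL},
//		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Minute)),
//	}
//	err := Config(clientID, "remote", tokenURL, claims,
//		map[string]any{"kid": "key-id"}, nil, key, m, http.DefaultClient)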
func bodyToString(responseBody io.Reader) (bodyString string, err error) {
bodyBytes, err := io.ReadAll(responseBody)
if err != nil {
return "", err
}
bodyString = string(bodyBytes)
fs.Debugf(nil, "jwtutil: Response Body: %q", bodyString)
return bodyString, nil
}
type response struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
ExpiresIn int `json:"expires_in"`
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/encoder/os_other.go | lib/encoder/os_other.go | //go:build !windows && !darwin
package encoder
// OS is the encoding used by the local backend for platforms other than Windows and macOS
const OS = Base
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/encoder/standard.go | lib/encoder/standard.go | package encoder
// Standard defines the encoding that is used for paths input to and output by rclone.
//
// List of replaced characters:
//
// (0x00) -> '␀' // SYMBOL FOR NULL
// / (slash) -> '/' // FULLWIDTH SOLIDUS
const Standard = (EncodeZero |
EncodeSlash |
EncodeCtl |
EncodeDel |
EncodeDot)
// Base only encodes the zero byte, slash and the "." and ".." names
const Base = (EncodeZero |
EncodeSlash |
EncodeDot)
// Display is the internal encoding for logging and output
const Display = Standard
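// Illustrative sketch (not part of the original file): with the Standard mask
// a NUL byte is replaced by the SYMBOL FOR NULL and a slash by the FULLWIDTH
// SOLIDUS; Decode reverses the mapping.
//
//	s := Standard.Encode("fi\x00le") // "fi␀le"
//	_ = Standard.Decode(s)           // "fi\x00le"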
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/encoder/encoder_test.go | lib/encoder/encoder_test.go | package encoder
import (
"fmt"
"regexp"
"strconv"
"strings"
"testing"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
)
// Check it satisfies the interfaces
var (
_ pflag.Value = (*MultiEncoder)(nil)
_ fmt.Scanner = (*MultiEncoder)(nil)
)
func TestEncodeString(t *testing.T) {
for _, test := range []struct {
mask MultiEncoder
want string
}{
{EncodeRaw, "Raw"},
{EncodeZero, "None"},
{EncodeDoubleQuote, "DoubleQuote"},
{EncodeDot, "Dot"},
{EncodeWin, "LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe"},
{EncodeHashPercent, "Hash,Percent"},
{EncodeSlash | EncodeDollar | EncodeColon, "Slash,Dollar,Colon"},
{EncodeSlash | (1 << 31), "Slash,0x80000000"},
} {
got := test.mask.String()
assert.Equal(t, test.want, got)
}
}
func TestEncodeSet(t *testing.T) {
for _, test := range []struct {
in string
want MultiEncoder
wantErr bool
}{
{"", 0, true},
{"Raw", EncodeRaw, false},
{"None", EncodeZero, false},
{"DoubleQuote", EncodeDoubleQuote, false},
{"Dot", EncodeDot, false},
{"LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe", EncodeWin, false},
{"Hash,Percent", EncodeHashPercent, false},
{"Slash,Dollar,Colon", EncodeSlash | EncodeDollar | EncodeColon, false},
{"Slash,0x80000000", EncodeSlash | (1 << 31), false},
{"Blerp", 0, true},
{"0xFGFFF", 0, true},
} {
var got MultiEncoder
err := got.Set(test.in)
assert.Equal(t, test.wantErr, err != nil, err)
assert.Equal(t, test.want, got, test.in)
}
}
type testCase struct {
mask MultiEncoder
in string
out string
}
func TestEncodeSingleMask(t *testing.T) {
for i, tc := range testCasesSingle {
e := tc.mask
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
got := e.Encode(tc.in)
if got != tc.out {
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
}
got2 := e.Decode(got)
if got2 != tc.in {
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
}
})
}
}
func TestEncodeSingleMaskEdge(t *testing.T) {
for i, tc := range testCasesSingleEdge {
e := tc.mask
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
got := e.Encode(tc.in)
if got != tc.out {
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
}
got2 := e.Decode(got)
if got2 != tc.in {
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
}
})
}
}
func TestEncodeDoubleMaskEdge(t *testing.T) {
for i, tc := range testCasesDoubleEdge {
e := tc.mask
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
got := e.Encode(tc.in)
if got != tc.out {
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
}
got2 := e.Decode(got)
if got2 != tc.in {
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
}
})
}
}
func TestEncodeInvalidUnicode(t *testing.T) {
for i, tc := range []testCase{
{
mask: EncodeInvalidUtf8,
in: "\xBF",
out: "‛BF",
}, {
mask: EncodeInvalidUtf8,
in: "\xBF\xFE",
out: "‛BF‛FE",
}, {
mask: EncodeInvalidUtf8,
in: "a\xBF\xFEb",
out: "a‛BF‛FEb",
}, {
mask: EncodeInvalidUtf8,
in: "a\xBFξ\xFEb",
out: "a‛BFξ‛FEb",
}, {
mask: EncodeInvalidUtf8 | EncodeBackSlash,
in: "a\xBF\\\xFEb",
out: "a‛BF\‛FEb",
}, {
mask: 0,
in: "\xBF",
out: "\xBF",
}, {
mask: 0,
in: "\xBF\xFE",
out: "\xBF\xFE",
}, {
mask: 0,
in: "a\xBF\xFEb",
out: "a\xBF\xFEb",
}, {
mask: 0,
in: "a\xBFξ\xFEb",
out: "a\xBFξ\xFEb",
}, {
mask: EncodeBackSlash,
in: "a\xBF\\\xFEb",
out: "a\xBF\\xFEb",
},
} {
e := tc.mask
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
got := e.Encode(tc.in)
if got != tc.out {
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
}
got2 := e.Decode(got)
if got2 != tc.in {
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
}
})
}
}
func TestEncodeDot(t *testing.T) {
for i, tc := range []testCase{
{
mask: EncodeZero,
in: ".",
out: ".",
}, {
mask: EncodeDot,
in: ".",
out: ".",
}, {
mask: EncodeZero,
in: "..",
out: "..",
}, {
mask: EncodeDot,
in: "..",
out: "..",
}, {
mask: EncodeDot,
in: "...",
out: "...",
}, {
mask: EncodeDot,
in: ". .",
out: ". .",
},
} {
e := tc.mask
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
got := e.Encode(tc.in)
if got != tc.out {
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
}
got2 := e.Decode(got)
if got2 != tc.in {
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
}
})
}
}
func TestDecodeHalf(t *testing.T) {
for i, tc := range []testCase{
{
mask: 0,
in: "‛",
out: "‛",
}, {
mask: EncodeZero,
in: "‛‛",
out: "‛",
}, {
mask: 0,
in: "‛a‛",
out: "‛a‛",
}, {
mask: EncodeInvalidUtf8,
in: "a‛B‛Eg",
out: "a‛B‛Eg",
}, {
mask: EncodeInvalidUtf8,
in: "a‛B\‛Eg",
out: "a‛B\‛Eg",
}, {
mask: EncodeInvalidUtf8 | EncodeBackSlash,
in: "a‛B\‛Eg",
out: "a‛B\\‛Eg",
},
} {
e := tc.mask
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
got := e.Decode(tc.in)
if got != tc.out {
t.Errorf("Decode(%q) want %q got %q", tc.in, tc.out, got)
}
})
}
}
const oneDrive = (Standard |
EncodeWin |
EncodeBackSlash |
EncodeHashPercent |
EncodeDel |
EncodeCtl |
EncodeLeftTilde |
EncodeRightSpace |
EncodeRightPeriod)
var benchTests = []struct {
in string
outOld string
outNew string
}{
{
"",
"",
"",
},
{
"abc 123",
"abc 123",
"abc 123",
},
{
`\*<>?:|#%".~`,
`\*<>?:|#%".~`,
`\*<>?:|#%".~`,
},
{
`\*<>?:|#%".~/\*<>?:|#%".~`,
`\*<>?:|#%".~/\*<>?:|#%".~`,
`\*<>?:|#%".~/\*<>?:|#%".~`,
},
{
" leading space",
" leading space",
" leading space",
},
{
"~leading tilde",
"~leading tilde",
"~leading tilde",
},
{
"trailing dot.",
"trailing dot.",
"trailing dot.",
},
{
" leading space/ leading space/ leading space",
" leading space/ leading space/ leading space",
" leading space/ leading space/ leading space",
},
{
"~leading tilde/~leading tilde/~leading tilde",
"~leading tilde/~leading tilde/~leading tilde",
"~leading tilde/~leading tilde/~leading tilde",
},
{
"leading tilde/~leading tilde",
"leading tilde/~leading tilde",
"leading tilde/~leading tilde",
},
{
"trailing dot./trailing dot./trailing dot.",
"trailing dot./trailing dot./trailing dot.",
"trailing dot./trailing dot./trailing dot.",
},
}
func benchReplace(b *testing.B, f func(string) string, old bool) {
for range make([]struct{}, b.N) {
for _, test := range benchTests {
got := f(test.in)
out := test.outNew
if old {
out = test.outOld
}
if got != out {
b.Errorf("Encode(%q) want %q got %q", test.in, out, got)
}
}
}
}
func benchRestore(b *testing.B, f func(string) string, old bool) {
for range make([]struct{}, b.N) {
for _, test := range benchTests {
out := test.outNew
if old {
out = test.outOld
}
got := f(out)
if got != test.in {
b.Errorf("Decode(%q) want %q got %q", out, test.in, got)
}
}
}
}
func BenchmarkOneDriveReplaceNew(b *testing.B) {
benchReplace(b, oneDrive.Encode, false)
}
func BenchmarkOneDriveReplaceOld(b *testing.B) {
benchReplace(b, replaceReservedChars, true)
}
func BenchmarkOneDriveRestoreNew(b *testing.B) {
benchRestore(b, oneDrive.Decode, false)
}
func BenchmarkOneDriveRestoreOld(b *testing.B) {
benchRestore(b, restoreReservedChars, true)
}
var (
charMap = map[rune]rune{
'\\': '\', // FULLWIDTH REVERSE SOLIDUS
'*': '*', // FULLWIDTH ASTERISK
'<': '<', // FULLWIDTH LESS-THAN SIGN
'>': '>', // FULLWIDTH GREATER-THAN SIGN
'?': '?', // FULLWIDTH QUESTION MARK
':': ':', // FULLWIDTH COLON
'|': '|', // FULLWIDTH VERTICAL LINE
'#': '#', // FULLWIDTH NUMBER SIGN
'%': '%', // FULLWIDTH PERCENT SIGN
'"': '"', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'.': '.', // FULLWIDTH FULL STOP
'~': '~', // FULLWIDTH TILDE
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixEndingInPeriod = regexp.MustCompile(`\.(/|$)`)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
fixStartingWithTilde = regexp.MustCompile(`(/|^)~`)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Folder names can't end with a period '.'
in = fixEndingInPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
// OneDrive for Business file or folder names cannot begin with a tilde '~'
in = fixStartingWithTilde.ReplaceAllString(in, "$1"+string(charMap['~']))
// Apparently file names can't end with a space either
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
// Encode reserved characters
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/encoder/encoder_cases_test.go | lib/encoder/encoder_cases_test.go | // Code generated by ./internal/gen/main.go. DO NOT EDIT.
//go:generate go run ./internal/gen/main.go
package encoder
var testCasesSingle = []testCase{
{ // 0
mask: EncodeZero,
in: "*2π%j8|aw␄k%\x15β\x00σ\\τ7␟'R\f␀τ;β77␇",
out: "*2π%j8|aw␄k%\x15β␀σ\\τ7␟'R\f‛␀τ;β77␇",
}, { // 1
mask: EncodeSlash,
in: "␄␅VJg:K|\x10+\x10/φ`/π\n\bρRα\x03ζ?<VLν␗q",
out: "␄␅VJg:K|\x10+\x10‛/φ`/π\n\bρRα\x03ζ?<VLν␗q",
}, { // 2
mask: EncodeSingleQuote,
in: "γ␅'Pn\fχ\nε␍[υ\r␉]ζM2ω␘'$\nAN␄^'\\\x7f",
out: "γ␅‛'Pn\fχ\nε␍[υ\r␉]ζM2ω␘‛'$\nAN␄^'\\\x7f",
}, { // 3
mask: EncodeBackQuote,
in: "\x17\θWfcR␟([`\vV6τη\x1d['mυ>:#|βw{`ρ",
out: "\x17\θWfcR␟([‛`\vV6τη\x1d['mυ>:#|βw{`ρ",
}, { // 4
mask: EncodeLtGt,
in: "4>>o\n\x1bPι,␅Pυ\x0e<\n"!9tπ><␎\b(ης:oε",
out: "4‛>>o\n\x1bPι,␅Pυ\x0e‛<\n"!9tπ‛><␎\b(ης:oε",
}, { // 5
mask: EncodeSquareBracket,
in: "␛␡\fενm\x1bρ[M+τ\x19\\\x04νw^]#␚q.ψI0X#[]",
out: "␛␡\fενm\x1bρ[M+τ\x19\\\x04νw^‛]#␚q.ψI0X#‛[]",
}, { // 6
mask: EncodeSemicolon,
in: "Rφ#'\x06\x13ςY|ψ>l%;yso$;;ς␆ξ␗␚\x1b[\ι\",
out: "Rφ#'\x06\x13ςY|ψ>l%‛;yso$;;ς␆ξ␗␚\x1b[\ι\",
}, { // 7
mask: EncodeExclamation,
in: "\x17!;Xτ\x02?^εE!π\x01&#␇␐&ψ*ι~␀:~/\"θ␎R",
out: "\x17!;Xτ\x02?^εE‛!π\x01&#␇␐&ψ*ι~‛␀:~/\"θ␎R",
}, { // 8
mask: EncodeDollar,
in: "ωQ=λ␕γ␗$"ε/#R$!$␚\x1e:Y\x1e8\x7f$π␇ν\x19␁=",
out: "ωQ=λ␕γ␗‛$"ε/#R$!$␚\x1e:Y\x1e8\x7f$π␇ν\x19␁=",
}, { // 9
mask: EncodeDoubleQuote,
in: "χ␙ρ\x13\x15E\t\x0fC␈\x153sος␙7␔νk!π"␆o␎\"MW\x05",
out: "χ␙ρ\x13\x15E\t\x0fC␈\x153sος␙7␔νk!π‛"␆o␎"MW\x05",
}, { // 10
mask: EncodeColon,
in: "\x1f\x18%~:/\x18Q+δω␆R|:OζIs␇\x7f/\x17?␊μτC;φ",
out: "\x1f\x18%~:/\x18Q+δω␆R|‛:OζIs␇\x7f/\x17?␊μτC;φ",
}, { // 11
mask: EncodeQuestion,
in: "Q␝Jal?Vz\v␏␁\x16\x10?␛χκ?[␑ι\n<␝ν\x15#βλU",
out: "Q␝Jal‛?Vz\v␏␁\x16\x10?␛χκ?[␑ι\n<␝ν\x15#βλU",
}, { // 12
mask: EncodeAsterisk,
in: "W=\x05xξβ$\x1bmaε␡*␝!*␡␊f\\\"␑Yhzτ\x1fδτk",
out: "W=\x05xξβ$\x1bmaε␡*␝!‛*␡␊f\\\"␑Yhzτ\x1fδτk",
}, { // 13
mask: EncodePipe,
in: "#\x18␍␊X\x18␍kp*λ\f:\aθtg]|kρ;|υο␎Oξ␍`",
out: "#\x18␍␊X\x18␍kp*λ\f:\aθtg]‛|kρ;|υο␎Oξ␍`",
}, { // 14
mask: EncodeHash,
in: "*[␄%μ$φ#$$iδ%|␝ο\χ\x1fxI)\x11yG#!ςt9",
out: "*[␄%μ$φ#$$iδ%|␝ο\χ\x1fxI)\x11yG‛#!ςt9",
}, { // 15
mask: EncodePercent,
in: "DδM`ξ␍\x06ρ␓ζ:$T\x04␒φ!\x01O$%d%/γWJ\r%:",
out: "DδM`ξ␍\x06ρ␓ζ:$T\x04␒φ!\x01O$%d%/γWJ\r‛%:",
}, { // 16
mask: EncodeBackSlash,
in: "\x1al^.\\xP␇ββσ\x10*\x01␓\α>\x16*\":'/␄λ/eχV",
out: "\x1al^.\xP␇ββσ\x10*\x01␓‛\α>\x16*\":'/␄λ/eχV",
}, { // 17
mask: EncodeCrLf,
in: "Fg![␂π|\x0eY\nμ$*0␊ν\x01VχI3␍>\x00␐?δ\r␊ω",
out: "Fg![␂π|\x0eY␊μ$*0‛␊ν\x01VχI3‛␍>␀␐?δ␍‛␊ω",
}, { // 18
mask: EncodeDel,
in: "ο␡:πς␓\x14o␏*P:␙\x18d␊λδZ<;&/\x7f\x006[\x03Vδ",
out: "ο‛␡:πς␓\x14o␏*P:␙\x18d␊λδZ<;&/␡␀6[\x03Vδ",
}, { // 19
mask: EncodeCtl,
in: "␊\n␅␚ρ␉␄␈\x1f\\\x1bζ\x19␏t␘\aφ\f\x11\x05␃-␒␁␙␟␔ξ#␄XN␜\b\r\x14\x04␋\x10<␑2!\x18\x1c\t␆\x06␞{␍␐␎5␝\x12c\x1a\x03␗\x0e␛\b\x15\x1eX␛␐␌\x01\x1d\x17\x02\x1d8?\x13␖␂\x18\v␕^␙\x0fα␕␇;μ␓\x16",
out: "‛␊␊‛␅‛␚ρ‛␉‛␄‛␈␟\\␛ζ␙‛␏t‛␘␇φ␌␑␅‛␃-‛␒‛␁‛␙‛␟‛␔ξ#‛␄XN‛␜␈␍␔␄‛␋␐<‛␑2!␘␜␉‛␆␆‛␞{‛␍‛␐‛␎5‛␝␒c␚␃‛␗␎‛␛␈␕␞X‛␛‛␐‛␌␁␝␗␂␝8?␓‛␖‛␂␘␋‛␕^‛␙␏α‛␕‛␇;μ‛␓␖",
},
}
var testCasesSingleEdge = []testCase{
{ // 0
mask: EncodeLeftSpace,
in: " ",
out: "␠",
}, { // 1
mask: EncodeZero | EncodeLeftSpace,
in: "\fQα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5μ",
out: "\fQα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5μ",
}, { // 2
mask: EncodeZero | EncodeLeftSpace,
in: " ␠α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5μ",
out: "␠␠α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5μ",
}, { // 3
mask: EncodeZero | EncodeLeftSpace,
in: "␠␠α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5μ",
out: "‛␠␠α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5μ",
}, { // 4
mask: EncodeZero | EncodeLeftSpace,
in: " Qα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*␠μ",
out: "␠Qα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*␠μ",
}, { // 5
mask: EncodeZero | EncodeLeftSpace,
in: "␠Qα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*␠μ",
out: "‛␠Qα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*␠μ",
}, { // 6
mask: EncodeZero | EncodeLeftSpace,
in: " Qα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5␠",
out: "␠Qα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5␠",
}, { // 7
mask: EncodeZero | EncodeLeftSpace,
in: "␠Qα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5␠",
out: "‛␠Qα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5␠",
}, { // 8
mask: EncodeZero | EncodeLeftSpace,
in: "\f α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5μ",
out: "\f α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5μ",
}, { // 9
mask: EncodeZero | EncodeLeftSpace,
in: "\f␠α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5μ",
out: "\f␠α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5μ",
}, { // 10
mask: EncodeZero | EncodeLeftSpace,
in: "\f α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*␠μ",
out: "\f α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*␠μ",
}, { // 11
mask: EncodeZero | EncodeLeftSpace,
in: "\f␠α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*␠μ",
out: "\f␠α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*␠μ",
}, { // 12
mask: EncodeZero | EncodeLeftSpace,
in: "\f α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5␠",
out: "\f α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5␠",
}, { // 13
mask: EncodeZero | EncodeLeftSpace,
in: "\f␠α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5␠",
out: "\f␠α␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5␠",
}, { // 14
mask: EncodeZero | EncodeLeftSpace,
in: "\fQα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`* μ",
out: "\fQα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`* μ",
}, { // 15
mask: EncodeZero | EncodeLeftSpace,
in: "\fQα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*␠μ",
out: "\fQα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*␠μ",
}, { // 16
mask: EncodeZero | EncodeLeftSpace,
in: "\fQα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`* ␠",
out: "\fQα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`* ␠",
}, { // 17
mask: EncodeZero | EncodeLeftSpace,
in: "\fQα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*␠␠",
out: "\fQα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*␠␠",
}, { // 18
mask: EncodeZero | EncodeLeftSpace,
in: "\fQα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5 ",
out: "\fQα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5 ",
}, { // 19
mask: EncodeZero | EncodeLeftSpace,
in: "\fQα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5␠",
out: "\fQα␃J\x04ψ␘Pζ '\x1a6θz|$`?N:␜h>μ`*5␠",
}, { // 20
mask: EncodeSlash | EncodeLeftSpace,
in: "[\"α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␖",
out: "[\"α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␖",
}, { // 21
mask: EncodeSlash | EncodeLeftSpace,
in: " ␠α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␖",
out: "␠␠α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␖",
}, { // 22
mask: EncodeSlash | EncodeLeftSpace,
in: "␠␠α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␖",
out: "‛␠␠α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␖",
}, { // 23
mask: EncodeSlash | EncodeLeftSpace,
in: " \"α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ␠␖",
out: "␠\"α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ␠␖",
}, { // 24
mask: EncodeSlash | EncodeLeftSpace,
in: "␠\"α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ␠␖",
out: "‛␠\"α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ␠␖",
}, { // 25
mask: EncodeSlash | EncodeLeftSpace,
in: " \"α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␠",
out: "␠\"α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␠",
}, { // 26
mask: EncodeSlash | EncodeLeftSpace,
in: "␠\"α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␠",
out: "‛␠\"α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␠",
}, { // 27
mask: EncodeSlash | EncodeLeftSpace,
in: "[ α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␖",
out: "[ α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␖",
}, { // 28
mask: EncodeSlash | EncodeLeftSpace,
in: "[␠α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␖",
out: "[␠α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␖",
}, { // 29
mask: EncodeSlash | EncodeLeftSpace,
in: "[ α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ␠␖",
out: "[ α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ␠␖",
}, { // 30
mask: EncodeSlash | EncodeLeftSpace,
in: "[␠α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ␠␖",
out: "[␠α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ␠␖",
}, { // 31
mask: EncodeSlash | EncodeLeftSpace,
in: "[ α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␠",
out: "[ α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␠",
}, { // 32
mask: EncodeSlash | EncodeLeftSpace,
in: "[␠α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␠",
out: "[␠α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␠",
}, { // 33
mask: EncodeSlash | EncodeLeftSpace,
in: "[\"α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ ␖",
out: "[\"α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ ␖",
}, { // 34
mask: EncodeSlash | EncodeLeftSpace,
in: "[\"α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ␠␖",
out: "[\"α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ␠␖",
}, { // 35
mask: EncodeSlash | EncodeLeftSpace,
in: "[\"α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ ␠",
out: "[\"α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ ␠",
}, { // 36
mask: EncodeSlash | EncodeLeftSpace,
in: "[\"α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ␠␠",
out: "[\"α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ␠␠",
}, { // 37
mask: EncodeSlash | EncodeLeftSpace,
in: "[\"α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[ ",
out: "[\"α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[ ",
}, { // 38
mask: EncodeSlash | EncodeLeftSpace,
in: "[\"α//s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␠",
out: "[\"α/‛/s]␎d\x19 9~ζO\n␍ντU*T\x0eπm<eλ[␠",
}, { // 39
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "\x16␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1cw",
out: "\x16␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1cw",
}, { // 40
mask: EncodeSingleQuote | EncodeLeftSpace,
in: " ␠3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1cw",
out: "␠␠3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1cw",
}, { // 41
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "␠␠3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1cw",
out: "‛␠␠3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1cw",
}, { // 42
mask: EncodeSingleQuote | EncodeLeftSpace,
in: " ␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%␠w",
out: "␠␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%␠w",
}, { // 43
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "␠␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%␠w",
out: "‛␠␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%␠w",
}, { // 44
mask: EncodeSingleQuote | EncodeLeftSpace,
in: " ␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1c␠",
out: "␠␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1c␠",
}, { // 45
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "␠␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1c␠",
out: "‛␠␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1c␠",
}, { // 46
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "\x16 3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1cw",
out: "\x16 3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1cw",
}, { // 47
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "\x16␠3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1cw",
out: "\x16␠3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1cw",
}, { // 48
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "\x16 3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%␠w",
out: "\x16 3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%␠w",
}, { // 49
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "\x16␠3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%␠w",
out: "\x16␠3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%␠w",
}, { // 50
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "\x16 3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1c␠",
out: "\x16 3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1c␠",
}, { // 51
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "\x16␠3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1c␠",
out: "\x16␠3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1c␠",
}, { // 52
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "\x16␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι% w",
out: "\x16␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι% w",
}, { // 53
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "\x16␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%␠w",
out: "\x16␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%␠w",
}, { // 54
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "\x16␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι% ␠",
out: "\x16␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι% ␠",
}, { // 55
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "\x16␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%␠␠",
out: "\x16␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%␠␠",
}, { // 56
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "\x16␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1c ",
out: "\x16␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1c ",
}, { // 57
mask: EncodeSingleQuote | EncodeLeftSpace,
in: "\x16␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1c␠",
out: "\x16␎3ςδ+3ζ␃ξ \x19␁[t]pα'␐6K]\x17odι%\x1c␠",
}, { // 58
mask: EncodeBackQuote | EncodeLeftSpace,
in: "1><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ\x0f",
out: "1><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ\x0f",
}, { // 59
mask: EncodeBackQuote | EncodeLeftSpace,
in: " ␠<\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ\x0f",
out: "␠␠<\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ\x0f",
}, { // 60
mask: EncodeBackQuote | EncodeLeftSpace,
in: "␠␠<\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ\x0f",
out: "‛␠␠<\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ\x0f",
}, { // 61
mask: EncodeBackQuote | EncodeLeftSpace,
in: " ><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04␠\x0f",
out: "␠><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04␠\x0f",
}, { // 62
mask: EncodeBackQuote | EncodeLeftSpace,
in: "␠><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04␠\x0f",
out: "‛␠><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04␠\x0f",
}, { // 63
mask: EncodeBackQuote | EncodeLeftSpace,
in: " ><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ␠",
out: "␠><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ␠",
}, { // 64
mask: EncodeBackQuote | EncodeLeftSpace,
in: "␠><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ␠",
out: "‛␠><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ␠",
}, { // 65
mask: EncodeBackQuote | EncodeLeftSpace,
in: "1 <\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ\x0f",
out: "1 <\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ\x0f",
}, { // 66
mask: EncodeBackQuote | EncodeLeftSpace,
in: "1␠<\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ\x0f",
out: "1␠<\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ\x0f",
}, { // 67
mask: EncodeBackQuote | EncodeLeftSpace,
in: "1 <\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04␠\x0f",
out: "1 <\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04␠\x0f",
}, { // 68
mask: EncodeBackQuote | EncodeLeftSpace,
in: "1␠<\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04␠\x0f",
out: "1␠<\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04␠\x0f",
}, { // 69
mask: EncodeBackQuote | EncodeLeftSpace,
in: "1 <\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ␠",
out: "1 <\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ␠",
}, { // 70
mask: EncodeBackQuote | EncodeLeftSpace,
in: "1␠<\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ␠",
out: "1␠<\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ␠",
}, { // 71
mask: EncodeBackQuote | EncodeLeftSpace,
in: "1><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04 \x0f",
out: "1><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04 \x0f",
}, { // 72
mask: EncodeBackQuote | EncodeLeftSpace,
in: "1><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04␠\x0f",
out: "1><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04␠\x0f",
}, { // 73
mask: EncodeBackQuote | EncodeLeftSpace,
in: "1><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04 ␠",
out: "1><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04 ␠",
}, { // 74
mask: EncodeBackQuote | EncodeLeftSpace,
in: "1><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04␠␠",
out: "1><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04␠␠",
}, { // 75
mask: EncodeBackQuote | EncodeLeftSpace,
in: "1><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ ",
out: "1><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ ",
}, { // 76
mask: EncodeBackQuote | EncodeLeftSpace,
in: "1><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ␠",
out: "1><\"ξ]\r\"&B q␗␆σ;\t<␛b␑οF\x0eη)8\x04υ␠",
}, { // 77
mask: EncodeLtGt | EncodeLeftSpace,
in: "<~X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1fδ",
out: "‛<~X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1fδ",
}, { // 78
mask: EncodeLtGt | EncodeLeftSpace,
in: " ␠X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1fδ",
out: "␠␠X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1fδ",
}, { // 79
mask: EncodeLtGt | EncodeLeftSpace,
in: "␠␠X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1fδ",
out: "‛␠␠X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1fδ",
}, { // 80
mask: EncodeLtGt | EncodeLeftSpace,
in: " ~X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'␠δ",
out: "␠~X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'␠δ",
}, { // 81
mask: EncodeLtGt | EncodeLeftSpace,
in: "␠~X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'␠δ",
out: "‛␠~X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'␠δ",
}, { // 82
mask: EncodeLtGt | EncodeLeftSpace,
in: " ~X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1f␠",
out: "␠~X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1f␠",
}, { // 83
mask: EncodeLtGt | EncodeLeftSpace,
in: "␠~X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1f␠",
out: "‛␠~X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1f␠",
}, { // 84
mask: EncodeLtGt | EncodeLeftSpace,
in: "< X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1fδ",
out: "‛< X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1fδ",
}, { // 85
mask: EncodeLtGt | EncodeLeftSpace,
in: "<␠X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1fδ",
out: "‛<␠X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1fδ",
}, { // 86
mask: EncodeLtGt | EncodeLeftSpace,
in: "< X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'␠δ",
out: "‛< X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'␠δ",
}, { // 87
mask: EncodeLtGt | EncodeLeftSpace,
in: "<␠X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'␠δ",
out: "‛<␠X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'␠δ",
}, { // 88
mask: EncodeLtGt | EncodeLeftSpace,
in: "< X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1f␠",
out: "‛< X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1f␠",
}, { // 89
mask: EncodeLtGt | EncodeLeftSpace,
in: "<␠X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1f␠",
out: "‛<␠X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1f␠",
}, { // 90
mask: EncodeLtGt | EncodeLeftSpace,
in: "<~X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi' δ",
out: "‛<~X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi' δ",
}, { // 91
mask: EncodeLtGt | EncodeLeftSpace,
in: "<~X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'␠δ",
out: "‛<~X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'␠δ",
}, { // 92
mask: EncodeLtGt | EncodeLeftSpace,
in: "<~X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi' ␠",
out: "‛<~X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi' ␠",
}, { // 93
mask: EncodeLtGt | EncodeLeftSpace,
in: "<~X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'␠␠",
out: "‛<~X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'␠␠",
}, { // 94
mask: EncodeLtGt | EncodeLeftSpace,
in: "<~X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1f ",
out: "‛<~X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1f ",
}, { // 95
mask: EncodeLtGt | EncodeLeftSpace,
in: "<~X{#>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1f␠",
out: "‛<~X{#‛>\x10\x19)\x1b ␏_␇κβFχ␔][νB(\x1aπi'\x1f␠",
}, { // 96
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "`γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘BX",
out: "`γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘BX",
}, { // 97
mask: EncodeSquareBracket | EncodeLeftSpace,
in: " ␠\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘BX",
out: "␠␠\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘BX",
}, { // 98
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "␠␠\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘BX",
out: "‛␠␠\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘BX",
}, { // 99
mask: EncodeSquareBracket | EncodeLeftSpace,
in: " γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘␠X",
out: "␠γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘␠X",
}, { // 100
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "␠γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘␠X",
out: "‛␠γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘␠X",
}, { // 101
mask: EncodeSquareBracket | EncodeLeftSpace,
in: " γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘B␠",
out: "␠γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘B␠",
}, { // 102
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "␠γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘B␠",
out: "‛␠γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘B␠",
}, { // 103
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "` \v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘BX",
out: "` \v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘BX",
}, { // 104
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "`␠\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘BX",
out: "`␠\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘BX",
}, { // 105
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "` \v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘␠X",
out: "` \v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘␠X",
}, { // 106
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "`␠\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘␠X",
out: "`␠\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘␠X",
}, { // 107
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "` \v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘B␠",
out: "` \v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘B␠",
}, { // 108
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "`␠\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘B␠",
out: "`␠\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘B␠",
}, { // 109
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "`γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘ X",
out: "`γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘ X",
}, { // 110
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "`γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘␠X",
out: "`γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘␠X",
}, { // 111
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "`γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘ ␠",
out: "`γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘ ␠",
}, { // 112
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "`γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘␠␠",
out: "`γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘␠␠",
}, { // 113
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "`γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘B ",
out: "`γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘B ",
}, { // 114
mask: EncodeSquareBracket | EncodeLeftSpace,
in: "`γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟␀␘B␠",
out: "`γ\v\x12I\vζ)*! αζ\x1f=τ\x04F%Cς;<␈H␟‛␀␘B␠",
}, { // 115
mask: EncodeSemicolon | EncodeLeftSpace,
in: "/%j2rB`␍'v Dυ␁:οπφL\x10;#:␀\rH2[~φ",
out: "/%j2rB`␍'v Dυ␁:οπφL\x10;#:‛␀\rH2[~φ",
}, { // 116
mask: EncodeSemicolon | EncodeLeftSpace,
in: " ␠j2rB`␍'v Dυ␁:οπφL\x10;#:␀\rH2[~φ",
out: "␠␠j2rB`␍'v Dυ␁:οπφL\x10;#:‛␀\rH2[~φ",
}, { // 117
mask: EncodeSemicolon | EncodeLeftSpace,
in: "␠␠j2rB`␍'v Dυ␁:οπφL\x10;#:␀\rH2[~φ",
out: "‛␠␠j2rB`␍'v Dυ␁:οπφL\x10;#:‛␀\rH2[~φ",
}, { // 118
mask: EncodeSemicolon | EncodeLeftSpace,
in: " %j2rB`␍'v Dυ␁:οπφL\x10;#:␀\rH2[␠φ",
out: "␠%j2rB`␍'v Dυ␁:οπφL\x10;#:‛␀\rH2[␠φ",
}, { // 119
mask: EncodeSemicolon | EncodeLeftSpace,
in: "␠%j2rB`␍'v Dυ␁:οπφL\x10;#:␀\rH2[␠φ",
out: "‛␠%j2rB`␍'v Dυ␁:οπφL\x10;#:‛␀\rH2[␠φ",
}, { // 120
mask: EncodeSemicolon | EncodeLeftSpace,
in: " %j2rB`␍'v Dυ␁:οπφL\x10;#:␀\rH2[~␠",
out: "␠%j2rB`␍'v Dυ␁:οπφL\x10;#:‛␀\rH2[~␠",
}, { // 121
mask: EncodeSemicolon | EncodeLeftSpace,
in: "␠%j2rB`␍'v Dυ␁:οπφL\x10;#:␀\rH2[~␠",
out: "‛␠%j2rB`␍'v Dυ␁:οπφL\x10;#:‛␀\rH2[~␠",
}, { // 122
mask: EncodeSemicolon | EncodeLeftSpace,
in: "/ j2rB`␍'v Dυ␁:οπφL\x10;#:␀\rH2[~φ",
out: "/ j2rB`␍'v Dυ␁:οπφL\x10;#:‛␀\rH2[~φ",
}, { // 123
mask: EncodeSemicolon | EncodeLeftSpace,
in: "/␠j2rB`␍'v Dυ␁:οπφL\x10;#:␀\rH2[~φ",
out: "/␠j2rB`␍'v Dυ␁:οπφL\x10;#:‛␀\rH2[~φ",
}, { // 124
mask: EncodeSemicolon | EncodeLeftSpace,
in: "/ j2rB`␍'v Dυ␁:οπφL\x10;#:␀\rH2[␠φ",
out: "/ j2rB`␍'v Dυ␁:οπφL\x10;#:‛␀\rH2[␠φ",
}, { // 125
mask: EncodeSemicolon | EncodeLeftSpace,
in: "/␠j2rB`␍'v Dυ␁:οπφL\x10;#:␀\rH2[␠φ",
out: "/␠j2rB`␍'v Dυ␁:οπφL\x10;#:‛␀\rH2[␠φ",
}, { // 126
mask: EncodeSemicolon | EncodeLeftSpace,
in: "/ j2rB`␍'v Dυ␁:οπφL\x10;#:␀\rH2[~␠",
out: "/ j2rB`␍'v Dυ␁:οπφL\x10;#:‛␀\rH2[~␠",
}, { // 127
mask: EncodeSemicolon | EncodeLeftSpace,
in: "/␠j2rB`␍'v Dυ␁:οπφL\x10;#:␀\rH2[~␠",
out: "/␠j2rB`␍'v Dυ␁:οπφL\x10;#:‛␀\rH2[~␠",
}, { // 128
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | true |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/encoder/os_darwin.go | lib/encoder/os_darwin.go | //go:build darwin
package encoder
// OS is the encoding used by the local backend for macOS
//
// macOS can't store invalid UTF-8, it converts them into %XX encoding
const OS = (Base |
EncodeInvalidUtf8)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/encoder/os_windows.go | lib/encoder/os_windows.go | //go:build windows
package encoder
// OS is the encoding used by the local backend for windows platforms
//
// List of replaced characters:
//
// < (less than) -> '<' // FULLWIDTH LESS-THAN SIGN
// > (greater than) -> '>' // FULLWIDTH GREATER-THAN SIGN
// : (colon) -> ':' // FULLWIDTH COLON
// " (double quote) -> '"' // FULLWIDTH QUOTATION MARK
// \ (backslash) -> '\' // FULLWIDTH REVERSE SOLIDUS
// | (vertical line) -> '|' // FULLWIDTH VERTICAL LINE
// ? (question mark) -> '?' // FULLWIDTH QUESTION MARK
// * (asterisk) -> '*' // FULLWIDTH ASTERISK
//
// Additionally names can't end with a period (.) or space ( ).
// List of replaced characters:
//
// . (period) -> '.' // FULLWIDTH FULL STOP
// (space) -> '␠' // SYMBOL FOR SPACE
//
// Also encode invalid UTF-8 bytes as Go can't convert them to UTF-16.
//
// https://docs.microsoft.com/de-de/windows/desktop/FileIO/naming-a-file#naming-conventions
const OS = (Base |
EncodeWin |
EncodeBackSlash |
EncodeCtl |
EncodeRightSpace |
EncodeRightPeriod |
EncodeInvalidUtf8)
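// Illustrative sketch (not part of the original file): with this mask a name
// ending in a period or space keeps a visible stand-in instead, e.g.
//
//	s := OS.Encode("report.") // the trailing '.' becomes a FULLWIDTH FULL STOP
//	_ = OS.Decode(s)          // back to "report."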
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |