repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/smb/kerberos.go
backend/smb/kerberos.go
package smb

import (
	"fmt"
	"os"
	"os/user"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/jcmturner/gokrb5/v8/client"
	"github.com/jcmturner/gokrb5/v8/config"
	"github.com/jcmturner/gokrb5/v8/credentials"
)

// KerberosFactory encapsulates dependencies and caches for Kerberos clients.
type KerberosFactory struct {
	// clientCache caches Kerberos clients keyed by resolved ccache path.
	// Clients are reused unless the associated ccache file changes.
	clientCache sync.Map // map[string]*client.Client

	// errCache caches errors encountered when loading Kerberos clients.
	// Prevents repeated attempts for paths that previously failed.
	errCache sync.Map // map[string]error

	// modTimeCache tracks the last known modification time of ccache files.
	// Used to detect changes and trigger credential refresh.
	modTimeCache sync.Map // map[string]time.Time

	// loadCCache, newClient and loadConfig are injectable dependencies;
	// NewKerberosFactory wires in the gokrb5 defaults. Keeping them as
	// fields allows substituting fakes (presumably for tests — the tests
	// are not visible from here).
	loadCCache func(string) (*credentials.CCache, error)
	newClient  func(*credentials.CCache, *config.Config, ...func(*client.Settings)) (*client.Client, error)
	loadConfig func() (*config.Config, error)
}

// NewKerberosFactory creates a new instance of KerberosFactory with default dependencies.
func NewKerberosFactory() *KerberosFactory {
	return &KerberosFactory{
		loadCCache: credentials.LoadCCache,
		newClient:  client.NewFromCCache,
		loadConfig: defaultLoadKerberosConfig,
	}
}

// GetClient returns a cached Kerberos client or creates a new one if needed.
// Cache entries are keyed by the resolved ccache path and invalidated when
// the ccache file's modification time changes.
func (kf *KerberosFactory) GetClient(ccachePath string) (*client.Client, error) { resolvedPath, err := resolveCcachePath(ccachePath) if err != nil { return nil, err } stat, err := os.Stat(resolvedPath) if err != nil { kf.errCache.Store(resolvedPath, err) return nil, err } mtime := stat.ModTime() if oldMod, ok := kf.modTimeCache.Load(resolvedPath); ok { if oldTime, ok := oldMod.(time.Time); ok && oldTime.Equal(mtime) { if errVal, ok := kf.errCache.Load(resolvedPath); ok { return nil, errVal.(error) } if clientVal, ok := kf.clientCache.Load(resolvedPath); ok { return clientVal.(*client.Client), nil } } } // Load Kerberos config cfg, err := kf.loadConfig() if err != nil { kf.errCache.Store(resolvedPath, err) return nil, err } // Load ccache ccache, err := kf.loadCCache(resolvedPath) if err != nil { kf.errCache.Store(resolvedPath, err) return nil, err } // Create new client cl, err := kf.newClient(ccache, cfg) if err != nil { kf.errCache.Store(resolvedPath, err) return nil, err } // Cache and return kf.clientCache.Store(resolvedPath, cl) kf.errCache.Delete(resolvedPath) kf.modTimeCache.Store(resolvedPath, mtime) return cl, nil } // resolveCcachePath resolves the KRB5 ccache path. func resolveCcachePath(ccachePath string) (string, error) { if ccachePath == "" { ccachePath = os.Getenv("KRB5CCNAME") } switch { case strings.Contains(ccachePath, ":"): parts := strings.SplitN(ccachePath, ":", 2) prefix, path := parts[0], parts[1] switch prefix { case "FILE": return path, nil case "DIR": primary, err := os.ReadFile(filepath.Join(path, "primary")) if err != nil { return "", err } return filepath.Join(path, strings.TrimSpace(string(primary))), nil default: return "", fmt.Errorf("unsupported KRB5CCNAME: %s", ccachePath) } case ccachePath == "": u, err := user.Current() if err != nil { return "", err } return "/tmp/krb5cc_" + u.Uid, nil default: return ccachePath, nil } } // defaultLoadKerberosConfig loads Kerberos config from default or env path. 
func defaultLoadKerberosConfig() (*config.Config, error) { cfgPath := os.Getenv("KRB5_CONFIG") if cfgPath == "" { cfgPath = "/etc/krb5.conf" } return config.Load(cfgPath) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/smb/filepool_test.go
backend/smb/filepool_test.go
package smb

import (
	"context"
	"errors"
	"sync"
	"testing"

	"github.com/cloudsoda/go-smb2"
	"github.com/stretchr/testify/assert"
)

// Mock Fs that implements FsInterface.
// All fields are guarded by mu so assertions can run while pool goroutines
// are still calling into the mock.
type mockFs struct {
	mu                  sync.Mutex
	putConnectionCalled bool
	putConnectionErr    error
	getConnectionCalled bool
	getConnectionErr    error
	getConnectionResult *conn
	removeSessionCalled bool
}

// putConnection records that a connection was handed back and with what error.
func (m *mockFs) putConnection(pc **conn, err error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.putConnectionCalled = true
	m.putConnectionErr = err
}

// getConnection records the call, then returns (in order of precedence) the
// configured error, the configured canned connection, or a fresh empty conn.
func (m *mockFs) getConnection(ctx context.Context, share string) (*conn, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.getConnectionCalled = true
	if m.getConnectionErr != nil {
		return nil, m.getConnectionErr
	}
	if m.getConnectionResult != nil {
		return m.getConnectionResult, nil
	}
	return &conn{}, nil
}

// removeSession only records that it was called.
func (m *mockFs) removeSession() {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.removeSessionCalled = true
}

// isPutConnectionCalled reports whether putConnection has been invoked.
func (m *mockFs) isPutConnectionCalled() bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.putConnectionCalled
}

// getPutConnectionErr returns the error last passed to putConnection.
func (m *mockFs) getPutConnectionErr() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.putConnectionErr
}

// isGetConnectionCalled reports whether getConnection has been invoked.
func (m *mockFs) isGetConnectionCalled() bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.getConnectionCalled
}

// newMockFs returns a zero-configured mock.
func newMockFs() *mockFs {
	return &mockFs{}
}

// Helper function to create a mock file backed by empty smb2.File/conn values.
func newMockFile() *file {
	return &file{
		File: &smb2.File{},
		c:    &conn{},
	}
}

// Test filePool creation: the constructor must store its arguments verbatim
// and start with an empty pool.
func TestNewFilePool(t *testing.T) {
	ctx := context.Background()
	fs := newMockFs()
	share := "testshare"
	path := "/test/path"

	pool := newFilePool(ctx, fs, share, path)

	assert.NotNil(t, pool)
	assert.Equal(t, ctx, pool.ctx)
	assert.Equal(t, fs, pool.fs)
	assert.Equal(t, share, pool.share)
	assert.Equal(t, path, pool.path)
	assert.Empty(t, pool.pool)
}

// Test getting file from pool when pool has files: get() must return the
// pooled file itself and leave the pool empty.
func TestFilePool_Get_FromPool(t *testing.T) {
	ctx := context.Background()
	fs := newMockFs()
	pool := newFilePool(ctx, fs, "testshare", "/test/path")

	// Add a mock file to the pool
	mockFile := newMockFile()
	pool.pool = append(pool.pool, mockFile)

	// Get file from pool
	f, err := pool.get()

	assert.NoError(t, err)
	assert.NotNil(t, f)
	assert.Equal(t, mockFile, f)
	assert.Empty(t, pool.pool)
}

// Test getting file when pool is empty: get() must fall back to asking the
// Fs for a new connection and must propagate its error.
func TestFilePool_Get_EmptyPool(t *testing.T) {
	ctx := context.Background()
	fs := newMockFs()
	// Set up the mock to return an error from getConnection
	// This tests that the pool calls getConnection when empty
	fs.getConnectionErr = errors.New("connection failed")

	pool := newFilePool(ctx, fs, "testshare", "test/path")

	// This should call getConnection and return the error
	f, err := pool.get()

	assert.Error(t, err)
	assert.Nil(t, f)
	assert.True(t, fs.isGetConnectionCalled())
	assert.Equal(t, "connection failed", err.Error())
}

// Test putting file successfully: an error-free put returns the file to the
// pool for reuse.
func TestFilePool_Put_Success(t *testing.T) {
	ctx := context.Background()
	fs := newMockFs()
	pool := newFilePool(ctx, fs, "testshare", "/test/path")

	mockFile := newMockFile()
	pool.put(mockFile, nil)

	assert.Len(t, pool.pool, 1)
	assert.Equal(t, mockFile, pool.pool[0])
}

// Test putting file with error: the file must NOT be pooled; instead its
// connection is returned to the Fs along with the error.
func TestFilePool_Put_WithError(t *testing.T) {
	ctx := context.Background()
	fs := newMockFs()
	pool := newFilePool(ctx, fs, "testshare", "/test/path")

	mockFile := newMockFile()
	pool.put(mockFile, errors.New("write error"))

	// Should call putConnection with error
	assert.True(t, fs.isPutConnectionCalled())
	assert.Equal(t, errors.New("write error"), fs.getPutConnectionErr())
	assert.Empty(t, pool.pool)
}

// Test putting nil file: put must tolerate nil with and without an error.
func TestFilePool_Put_NilFile(t *testing.T) {
	ctx := context.Background()
	fs := newMockFs()
	pool := newFilePool(ctx, fs, "testshare", "/test/path")

	// Should not panic
	pool.put(nil, nil)
	pool.put(nil, errors.New("some error"))

	assert.Empty(t, pool.pool)
}

// Test draining pool with files: drain must empty the pool. The returned
// error is deliberately ignored here; only the emptying is asserted.
func TestFilePool_Drain_WithFiles(t *testing.T) {
	ctx := context.Background()
	fs := newMockFs()
	pool := newFilePool(ctx, fs, "testshare", "/test/path")

	// Add mock files to pool
	mockFile1 := newMockFile()
	mockFile2 := newMockFile()
	pool.pool = append(pool.pool, mockFile1, mockFile2)

	// Before draining
	assert.Len(t, pool.pool, 2)

	_ = pool.drain()

	assert.Empty(t, pool.pool)
}

// Test concurrent access to pool: N goroutines each take a file and put it
// back, so the pool must end up with exactly N files again.
func TestFilePool_ConcurrentAccess(t *testing.T) {
	ctx := context.Background()
	fs := newMockFs()
	pool := newFilePool(ctx, fs, "testshare", "/test/path")

	const numGoroutines = 10
	// Pre-fill the pool so no goroutine needs a real connection.
	for range numGoroutines {
		mockFile := newMockFile()
		pool.pool = append(pool.pool, mockFile)
	}

	// Test concurrent get operations
	done := make(chan bool, numGoroutines)
	for range numGoroutines {
		go func() {
			defer func() { done <- true }()
			f, err := pool.get()
			if err == nil {
				pool.put(f, nil)
			}
		}()
	}
	for range numGoroutines {
		<-done
	}

	// Pool should be in a consistent state after the concurrent access:
	// every get was matched by a put, so all files are back.
	assert.Len(t, pool.pool, numGoroutines)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/smb/smb_internal_test.go
backend/smb/smb_internal_test.go
// Unit tests for internal SMB functions package smb import "testing" // TestIsPathDir tests the isPathDir function logic func TestIsPathDir(t *testing.T) { tests := []struct { path string expected bool }{ // Empty path should be considered a directory {"", true}, // Paths with trailing slash should be directories {"/", true}, {"share/", true}, {"share/dir/", true}, {"share/dir/subdir/", true}, // Paths without trailing slash should not be directories {"share", false}, {"share/dir", false}, {"share/dir/file", false}, {"share/dir/subdir/file", false}, // Edge cases {"share//", true}, {"share///", true}, {"share/dir//", true}, } for _, tt := range tests { t.Run(tt.path, func(t *testing.T) { result := isPathDir(tt.path) if result != tt.expected { t.Errorf("isPathDir(%q) = %v, want %v", tt.path, result, tt.expected) } }) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/chunker/chunker_test.go
backend/chunker/chunker_test.go
// Test the Chunker filesystem interface
package chunker_test

import (
	"flag"
	"os"
	"path/filepath"
	"testing"

	_ "github.com/rclone/rclone/backend/all" // for integration tests
	"github.com/rclone/rclone/backend/chunker"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

// Command line flags
var (
	// Invalid characters are not supported by some remotes, e.g. Mailru.
	// We enable testing with invalid characters when -remote is not set, so
	// chunker overlays a local directory, but invalid characters are disabled
	// by default when -remote is set, e.g. when test_all runs backend tests.
	// You can still test with invalid characters using the below flag.
	UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)

// TestIntegration runs integration tests against a concrete remote
// set by the -remote flag. If the flag is not set, it creates a
// dynamic chunker overlay wrapping a local temporary directory.
func TestIntegration(t *testing.T) {
	opt := fstests.Opt{
		RemoteName: *fstest.RemoteName,
		NilObject:  (*chunker.Object)(nil),
		// Bad characters are skipped for real remotes unless -bad-chars
		// explicitly re-enables them (see UseBadChars above).
		SkipBadWindowsCharacters: !*UseBadChars,
		// Optional methods the chunker overlay does not implement; the
		// test harness skips the corresponding checks.
		UnimplementableObjectMethods: []string{
			"MimeType",
			"GetTier",
			"SetTier",
			"Metadata",
			"SetMetadata",
		},
		UnimplementableFsMethods: []string{
			"PublicLink",
			"OpenWriterAt",
			"OpenChunkWriter",
			"MergeDirs",
			"DirCacheFlush",
			"UserInfo",
			"Disconnect",
			"ListP",
		},
	}
	if *fstest.RemoteName == "" {
		// No -remote given: build an ad-hoc "TestChunker:" remote that
		// wraps a local temporary directory via ExtraConfig.
		name := "TestChunker"
		opt.RemoteName = name + ":"
		tempDir := filepath.Join(os.TempDir(), "rclone-chunker-test-standard")
		opt.ExtraConfig = []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "chunker"},
			{Name: name, Key: "remote", Value: tempDir},
		}
		opt.QuickTestOK = true
	}
	fstests.Run(t, &opt)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/chunker/chunker_internal_test.go
backend/chunker/chunker_internal_test.go
package chunker import ( "bytes" "context" "flag" "fmt" "io" "path" "regexp" "strings" "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/lib/random" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // Command line flags var ( UploadKilobytes = flag.Int("upload-kilobytes", 0, "Upload size in Kilobytes, set this to test large uploads") ) // test that chunking does not break large uploads func testPutLarge(t *testing.T, f *Fs, kilobytes int) { t.Run(fmt.Sprintf("PutLarge%dk", kilobytes), func(t *testing.T) { fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{ ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"), Path: fmt.Sprintf("chunker-upload-%dk", kilobytes), Size: int64(kilobytes) * int64(fs.Kibi), }) }) } type settings map[string]any func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs { fsName := strings.Split(f.Name(), "{")[0] // strip off hash configMap := configmap.Simple{} for key, val := range opts { configMap[key] = fmt.Sprintf("%v", val) } rpath := fspath.JoinRootPath(f.Root(), path) remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), rpath) fixFs, err := fs.NewFs(ctx, remote) require.NoError(t, err) return fixFs } var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z") func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object { item := fstest.Item{Path: name, ModTime: mtime1} obj := fstests.PutTestContents(ctx, t, f, &item, contents, check) assert.NotNil(t, obj, message) return obj } // test chunk name parser func testChunkNameFormat(t *testing.T, f *Fs) { saveOpt := f.opt defer func() { // restore original settings (f is 
pointer, f.opt is struct) f.opt = saveOpt _ = f.setChunkNameFormat(f.opt.NameFormat) }() assertFormat := func(pattern, wantDataFormat, wantCtrlFormat, wantNameRegexp string) { err := f.setChunkNameFormat(pattern) assert.NoError(t, err) assert.Equal(t, wantDataFormat, f.dataNameFmt) assert.Equal(t, wantCtrlFormat, f.ctrlNameFmt) assert.Equal(t, wantNameRegexp, f.nameRegexp.String()) } assertFormatValid := func(pattern string) { err := f.setChunkNameFormat(pattern) assert.NoError(t, err) } assertFormatInvalid := func(pattern string) { err := f.setChunkNameFormat(pattern) assert.Error(t, err) } assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType, xactID string) { gotChunkName := "" assert.NotPanics(t, func() { gotChunkName = f.makeChunkName(mainName, chunkNo, ctrlType, xactID) }, "makeChunkName(%q,%d,%q,%q) must not panic", mainName, chunkNo, ctrlType, xactID) if gotChunkName != "" { assert.Equal(t, wantChunkName, gotChunkName) } } assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType, xactID string) { assert.Panics(t, func() { _ = f.makeChunkName(mainName, chunkNo, ctrlType, xactID) }, "makeChunkName(%q,%d,%q,%q) should panic", mainName, chunkNo, ctrlType, xactID) } assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType, wantXactID string) { gotMainName, gotChunkNo, gotCtrlType, gotXactID := f.parseChunkName(fileName) assert.Equal(t, wantMainName, gotMainName) assert.Equal(t, wantChunkNo, gotChunkNo) assert.Equal(t, wantCtrlType, gotCtrlType) assert.Equal(t, wantXactID, gotXactID) } const newFormatSupported = false // support for patterns not starting with base name (*) // valid formats assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`) assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, 
`^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`) assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`) assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`) assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`) if newFormatSupported { assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z][a-z0-9]{2,6})),(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`) } // invalid formats assertFormatInvalid(`chunk-#`) assertFormatInvalid(`*-chunk`) assertFormatInvalid(`*-*-chunk-#`) assertFormatInvalid(`*-chunk-#-#`) assertFormatInvalid(`#-chunk-*`) assertFormatInvalid(`*/#`) assertFormatValid(`*#`) assertFormatInvalid(`**#`) assertFormatInvalid(`#*`) assertFormatInvalid(``) assertFormatInvalid(`-`) // quick tests if newFormatSupported { assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9][0-9a-z]{3,8})\.\.tmp_([0-9]{10,13}))?$`) f.opt.StartFrom = 1 assertMakeName(`part_fish_1`, "fish", 0, "", "") assertParseName(`part_fish_43`, "fish", 42, "", "") assertMakeName(`part_fish__locks`, "fish", -2, "locks", "") assertParseName(`part_fish__locks`, "fish", -1, "locks", "") assertMakeName(`part_fish__x2y`, "fish", -2, "x2y", "") assertParseName(`part_fish__x2y`, "fish", -1, "x2y", "") assertMakeName(`part_fish_3_0004`, "fish", 2, "", "4") assertParseName(`part_fish_4_0005`, "fish", 3, "", "0005") assertMakeName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -3, "blkinfo", "jj5fvo3wr") assertParseName(`part_fish__blkinfo_zz9fvo3wr`, "fish", -1, "blkinfo", 
"zz9fvo3wr") // old-style temporary suffix (parse only) assertParseName(`part_fish_4..tmp_0000000011`, "fish", 3, "", "000b") assertParseName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -1, "blkinfo", "jj5fvo3wr") } // prepare format for long tests assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`) f.opt.StartFrom = 2 // valid data chunks assertMakeName(`fish.chunk.003`, "fish", 1, "", "") assertParseName(`fish.chunk.003`, "fish", 1, "", "") assertMakeName(`fish.chunk.021`, "fish", 19, "", "") assertParseName(`fish.chunk.021`, "fish", 19, "", "") // valid temporary data chunks assertMakeName(`fish.chunk.011_4321`, "fish", 9, "", "4321") assertParseName(`fish.chunk.011_4321`, "fish", 9, "", "4321") assertMakeName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc") assertParseName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc") assertMakeName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr") assertParseName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr") assertMakeName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr") assertParseName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr") // valid temporary data chunks (old temporary suffix, only parse) assertParseName(`fish.chunk.004..tmp_0000000047`, "fish", 2, "", "001b") assertParseName(`fish.chunk.323..tmp_9994567890123`, "fish", 321, "", "3jjfvo3wr") // parsing invalid data chunk names assertParseName(`fish.chunk.3`, "", -1, "", "") assertParseName(`fish.chunk.001`, "", -1, "", "") assertParseName(`fish.chunk.21`, "", -1, "", "") assertParseName(`fish.chunk.-21`, "", -1, "", "") assertParseName(`fish.chunk.004abcd`, "", -1, "", "") // missing underscore delimiter assertParseName(`fish.chunk.004__1234`, "", -1, "", "") // extra underscore delimiter assertParseName(`fish.chunk.004_123`, "", -1, "", "") // too short temporary suffix assertParseName(`fish.chunk.004_1234567890`, "", 
-1, "", "") // too long temporary suffix assertParseName(`fish.chunk.004_-1234`, "", -1, "", "") // temporary suffix must be positive assertParseName(`fish.chunk.004_123E`, "", -1, "", "") // uppercase not allowed assertParseName(`fish.chunk.004_12.3`, "", -1, "", "") // punctuation not allowed // parsing invalid data chunk names (old temporary suffix) assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", "") assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", "") assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", "") assertParseName(`fish.chunk.323..tmp_12345678901234`, "", -1, "", "") assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", "") // valid control chunks assertMakeName(`fish.chunk._info`, "fish", -1, "info", "") assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", "") assertMakeName(`fish.chunk._blkinfo`, "fish", -3, "blkinfo", "") assertMakeName(`fish.chunk._x2y`, "fish", -4, "x2y", "") assertParseName(`fish.chunk._info`, "fish", -1, "info", "") assertParseName(`fish.chunk._locks`, "fish", -1, "locks", "") assertParseName(`fish.chunk._blkinfo`, "fish", -1, "blkinfo", "") assertParseName(`fish.chunk._x2y`, "fish", -1, "x2y", "") // valid temporary control chunks assertMakeName(`fish.chunk._info_0001`, "fish", -1, "info", "1") assertMakeName(`fish.chunk._locks_4321`, "fish", -2, "locks", "4321") assertMakeName(`fish.chunk._uploads_abcd`, "fish", -3, "uploads", "abcd") assertMakeName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -4, "blkinfo", "xyzabcdef") assertMakeName(`fish.chunk._x2y_1aaa`, "fish", -5, "x2y", "1aaa") assertParseName(`fish.chunk._info_0001`, "fish", -1, "info", "0001") assertParseName(`fish.chunk._locks_4321`, "fish", -1, "locks", "4321") assertParseName(`fish.chunk._uploads_9abc`, "fish", -1, "uploads", "9abc") assertParseName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -1, "blkinfo", "xyzabcdef") assertParseName(`fish.chunk._x2y_1aaa`, "fish", -1, "x2y", "1aaa") // valid temporary control 
chunks (old temporary suffix, parse only) assertParseName(`fish.chunk._info..tmp_0000000047`, "fish", -1, "info", "001b") assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", "15wx") assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", "0000") assertParseName(`fish.chunk._blkinfo..tmp_9994567890123`, "fish", -1, "blkinfo", "3jjfvo3wr") assertParseName(`fish.chunk._x2y..tmp_0000000000`, "fish", -1, "x2y", "0000") // parsing invalid control chunk names assertParseName(`fish.chunk.metadata`, "", -1, "", "") // must be prepended by underscore assertParseName(`fish.chunk.info`, "", -1, "", "") assertParseName(`fish.chunk.locks`, "", -1, "", "") assertParseName(`fish.chunk.uploads`, "", -1, "", "") assertParseName(`fish.chunk._os`, "", -1, "", "") // too short assertParseName(`fish.chunk._metadata`, "", -1, "", "") // too long assertParseName(`fish.chunk._blockinfo`, "", -1, "", "") // way too long assertParseName(`fish.chunk._4me`, "", -1, "", "") // cannot start with digit assertParseName(`fish.chunk._567`, "", -1, "", "") // cannot be all digits assertParseName(`fish.chunk._me_ta`, "", -1, "", "") // punctuation not allowed assertParseName(`fish.chunk._in-fo`, "", -1, "", "") assertParseName(`fish.chunk._.bin`, "", -1, "", "") assertParseName(`fish.chunk._.2xy`, "", -1, "", "") // parsing invalid temporary control chunks assertParseName(`fish.chunk._blkinfo1234`, "", -1, "", "") // missing underscore delimiter assertParseName(`fish.chunk._info__1234`, "", -1, "", "") // extra underscore delimiter assertParseName(`fish.chunk._info_123`, "", -1, "", "") // too short temporary suffix assertParseName(`fish.chunk._info_1234567890`, "", -1, "", "") // too long temporary suffix assertParseName(`fish.chunk._info_-1234`, "", -1, "", "") // temporary suffix must be positive assertParseName(`fish.chunk._info_123E`, "", -1, "", "") // uppercase not allowed assertParseName(`fish.chunk._info_12.3`, "", -1, "", "") // punctuation not 
allowed assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", "") assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", "") assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", "") // short control chunk names: 3 letters ok, 1-2 letters not allowed assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", "") assertParseName(`fish.chunk._int`, "fish", -1, "int", "") assertMakeNamePanics("fish", -1, "in", "") assertMakeNamePanics("fish", -1, "up", "4") assertMakeNamePanics("fish", -1, "x", "") assertMakeNamePanics("fish", -1, "c", "1z") assertMakeName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0") assertMakeName(`fish.chunk._ext_0026`, "fish", -1, "ext", "26") assertMakeName(`fish.chunk._int_0abc`, "fish", -1, "int", "abc") assertMakeName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz") assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr") assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr") assertParseName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0000") assertParseName(`fish.chunk._ext_0026`, "fish", -1, "ext", "0026") assertParseName(`fish.chunk._int_0abc`, "fish", -1, "int", "0abc") assertParseName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz") assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr") assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr") // base file name can sometimes look like a valid chunk name assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", "") assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", "") assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", "") assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", "") assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", "") assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", "") // base file name looking like a valid chunk name (old temporary 
suffix) assertParseName(`fish.chunk.003.chunk.005..tmp_0000000022`, "fish.chunk.003", 3, "", "000m") assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", "") assertParseName(`fish.chunk._info.chunk.005..tmp_0000000023`, "fish.chunk._info", 3, "", "000n") assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "") assertParseName(`fish.chunk.003.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.003", -1, "blkinfo", "3jjfvo3wr") assertParseName(`fish.chunk._info.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._info", -1, "blkinfo", "3jjfvo3wr") assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", "") assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000025`, "fish.chunk.004..tmp_0000000021", 3, "", "000p") assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", "") assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.004..tmp_0000000021", -1, "blkinfo", "3jjfvo3wr") assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", "") assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", "") assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.004`, "fish.chunk._blkinfo..tmp_9994567890123", 2, "", "") assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.005..tmp_0000000026`, "fish.chunk._blkinfo..tmp_9994567890123", 3, "", "000q") assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "info", "") assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "blkinfo", "3jjfvo3wr") assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._Meta`, "", -1, "", "") assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._x..tmp_0000054321`, "", 
-1, "", "") assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blkinfo..tmp_1234567890123456789", 2, "", "") assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.005..tmp_0000000022`, "fish.chunk._blkinfo..tmp_1234567890123456789", 3, "", "000m") assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "info", "") assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "blkinfo", "3jjfvo3wr") assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", "") assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "") // attempts to make invalid chunk names assertMakeNamePanics("fish", -1, "", "") // neither data nor control assertMakeNamePanics("fish", 0, "info", "") // both data and control assertMakeNamePanics("fish", -1, "metadata", "") // control type too long assertMakeNamePanics("fish", -1, "blockinfo", "") // control type way too long assertMakeNamePanics("fish", -1, "2xy", "") // first digit not allowed assertMakeNamePanics("fish", -1, "123", "") // all digits not allowed assertMakeNamePanics("fish", -1, "Meta", "") // only lower case letters allowed assertMakeNamePanics("fish", -1, "in-fo", "") // punctuation not allowed assertMakeNamePanics("fish", -1, "_info", "") assertMakeNamePanics("fish", -1, "info_", "") assertMakeNamePanics("fish", -2, ".bind", "") assertMakeNamePanics("fish", -2, "bind.", "") assertMakeNamePanics("fish", -1, "", "1") // neither data nor control assertMakeNamePanics("fish", 0, "info", "23") // both data and control assertMakeNamePanics("fish", -1, "metadata", "45") // control type too long assertMakeNamePanics("fish", -1, "blockinfo", "7") // control type way too long assertMakeNamePanics("fish", -1, "2xy", "abc") // first 
digit not allowed assertMakeNamePanics("fish", -1, "123", "def") // all digits not allowed assertMakeNamePanics("fish", -1, "Meta", "mnk") // only lower case letters allowed assertMakeNamePanics("fish", -1, "in-fo", "xyz") // punctuation not allowed assertMakeNamePanics("fish", -1, "_info", "5678") assertMakeNamePanics("fish", -1, "info_", "999") assertMakeNamePanics("fish", -2, ".bind", "0") assertMakeNamePanics("fish", -2, "bind.", "0") assertMakeNamePanics("fish", 0, "", "1234567890") // temporary suffix too long assertMakeNamePanics("fish", 0, "", "123F4") // uppercase not allowed assertMakeNamePanics("fish", 0, "", "123.") // punctuation not allowed assertMakeNamePanics("fish", 0, "", "_123") } func testSmallFileInternals(t *testing.T, f *Fs) { const dir = "small" ctx := context.Background() saveOpt := f.opt defer func() { f.opt.FailHard = false _ = operations.Purge(ctx, f.base, dir) f.opt = saveOpt }() f.opt.FailHard = false modTime := fstest.Time("2001-02-03T04:05:06.499999999Z") checkSmallFileInternals := func(obj fs.Object) { assert.NotNil(t, obj) o, ok := obj.(*Object) assert.True(t, ok) assert.NotNil(t, o) if o == nil { return } switch { case !f.useMeta: // If meta format is "none", non-chunked file (even empty) // internally is a single chunk without meta object. 
assert.Nil(t, o.main) assert.True(t, o.isComposite()) // sorry, sometimes a name is misleading assert.Equal(t, 1, len(o.chunks)) case f.hashAll: // Consistent hashing forces meta object on small files too assert.NotNil(t, o.main) assert.True(t, o.isComposite()) assert.Equal(t, 1, len(o.chunks)) default: // normally non-chunked file is kept in the Object's main field assert.NotNil(t, o.main) assert.False(t, o.isComposite()) assert.Equal(t, 0, len(o.chunks)) } } checkContents := func(obj fs.Object, contents string) { assert.NotNil(t, obj) assert.Equal(t, int64(len(contents)), obj.Size()) r, err := obj.Open(ctx) assert.NoError(t, err) assert.NotNil(t, r) if r == nil { return } data, err := io.ReadAll(r) assert.NoError(t, err) assert.Equal(t, contents, string(data)) _ = r.Close() } checkHashsum := func(obj fs.Object) { var ht hash.Type switch { case !f.hashAll: return case f.useMD5: ht = hash.MD5 case f.useSHA1: ht = hash.SHA1 default: return } // even empty files must have hashsum in consistent mode sum, err := obj.Hash(ctx, ht) assert.NoError(t, err) assert.NotEqual(t, sum, "") } checkSmallFile := func(name, contents string) { filename := path.Join(dir, name) item := fstest.Item{Path: filename, ModTime: modTime} put := fstests.PutTestContents(ctx, t, f, &item, contents, false) assert.NotNil(t, put) checkSmallFileInternals(put) checkContents(put, contents) checkHashsum(put) // objects returned by Put and NewObject must have similar structure obj, err := f.NewObject(ctx, filename) assert.NoError(t, err) assert.NotNil(t, obj) checkSmallFileInternals(obj) checkContents(obj, contents) checkHashsum(obj) _ = obj.Remove(ctx) _ = put.Remove(ctx) // for good } checkSmallFile("emptyfile", "") checkSmallFile("smallfile", "Ok") } func testPreventCorruption(t *testing.T, f *Fs) { if f.opt.ChunkSize > 50 { t.Skip("this test requires small chunks") } const dir = "corrupted" ctx := context.Background() saveOpt := f.opt defer func() { f.opt.FailHard = false _ = operations.Purge(ctx, 
f.base, dir) f.opt = saveOpt }() f.opt.FailHard = true contents := random.String(250) modTime := fstest.Time("2001-02-03T04:05:06.499999999Z") const overlapMessage = "chunk overlap" assertOverlapError := func(err error) { assert.Error(t, err) if err != nil { assert.Contains(t, err.Error(), overlapMessage) } } newFile := func(name string) fs.Object { item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime} obj := fstests.PutTestContents(ctx, t, f, &item, contents, true) require.NotNil(t, obj) return obj } billyObj := newFile("billy") billyTxn := billyObj.(*Object).xactID if f.useNoRename { require.True(t, billyTxn != "") } else { require.True(t, billyTxn == "") } billyChunkName := func(chunkNo int) string { return f.makeChunkName(billyObj.Remote(), chunkNo, "", billyTxn) } err := f.Mkdir(ctx, billyChunkName(1)) assertOverlapError(err) _, err = f.Move(ctx, newFile("silly1"), billyChunkName(2)) assert.Error(t, err) assert.True(t, err == fs.ErrorCantMove || (err != nil && strings.Contains(err.Error(), overlapMessage))) _, err = f.Copy(ctx, newFile("silly2"), billyChunkName(3)) assert.Error(t, err) assert.True(t, err == fs.ErrorCantCopy || (err != nil && strings.Contains(err.Error(), overlapMessage))) // accessing chunks in strict mode is prohibited f.opt.FailHard = true billyChunk4Name := billyChunkName(4) _, err = f.base.NewObject(ctx, billyChunk4Name) require.NoError(t, err) _, err = f.NewObject(ctx, billyChunk4Name) assertOverlapError(err) f.opt.FailHard = false billyChunk4, err := f.NewObject(ctx, billyChunk4Name) assert.NoError(t, err) require.NotNil(t, billyChunk4) f.opt.FailHard = true _, err = f.Put(ctx, bytes.NewBufferString(contents), billyChunk4) assertOverlapError(err) // you can freely read chunks (if you have an object) r, err := billyChunk4.Open(ctx) assert.NoError(t, err) var chunkContents []byte assert.NotPanics(t, func() { chunkContents, err = io.ReadAll(r) _ = r.Close() }) assert.NoError(t, err) assert.NotEqual(t, contents, 
string(chunkContents)) // but you can't change them err = billyChunk4.Update(ctx, bytes.NewBufferString(contents), newFile("silly3")) assertOverlapError(err) // Remove isn't special, you can't corrupt files even if you have an object err = billyChunk4.Remove(ctx) assertOverlapError(err) // recreate billy in case it was anyhow corrupted willyObj := newFile("willy") willyTxn := willyObj.(*Object).xactID willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", willyTxn) f.opt.FailHard = false willyChunk, err := f.NewObject(ctx, willyChunkName) f.opt.FailHard = true assert.NoError(t, err) require.NotNil(t, willyChunk) _, err = operations.Copy(ctx, f, willyChunk, willyChunkName, newFile("silly4")) assertOverlapError(err) // operations.Move will return error when chunker's Move refused // to corrupt target file, but reverts to copy/delete method // still trying to delete target chunk. Chunker must come to rescue. _, err = operations.Move(ctx, f, willyChunk, willyChunkName, newFile("silly5")) assertOverlapError(err) r, err = willyChunk.Open(ctx) assert.NoError(t, err) assert.NotPanics(t, func() { _, err = io.ReadAll(r) _ = r.Close() }) assert.NoError(t, err) } func testChunkNumberOverflow(t *testing.T, f *Fs) { if f.opt.ChunkSize > 50 { t.Skip("this test requires small chunks") } const dir = "wreaked" const wreakNumber = 10200300 ctx := context.Background() saveOpt := f.opt defer func() { f.opt.FailHard = false _ = operations.Purge(ctx, f.base, dir) f.opt = saveOpt }() modTime := fstest.Time("2001-02-03T04:05:06.499999999Z") contents := random.String(100) newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) { filename = path.Join(dir, name) item := fstest.Item{Path: filename, ModTime: modTime} obj = fstests.PutTestContents(ctx, t, f, &item, contents, true) require.NotNil(t, obj) if chunkObj, isChunkObj := obj.(*Object); isChunkObj { txnID = chunkObj.xactID } return } f.opt.FailHard = false file, fileName, fileTxn := newFile(f, "wreaker") 
wreak, _, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", fileTxn)) f.opt.FailHard = false fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision()) _, err := f.NewObject(ctx, fileName) assert.Error(t, err) f.opt.FailHard = true _, err = f.List(ctx, dir) assert.Error(t, err) _, err = f.NewObject(ctx, fileName) assert.Error(t, err) f.opt.FailHard = false _ = wreak.Remove(ctx) _ = file.Remove(ctx) } func testMetadataInput(t *testing.T, f *Fs) { const minChunkForTest = 50 if f.opt.ChunkSize < minChunkForTest { t.Skip("this test requires chunks that fit metadata") } const dir = "usermeta" ctx := context.Background() saveOpt := f.opt defer func() { f.opt.FailHard = false _ = operations.Purge(ctx, f.base, dir) f.opt = saveOpt }() f.opt.FailHard = false runSubtest := func(contents, name string) { description := fmt.Sprintf("file with %s metadata", name) filename := path.Join(dir, name) require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct") part := testPutFile(ctx, t, f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true) _ = testPutFile(ctx, t, f, filename, contents, "upload "+description, false) obj, err := f.NewObject(ctx, filename) assert.NoError(t, err, "access "+description) assert.NotNil(t, obj) assert.Equal(t, int64(len(contents)), obj.Size(), "size "+description) o, ok := obj.(*Object) assert.NotNil(t, ok) if o != nil { assert.True(t, o.isComposite() && len(o.chunks) == 1, description+" is forced composite") o = nil } defer func() { _ = obj.Remove(ctx) _ = part.Remove(ctx) }() r, err := obj.Open(ctx) assert.NoError(t, err, "open "+description) assert.NotNil(t, r, "open stream of "+description) if err == nil && r != nil { data, err := io.ReadAll(r) assert.NoError(t, err, "read all of "+description) assert.Equal(t, contents, string(data), description+" contents is ok") _ = r.Close() } } metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "", "") require.NoError(t, err) todaysMeta := 
string(metaData) runSubtest(todaysMeta, "today") pastMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":1`) pastMeta = regexp.MustCompile(`"size":[0-9]+`).ReplaceAllLiteralString(pastMeta, `"size":0`) runSubtest(pastMeta, "past") futureMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":999`) futureMeta = regexp.MustCompile(`"nchunks":[0-9]+`).ReplaceAllLiteralString(futureMeta, `"nchunks":0,"x":"y"`) runSubtest(futureMeta, "future") } // Test that chunker refuses to change on objects with future/unknown metadata func testFutureProof(t *testing.T, f *Fs) { if !f.useMeta { t.Skip("this test requires metadata support") } saveOpt := f.opt ctx := context.Background() f.opt.FailHard = true const dir = "future" const file = dir + "/test" defer func() { f.opt.FailHard = false _ = operations.Purge(ctx, f.base, dir) f.opt = saveOpt }() modTime := fstest.Time("2001-02-03T04:05:06.499999999Z") putPart := func(name string, part int, data, msg string) { if part > 0 { name = f.makeChunkName(name, part-1, "", "") } item := fstest.Item{Path: name, ModTime: modTime} obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true) assert.NotNil(t, obj, msg) } // simulate chunked object from future meta := `{"ver":999,"nchunks":3,"size":9,"garbage":"litter","sha1":"0707f2970043f9f7c22029482db27733deaec029"}` putPart(file, 0, meta, "metaobject") putPart(file, 1, "abc", "chunk1") putPart(file, 2, "def", "chunk2") putPart(file, 3, "ghi", "chunk3") // List should succeed ls, err := f.List(ctx, dir) assert.NoError(t, err) assert.Equal(t, 1, len(ls)) assert.Equal(t, int64(9), ls[0].Size()) // NewObject should succeed obj, err := f.NewObject(ctx, file) assert.NoError(t, err) assert.Equal(t, file, obj.Remote()) assert.Equal(t, int64(9), obj.Size()) // Hash must fail _, err = obj.Hash(ctx, hash.SHA1) assert.Equal(t, ErrMetaUnknown, err) // Move must fail mobj, err := operations.Move(ctx, f, nil, file+"2", obj) assert.Nil(t, 
mobj) assert.Error(t, err) if err != nil { assert.Contains(t, err.Error(), "please upgrade rclone") } // Put must fail oi := object.NewStaticObjectInfo(file, modTime, 3, true, nil, nil) buf := bytes.NewBufferString("abc") _, err = f.Put(ctx, buf, oi) assert.Error(t, err) // Rcat must fail in := io.NopCloser(bytes.NewBufferString("abc")) robj, err := operations.Rcat(ctx, f, file, in, modTime, nil) assert.Nil(t, robj) assert.NotNil(t, err) if err != nil { assert.Contains(t, err.Error(), "please upgrade rclone") } } // The newer method of doing transactions without renaming should still be able to correctly process chunks that were created with renaming // If you attempt to do the inverse, however, the data chunks will be ignored causing commands to perform incorrectly func testBackwardsCompatibility(t *testing.T, f *Fs) { if !f.useMeta { t.Skip("Can't do norename transactions without metadata") } const dir = "backcomp" ctx := context.Background() saveOpt := f.opt saveUseNoRename := f.useNoRename defer func() { f.opt.FailHard = false _ = operations.Purge(ctx, f.base, dir) f.opt = saveOpt f.useNoRename = saveUseNoRename }() f.opt.ChunkSize = fs.SizeSuffix(10) modTime := fstest.Time("2001-02-03T04:05:06.499999999Z") contents := random.String(250) newFile := func(f fs.Fs, name string) (fs.Object, string) { filename := path.Join(dir, name) item := fstest.Item{Path: filename, ModTime: modTime} obj := fstests.PutTestContents(ctx, t, f, &item, contents, true) require.NotNil(t, obj) return obj, filename } f.opt.FailHard = false f.useNoRename = false file, fileName := newFile(f, "renamefile") f.opt.FailHard = false item := fstest.NewItem(fileName, contents, modTime) var items []fstest.Item items = append(items, item) f.useNoRename = true fstest.CheckListingWithRoot(t, f, dir, items, nil, f.Precision()) _, err := f.NewObject(ctx, fileName) assert.NoError(t, err) f.opt.FailHard = true _, err = f.List(ctx, dir) assert.NoError(t, err) f.opt.FailHard = false _ = file.Remove(ctx) } 
func testChunkerServerSideMove(t *testing.T, f *Fs) { if !f.useMeta { t.Skip("Can't test norename transactions without metadata") } ctx := context.Background() const dir = "servermovetest" subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), dir) subFs1, err := fs.NewFs(ctx, subRemote+"/subdir1") assert.NoError(t, err)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/chunker/chunker.go
backend/chunker/chunker.go
// Package chunker provides wrappers for Fs and Object which split large files in chunks package chunker import ( "bytes" "context" "crypto/md5" "crypto/sha1" "encoding/hex" "encoding/json" "errors" "fmt" gohash "hash" "io" "math/rand" "path" "regexp" "sort" "strconv" "strings" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/encoder" ) // Chunker's composite files have one or more chunks // and optional metadata object. If it's present, // meta object is named after the original file. // // The only supported metadata format is simplejson atm. // It supports only per-file meta objects that are rudimentary, // used mostly for consistency checks (lazily for performance reasons). // Other formats can be developed that use an external meta store // free of these limitations, but this needs some support from // rclone core (e.g. metadata store interfaces). // // The following types of chunks are supported: // data and control, active and temporary. // Chunk type is identified by matching chunk file name // based on the chunk name format configured by user and transaction // style being used. // // Both data and control chunks can be either temporary (aka hidden) // or active (non-temporary aka normal aka permanent). // An operation creates temporary chunks while it runs. // By completion it removes temporary and leaves active chunks. // // Temporary chunks have a special hardcoded suffix in addition // to the configured name pattern. // Temporary suffix includes so called transaction identifier // (abbreviated as `xactID` below), a generic non-negative base-36 "number" // used by parallel operations to share a composite object. 
// Chunker also accepts the longer decimal temporary suffix (obsolete), // which is transparently converted to the new format. In its maximum // length of 13 decimals it makes a 7-digit base-36 number. // // When transactions is set to the norename style, data chunks will // keep their temporary chunk names (with the transaction identifier // suffix). To distinguish them from temporary chunks, the txn field // of the metadata file is set to match the transaction identifier of // the data chunks. // // Chunker can tell data chunks from control chunks by the characters // located in the "hash placeholder" position of configured format. // Data chunks have decimal digits there. // Control chunks have in that position a short lowercase alphanumeric // string (starting with a letter) prepended by underscore. // // Metadata format v1 does not define any control chunk types, // they are currently ignored aka reserved. // In future they can be used to implement resumable uploads etc. const ( ctrlTypeRegStr = `[a-z][a-z0-9]{2,6}` tempSuffixFormat = `_%04s` tempSuffixRegStr = `_([0-9a-z]{4,9})` tempSuffixRegOld = `\.\.tmp_([0-9]{10,13})` ) var ( // regular expressions to validate control type and temporary suffix ctrlTypeRegexp = regexp.MustCompile(`^` + ctrlTypeRegStr + `$`) tempSuffixRegexp = regexp.MustCompile(`^` + tempSuffixRegStr + `$`) ) // Normally metadata is a small piece of JSON (about 100-300 bytes). // The size of valid metadata must never exceed this limit. // Current maximum provides a reasonable room for future extensions. // // Please refrain from increasing it, this can cause old rclone versions // to fail, or worse, treat meta object as a normal file (see NewObject). // If more room is needed please bump metadata version forcing previous // releases to ask for upgrade, and offload extra info to a control chunk. // // And still chunker's primary function is to chunk large files // rather than serve as a generic metadata container. 
const ( maxMetadataSize = 1023 maxMetadataSizeWritten = 255 ) // Current/highest supported metadata format. const metadataVersion = 2 // optimizeFirstChunk enables the following optimization in the Put: // If a single chunk is expected, put the first chunk using the // base target name instead of a temporary name, thus avoiding // extra rename operation. // Warning: this optimization is not transaction safe. const optimizeFirstChunk = false // revealHidden is a stub until chunker lands the `reveal hidden` option. const revealHidden = false // Prevent memory overflow due to specially crafted chunk name const maxSafeChunkNumber = 10000000 // Number of attempts to find unique transaction identifier const maxTransactionProbes = 100 // standard chunker errors var ( ErrChunkOverflow = errors.New("chunk number overflow") ErrMetaTooBig = errors.New("metadata is too big") ErrMetaUnknown = errors.New("unknown metadata, please upgrade rclone") ) // variants of baseMove's parameter delMode const ( delNever = 0 // don't delete, just move delAlways = 1 // delete destination before moving delFailed = 2 // move, then delete and try again if failed ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "chunker", Description: "Transparently chunk/split large files", NewFs: NewFs, Options: []fs.Option{{ Name: "remote", Required: true, Help: `Remote to chunk/unchunk. Normally should contain a ':' and a path, e.g. "myremote:path/to/dir", "myremote:bucket" or maybe "myremote:" (not recommended).`, }, { Name: "chunk_size", Advanced: false, Default: fs.SizeSuffix(2147483648), // 2 GiB Help: `Files larger than chunk size will be split in chunks.`, }, { Name: "name_format", Advanced: true, Hide: fs.OptionHideCommandLine, Default: `*.rclone_chunk.###`, Help: `String format of chunk file names. The two placeholders are: base file name (*) and chunk number (#...). There must be one and only one asterisk and one or more consecutive hash characters. 
If chunk number has less digits than the number of hashes, it is left-padded by zeros. If there are more digits in the number, they are left as is. Possible chunk files are ignored if their name does not match given format.`, }, { Name: "start_from", Advanced: true, Hide: fs.OptionHideCommandLine, Default: 1, Help: `Minimum valid chunk number. Usually 0 or 1. By default chunk numbers start from 1.`, }, { Name: "meta_format", Advanced: true, Hide: fs.OptionHideCommandLine, Default: "simplejson", Help: `Format of the metadata object or "none". By default "simplejson". Metadata is a small JSON file named after the composite file.`, Examples: []fs.OptionExample{{ Value: "none", Help: `Do not use metadata files at all. Requires hash type "none".`, }, { Value: "simplejson", Help: `Simple JSON supports hash sums and chunk validation. It has the following fields: ver, size, nchunks, md5, sha1.`, }}, }, { Name: "hash_type", Advanced: false, Default: "md5", Help: `Choose how chunker handles hash sums. All modes but "none" require metadata.`, Examples: []fs.OptionExample{{ Value: "none", Help: `Pass any hash supported by wrapped remote for non-chunked files. Return nothing otherwise.`, }, { Value: "md5", Help: `MD5 for composite files.`, }, { Value: "sha1", Help: `SHA1 for composite files.`, }, { Value: "md5all", Help: `MD5 for all files.`, }, { Value: "sha1all", Help: `SHA1 for all files.`, }, { Value: "md5quick", Help: `Copying a file to chunker will request MD5 from the source. 
Falling back to SHA1 if unsupported.`, }, { Value: "sha1quick", Help: `Similar to "md5quick" but prefers SHA1 over MD5.`, }}, }, { Name: "fail_hard", Advanced: true, Default: false, Help: `Choose how chunker should handle files with missing or invalid chunks.`, Examples: []fs.OptionExample{ { Value: "true", Help: "Report errors and abort current command.", }, { Value: "false", Help: "Warn user, skip incomplete file and proceed.", }, }, }, { Name: "transactions", Advanced: true, Default: "rename", Help: `Choose how chunker should handle temporary files during transactions.`, Hide: fs.OptionHideCommandLine, Examples: []fs.OptionExample{ { Value: "rename", Help: "Rename temporary files after a successful transaction.", }, { Value: "norename", Help: `Leave temporary file names and write transaction ID to metadata file. Metadata is required for no rename transactions (meta format cannot be "none"). If you are using norename transactions you should be careful not to downgrade Rclone as older versions of Rclone don't support this transaction style and will misinterpret files manipulated by norename transactions. This method is EXPERIMENTAL, don't use on production systems.`, }, { Value: "auto", Help: `Rename or norename will be used depending on capabilities of the backend. If meta format is set to "none", rename transactions will always be used. 
This method is EXPERIMENTAL, don't use on production systems.`, }, }, }}, }) } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } if opt.StartFrom < 0 { return nil, errors.New("start_from must be non-negative") } remote := opt.Remote if strings.HasPrefix(remote, name+":") { return nil, errors.New("can't point remote at itself - check the value of the remote setting") } baseName, basePath, err := fspath.SplitFs(remote) if err != nil { return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err) } // Look for a file first remotePath := fspath.JoinRootPath(basePath, rpath) baseFs, err := cache.Get(ctx, baseName+remotePath) if err != fs.ErrorIsFile && err != nil { return nil, fmt.Errorf("failed to make remote %q to wrap: %w", baseName+remotePath, err) } if !operations.CanServerSideMove(baseFs) { return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy") } f := &Fs{ base: baseFs, name: name, root: rpath, opt: *opt, } f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm. if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil { return nil, err } // Handle the tricky case detected by FsMkdir/FsPutFiles/FsIsFile // when `rpath` points to a composite multi-chunk file without metadata, // i.e. `rpath` does not exist in the wrapped remote, but chunker // detects a composite file because it finds the first chunk! 
// (yet can't satisfy fstest.CheckListing, will ignore) if err == nil && !f.useMeta { firstChunkPath := f.makeChunkName(remotePath, 0, "", "") newBase, testErr := cache.Get(ctx, baseName+firstChunkPath) if testErr == fs.ErrorIsFile { f.base = newBase err = testErr } } cache.PinUntilFinalized(f.base, f) // Correct root if definitely pointing to a file if err == fs.ErrorIsFile { f.root = path.Dir(f.root) if f.root == "." || f.root == "/" { f.root = "" } } // Note 1: the features here are ones we could support, and they are // ANDed with the ones from wrappedFs. // Note 2: features.Fill() points features.PutStream to our PutStream, // but features.Mask() will nullify it if wrappedFs does not have it. f.features = (&fs.Features{ CaseInsensitive: true, DuplicateFiles: true, ReadMimeType: false, // Object.MimeType not supported WriteMimeType: true, BucketBased: true, CanHaveEmptyDirectories: true, ServerSideAcrossConfigs: true, ReadDirMetadata: true, WriteDirMetadata: true, WriteDirSetModTime: true, UserDirMetadata: true, DirModTimeUpdatesOnWrite: true, }).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs) f.features.ListR = nil // Recursive listing may cause chunker skip files f.features.ListP = nil // ListP not supported yet return f, err } // Options defines the configuration for this backend type Options struct { Remote string `config:"remote"` ChunkSize fs.SizeSuffix `config:"chunk_size"` NameFormat string `config:"name_format"` StartFrom int `config:"start_from"` MetaFormat string `config:"meta_format"` HashType string `config:"hash_type"` FailHard bool `config:"fail_hard"` Transactions string `config:"transactions"` } // Fs represents a wrapped fs.Fs type Fs struct { name string root string base fs.Fs // remote wrapped by chunker overlay wrapper fs.Fs // wrapper is used by SetWrapper useMeta bool // false if metadata format is 'none' useMD5 bool // mutually exclusive with useSHA1 useSHA1 bool // mutually exclusive with useMD5 hashFallback bool // allows fallback 
from MD5 to SHA1 and vice versa hashAll bool // hash all files, mutually exclusive with hashFallback dataNameFmt string // name format of data chunks ctrlNameFmt string // name format of control chunks nameRegexp *regexp.Regexp // regular expression to match chunk names xactIDRand *rand.Rand // generator of random transaction identifiers xactIDMutex sync.Mutex // mutex for the source of randomness opt Options // copy of Options features *fs.Features // optional features dirSort bool // reserved for future, ignored useNoRename bool // can be set with the transactions option } // configure sets up chunker for given name format, meta format and hash type. // It also seeds the source of random transaction identifiers. // configure must be called only from NewFs or by unit tests. func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error { if err := f.setChunkNameFormat(nameFormat); err != nil { return fmt.Errorf("invalid name format '%s': %w", nameFormat, err) } if err := f.setMetaFormat(metaFormat); err != nil { return err } if err := f.setHashType(hashType); err != nil { return err } if err := f.setTransactionMode(transactionMode); err != nil { return err } randomSeed := time.Now().UnixNano() f.xactIDRand = rand.New(rand.NewSource(randomSeed)) return nil } func (f *Fs) setMetaFormat(metaFormat string) error { switch metaFormat { case "none": f.useMeta = false case "simplejson": f.useMeta = true default: return fmt.Errorf("unsupported meta format '%s'", metaFormat) } return nil } // setHashType // must be called *after* setMetaFormat. // // In the "All" mode chunker will force metadata on all files // if the wrapped remote can't provide given hashsum. 
func (f *Fs) setHashType(hashType string) error { f.useMD5 = false f.useSHA1 = false f.hashFallback = false f.hashAll = false requireMetaHash := true switch hashType { case "none": requireMetaHash = false case "md5": f.useMD5 = true case "sha1": f.useSHA1 = true case "md5quick": f.useMD5 = true f.hashFallback = true case "sha1quick": f.useSHA1 = true f.hashFallback = true case "md5all": f.useMD5 = true f.hashAll = !f.base.Hashes().Contains(hash.MD5) || f.base.Features().SlowHash case "sha1all": f.useSHA1 = true f.hashAll = !f.base.Hashes().Contains(hash.SHA1) || f.base.Features().SlowHash default: return fmt.Errorf("unsupported hash type '%s'", hashType) } if requireMetaHash && !f.useMeta { return fmt.Errorf("hash type '%s' requires compatible meta format", hashType) } return nil } func (f *Fs) setTransactionMode(transactionMode string) error { switch transactionMode { case "rename": f.useNoRename = false case "norename": if !f.useMeta { return errors.New("incompatible transaction options") } f.useNoRename = true case "auto": f.useNoRename = !f.CanQuickRename() if f.useNoRename && !f.useMeta { f.useNoRename = false return errors.New("using norename transactions requires metadata") } default: return fmt.Errorf("unsupported transaction mode '%s'", transactionMode) } return nil } // setChunkNameFormat converts pattern based chunk name format // into Printf format and Regular expressions for data and // control chunks. 
func (f *Fs) setChunkNameFormat(pattern string) error {
	// validate pattern
	if strings.Count(pattern, "*") != 1 {
		return errors.New("pattern must have exactly one asterisk (*)")
	}
	numDigits := strings.Count(pattern, "#")
	if numDigits < 1 {
		return errors.New("pattern must have a hash character (#)")
	}
	if strings.Index(pattern, "*") > strings.Index(pattern, "#") {
		return errors.New("asterisk (*) in pattern must come before hashes (#)")
	}
	if ok, _ := regexp.MatchString("^[^#]*[#]+[^#]*$", pattern); !ok {
		return errors.New("hashes (#) in pattern must be consecutive")
	}
	if dir, _ := path.Split(pattern); dir != "" {
		return errors.New("directory separator prohibited")
	}
	if pattern[0] != '*' {
		return errors.New("pattern must start with asterisk") // to be lifted later
	}

	// craft a unified regular expression for all types of chunks
	reHashes := regexp.MustCompile("[#]+")
	reDigits := "[0-9]+"
	if numDigits > 1 {
		// with multiple hashes chunk numbers are zero-padded, so require at least numDigits digits
		reDigits = fmt.Sprintf("[0-9]{%d,}", numDigits)
	}
	// group 2 captures a data chunk number, group 3 a control chunk type
	// (group numbers in the final regexp; see parseChunkName for consumption)
	reDataOrCtrl := fmt.Sprintf("(?:(%s)|_(%s))", reDigits, ctrlTypeRegStr)

	// this must be non-greedy or else it could eat up temporary suffix
	const mainNameRegStr = "(.+?)"
	strRegex := regexp.QuoteMeta(pattern)
	strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
	// the pattern starts with '*', so the main name becomes capture group 1;
	// the optional temporary suffixes below become groups 4 (new style) and 5 (old style)
	strRegex = strings.ReplaceAll(strRegex, "\\*", mainNameRegStr)
	strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
	f.nameRegexp = regexp.MustCompile(strRegex)

	// craft printf formats for active data/control chunks
	fmtDigits := "%d"
	if numDigits > 1 {
		// e.g. 3 hashes produce "%03d" to left-pad chunk numbers with zeros
		fmtDigits = fmt.Sprintf("%%0%dd", numDigits)
	}
	// escape literal percent signs in the user pattern before it becomes a printf format
	strFmt := strings.ReplaceAll(pattern, "%", "%%")
	strFmt = strings.Replace(strFmt, "*", "%s", 1)
	f.dataNameFmt = reHashes.ReplaceAllLiteralString(strFmt, fmtDigits)
	f.ctrlNameFmt = reHashes.ReplaceAllLiteralString(strFmt, "_%s")
	return nil
}

// makeChunkName produces chunk name (or path) for a given file.
//
// filePath can be name, relative or absolute path of main file.
// // chunkNo must be a zero based index of data chunk. // Negative chunkNo e.g. -1 indicates a control chunk. // ctrlType is type of control chunk (must be valid). // ctrlType must be "" for data chunks. // // xactID is a transaction identifier. Empty xactID denotes active chunk, // otherwise temporary chunk name is produced. func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string { dir, parentName := path.Split(filePath) var name, tempSuffix string switch { case chunkNo >= 0 && ctrlType == "": name = fmt.Sprintf(f.dataNameFmt, parentName, chunkNo+f.opt.StartFrom) case chunkNo < 0 && ctrlTypeRegexp.MatchString(ctrlType): name = fmt.Sprintf(f.ctrlNameFmt, parentName, ctrlType) default: panic("makeChunkName: invalid argument") // must not produce something we can't consume } if xactID != "" { tempSuffix = fmt.Sprintf(tempSuffixFormat, xactID) if !tempSuffixRegexp.MatchString(tempSuffix) { panic("makeChunkName: invalid argument") } } return dir + name + tempSuffix } // parseChunkName checks whether given file path belongs to // a chunk and extracts chunk name parts. // // filePath can be name, relative or absolute path of a file. // // Returned parentPath is path of the composite file owning the chunk. // It's a non-empty string if valid chunk name is detected // or "" if it's not a chunk. 
// Other returned values depend on detected chunk type:
// data or control, active or temporary:
//
// data chunk - the returned chunkNo is non-negative and ctrlType is ""
// control chunk - the chunkNo is -1 and ctrlType is a non-empty string
// active chunk - the returned xactID is ""
// temporary chunk - the xactID is a non-empty string
func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ctrlType, xactID string) {
	dir, name := path.Split(filePath)
	// nameRegexp groups (built by setChunkNameFormat):
	// 1 - main file name, 2 - data chunk number, 3 - control chunk type,
	// 4 - new-style temporary suffix, 5 - old-style decimal temporary suffix
	match := f.nameRegexp.FindStringSubmatch(name)
	if match == nil || match[1] == "" {
		// not a chunk at all
		return "", -1, "", ""
	}
	var err error
	chunkNo = -1
	if match[2] != "" {
		if chunkNo, err = strconv.Atoi(match[2]); err != nil {
			chunkNo = -1
		}
		// normalize to a zero-based index; negative means below start_from
		if chunkNo -= f.opt.StartFrom; chunkNo < 0 {
			fs.Infof(f, "invalid data chunk number in file %q", name)
			return "", -1, "", ""
		}
	}
	if match[4] != "" {
		xactID = match[4]
	}
	if match[5] != "" {
		// old-style temporary suffix
		number, err := strconv.ParseInt(match[5], 10, 64)
		if err != nil || number < 0 {
			fs.Infof(f, "invalid old-style transaction number in file %q", name)
			return "", -1, "", ""
		}
		// convert old-style transaction number to base-36 transaction ID
		xactID = fmt.Sprintf(tempSuffixFormat, strconv.FormatInt(number, 36))
		xactID = xactID[1:] // strip leading underscore
	}
	parentPath = dir + match[1]
	ctrlType = match[3]
	return
}

// forbidChunk prints error message or raises error if file is chunk.
// First argument sets log prefix, use `false` to suppress message.
func (f *Fs) forbidChunk(o any, filePath string) error {
	if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
		if f.opt.FailHard {
			return fmt.Errorf("chunk overlap with %q", parentPath)
		}
		// a bare `false` argument suppresses logging entirely
		if boolVal, isBool := o.(bool); !isBool || boolVal {
			fs.Errorf(o, "chunk overlap with %q", parentPath)
		}
	}
	return nil
}

// newXactID produces a sufficiently random transaction identifier.
//
// The temporary suffix mask allows identifiers consisting of 4-9
// base-36 digits (ie.
digits 0-9 or lowercase letters a-z). // The identifiers must be unique between transactions running on // the single file in parallel. // // Currently the function produces 6-character identifiers. // Together with underscore this makes a 7-character temporary suffix. // // The first 4 characters isolate groups of transactions by time intervals. // The maximum length of interval is base-36 "zzzz" ie. 1,679,615 seconds. // The function rather takes a maximum prime closest to this number // (see https://primes.utm.edu) as the interval length to better safeguard // against repeating pseudo-random sequences in cases when rclone is // invoked from a periodic scheduler like unix cron. // Thus, the interval is slightly more than 19 days 10 hours 33 minutes. // // The remaining 2 base-36 digits (in the range from 0 to 1295 inclusive) // are taken from the local random source. // This provides about 0.1% collision probability for two parallel // operations started at the same second and working on the same file. // // Non-empty filePath argument enables probing for existing temporary chunk // to further eliminate collisions. 
func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err error) { const closestPrimeZzzzSeconds = 1679609 const maxTwoBase36Digits = 1295 unixSec := time.Now().Unix() if unixSec < 0 { unixSec = -unixSec // unlikely but the number must be positive } circleSec := unixSec % closestPrimeZzzzSeconds first4chars := strconv.FormatInt(circleSec, 36) for range maxTransactionProbes { f.xactIDMutex.Lock() randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1) f.xactIDMutex.Unlock() last2chars := strconv.FormatInt(randomness, 36) xactID = fmt.Sprintf("%04s%02s", first4chars, last2chars) if filePath == "" { return } probeChunk := f.makeChunkName(filePath, 0, "", xactID) _, probeErr := f.base.NewObject(ctx, probeChunk) if probeErr != nil { return } } return "", fmt.Errorf("can't setup transaction for %s", filePath) } // List the objects and directories in dir into entries. // The entries can be returned in any order but should be // for a complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't found. // // Commands normally cleanup all temporary chunks in case of a failure. // However, if rclone dies unexpectedly, it can leave behind a bunch of // hidden temporary chunks. List and its underlying chunkEntries() // silently skip all temporary chunks in the directory. It's okay if // they belong to an unfinished command running in parallel. // // However, there is no way to discover dead temporary chunks atm. // As a workaround users can use `purge` to forcibly remove the whole // directory together with dead chunks. // In future a flag named like `--chunker-list-hidden` may be added to // rclone that will tell List to reveal hidden chunks. 
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { entries, err = f.base.List(ctx, dir) if err != nil { return nil, err } return f.processEntries(ctx, entries, dir) } // ListR lists the objects and directories of the Fs starting // from dir recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. // // Don't implement this unless you have a more efficient way // of listing recursively than doing a directory traversal. func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { do := f.base.Features().ListR return do(ctx, dir, func(entries fs.DirEntries) error { newEntries, err := f.processEntries(ctx, entries, dir) if err != nil { return err } return callback(newEntries) }) } // processEntries assembles chunk entries into composite entries func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirPath string) (newEntries fs.DirEntries, err error) { var sortedEntries fs.DirEntries if f.dirSort { // sort entries so that meta objects go before their chunks sortedEntries = make(fs.DirEntries, len(origEntries)) copy(sortedEntries, origEntries) sort.Sort(sortedEntries) } else { sortedEntries = origEntries } byRemote := make(map[string]*Object) badEntry := make(map[string]bool) isSubdir := make(map[string]bool) txnByRemote := map[string]string{} var tempEntries fs.DirEntries for _, dirOrObject := range sortedEntries { switch entry := dirOrObject.(type) { case fs.Object: remote := entry.Remote() mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote) if mainRemote == "" { // this is meta object or standalone file object := f.newObject("", entry, nil) 
byRemote[remote] = object tempEntries = append(tempEntries, object) if f.useNoRename { txnByRemote[remote], err = object.readXactID(ctx) if err != nil { return nil, err } } break } // this is some kind of chunk // metobject should have been created above if present mainObject := byRemote[mainRemote] isSpecial := xactID != txnByRemote[mainRemote] || ctrlType != "" if mainObject == nil && f.useMeta && !isSpecial { fs.Debugf(f, "skip orphan data chunk %q", remote) break } if mainObject == nil && !f.useMeta { // this is the "nometa" case // create dummy chunked object without metadata mainObject = f.newObject(mainRemote, nil, nil) byRemote[mainRemote] = mainObject if !badEntry[mainRemote] { tempEntries = append(tempEntries, mainObject) } } if isSpecial { if revealHidden { fs.Infof(f, "ignore non-data chunk %q", remote) } // need to read metadata to ensure actual object type // no need to read if metaobject is too big or absent, // use the fact that before calling validate() // the `size` field caches metaobject size, if any if f.useMeta && mainObject != nil && mainObject.size <= maxMetadataSize { mainObject.unsure = true } break } if err := mainObject.addChunk(entry, chunkNo); err != nil { if f.opt.FailHard { return nil, err } badEntry[mainRemote] = true } case fs.Directory: isSubdir[entry.Remote()] = true wrapDir := fs.NewDirWrapper(entry.Remote(), entry) tempEntries = append(tempEntries, wrapDir) default: if f.opt.FailHard { return nil, fmt.Errorf("unknown object type %T", entry) } fs.Debugf(f, "unknown object type %T", entry) } } for _, entry := range tempEntries { if object, ok := entry.(*Object); ok { remote := object.Remote() if isSubdir[remote] { if f.opt.FailHard { return nil, fmt.Errorf("%q is both meta object and directory", remote) } badEntry[remote] = true // fall thru } if badEntry[remote] { fs.Debugf(f, "invalid directory entry %q", remote) continue } if err := object.validate(); err != nil { if f.opt.FailHard { return nil, err } fs.Debugf(f, "invalid 
chunks in object %q", remote) continue } } newEntries = append(newEntries, entry) } if f.dirSort { sort.Sort(newEntries) } return newEntries, nil } // NewObject finds the Object at remote. // // Please note that every NewObject invocation will scan the whole directory. // Using here something like fs.DirCache might improve performance // (yet making the logic more complex). // // Note that chunker prefers analyzing file names rather than reading // the content of meta object assuming that directory scans are fast // but opening even a small file can be slow on some backends. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.scanObject(ctx, remote, false) } // scanObject is like NewObject with optional quick scan mode. // The quick mode avoids directory requests other than `List`, // ignores non-chunked objects and skips chunk size checks. func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) { if err := f.forbidChunk(false, remote); err != nil { return nil, fmt.Errorf("can't access: %w", err) } var ( o *Object baseObj fs.Object currentXactID string err error sameMain bool ) if f.useMeta { baseObj, err = f.base.NewObject(ctx, remote) if err != nil { return nil, err } remote = baseObj.Remote() // Chunker's meta object cannot be large and maxMetadataSize acts // as a hard limit. Anything larger than that is treated as a // non-chunked file without even checking its contents, so it's // paramount to prevent metadata from exceeding the maximum size. // Anything smaller is additionally checked for format. o = f.newObject("", baseObj, nil) if o.size > maxMetadataSize { return o, nil } } else { // Metadata is disabled, hence this is either a multi-chunk // composite file without meta object or a non-chunked file. // Create an empty wrapper here, scan directory to determine // which case it is and postpone reading if it's the latter one. 
o = f.newObject(remote, nil, nil) } // If the object is small, it's probably a meta object. // However, composite file must have data chunks besides it. // Scan directory for possible data chunks now and decide later on. dir := path.Dir(strings.TrimRight(remote, "/")) if dir == "." { dir = "" } entries, err := f.base.List(ctx, dir) switch err { case nil: // OK, fall thru case fs.ErrorDirNotFound: entries = nil default: return nil, fmt.Errorf("can't detect composite file: %w", err) } if f.useNoRename { currentXactID, err = o.readXactID(ctx) if err != nil { return nil, err } } caseInsensitive := f.features.CaseInsensitive for _, dirOrObject := range entries { entry, ok := dirOrObject.(fs.Object) if !ok { continue } entryRemote := entry.Remote() if !caseInsensitive && !strings.Contains(entryRemote, remote) { continue // bypass regexp to save cpu } mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(entryRemote) if mainRemote == "" { continue // skip non-chunks } if caseInsensitive { sameMain = strings.EqualFold(mainRemote, remote) if sameMain && f.base.Features().IsLocal { // on local, make sure the EqualFold still holds true when accounting for encoding.
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/uptobox/uptobox.go
backend/uptobox/uptobox.go
// Package uptobox provides an interface to the Uptobox storage system. package uptobox import ( "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "path" "regexp" "strconv" "strings" "time" "github.com/rclone/rclone/backend/uptobox/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/rest" ) const ( apiBaseURL = "https://uptobox.com/api" minSleep = 400 * time.Millisecond // api is extremely rate limited now maxSleep = 5 * time.Second decayConstant = 2 // bigger for slower decay, exponential attackConstant = 0 // start with max sleep ) func init() { fs.Register(&fs.RegInfo{ Name: "uptobox", Description: "Uptobox", NewFs: NewFs, Options: []fs.Option{{ Help: "Your access token.\n\nGet it from https://uptobox.com/my_account.", Name: "access_token", Sensitive: true, }, { Help: "Set to make uploaded files private", Name: "private", Advanced: true, Default: false, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // maxFileLength = 255 Default: (encoder.Display | encoder.EncodeBackQuote | encoder.EncodeDoubleQuote | encoder.EncodeLtGt | encoder.EncodeLeftSpace | encoder.EncodeInvalidUtf8), }}, }) } // Options defines the configuration for this backend type Options struct { AccessToken string `config:"access_token"` Private bool `config:"private"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs is the interface a cloud storage system must provide type Fs struct { root string name string opt Options features *fs.Features srv *rest.Client pacer *fs.Pacer IDRegexp *regexp.Regexp public string // "0" to make objects private } // Object represents an Uptobox 
object type Object struct { fs *Fs // what this object is part of remote string // The remote path hasMetaData bool // whether info below has been set size int64 // Bytes in the object // modTime time.Time // Modified time of the object code string } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String returns a description of the FS func (f *Fs) String() string { return fmt.Sprintf("Uptobox root '%s'", f.root) } // Precision of the ModTimes in this Fs func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } // Hashes returns the supported hash types of the filesystem func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // dirPath returns an escaped file path (f.root, file) func (f *Fs) dirPath(file string) string { //return path.Join(f.diskRoot, file) if file == "" || file == "." 
{ return "//" + f.root } return "//" + path.Join(f.root, file) } // returns the full path based on root and the last element func (f *Fs) splitPathFull(pth string) (string, string) { fullPath := strings.Trim(path.Join(f.root, pth), "/") i := len(fullPath) - 1 for i >= 0 && fullPath[i] != '/' { i-- } if i < 0 { return "//" + fullPath[:i+1], fullPath[i+1:] } // do not include the / at the split return "//" + fullPath[:i], fullPath[i+1:] } // splitPath is modified splitPath version that doesn't include the separator // in the base path func (f *Fs) splitPath(pth string) (string, string) { // chop of any leading or trailing '/' pth = strings.Trim(pth, "/") i := len(pth) - 1 for i >= 0 && pth[i] != '/' { i-- } if i < 0 { return pth[:i+1], pth[i+1:] } return pth[:i], pth[i+1:] } // NewFs makes a new Fs object from the path // // The path is of the form remote:path // // Remotes are looked up in the config file. If the remote isn't // found then NotFoundInConfigFile will be returned. // // On Windows avoid single character remote names as they can be mixed // up with drive letters. func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) { opt := new(Options) err := configstruct.Set(config, opt) if err != nil { return nil, err } f := &Fs{ name: name, root: root, opt: *opt, pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))), } if root == "/" || root == "." 
{ f.root = "" } else { f.root = root } f.features = (&fs.Features{ DuplicateFiles: true, CanHaveEmptyDirectories: true, ReadMimeType: false, }).Fill(ctx, f) if f.opt.Private { f.public = "0" } client := fshttp.NewClient(ctx) f.srv = rest.NewClient(client).SetRoot(apiBaseURL) f.IDRegexp = regexp.MustCompile(`^https://uptobox\.com/([a-zA-Z0-9]+)`) _, err = f.readMetaDataForPath(ctx, f.dirPath(""), &api.MetadataRequestOptions{Limit: 10}) if err != nil { if _, ok := err.(api.Error); !ok { return nil, err } // assume it's a file than oldRoot := f.root rootDir, file := f.splitPath(root) f.root = rootDir _, err = f.NewObject(ctx, file) if err == nil { return f, fs.ErrorIsFile } f.root = oldRoot } return f, nil } func (f *Fs) decodeError(resp *http.Response, response any) (err error) { defer fs.CheckClose(resp.Body, &err) body, err := io.ReadAll(resp.Body) if err != nil { return err } // try to unmarshal into correct structure err = json.Unmarshal(body, response) if err == nil { return nil } // try to unmarshal into Error var apiErr api.Error err = json.Unmarshal(body, &apiErr) if err != nil { return err } return apiErr } func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.MetadataRequestOptions) (*api.ReadMetadataResponse, error) { opts := rest.Opts{ Method: "GET", Path: "/user/files", Parameters: url.Values{ "token": []string{f.opt.AccessToken}, "path": []string{f.opt.Enc.FromStandardPath(path)}, "limit": []string{strconv.FormatUint(options.Limit, 10)}, }, } if options.Offset != 0 { opts.Parameters.Set("offset", strconv.FormatUint(options.Offset, 10)) } var err error var info api.ReadMetadataResponse var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.Call(ctx, &opts) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } err = f.decodeError(resp, &info) if err != nil { return nil, err } if info.StatusCode != 0 { return nil, errors.New(info.Message) } return &info, nil } // List the objects and 
directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { root := f.dirPath(dir) var limit uint64 = 100 // max number of objects per request - 100 seems to be the maximum the api accepts var page uint64 = 1 var offset uint64 // for the next page of requests for { opts := &api.MetadataRequestOptions{ Limit: limit, Offset: offset, } info, err := f.readMetaDataForPath(ctx, root, opts) if err != nil { if apiErr, ok := err.(api.Error); ok { // might indicate other errors but we can probably assume not found here if apiErr.StatusCode == 1 { return nil, fs.ErrorDirNotFound } } return nil, err } for _, item := range info.Data.Files { remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name)) o, err := f.newObjectWithInfo(ctx, remote, &item) if err != nil { continue } entries = append(entries, o) } // folders are always listed entirely on every page grr. if page == 1 { for _, item := range info.Data.Folders { remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name)) d := fs.NewDir(remote, time.Time{}).SetID(strconv.FormatUint(item.FolderID, 10)) entries = append(entries, d) } } //offset for the next page of items page++ offset += limit //check if we reached end of list if page > uint64(info.Data.PageCount) { break } } return entries, nil } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.FileInfo) (fs.Object, error) { o := &Object{ fs: f, remote: remote, size: info.Size, code: info.Code, hasMetaData: true, } return o, nil } // NewObject finds the Object at remote. If it can't be found it // returns the error fs.ErrorObjectNotFound. 
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { // no way to directly access an object by path so we have to list the parent dir entries, err := f.List(ctx, path.Dir(remote)) if err != nil { // need to change error type // if the parent dir doesn't exist the object doesn't exist either if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound } return nil, err } for _, entry := range entries { if o, ok := entry.(fs.Object); ok { if o.Remote() == remote { return o, nil } } } return nil, fs.ErrorObjectNotFound } func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, filename string, uploadURL string, options ...fs.OpenOption) (*api.UploadResponse, error) { opts := rest.Opts{ Method: "POST", RootURL: "https:" + uploadURL, Body: in, ContentLength: &size, Options: options, MultipartContentName: "files", MultipartFileName: filename, } var err error var resp *http.Response var ul api.UploadResponse err = f.pacer.CallNoRetry(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &ul) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't upload file: %w", err) } return &ul, nil } // dstPath starts from root and includes // func (f *Fs) move(ctx context.Context, dstPath string, fileID string) (err error) { meta, err := f.readMetaDataForPath(ctx, dstPath, &api.MetadataRequestOptions{Limit: 10}) if err != nil { return err } opts := rest.Opts{ Method: "PATCH", Path: "/user/files", } mv := api.CopyMoveFileRequest{ Token: f.opt.AccessToken, FileCodes: fileID, DestinationFolderID: meta.Data.CurrentFolder.FolderID, Action: "move", } var resp *http.Response var info api.UpdateResponse err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &mv, &info) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("couldn't move file: %w", err) } if info.StatusCode != 0 { return fmt.Errorf("move: api error: %d - %s", info.StatusCode, info.Message) } 
return err } // updateFileInformation set's various file attributes most importantly it's name func (f *Fs) updateFileInformation(ctx context.Context, update *api.UpdateFileInformation) (err error) { opts := rest.Opts{ Method: "PATCH", Path: "/user/files", } var resp *http.Response var info api.UpdateResponse err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, update, &info) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("couldn't update file info: %w", err) } if info.StatusCode != 0 { return fmt.Errorf("updateFileInfo: api error: %d - %s", info.StatusCode, info.Message) } return err } func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) error { if size > int64(200e9) { // max size 200GB return errors.New("file too big, can't upload") } else if size == 0 { return fs.ErrorCantUploadEmptyFiles } // yes it does take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :( // create upload request opts := rest.Opts{ Method: "GET", Path: "/upload", } token := api.Token{ Token: f.opt.AccessToken, } var info api.UploadInfo err := f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, &token, &info) return shouldRetry(ctx, resp, err) }) if err != nil { return err } if info.StatusCode != 0 { return fmt.Errorf("putUnchecked api error: %d - %s", info.StatusCode, info.Message) } // we need to have a safe name for the upload to work tmpName := "rcloneTemp" + random.String(8) upload, err := f.uploadFile(ctx, in, size, tmpName, info.Data.UploadLink, options...) 
if err != nil { return err } if len(upload.Files) != 1 { return errors.New("upload unexpected response") } match := f.IDRegexp.FindStringSubmatch(upload.Files[0].URL) // move file to destination folder base, leaf := f.splitPath(remote) fullBase := f.dirPath(base) if fullBase != "//" { // make all the parent folders err = f.Mkdir(ctx, base) if err != nil { // this might need some more error handling. if any of the following requests fail // we'll leave an orphaned temporary file floating around somewhere // they rarely fail though return err } err = f.move(ctx, fullBase, match[1]) if err != nil { return err } } // rename file to final name err = f.updateFileInformation(ctx, &api.UpdateFileInformation{ Token: f.opt.AccessToken, FileCode: match[1], NewName: f.opt.Enc.FromStandardName(leaf), Public: f.public, }) if err != nil { return err } return nil } // Put in to the remote path with the modTime given of the given size // // When called from outside an Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Put should either // return an error or upload it properly (rather than e.g. calling panic). // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { existingObj, err := f.NewObject(ctx, src.Remote()) switch err { case nil: return existingObj, existingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // Not found so create it return f.PutUnchecked(ctx, in, src, options...) default: return nil, err } } // PutUnchecked uploads the object // // This will create a duplicate if we upload a new file without // checking to see if there is one already - use Put() for that. 
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { err := f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...) if err != nil { return nil, err } return f.NewObject(ctx, src.Remote()) } // CreateDir dir creates a directory with the given parent path // base starts from root and may or may not include // func (f *Fs) CreateDir(ctx context.Context, base string, leaf string) (err error) { base = "//" + strings.Trim(base, "/") var resp *http.Response var apiErr api.Error opts := rest.Opts{ Method: "PUT", Path: "/user/files", } mkdir := api.CreateFolderRequest{ Name: f.opt.Enc.FromStandardName(leaf), Path: f.opt.Enc.FromStandardPath(base), Token: f.opt.AccessToken, } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &apiErr) return shouldRetry(ctx, resp, err) }) if err != nil { return err } // checking if the dir exists beforehand would be slower so we'll just ignore the error here if apiErr.StatusCode != 0 && !strings.Contains(apiErr.Data, "already exists") { return apiErr } return nil } func (f *Fs) mkDirs(ctx context.Context, path string) (err error) { // chop of any leading or trailing slashes dirs := strings.Split(path, "/") var base = "" for _, element := range dirs { // create every dir one by one if element != "" { err = f.CreateDir(ctx, base, element) if err != nil { return err } base += "/" + element } } return nil } // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { if dir == "" || dir == "." { return f.mkDirs(ctx, f.root) } return f.mkDirs(ctx, path.Join(f.root, dir)) } // may or may not delete folders with contents? 
// purge deletes the folder with the given ID via the API.
func (f *Fs) purge(ctx context.Context, folderID uint64) (err error) {
	var resp *http.Response
	var apiErr api.Error
	opts := rest.Opts{
		Method: "DELETE",
		Path:   "/user/files",
	}
	rm := api.DeleteFolderRequest{
		FolderID: folderID,
		Token:    f.opt.AccessToken,
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &rm, &apiErr)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err
	}
	// the API reports failures in the body with a non-zero status code
	if apiErr.StatusCode != 0 {
		return apiErr
	}
	return nil
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	// Limit: 10 is enough — we only need to know whether anything is inside.
	info, err := f.readMetaDataForPath(ctx, f.dirPath(dir), &api.MetadataRequestOptions{Limit: 10})
	if err != nil {
		return err
	}
	if len(info.Data.Folders) > 0 || len(info.Data.Files) > 0 {
		return fs.ErrorDirectoryNotEmpty
	}
	return f.purge(ctx, info.Data.CurrentFolder.FolderID)
}

// Move src to this remote using server side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	srcBase, srcLeaf := srcObj.fs.splitPathFull(src.Remote())
	dstBase, dstLeaf := f.splitPathFull(remote)
	needRename := srcLeaf != dstLeaf
	needMove := srcBase != dstBase
	// do the move if required
	if needMove {
		err := f.mkDirs(ctx, strings.Trim(dstBase, "/"))
		if err != nil {
			return nil, fmt.Errorf("move: failed to make destination dirs: %w", err)
		}
		err = f.move(ctx, dstBase, srcObj.code)
		if err != nil {
			return nil, err
		}
	}
	// rename to final name if we need to
	if needRename {
		err := f.updateFileInformation(ctx, &api.UpdateFileInformation{
			Token:    f.opt.AccessToken,
			FileCode: srcObj.code,
			NewName:  f.opt.Enc.FromStandardName(dstLeaf),
			Public:   f.public,
		})
		if err != nil {
			return nil, fmt.Errorf("move: failed final rename: %w", err)
		}
	}
	// copy the old object and apply the changes
	newObj := *srcObj
	newObj.remote = remote
	newObj.fs = f
	return &newObj, nil
}

// renameDir renames a directory
func (f *Fs) renameDir(ctx context.Context, folderID uint64, newName string) (err error) {
	var resp *http.Response
	var apiErr api.Error
	opts := rest.Opts{
		Method: "PATCH",
		Path:   "/user/files",
	}
	rename := api.RenameFolderRequest{
		Token:    f.opt.AccessToken,
		FolderID: folderID,
		NewName:  newName,
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &rename, &apiErr)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err
	}
	if apiErr.StatusCode != 0 {
		return apiErr
	}
	return nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	// find out source
	srcPath := srcFs.dirPath(srcRemote)
	srcInfo, err := f.readMetaDataForPath(ctx, srcPath, &api.MetadataRequestOptions{Limit: 1})
	if err != nil {
		return fmt.Errorf("dirmove: source not found: %w", err)
	}
	// check if the destination already exists
	dstPath := f.dirPath(dstRemote)
	_, err = f.readMetaDataForPath(ctx, dstPath, &api.MetadataRequestOptions{Limit: 1})
	if err == nil {
		return fs.ErrorDirExists
	}
	// make the destination parent path
	dstBase, dstName := f.splitPathFull(dstRemote)
	err = f.mkDirs(ctx, strings.Trim(dstBase, "/"))
	if err != nil {
		return fmt.Errorf("dirmove: failed to create dirs: %w", err)
	}
	// find the destination parent dir
	dstInfo, err := f.readMetaDataForPath(ctx, dstBase, &api.MetadataRequestOptions{Limit: 1})
	if err != nil {
		return fmt.Errorf("dirmove: failed to read destination: %w", err)
	}
	srcBase, srcName := srcFs.splitPathFull(srcRemote)
	needRename := srcName != dstName
	needMove := srcBase != dstBase
	// if we have to rename we'll have to use a temporary name since
	// there could already be a directory with the same name as the src directory
	if needRename {
		// rename to a temporary name
		// NOTE(review): if a later step fails, the source is left with this
		// temporary name — there is no rollback.
		tmpName := "rcloneTemp" + random.String(8)
		err = f.renameDir(ctx, srcInfo.Data.CurrentFolder.FolderID, tmpName)
		if err != nil {
			return fmt.Errorf("dirmove: failed initial rename: %w", err)
		}
	}
	// do the move
	if needMove {
		opts := rest.Opts{
			Method: "PATCH",
			Path:   "/user/files",
		}
		move := api.MoveFolderRequest{
			Token:               f.opt.AccessToken,
			FolderID:            srcInfo.Data.CurrentFolder.FolderID,
			DestinationFolderID: dstInfo.Data.CurrentFolder.FolderID,
			Action:              "move",
		}
		var resp *http.Response
		var apiErr api.Error
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, &move, &apiErr)
			return shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return fmt.Errorf("dirmove: failed to move: %w", err)
		}
		if apiErr.StatusCode != 0 {
			return apiErr
		}
	}
	// rename to final name
	if needRename {
		err = f.renameDir(ctx, srcInfo.Data.CurrentFolder.FolderID, dstName)
		if err != nil {
			return fmt.Errorf("dirmove: failed final rename: %w", err)
		}
	}
	return nil
}

// copy copies the file identified by fileID into the existing folder at
// dstPath on the server side.
func (f *Fs) copy(ctx context.Context, dstPath string, fileID string) (err error) {
	// resolve dstPath to its folder ID first
	meta, err := f.readMetaDataForPath(ctx, dstPath, &api.MetadataRequestOptions{Limit: 10})
	if err != nil {
		return err
	}
	opts := rest.Opts{
		Method: "PATCH",
		Path:   "/user/files",
	}
	cp := api.CopyMoveFileRequest{
		Token:               f.opt.AccessToken,
		FileCodes:           fileID,
		DestinationFolderID: meta.Data.CurrentFolder.FolderID,
		Action:              "copy",
	}
	var resp *http.Response
	var info api.UpdateResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &cp, &info)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return fmt.Errorf("couldn't copy file: %w", err)
	}
	if info.StatusCode != 0 {
		return fmt.Errorf("copy: api error: %d - %s", info.StatusCode, info.Message)
	}
	return err
}

// Copy
src to this remote using server-side copy operations.
//
// Returns fs.ErrorCantCopy if src is not an uptobox object.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		// Fixed: previously returned fs.ErrorCantMove, which is the wrong
		// sentinel for the fs.Copier interface and misreports the reason.
		return nil, fs.ErrorCantCopy
	}
	_, srcLeaf := f.splitPath(src.Remote())
	dstBase, dstLeaf := f.splitPath(remote)
	needRename := srcLeaf != dstLeaf
	// ensure the destination directory chain exists
	err := f.mkDirs(ctx, path.Join(f.root, dstBase))
	if err != nil {
		return nil, fmt.Errorf("copy: failed to make destination dirs: %w", err)
	}
	err = f.copy(ctx, f.dirPath(dstBase), srcObj.code)
	if err != nil {
		return nil, err
	}
	// the copy keeps the source leaf name, so look it up under that name
	newObj, err := f.NewObject(ctx, path.Join(dstBase, srcLeaf))
	if err != nil {
		return nil, fmt.Errorf("copy: couldn't find copied object: %w", err)
	}
	if needRename {
		err := f.updateFileInformation(ctx, &api.UpdateFileInformation{
			Token:    f.opt.AccessToken,
			FileCode: newObj.(*Object).code,
			NewName:  f.opt.Enc.FromStandardName(dstLeaf),
			Public:   f.public,
		})
		if err != nil {
			return nil, fmt.Errorf("copy: failed final rename: %w", err)
		}
		newObj.(*Object).remote = remote
	}
	return newObj, nil
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns the modification time of the object
//
// The backend does not store modification times, so this always returns
// the configured default time.
func (o *Object) ModTime(ctx context.Context) time.Time {
	ci := fs.GetConfig(ctx)
	return time.Time(ci.DefaultTime)
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// Hash returns the Md5sum of an object returning a lowercase hex string
//
// Hashes are not supported by this backend.
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// ID
returns the ID of the Object if known, or "" if not func (o *Object) ID() string { return o.code } // Storable returns whether this object is storable func (o *Object) Storable() bool { return true } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { return fs.ErrorCantSetModTime } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { opts := rest.Opts{ Method: "GET", Path: "/link", Parameters: url.Values{ "token": []string{o.fs.opt.AccessToken}, "file_code": []string{o.code}, }, } var dl api.Download var resp *http.Response err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("open: failed to get download link: %w", err) } fs.FixRangeOption(options, o.size) opts = rest.Opts{ Method: "GET", RootURL: dl.Data.DownloadLink, Options: options, } err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } return resp.Body, err } // Update the already existing object // // Copy the reader into the object updating modTime and size. // // The new object may have been created if an error is returned func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { if src.Size() < 0 { return errors.New("refusing to update with unknown size") } // upload with new size but old name err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...) 
if err != nil { return err } // delete duplicate object after successful upload err = o.Remove(ctx) if err != nil { return fmt.Errorf("failed to remove old version: %w", err) } // Fetch new object after deleting the duplicate info, err := o.fs.NewObject(ctx, o.Remote()) if err != nil { return err } // Replace guts of old object with new one *o = *info.(*Object) return nil } // Remove an object func (o *Object) Remove(ctx context.Context) error { opts := rest.Opts{ Method: "DELETE", Path: "/user/files", } delete := api.RemoveFileRequest{ Token: o.fs.opt.AccessToken, FileCodes: o.code, } var info api.UpdateResponse err := o.fs.pacer.Call(func() (bool, error) { resp, err := o.fs.srv.CallJSON(ctx, &opts, &delete, &info) return shouldRetry(ctx, resp, err) }) if err != nil { return err } if info.StatusCode != 0 { return fmt.Errorf("remove: api error: %d - %s", info.StatusCode, info.Message) } return nil } // Check the interfaces are satisfied var ( _ fs.Fs = (*Fs)(nil) _ fs.Copier = (*Fs)(nil) _ fs.Mover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil) _ fs.Object = (*Object)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/uptobox/uptobox_test.go
backend/uptobox/uptobox_test.go
// Test Uptobox filesystem interface package uptobox_test import ( "testing" "github.com/rclone/rclone/backend/uptobox" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { if *fstest.RemoteName == "" { *fstest.RemoteName = "TestUptobox:" } fstests.Run(t, &fstests.Opt{ RemoteName: *fstest.RemoteName, NilObject: (*uptobox.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/uptobox/api/types.go
backend/uptobox/api/types.go
// Package api provides types used by the Uptobox API.
package api

import "fmt"

// Error contains the error code and message returned by the API
type Error struct {
	Success    bool   `json:"success,omitempty"`
	StatusCode int    `json:"statusCode,omitempty"`
	Message    string `json:"message,omitempty"`
	Data       string `json:"data,omitempty"`
}

// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
	out := fmt.Sprintf("api error %d", e.StatusCode)
	if e.Message != "" {
		out += ": " + e.Message
	}
	if e.Data != "" {
		out += ": " + e.Data
	}
	return out
}

// FolderEntry represents a Uptobox subfolder when listing folder contents
type FolderEntry struct {
	FolderID    uint64 `json:"fld_id"`
	Description string `json:"fld_descr"`
	Password    string `json:"fld_password"`
	FullPath    string `json:"fullPath"`
	Path        string `json:"fld_name"`
	Name        string `json:"name"`
	Hash        string `json:"hash"`
}

// FolderInfo represents the current folder when listing folder contents
type FolderInfo struct {
	FolderID      uint64 `json:"fld_id"`
	Hash          string `json:"hash"`
	FileCount     uint64 `json:"fileCount"`
	TotalFileSize int64  `json:"totalFileSize"`
}

// FileInfo represents a file when listing folder contents
type FileInfo struct {
	Name         string `json:"file_name"`
	Description  string `json:"file_descr"`
	Created      string `json:"file_created"`
	Size         int64  `json:"file_size"`
	Downloads    uint64 `json:"file_downloads"`
	Code         string `json:"file_code"`
	Password     string `json:"file_password"`
	// NOTE(review): the API reports file_public as a number here, but
	// UpdateFileInformation.Public is a string — intentional asymmetry
	// between read and write payloads, apparently; confirm against the API.
	Public       int    `json:"file_public"`
	LastDownload string `json:"file_last_download"`
	ID           uint64 `json:"id"`
}

// ReadMetadataResponse is the response when listing folder contents
type ReadMetadataResponse struct {
	StatusCode int    `json:"statusCode"`
	Message    string `json:"message"`
	Data       struct {
		CurrentFolder  FolderInfo    `json:"currentFolder"`
		Folders        []FolderEntry `json:"folders"`
		Files          []FileInfo    `json:"files"`
		PageCount      int           `json:"pageCount"`
		TotalFileCount int           `json:"totalFileCount"`
		TotalFileSize  int64         `json:"totalFileSize"`
	} `json:"data"`
}

// UploadInfo is the response when initiating an upload
type UploadInfo struct {
	StatusCode int    `json:"statusCode"`
	Message    string `json:"message"`
	Data       struct {
		UploadLink string `json:"uploadLink"`
		MaxUpload  string `json:"maxUpload"`
	} `json:"data"`
}

// UploadResponse is the response to a successful upload
type UploadResponse struct {
	Files []struct {
		Name      string `json:"name"`
		Size      int64  `json:"size"`
		URL       string `json:"url"`
		DeleteURL string `json:"deleteUrl"`
	} `json:"files"`
}

// UpdateResponse is a generic response to various action on files (rename/copy/move)
type UpdateResponse struct {
	Message    string `json:"message"`
	StatusCode int    `json:"statusCode"`
}

// Download is the response when requesting a download link
type Download struct {
	StatusCode int    `json:"statusCode"`
	Message    string `json:"message"`
	Data       struct {
		DownloadLink string `json:"dlLink"`
	} `json:"data"`
}

// MetadataRequestOptions represents all the options when listing folder contents
type MetadataRequestOptions struct {
	Limit       uint64
	Offset      uint64
	SearchField string
	Search      string
}

// CreateFolderRequest is used for creating a folder
type CreateFolderRequest struct {
	Token string `json:"token"`
	Path  string `json:"path"`
	Name  string `json:"name"`
}

// DeleteFolderRequest is used for deleting a folder
type DeleteFolderRequest struct {
	Token    string `json:"token"`
	FolderID uint64 `json:"fld_id"`
}

// CopyMoveFileRequest is used for moving/copying a file
type CopyMoveFileRequest struct {
	Token               string `json:"token"`
	FileCodes           string `json:"file_codes"`
	DestinationFolderID uint64 `json:"destination_fld_id"`
	Action              string `json:"action"`
}

// MoveFolderRequest is used for moving a folder
type MoveFolderRequest struct {
	Token               string `json:"token"`
	FolderID            uint64 `json:"fld_id"`
	DestinationFolderID uint64 `json:"destination_fld_id"`
	Action              string `json:"action"`
}

// RenameFolderRequest is used for renaming a folder
type RenameFolderRequest struct {
	Token    string `json:"token"`
	FolderID uint64 `json:"fld_id"`
	NewName  string `json:"new_name"`
}

// UpdateFileInformation is used for renaming a file
type UpdateFileInformation struct {
	Token       string `json:"token"`
	FileCode    string `json:"file_code"`
	NewName     string `json:"new_name,omitempty"`
	Description string `json:"description,omitempty"`
	Password    string `json:"password,omitempty"`
	Public      string `json:"public,omitempty"`
}

// RemoveFileRequest is used for deleting a file
type RemoveFileRequest struct {
	Token     string `json:"token"`
	FileCodes string `json:"file_codes"`
}

// Token represents the authentication token
type Token struct {
	Token string `json:"token"`
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sugarsync/sugarsync.go
backend/sugarsync/sugarsync.go
// Package sugarsync provides an interface to the Sugarsync // object storage system. package sugarsync /* FIXME DirMove tests fails with: Can not move sync folder. go test -v -short -run TestIntegration/FsMkdir/FsPutFiles/FsDirMove -verbose -dump-bodies To work around this we use the remote "TestSugarSync:Test" to test with. */ import ( "context" "errors" "fmt" "io" "net/http" "net/url" "path" "regexp" "strconv" "strings" "sync" "time" "github.com/rclone/rclone/backend/sugarsync/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" ) /* maxFileLength = 16383 canWriteUnnormalized = true canReadUnnormalized = true canReadRenormalized = false canStream = true */ const ( appID = "/sc/9068489/215_1736969337" accessKeyID = "OTA2ODQ4OTE1NzEzNDAwNTI4Njc" encryptedPrivateAccessKey = "JONdXuRLNSRI5ue2Cr-vn-5m_YxyMNq9yHRKUQevqo8uaZjH502Z-x1axhyqOa8cDyldGq08RfFxozo" minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential rootURL = "https://api.sugarsync.com" listChunks = 500 // chunk size to read directory listings expiryLeeway = 5 * time.Minute // time before the token expires to renew ) // withDefault returns value but if value is "" then it returns defaultValue func withDefault(key, defaultValue string) (value string) { if value == "" { value = defaultValue } return value } // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "sugarsync", Description: "Sugarsync", NewFs: NewFs, Config: func(ctx context.Context, name string, m configmap.Mapper, 
config fs.ConfigIn) (*fs.ConfigOut, error) { opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, fmt.Errorf("failed to read options: %w", err) } switch config.State { case "": if opt.RefreshToken == "" { return fs.ConfigGoto("username") } return fs.ConfigConfirm("refresh", true, "config_refresh", "Already have a token - refresh?") case "refresh": if config.Result == "false" { return nil, nil } return fs.ConfigGoto("username") case "username": return fs.ConfigInput("password", "config_username", "username (email address)") case "password": m.Set("username", config.Result) return fs.ConfigPassword("auth", "config_password", "Your Sugarsync password.\n\nOnly required during setup and will not be stored.") case "auth": username, _ := m.Get("username") m.Set("username", "") password := config.Result authRequest := api.AppAuthorization{ Username: username, Password: obscure.MustReveal(password), Application: withDefault(opt.AppID, appID), AccessKeyID: withDefault(opt.AccessKeyID, accessKeyID), PrivateAccessKey: withDefault(opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)), } var resp *http.Response opts := rest.Opts{ Method: "POST", Path: "/app-authorization", } srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME // FIXME // err = f.pacer.Call(func() (bool, error) { resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil) // return shouldRetry(ctx, resp, err) //}) if err != nil { return nil, fmt.Errorf("failed to get token: %w", err) } opt.RefreshToken = resp.Header.Get("Location") m.Set("refresh_token", opt.RefreshToken) return nil, nil } return nil, fmt.Errorf("unknown state %q", config.State) }, Options: []fs.Option{{ Name: "app_id", Help: "Sugarsync App ID.\n\nLeave blank to use rclone's.", Sensitive: true, }, { Name: "access_key_id", Help: "Sugarsync Access Key ID.\n\nLeave blank to use rclone's.", Sensitive: true, }, { Name: "private_access_key", Help: "Sugarsync Private Access 
Key.\n\nLeave blank to use rclone's.", Sensitive: true, }, { Name: "hard_delete", Help: "Permanently delete files if true\notherwise put them in the deleted files.", Default: false, }, { Name: "refresh_token", Help: "Sugarsync refresh token.\n\nLeave blank normally, will be auto configured by rclone.", Advanced: true, Sensitive: true, }, { Name: "authorization", Help: "Sugarsync authorization.\n\nLeave blank normally, will be auto configured by rclone.", Advanced: true, Sensitive: true, }, { Name: "authorization_expiry", Help: "Sugarsync authorization expiry.\n\nLeave blank normally, will be auto configured by rclone.", Advanced: true, }, { Name: "user", Help: "Sugarsync user.\n\nLeave blank normally, will be auto configured by rclone.", Advanced: true, Sensitive: true, }, { Name: "root_id", Help: "Sugarsync root id.\n\nLeave blank normally, will be auto configured by rclone.", Advanced: true, Sensitive: true, }, { Name: "deleted_id", Help: "Sugarsync deleted folder id.\n\nLeave blank normally, will be auto configured by rclone.", Advanced: true, Sensitive: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.Base | encoder.EncodeCtl | encoder.EncodeInvalidUtf8), }}, }) } // Options defines the configuration for this backend type Options struct { AppID string `config:"app_id"` AccessKeyID string `config:"access_key_id"` PrivateAccessKey string `config:"private_access_key"` HardDelete bool `config:"hard_delete"` RefreshToken string `config:"refresh_token"` Authorization string `config:"authorization"` AuthorizationExpiry string `config:"authorization_expiry"` User string `config:"user"` RootID string `config:"root_id"` DeletedID string `config:"deleted_id"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote sugarsync type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options features *fs.Features // optional features srv 
*rest.Client // the connection to the server dirCache *dircache.DirCache // Map of directory path to directory id pacer *fs.Pacer // pacer for API calls m configmap.Mapper // config file access authMu sync.Mutex // used when doing authorization authExpiry time.Time // time the authorization expires } // Object describes a sugarsync object // // Will definitely have info but maybe not meta type Object struct { fs *Fs // what this object is part of remote string // The remote path hasMetaData bool // whether info below has been set size int64 // size of the object modTime time.Time // modification time of the object id string // ID of the object } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("sugarsync root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // parsePath parses a sugarsync 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. 
It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // readMetaDataForPath reads the metadata from the path func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.File, err error) { // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound } return nil, err } found, err := f.listAll(ctx, directoryID, func(item *api.File) bool { if strings.EqualFold(item.Name, leaf) { info = item return true } return false }, nil) if err != nil { return nil, err } if !found { return nil, fs.ErrorObjectNotFound } return info, nil } // readMetaDataForID reads the metadata for a file from the ID func (f *Fs) readMetaDataForID(ctx context.Context, ID string) (info *api.File, err error) { var resp *http.Response opts := rest.Opts{ Method: "GET", RootURL: ID, } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallXML(ctx, &opts, nil, &info) return shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { return nil, fs.ErrorObjectNotFound } return nil, fmt.Errorf("failed to get authorization: %w", err) } return info, nil } // getAuthToken gets an Auth token from the refresh token func (f *Fs) getAuthToken(ctx context.Context) error { fs.Debugf(f, "Renewing token") authRequest := api.TokenAuthRequest{ AccessKeyID: withDefault(f.opt.AccessKeyID, accessKeyID), PrivateAccessKey: withDefault(f.opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)), RefreshToken: f.opt.RefreshToken, } if authRequest.RefreshToken == "" { return errors.New("no refresh token found - run `rclone config reconnect`") } var authResponse 
api.Authorization var err error var resp *http.Response opts := rest.Opts{ Method: "POST", Path: "/authorization", ExtraHeaders: map[string]string{ "Authorization": "", // unset Authorization }, } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallXML(ctx, &opts, &authRequest, &authResponse) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("failed to get authorization: %w", err) } f.opt.Authorization = resp.Header.Get("Location") f.authExpiry = authResponse.Expiration f.opt.User = authResponse.User // Cache the results f.m.Set("authorization", f.opt.Authorization) f.m.Set("authorization_expiry", f.authExpiry.Format(time.RFC3339)) f.m.Set("user", f.opt.User) return nil } // Read the auth from the config file and refresh it if it is expired, setting it in srv func (f *Fs) getAuth(req *http.Request) (err error) { f.authMu.Lock() defer f.authMu.Unlock() ctx := req.Context() // if have auth, check it is in date if f.opt.Authorization == "" || f.opt.User == "" || f.authExpiry.IsZero() || time.Until(f.authExpiry) < expiryLeeway { // Get the auth token f.srv.SetSigner(nil) // temporarily remove the signer so we don't infinitely recurse err = f.getAuthToken(ctx) f.srv.SetSigner(f.getAuth) // replace signer if err != nil { return err } } // Set Authorization header req.Header.Set("Authorization", f.opt.Authorization) return nil } // Read the user info into f func (f *Fs) getUser(ctx context.Context) (user *api.User, err error) { var resp *http.Response opts := rest.Opts{ Method: "GET", Path: "/user", } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallXML(ctx, &opts, nil, &user) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("failed to get user: %w", err) } return user, nil } // Read the expiry time from a string func parseExpiry(expiryString string) time.Time { if expiryString == "" { return time.Time{} } expiry, err := time.Parse(time.RFC3339, expiryString) if err != nil { 
fs.Debugf("sugarsync", "Invalid expiry time %q read from config", expiryString) return time.Time{} } return expiry } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } root = parsePath(root) client := fshttp.NewClient(ctx) f := &Fs{ name: name, root: root, opt: *opt, srv: rest.NewClient(client).SetRoot(rootURL), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), m: m, authExpiry: parseExpiry(opt.AuthorizationExpiry), } f.features = (&fs.Features{ CaseInsensitive: true, CanHaveEmptyDirectories: true, }).Fill(ctx, f) f.srv.SetSigner(f.getAuth) // use signing hook to get the auth f.srv.SetErrorHandler(errorHandler) // Get rootID if f.opt.RootID == "" { user, err := f.getUser(ctx) if err != nil { return nil, err } f.opt.RootID = user.SyncFolders if strings.HasSuffix(f.opt.RootID, "/contents") { f.opt.RootID = f.opt.RootID[:len(f.opt.RootID)-9] } else { return nil, fmt.Errorf("unexpected rootID %q", f.opt.RootID) } // Cache the results f.m.Set("root_id", f.opt.RootID) f.opt.DeletedID = user.Deleted f.m.Set("deleted_id", f.opt.DeletedID) } f.dirCache = dircache.New(root, f.opt.RootID, f) // Find the current root err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) oldDirCache := f.dirCache f.dirCache = dircache.New(newRoot, f.opt.RootID, f) f.root = newRoot resetF := func() { f.dirCache = oldDirCache f.root = root } // Make new Fs which is the parent err = f.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f resetF() return f, nil } _, err := f.newObjectWithInfo(ctx, remote, nil) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f resetF() return f, nil } return nil, err } // return an error 
with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } var findError = regexp.MustCompile(`<h3>(.*?)</h3>`) // errorHandler parses errors from the body // // Errors seem to be HTML with <h3> containing the error text // <h3>Can not move sync folder.</h3> func errorHandler(resp *http.Response) (err error) { body, err := rest.ReadBody(resp) if err != nil { return fmt.Errorf("error reading error out of body: %w", err) } match := findError.FindSubmatch(body) if len(match) < 2 || len(match[1]) == 0 { return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body) } return fmt.Errorf("HTTP error %v (%v): %s", resp.StatusCode, resp.Status, match[1]) } // rootSlash returns root with a slash on if it is empty, otherwise empty string func (f *Fs) rootSlash() string { if f.root == "" { return f.root } return f.root + "/" } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } var err error if info != nil { // Set info err = o.setMetaData(info) } else { err = o.readMetaData(ctx) // reads info and meta, returning an error } if err != nil { return nil, err } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(ctx, remote, nil) } // FindLeaf finds a directory of name leaf in the folder with ID pathID func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { // fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf) // Find the leaf in pathID found, err = f.listAll(ctx, pathID, nil, func(item *api.Collection) bool { if strings.EqualFold(item.Name, leaf) { pathIDOut = item.Ref return true } return false }) // fs.Debugf(f, ">FindLeaf %q, %v, %v", pathIDOut, found, err) return pathIDOut, found, err } // CreateDir makes a directory with pathID as parent and name leaf func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) var resp *http.Response opts := rest.Opts{ Method: "POST", RootURL: pathID, NoResponse: true, } var mkdir any if pathID == f.opt.RootID { // folders at the root are syncFolders mkdir = &api.CreateSyncFolder{ Name: f.opt.Enc.FromStandardName(leaf), } opts.ExtraHeaders = map[string]string{ "*X-SugarSync-API-Version": "1.5", // non canonical header } } else { mkdir = &api.CreateFolder{ Name: f.opt.Enc.FromStandardName(leaf), } } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallXML(ctx, &opts, mkdir, nil) return shouldRetry(ctx, resp, err) }) if err != nil { return "", err } newID = resp.Header.Get("Location") if newID == "" { // look up ID if not returned (e.g. 
for syncFolder) var found bool newID, found, err = f.FindLeaf(ctx, pathID, leaf) if err != nil { return "", err } if !found { return "", fmt.Errorf("couldn't find ID for newly created directory %q", leaf) } } return newID, nil } // list the objects into the function supplied // // Should return true to finish processing type listAllFileFn func(*api.File) bool // list the folders into the function supplied // // Should return true to finish processing type listAllFolderFn func(*api.Collection) bool // Lists the directory required calling the user function on each item found // // If the user fn ever returns true then it early exits with found = true func (f *Fs) listAll(ctx context.Context, dirID string, fileFn listAllFileFn, folderFn listAllFolderFn) (found bool, err error) { opts := rest.Opts{ Method: "GET", RootURL: dirID, Path: "/contents", Parameters: url.Values{}, } opts.Parameters.Set("max", strconv.Itoa(listChunks)) start := 0 OUTER: for { opts.Parameters.Set("start", strconv.Itoa(start)) var result api.CollectionContents var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallXML(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return found, fmt.Errorf("couldn't list files: %w", err) } if fileFn != nil { for i := range result.Files { item := &result.Files[i] item.Name = f.opt.Enc.ToStandardName(item.Name) if fileFn(item) { found = true break OUTER } } } if folderFn != nil { for i := range result.Collections { item := &result.Collections[i] item.Name = f.opt.Enc.ToStandardName(item.Name) if folderFn(item) { found = true break OUTER } } } if !result.HasMore { break } start = result.End + 1 } return } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. 
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } var iErr error _, err = f.listAll(ctx, directoryID, func(info *api.File) bool { remote := path.Join(dir, info.Name) o, err := f.newObjectWithInfo(ctx, remote, info) if err != nil { iErr = err return true } entries = append(entries, o) return false }, func(info *api.Collection) bool { remote := path.Join(dir, info.Name) id := info.Ref // cache the directory ID for later lookups f.dirCache.Put(remote, id) d := fs.NewDir(remote, info.TimeCreated).SetID(id) entries = append(entries, d) return false }) if err != nil { return nil, err } if iErr != nil { return nil, iErr } return entries, nil } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Returns the object, leaf, directoryID and error. // // Used to create new objects func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { // Create the directory for the object if it doesn't exist leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true) if err != nil { return } // Temporary Object under construction o = &Object{ fs: f, remote: remote, } return o, leaf, directoryID, nil } // Put the object // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil) switch err { case nil: return existingObj, existingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // Not found so create it return f.PutUnchecked(ctx, in, src, options...) 
default: return nil, err } } // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) } // PutUnchecked the object into the container // // This will produce an error if the object already exists. // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() modTime := src.ModTime(ctx) o, _, _, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } return o, o.Update(ctx, in, src, options...) } // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, err := f.dirCache.FindDir(ctx, dir, true) return err } // delete removes an object or directory by ID either putting it // in the Deleted files or deleting it permanently func (f *Fs) delete(ctx context.Context, isFile bool, id string, remote string, hardDelete bool) (err error) { if hardDelete { opts := rest.Opts{ Method: "DELETE", RootURL: id, NoResponse: true, } return f.pacer.Call(func() (bool, error) { resp, err := f.srv.Call(ctx, &opts) return shouldRetry(ctx, resp, err) }) } // Move file/dir to deleted files if not hard delete leaf := path.Base(remote) if isFile { _, err = f.moveFile(ctx, id, leaf, f.opt.DeletedID) } else { err = f.moveDir(ctx, id, leaf, f.opt.DeletedID) } return err } // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } dc := f.dirCache directoryID, err := 
dc.FindDir(ctx, dir, false) if err != nil { return err } if check { found, err := f.listAll(ctx, directoryID, func(item *api.File) bool { return true }, func(item *api.Collection) bool { return true }) if err != nil { return err } if found { return fs.ErrorDirectoryNotEmpty } } err = f.delete(ctx, false, directoryID, root, f.opt.HardDelete || check) if err != nil { return err } f.dirCache.FlushDir(dir) return nil } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } err = srcObj.readMetaData(ctx) if err != nil { return nil, err } srcPath := srcObj.fs.rootSlash() + srcObj.remote dstPath := f.rootSlash() + remote if strings.EqualFold(srcPath, dstPath) { return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath) } // Create temporary object dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } // Find and remove existing object cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy") if err != nil { return nil, err } defer cleanup(&err) // Copy the object opts := rest.Opts{ Method: "POST", RootURL: directoryID, NoResponse: true, } copyFile := api.CopyFile{ Name: f.opt.Enc.FromStandardName(leaf), Source: srcObj.id, 
} var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallXML(ctx, &opts, &copyFile, nil) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } dstObj.id = resp.Header.Get("Location") err = dstObj.readMetaData(ctx) if err != nil { return nil, err } return dstObj, nil } // Purge deletes all the files in the directory // // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge(ctx context.Context, dir string) error { // Caution: Deleting a folder may orphan objects. It's important // to remove the contents of the folder before you delete the // folder. That's because removing a folder using DELETE does not // remove the objects contained within the folder. If you delete // a folder without first deleting its contents, the contents may // be rendered inaccessible. // // An alternative to permanently deleting a folder is moving it to the // Deleted Files folder. A folder (and all its contents) in the // Deleted Files folder can be recovered. Your app can retrieve the // link to the user's Deleted Files folder from the <deleted> element // in the user resource representation. Your application can then move // a folder to the Deleted Files folder by issuing an HTTP PUT request // to the URL that represents the file resource and provide as input, // XML that specifies in the <parent> element the link to the Deleted // Files folder. 
if f.opt.HardDelete { return fs.ErrorCantPurge } return f.purgeCheck(ctx, dir, false) } // moveFile moves a file server-side func (f *Fs) moveFile(ctx context.Context, id, leaf, directoryID string) (info *api.File, err error) { opts := rest.Opts{ Method: "PUT", RootURL: id, } move := api.MoveFile{ Name: f.opt.Enc.FromStandardName(leaf), Parent: directoryID, } var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallXML(ctx, &opts, &move, &info) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } // The docs say that there is nothing returned but apparently // there is... however it doesn't have Ref // // If ref not set, assume it hasn't changed if info.Ref == "" { info.Ref = id } return info, nil } // moveDir moves a folder server-side func (f *Fs) moveDir(ctx context.Context, id, leaf, directoryID string) (err error) { // Move the object opts := rest.Opts{ Method: "PUT", RootURL: id, NoResponse: true, } move := api.MoveFolder{ Name: f.opt.Enc.FromStandardName(leaf), Parent: directoryID, } var resp *http.Response return f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallXML(ctx, &opts, &move, nil) return shouldRetry(ctx, resp, err) }) } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } // Create temporary object dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } // Do the move info, err := f.moveFile(ctx, srcObj.id, leaf, directoryID) if err != nil { return nil, err } err = dstObj.setMetaData(info) if err != nil { return nil, err } return dstObj, nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) if err != nil { return err } // Do the move err = f.moveDir(ctx, srcID, dstLeaf, dstDirectoryID) if err != nil { return err } srcFs.dirCache.FlushDir(srcRemote) return nil } // PublicLink adds a "readable by anyone with link" permission on the given file or folder. 
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) { obj, err := f.NewObject(ctx, remote) if err != nil { return "", err } o, ok := obj.(*Object) if !ok { return "", errors.New("internal error: not an Object") } opts := rest.Opts{ Method: "PUT", RootURL: o.id, } linkFile := api.SetPublicLink{ PublicLink: api.PublicLink{Enabled: true}, } var resp *http.Response var info *api.File err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallXML(ctx, &opts, &linkFile, &info) return shouldRetry(ctx, resp, err) }) if err != nil { return "", err } return info.PublicLink.URL, err } // DirCacheFlush resets the directory cache - used in testing as an // optional interface func (f *Fs) DirCacheFlush() { f.dirCache.ResetRoot() } // Hashes returns the supported hash sets. func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } // ------------------------------------------------------------ // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Hash returns the SHA-1 of an object returning a lowercase hex string func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { return "", hash.ErrUnsupported } // Size returns the size of an object in bytes func (o *Object) Size() int64 { err := o.readMetaData(context.TODO()) if err != nil {
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sugarsync/sugarsync_internal_test.go
backend/sugarsync/sugarsync_internal_test.go
package sugarsync import ( "bytes" "io" "net/http" "testing" "github.com/stretchr/testify/assert" ) func TestErrorHandler(t *testing.T) { for _, test := range []struct { name string body string code int status string want string }{ { name: "empty", body: "", code: 500, status: "internal error", want: `HTTP error 500 (internal error) returned body: ""`, }, { name: "unknown", body: "<h1>unknown</h1>", code: 500, status: "internal error", want: `HTTP error 500 (internal error) returned body: "<h1>unknown</h1>"`, }, { name: "blank", body: "Nothing here <h3></h3>", code: 500, status: "internal error", want: `HTTP error 500 (internal error) returned body: "Nothing here <h3></h3>"`, }, { name: "real", body: "<h1>an error</h1>\n<h3>Can not move sync folder.</h3>\n<p>more stuff</p>", code: 500, status: "internal error", want: `HTTP error 500 (internal error): Can not move sync folder.`, }, } { t.Run(test.name, func(t *testing.T) { resp := http.Response{ Body: io.NopCloser(bytes.NewBufferString(test.body)), StatusCode: test.code, Status: test.status, } got := errorHandler(&resp) assert.Equal(t, test.want, got.Error()) }) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sugarsync/sugarsync_test.go
backend/sugarsync/sugarsync_test.go
// Test Sugarsync filesystem interface package sugarsync_test import ( "testing" "github.com/rclone/rclone/backend/sugarsync" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestSugarSync:Test", NilObject: (*sugarsync.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sugarsync/api/types.go
backend/sugarsync/api/types.go
// Package api has type definitions for sugarsync // // Converted from the API docs with help from https://www.onlinetool.io/xmltogo/ package api import ( "encoding/xml" "time" ) // AppAuthorization is used to request a refresh token // // The token is returned in the Location: field type AppAuthorization struct { XMLName xml.Name `xml:"appAuthorization"` Username string `xml:"username"` Password string `xml:"password"` Application string `xml:"application"` AccessKeyID string `xml:"accessKeyId"` PrivateAccessKey string `xml:"privateAccessKey"` } // TokenAuthRequest is the request to get Authorization type TokenAuthRequest struct { XMLName xml.Name `xml:"tokenAuthRequest"` AccessKeyID string `xml:"accessKeyId"` PrivateAccessKey string `xml:"privateAccessKey"` RefreshToken string `xml:"refreshToken"` } // Authorization is returned from the TokenAuthRequest type Authorization struct { XMLName xml.Name `xml:"authorization"` Expiration time.Time `xml:"expiration"` User string `xml:"user"` } // File represents a single file type File struct { Name string `xml:"displayName"` Ref string `xml:"ref"` DsID string `xml:"dsid"` TimeCreated time.Time `xml:"timeCreated"` Parent string `xml:"parent"` Size int64 `xml:"size"` LastModified time.Time `xml:"lastModified"` MediaType string `xml:"mediaType"` PresentOnServer bool `xml:"presentOnServer"` FileData string `xml:"fileData"` Versions string `xml:"versions"` PublicLink PublicLink } // Collection represents // - Workspace Collection // - Sync Folders collection // - Folder type Collection struct { Type string `xml:"type,attr"` Name string `xml:"displayName"` Ref string `xml:"ref"` // only for Folder DsID string `xml:"dsid"` TimeCreated time.Time `xml:"timeCreated"` Parent string `xml:"parent"` Collections string `xml:"collections"` Files string `xml:"files"` Contents string `xml:"contents"` // Sharing bool `xml:"sharing>enabled,attr"` } // CollectionContents is the result of a list call type CollectionContents struct { //XMLName 
xml.Name `xml:"collectionContents"` Start int `xml:"start,attr"` HasMore bool `xml:"hasMore,attr"` End int `xml:"end,attr"` Collections []Collection `xml:"collection"` Files []File `xml:"file"` } // User is returned from the /user call type User struct { XMLName xml.Name `xml:"user"` Username string `xml:"username"` Nickname string `xml:"nickname"` Quota struct { Limit int64 `xml:"limit"` Usage int64 `xml:"usage"` } `xml:"quota"` Workspaces string `xml:"workspaces"` SyncFolders string `xml:"syncfolders"` Deleted string `xml:"deleted"` MagicBriefcase string `xml:"magicBriefcase"` WebArchive string `xml:"webArchive"` MobilePhotos string `xml:"mobilePhotos"` Albums string `xml:"albums"` RecentActivities string `xml:"recentActivities"` ReceivedShares string `xml:"receivedShares"` PublicLinks string `xml:"publicLinks"` MaximumPublicLinkSize int `xml:"maximumPublicLinkSize"` } // CreateFolder is posted to a folder URL to create a folder type CreateFolder struct { XMLName xml.Name `xml:"folder"` Name string `xml:"displayName"` } // MoveFolder is posted to a folder URL to move a folder type MoveFolder struct { XMLName xml.Name `xml:"folder"` Name string `xml:"displayName"` Parent string `xml:"parent"` } // CreateSyncFolder is posted to the root folder URL to create a sync folder type CreateSyncFolder struct { XMLName xml.Name `xml:"syncFolder"` Name string `xml:"displayName"` } // CreateFile is posted to a folder URL to create a file type CreateFile struct { XMLName xml.Name `xml:"file"` Name string `xml:"displayName"` MediaType string `xml:"mediaType"` } // MoveFile is posted to a file URL to create a file type MoveFile struct { XMLName xml.Name `xml:"file"` Name string `xml:"displayName"` Parent string `xml:"parent"` } // CopyFile copies a file from source type CopyFile struct { XMLName xml.Name `xml:"fileCopy"` Source string `xml:"source,attr"` Name string `xml:"displayName"` } // PublicLink is the URL and enabled flag for a public link type PublicLink struct { XMLName 
xml.Name `xml:"publicLink"` URL string `xml:",chardata"` Enabled bool `xml:"enabled,attr"` } // SetPublicLink can be used to enable the file for sharing type SetPublicLink struct { XMLName xml.Name `xml:"file"` PublicLink PublicLink } // SetLastModified sets the modified time for a file type SetLastModified struct { XMLName xml.Name `xml:"file"` LastModified time.Time `xml:"lastModified"` }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/azureblob/azureblob_unsupported.go
backend/azureblob/azureblob_unsupported.go
// Build for azureblob for unsupported platforms to stop go complaining // about "no buildable Go source files " //go:build plan9 || solaris || js // Package azureblob provides an interface to the Microsoft Azure blob object storage system package azureblob
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/azureblob/azureblob_test.go
backend/azureblob/azureblob_test.go
// Test AzureBlob filesystem interface //go:build !plan9 && !solaris && !js package azureblob import ( "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" "github.com/stretchr/testify/assert" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { name := "TestAzureBlob" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":", NilObject: (*Object)(nil), TiersToTest: []string{"Hot", "Cool", "Cold"}, ChunkedUpload: fstests.ChunkedUploadConfig{ MinChunkSize: defaultChunkSize, }, ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "use_copy_blob", Value: "false"}, }, }) } // TestIntegration2 runs integration tests against the remote func TestIntegration2(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("Skipping as -remote set") } name := "TestAzureBlob" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":", NilObject: (*Object)(nil), TiersToTest: []string{"Hot", "Cool", "Cold"}, ChunkedUpload: fstests.ChunkedUploadConfig{ MinChunkSize: defaultChunkSize, }, ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "directory_markers", Value: "true"}, {Name: name, Key: "use_copy_blob", Value: "false"}, }, }) } func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadChunkSize(cs) } func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setCopyCutoff(cs) } var ( _ fstests.SetUploadChunkSizer = (*Fs)(nil) _ fstests.SetCopyCutoffer = (*Fs)(nil) ) func TestValidateAccessTier(t *testing.T) { tests := map[string]struct { accessTier string want bool }{ "hot": {"hot", true}, "HOT": {"HOT", true}, "Hot": {"Hot", true}, "cool": {"cool", true}, "cold": {"cold", true}, "archive": {"archive", true}, "empty": {"", false}, "unknown": {"unknown", false}, } for name, test := range tests { t.Run(name, func(t *testing.T) { got := validateAccessTier(test.accessTier) assert.Equal(t, test.want, got) }) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/azureblob/azureblob_internal_test.go
backend/azureblob/azureblob_internal_test.go
//go:build !plan9 && !solaris && !js package azureblob import ( "context" "encoding/base64" "fmt" "net/http" "strings" "testing" "time" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/lib/random" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestBlockIDCreator(t *testing.T) { // Check creation and random number bic, err := newBlockIDCreator() require.NoError(t, err) bic2, err := newBlockIDCreator() require.NoError(t, err) assert.NotEqual(t, bic.random, bic2.random) assert.NotEqual(t, bic.random, [8]byte{}) // Set random to known value for tests bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8} chunkNumber := uint64(0xFEDCBA9876543210) // Check creation of ID want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8}) assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want) got := bic.newBlockID(chunkNumber) assert.Equal(t, want, got) assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got) // Test checkID is working assert.NoError(t, bic.checkID(chunkNumber, got)) assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64") assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length") assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded") assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes") } func (f *Fs) testFeatures(t *testing.T) { // Check first feature flags are set on this remote enabled := f.Features().SetTier assert.True(t, enabled) enabled = f.Features().GetTier assert.True(t, enabled) } type ReadSeekCloser struct { *strings.Reader } func (r *ReadSeekCloser) Close() error { return nil } // Stage a block at remote but don't commit it func (f *Fs) 
stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) { var ( containerName, blobPath = f.split(remote) containerClient = f.cntSVC(containerName) blobClient = containerClient.NewBlockBlobClient(blobPath) data = "uncommitted data" blockID = "1" blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID)) ) r := &ReadSeekCloser{strings.NewReader(data)} _, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil) require.NoError(t, err) // Verify the block is staged but not committed blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil) require.NoError(t, err) found := false for _, block := range blockList.UncommittedBlocks { if *block.Name == blockIDBase64 { found = true break } } require.True(t, found, "Block ID not found in uncommitted blocks") } // This tests uploading a blob where it has uncommitted blocks with a different ID size. // // https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/ // // TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks func (f *Fs) testWriteUncommittedBlocks(t *testing.T) { var ( ctx = context.Background() remote = "testBlob" ) // Multipart copy the blob please oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff f.opt.UseCopyBlob = false f.opt.CopyCutoff = f.opt.ChunkSize defer func() { f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff }() // Create a blob with uncommitted blocks f.stageBlockWithoutCommit(ctx, t, remote) // Now attempt to overwrite the block with a different sized block ID to provoke this error // Check the object does not exist _, err := f.NewObject(ctx, remote) require.Equal(t, fs.ErrorObjectNotFound, err) // Upload a multipart file over the block with uncommitted chunks of a different ID size size := 4*int(f.opt.ChunkSize) - 1 contents := random.String(size) item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z")) o 
:= fstests.PutTestContents(ctx, t, f, &item, contents, true) // Check size assert.Equal(t, int64(size), o.Size()) // Create a new blob with uncommitted blocks newRemote := "testBlob2" f.stageBlockWithoutCommit(ctx, t, newRemote) // Copy over that block dst, err := f.Copy(ctx, o, newRemote) require.NoError(t, err) // Check basics assert.Equal(t, int64(size), dst.Size()) assert.Equal(t, newRemote, dst.Remote()) // Check contents gotContents := fstests.ReadObject(ctx, t, dst, -1) assert.Equal(t, contents, gotContents) // Remove the object require.NoError(t, dst.Remove(ctx)) } func (f *Fs) InternalTest(t *testing.T) { t.Run("Features", f.testFeatures) t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks) t.Run("Metadata", f.testMetadataPaths) } // helper to read blob properties for an object func getProps(ctx context.Context, t *testing.T, o fs.Object) *blob.GetPropertiesResponse { ao := o.(*Object) props, err := ao.readMetaDataAlways(ctx) require.NoError(t, err) return props } // helper to assert select headers and user metadata func assertHeadersAndMetadata(t *testing.T, props *blob.GetPropertiesResponse, want map[string]string, wantUserMeta map[string]string) { // Headers get := func(p *string) string { if p == nil { return "" } return *p } if v, ok := want["content-type"]; ok { assert.Equal(t, v, get(props.ContentType), "content-type") } if v, ok := want["cache-control"]; ok { assert.Equal(t, v, get(props.CacheControl), "cache-control") } if v, ok := want["content-disposition"]; ok { assert.Equal(t, v, get(props.ContentDisposition), "content-disposition") } if v, ok := want["content-encoding"]; ok { assert.Equal(t, v, get(props.ContentEncoding), "content-encoding") } if v, ok := want["content-language"]; ok { assert.Equal(t, v, get(props.ContentLanguage), "content-language") } // User metadata (case-insensitive keys from service) norm := make(map[string]*string, len(props.Metadata)) for kk, vv := range props.Metadata { norm[strings.ToLower(kk)] = vv } for 
k, v := range wantUserMeta { pv, ok := norm[strings.ToLower(k)] if assert.True(t, ok, fmt.Sprintf("missing user metadata key %q", k)) { if pv == nil { assert.Equal(t, v, "", k) } else { assert.Equal(t, v, *pv, k) } } else { // Log available keys for diagnostics keys := make([]string, 0, len(props.Metadata)) for kk := range props.Metadata { keys = append(keys, kk) } t.Logf("available user metadata keys: %v", keys) } } } // helper to read blob tags for an object func getTagsMap(ctx context.Context, t *testing.T, o fs.Object) map[string]string { ao := o.(*Object) blb := ao.getBlobSVC() resp, err := blb.GetTags(ctx, nil) require.NoError(t, err) out := make(map[string]string) for _, tag := range resp.BlobTagSet { if tag.Key != nil { k := *tag.Key v := "" if tag.Value != nil { v = *tag.Value } out[k] = v } } return out } // Test metadata across different write paths func (f *Fs) testMetadataPaths(t *testing.T) { ctx := context.Background() if testing.Short() { t.Skip("skipping in short mode") } // Common expected metadata and headers baseMeta := fs.Metadata{ "cache-control": "no-cache", "content-disposition": "inline", "content-language": "en-US", // Note: Don't set content-encoding here to avoid download decoding differences // We will set a custom user metadata key "potato": "royal", // and modtime "mtime": fstest.Time("2009-05-06T04:05:06.499999999Z").Format(time.RFC3339Nano), } // Singlepart upload t.Run("PutSinglepart", func(t *testing.T) { // size less than chunk size contents := random.String(int(f.opt.ChunkSize / 2)) item := fstest.NewItem("meta-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) // override content-type via metadata mapping meta := fs.Metadata{} meta.Merge(baseMeta) meta["content-type"] = "text/plain" obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta) defer func() { _ = obj.Remove(ctx) }() props := getProps(ctx, t, obj) assertHeadersAndMetadata(t, props, map[string]string{ 
"content-type": "text/plain", "cache-control": "no-cache", "content-disposition": "inline", "content-language": "en-US", }, map[string]string{ "potato": "royal", }) _ = http.StatusOK // keep import for parity but don't inspect RawResponse }) // Multipart upload t.Run("PutMultipart", func(t *testing.T) { // size greater than chunk size to force multipart contents := random.String(int(f.opt.ChunkSize + 1024)) item := fstest.NewItem("meta-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) meta := fs.Metadata{} meta.Merge(baseMeta) meta["content-type"] = "application/json" obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta) defer func() { _ = obj.Remove(ctx) }() props := getProps(ctx, t, obj) assertHeadersAndMetadata(t, props, map[string]string{ "content-type": "application/json", "cache-control": "no-cache", "content-disposition": "inline", "content-language": "en-US", }, map[string]string{ "potato": "royal", }) // Tags: Singlepart upload t.Run("PutSinglepartTags", func(t *testing.T) { contents := random.String(int(f.opt.ChunkSize / 2)) item := fstest.NewItem("tags-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) meta := fs.Metadata{ "x-ms-tags": "env=dev,team=sync", } obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/plain", meta) defer func() { _ = obj.Remove(ctx) }() tags := getTagsMap(ctx, t, obj) assert.Equal(t, "dev", tags["env"]) assert.Equal(t, "sync", tags["team"]) }) // Tags: Multipart upload t.Run("PutMultipartTags", func(t *testing.T) { contents := random.String(int(f.opt.ChunkSize + 2048)) item := fstest.NewItem("tags-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) meta := fs.Metadata{ "x-ms-tags": "project=alpha,release=2025-08", } obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "application/octet-stream", meta) defer func() { _ = obj.Remove(ctx) }() tags := getTagsMap(ctx, t, 
obj) assert.Equal(t, "alpha", tags["project"]) assert.Equal(t, "2025-08", tags["release"]) }) }) // Singlepart copy with metadata-set mapping; omit content-type to exercise fallback t.Run("CopySinglepart", func(t *testing.T) { // create small source contents := random.String(int(f.opt.ChunkSize / 2)) srcItem := fstest.NewItem("meta-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil) defer func() { _ = srcObj.Remove(ctx) }() // set mapping via MetadataSet ctx2, ci := fs.AddConfig(ctx) ci.Metadata = true ci.MetadataSet = fs.Metadata{ "cache-control": "private, max-age=60", "content-disposition": "attachment; filename=foo.txt", "content-language": "fr", // no content-type: should fallback to source "potato": "maris", } // do copy dstName := "meta-copy-single-dst.txt" dst, err := f.Copy(ctx2, srcObj, dstName) require.NoError(t, err) defer func() { _ = dst.Remove(ctx2) }() props := getProps(ctx2, t, dst) // content-type should fallback to source (text/plain) assertHeadersAndMetadata(t, props, map[string]string{ "content-type": "text/plain", "cache-control": "private, max-age=60", "content-disposition": "attachment; filename=foo.txt", "content-language": "fr", }, map[string]string{ "potato": "maris", }) // mtime should be populated on copy when --metadata is used // and should equal the source ModTime (RFC3339Nano) // Read user metadata (case-insensitive) m := props.Metadata var gotMtime string for k, v := range m { if strings.EqualFold(k, "mtime") && v != nil { gotMtime = *v break } } if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") { // parse and compare times ignoring formatting differences parsed, err := time.Parse(time.RFC3339Nano, gotMtime) require.NoError(t, err) assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime") } }) // CopySinglepart with only --metadata (no MetadataSet) 
must inject mtime and preserve src content-type t.Run("CopySinglepart_MetadataOnly", func(t *testing.T) { contents := random.String(int(f.opt.ChunkSize / 2)) srcItem := fstest.NewItem("meta-copy-single-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil) defer func() { _ = srcObj.Remove(ctx) }() ctx2, ci := fs.AddConfig(ctx) ci.Metadata = true dstName := "meta-copy-single-only-dst.txt" dst, err := f.Copy(ctx2, srcObj, dstName) require.NoError(t, err) defer func() { _ = dst.Remove(ctx2) }() props := getProps(ctx2, t, dst) assertHeadersAndMetadata(t, props, map[string]string{ "content-type": "text/plain", }, map[string]string{}) // Assert mtime injected m := props.Metadata var gotMtime string for k, v := range m { if strings.EqualFold(k, "mtime") && v != nil { gotMtime = *v break } } if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") { parsed, err := time.Parse(time.RFC3339Nano, gotMtime) require.NoError(t, err) assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime") } }) // Multipart copy with metadata-set mapping; omit content-type to exercise fallback t.Run("CopyMultipart", func(t *testing.T) { // create large source to force multipart contents := random.String(int(f.opt.CopyCutoff + 1024)) srcItem := fstest.NewItem("meta-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil) defer func() { _ = srcObj.Remove(ctx) }() // set mapping via MetadataSet ctx2, ci := fs.AddConfig(ctx) ci.Metadata = true ci.MetadataSet = fs.Metadata{ "cache-control": "max-age=0, no-cache", // omit content-type to trigger fallback "content-language": "de", "potato": "desiree", } dstName := "meta-copy-multi-dst.txt" dst, err := f.Copy(ctx2, srcObj, dstName) require.NoError(t, 
err) defer func() { _ = dst.Remove(ctx2) }() props := getProps(ctx2, t, dst) // content-type should fallback to source (application/octet-stream) assertHeadersAndMetadata(t, props, map[string]string{ "content-type": "application/octet-stream", "cache-control": "max-age=0, no-cache", "content-language": "de", }, map[string]string{ "potato": "desiree", }) // mtime should be populated on copy when --metadata is used m := props.Metadata var gotMtime string for k, v := range m { if strings.EqualFold(k, "mtime") && v != nil { gotMtime = *v break } } if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") { parsed, err := time.Parse(time.RFC3339Nano, gotMtime) require.NoError(t, err) assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime") } }) // CopyMultipart with only --metadata must inject mtime and preserve src content-type t.Run("CopyMultipart_MetadataOnly", func(t *testing.T) { contents := random.String(int(f.opt.CopyCutoff + 2048)) srcItem := fstest.NewItem("meta-copy-multi-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil) defer func() { _ = srcObj.Remove(ctx) }() ctx2, ci := fs.AddConfig(ctx) ci.Metadata = true dstName := "meta-copy-multi-only-dst.txt" dst, err := f.Copy(ctx2, srcObj, dstName) require.NoError(t, err) defer func() { _ = dst.Remove(ctx2) }() props := getProps(ctx2, t, dst) assertHeadersAndMetadata(t, props, map[string]string{ "content-type": "application/octet-stream", }, map[string]string{}) m := props.Metadata var gotMtime string for k, v := range m { if strings.EqualFold(k, "mtime") && v != nil { gotMtime = *v break } } if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") { parsed, err := time.Parse(time.RFC3339Nano, gotMtime) require.NoError(t, err) assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime") } 
}) // Tags: Singlepart copy t.Run("CopySinglepartTags", func(t *testing.T) { // create small source contents := random.String(int(f.opt.ChunkSize / 2)) srcItem := fstest.NewItem("tags-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil) defer func() { _ = srcObj.Remove(ctx) }() // set mapping via MetadataSet including tags ctx2, ci := fs.AddConfig(ctx) ci.Metadata = true ci.MetadataSet = fs.Metadata{ "x-ms-tags": "copy=single,mode=test", } dstName := "tags-copy-single-dst.txt" dst, err := f.Copy(ctx2, srcObj, dstName) require.NoError(t, err) defer func() { _ = dst.Remove(ctx2) }() tags := getTagsMap(ctx2, t, dst) assert.Equal(t, "single", tags["copy"]) assert.Equal(t, "test", tags["mode"]) }) // Tags: Multipart copy t.Run("CopyMultipartTags", func(t *testing.T) { // create large source to force multipart contents := random.String(int(f.opt.CopyCutoff + 4096)) srcItem := fstest.NewItem("tags-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil) defer func() { _ = srcObj.Remove(ctx) }() ctx2, ci := fs.AddConfig(ctx) ci.Metadata = true ci.MetadataSet = fs.Metadata{ "x-ms-tags": "copy=multi,mode=test", } dstName := "tags-copy-multi-dst.txt" dst, err := f.Copy(ctx2, srcObj, dstName) require.NoError(t, err) defer func() { _ = dst.Remove(ctx2) }() tags := getTagsMap(ctx2, t, dst) assert.Equal(t, "multi", tags["copy"]) assert.Equal(t, "test", tags["mode"]) }) // Negative: invalid x-ms-tags must error t.Run("InvalidXMsTags", func(t *testing.T) { contents := random.String(32) item := fstest.NewItem("tags-invalid.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) // construct ObjectInfo with invalid x-ms-tags buf := strings.NewReader(contents) // Build obj info with metadata meta := fs.Metadata{ 
"x-ms-tags": "badpair-without-equals", } // force metadata on ctx2, ci := fs.AddConfig(ctx) ci.Metadata = true obji := object.NewStaticObjectInfo(item.Path, item.ModTime, int64(len(contents)), true, nil, nil) obji = obji.WithMetadata(meta).WithMimeType("text/plain") _, err := f.Put(ctx2, buf, obji) require.Error(t, err) assert.Contains(t, err.Error(), "invalid tag") }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/azureblob/azureblob.go
backend/azureblob/azureblob.go
//go:build !plan9 && !solaris && !js // Package azureblob provides an interface to the Microsoft Azure blob object storage system package azureblob import ( "bytes" "context" "crypto/md5" "crypto/rand" "encoding/base64" "encoding/binary" "encoding/hex" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "os" "path" "slices" "sort" "strconv" "strings" "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/chunksize" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/multipart" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pool" "golang.org/x/sync/errgroup" ) const ( minSleep = 10 * time.Millisecond maxSleep = 10 * time.Second decayConstant = 1 // bigger for slower decay, exponential maxListChunkSize = 5000 // number of items to read at once modTimeKey = "mtime" dirMetaKey = "hdi_isfolder" dirMetaValue = "true" timeFormatIn = time.RFC3339 timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00" storageDefaultBaseURL = "blob.core.windows.net" defaultChunkSize = 4 
* fs.Mebi defaultAccessTier = blob.AccessTier("") // FIXME AccessTierNone // Default storage account, key and blob endpoint for emulator support, // though it is a base64 key checked in here, it is publicly available secret. emulatorAccount = "devstoreaccount1" emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1" sasCopyValidity = time.Hour // how long SAS should last when doing server side copy ) var ( errCantUpdateArchiveTierBlobs = fserrors.NoRetryError(errors.New("can't update archive tier blob without --azureblob-archive-tier-delete")) // Take this when changing or reading metadata. // // It acts as global metadata lock so we don't bloat Object // with an extra lock that will only very rarely be contended. metadataMu sync.Mutex ) // system metadata keys which this backend owns var systemMetadataInfo = map[string]fs.MetadataHelp{ "cache-control": { Help: "Cache-Control header", Type: "string", Example: "no-cache", }, "content-disposition": { Help: "Content-Disposition header", Type: "string", Example: "inline", }, "content-encoding": { Help: "Content-Encoding header", Type: "string", Example: "gzip", }, "content-language": { Help: "Content-Language header", Type: "string", Example: "en-US", }, "content-type": { Help: "Content-Type header", Type: "string", Example: "text/plain", }, "tier": { Help: "Tier of the object", Type: "string", Example: "Hot", ReadOnly: true, }, "mtime": { Help: "Time of last modification, read from rclone metadata", Type: "RFC 3339", Example: "2006-01-02T15:04:05.999999999Z07:00", }, } // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "azureblob", Description: "Microsoft Azure Blob Storage", NewFs: NewFs, MetadataInfo: &fs.MetadataInfo{ System: systemMetadataInfo, Help: `User metadata is stored as x-ms-meta- keys. 
Azure metadata keys are case insensitive and are always returned in lower case.`, }, Options: []fs.Option{{ Name: "account", Help: `Azure Storage Account Name. Set this to the Azure Storage Account Name in use. Leave blank to use SAS URL or Emulator, otherwise it needs to be set. If this is blank and if env_auth is set it will be read from the environment variable ` + "`AZURE_STORAGE_ACCOUNT_NAME`" + ` if possible. `, Sensitive: true, }, { Name: "env_auth", Help: `Read credentials from runtime (environment variables, CLI or MSI). See the [authentication docs](/azureblob#authentication) for full info.`, Default: false, }, { Name: "key", Help: `Storage Account Shared Key. Leave blank to use SAS URL or Emulator.`, Sensitive: true, }, { Name: "sas_url", Help: `SAS URL for container level access only. Leave blank if using account/key or Emulator.`, Sensitive: true, }, { Name: "tenant", Help: `ID of the service principal's tenant. Also called its directory ID. Set this if using - Service principal with client secret - Service principal with certificate - User with username and password `, Sensitive: true, }, { Name: "client_id", Help: `The ID of the client in use. Set this if using - Service principal with client secret - Service principal with certificate - User with username and password `, Sensitive: true, }, { Name: "client_secret", Help: `One of the service principal's client secrets Set this if using - Service principal with client secret `, Sensitive: true, }, { Name: "client_certificate_path", Help: `Path to a PEM or PKCS12 certificate file including the private key. Set this if using - Service principal with certificate `, }, { Name: "client_certificate_password", Help: `Password for the certificate file (optional). Optionally set this if using - Service principal with certificate And the certificate has a password. `, IsPassword: true, }, { Name: "client_send_certificate_chain", Help: `Send the certificate chain when using certificate auth. 
Specifies whether an authentication request will include an x5c header to support subject name / issuer based authentication. When set to true, authentication requests include the x5c header. Optionally set this if using - Service principal with certificate `, Default: false, Advanced: true, }, { Name: "username", Help: `User name (usually an email address) Set this if using - User with username and password `, Advanced: true, Sensitive: true, }, { Name: "password", Help: `The user's password Set this if using - User with username and password `, IsPassword: true, Advanced: true, }, { Name: "service_principal_file", Help: `Path to file containing credentials for use with a service principal. Leave blank normally. Needed only if you want to use a service principal instead of interactive login. $ az ad sp create-for-rbac --name "<name>" \ --role "Storage Blob Data Owner" \ --scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \ > azure-principal.json See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details. It may be more convenient to put the credentials directly into the rclone config file under the ` + "`client_id`, `tenant` and `client_secret`" + ` keys instead of setting ` + "`service_principal_file`" + `. `, Advanced: true, }, { Name: "disable_instance_discovery", Help: `Skip requesting Microsoft Entra instance metadata This should be set true only by applications authenticating in disconnected clouds, or private clouds such as Azure Stack. It determines whether rclone requests Microsoft Entra instance metadata from ` + "`https://login.microsoft.com/`" + ` before authenticating. 
Setting this to true will skip this request, making you responsible for ensuring the configured authority is valid and trustworthy. `, Default: false, Advanced: true, }, { Name: "use_msi", Help: `Use a managed service identity to authenticate (only works in Azure). When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/) to authenticate to Azure Storage instead of a SAS token or account key. If the VM(SS) on which this program is running has a system-assigned identity, it will be used by default. If the resource has no system-assigned but exactly one user-assigned identity, the user-assigned identity will be used by default. If the resource has multiple user-assigned identities, the identity to use must be explicitly specified using exactly one of the msi_object_id, msi_client_id, or msi_mi_res_id parameters.`, Default: false, Advanced: true, }, { Name: "msi_object_id", Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.", Advanced: true, Sensitive: true, }, { Name: "msi_client_id", Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.", Advanced: true, Sensitive: true, }, { Name: "msi_mi_res_id", Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.", Advanced: true, Sensitive: true, }, { Name: "use_emulator", Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.", Default: false, Advanced: true, }, { Name: "use_az", Help: `Use Azure CLI tool az for authentication Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/) as the sole means of authentication. Setting this can be useful if you wish to use the az CLI on a host with a System Managed Identity that you do not want to use. 
Don't set env_auth at the same time. `, Default: false, Advanced: true, }, { Name: "endpoint", Help: "Endpoint for the service.\n\nLeave blank normally.", Advanced: true, }, { Name: "upload_cutoff", Help: "Cutoff for switching to chunked upload (<= 256 MiB) (deprecated).", Advanced: true, }, { Name: "chunk_size", Help: `Upload chunk size. Note that this is stored in memory and there may be up to "--transfers" * "--azureblob-upload-concurrency" chunks stored at once in memory.`, Default: defaultChunkSize, Advanced: true, }, { Name: "upload_concurrency", Help: `Concurrency for multipart uploads. This is the number of chunks of the same file that are uploaded concurrently. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers. In tests, upload speed increases almost linearly with upload concurrency. For example to fill a gigabit pipe it may be necessary to raise this to 64. Note that this will use more memory. Note that chunks are stored in memory and there may be up to "--transfers" * "--azureblob-upload-concurrency" chunks stored at once in memory.`, Default: 16, Advanced: true, }, { Name: "copy_cutoff", Help: `Cutoff for switching to multipart copy. Any files larger than this that need to be server-side copied will be copied in chunks of chunk_size using the put block list API. Files smaller than this limit will be copied with the Copy Blob API.`, Default: 8 * fs.Mebi, Advanced: true, }, { Name: "copy_concurrency", Help: `Concurrency for multipart copy. This is the number of chunks of the same file that are copied concurrently. These chunks are not buffered in memory and Microsoft recommends setting this value to greater than 1000 in the azcopy documentation. 
https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azcopy-optimize#increase-concurrency In tests, copy speed increases almost linearly with copy concurrency.`, Default: 512, Advanced: true, }, { Name: "use_copy_blob", Help: `Whether to use the Copy Blob API when copying to the same storage account. If true (the default) then rclone will use the Copy Blob API for copies to the same storage account even when the size is above the copy_cutoff. Rclone assumes that the same storage account means the same config and does not check for the same storage account in different configs. There should be no need to change this value. `, Default: true, Advanced: true, }, { Name: "list_chunk", Help: `Size of blob list. This sets the number of blobs requested in each listing chunk. Default is the maximum, 5000. "List blobs" requests are permitted 2 minutes per megabyte to complete. If an operation is taking longer than 2 minutes per megabyte on average, it will time out ( [source](https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations#exceptions-to-default-timeout-interval) ). This can be used to limit the number of blobs items to return, to avoid the time out.`, Default: maxListChunkSize, Advanced: true, }, { Name: "access_tier", Help: `Access tier of blob: hot, cool, cold or archive. Archived blobs can be restored by setting access tier to hot, cool or cold. Leave blank if you intend to use default access tier, which is set at account level If there is no "access tier" specified, rclone doesn't apply any tier. rclone performs "Set Tier" operation on blobs while uploading, if objects are not modified, specifying "access tier" to new one will have no effect. If blobs are in "archive tier" at remote, trying to perform data transfer operations from remote will not be allowed. 
User should first restore by tiering blob to "Hot", "Cool" or "Cold".`, Advanced: true, }, { Name: "archive_tier_delete", Default: false, Help: fmt.Sprintf(`Delete archive tier blobs before overwriting. Archive tier blobs cannot be updated. So without this flag, if you attempt to update an archive tier blob, then rclone will produce the error: %v With this flag set then before rclone attempts to overwrite an archive tier blob, it will delete the existing blob before uploading its replacement. This has the potential for data loss if the upload fails (unlike updating a normal blob) and also may cost more since deleting archive tier blobs early may be chargable. `, errCantUpdateArchiveTierBlobs), Advanced: true, }, { Name: "disable_checksum", Help: `Don't store MD5 checksum with object metadata. Normally rclone will calculate the MD5 checksum of the input before uploading it so it can add it to metadata on the object. This is great for data integrity checking but can cause long delays for large files to start uploading.`, Default: false, Advanced: true, }, { Name: "memory_pool_flush_time", Default: fs.Duration(time.Minute), Advanced: true, Hide: fs.OptionHideBoth, Help: `How often internal memory buffer pools will be flushed. (no longer used)`, }, { Name: "memory_pool_use_mmap", Default: false, Advanced: true, Hide: fs.OptionHideBoth, Help: `Whether to use mmap buffers in internal memory pool. 
(no longer used)`, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.EncodeInvalidUtf8 | encoder.EncodeSlash | encoder.EncodeCtl | encoder.EncodeDel | encoder.EncodeBackSlash | encoder.EncodeRightPeriod), }, { Name: "public_access", Help: "Public access level of a container: blob or container.", Default: "", Examples: []fs.OptionExample{ { Value: "", Help: "The container and its blobs can be accessed only with an authorized request.\nIt's a default value.", }, { Value: string(container.PublicAccessTypeBlob), Help: "Blob data within this container can be read via anonymous request.", }, { Value: string(container.PublicAccessTypeContainer), Help: "Allow full public read access for container and blob data.", }, }, Advanced: true, }, { Name: "directory_markers", Default: false, Advanced: true, Help: `Upload an empty object with a trailing slash when a new directory is created Empty folders are unsupported for bucket based remotes, this option creates an empty object ending with "/", to persist the folder. This object also has the metadata "` + dirMetaKey + ` = ` + dirMetaValue + `" to conform to the Microsoft standard. `, }, { Name: "no_check_container", Help: `If set, don't attempt to check the container exists or create it. This can be useful when trying to minimise the number of transactions rclone does if you know the container exists already. 
`, Default: false, Advanced: true, }, { Name: "no_head_object", Help: `If set, do not do HEAD before GET when getting objects.`, Default: false, Advanced: true, }, { Name: "delete_snapshots", Help: `Set to specify how to deal with snapshots on blob deletion.`, Examples: []fs.OptionExample{ { Value: "", Help: "By default, the delete operation fails if a blob has snapshots", }, { Value: string(blob.DeleteSnapshotsOptionTypeInclude), Help: "Specify 'include' to remove the root blob and all its snapshots", }, { Value: string(blob.DeleteSnapshotsOptionTypeOnly), Help: "Specify 'only' to remove only the snapshots but keep the root blob.", }, }, Default: "", Exclusive: true, Advanced: true, }}, }) } // Options defines the configuration for this backend type Options struct { Account string `config:"account"` EnvAuth bool `config:"env_auth"` Key string `config:"key"` SASURL string `config:"sas_url"` Tenant string `config:"tenant"` ClientID string `config:"client_id"` ClientSecret string `config:"client_secret"` ClientCertificatePath string `config:"client_certificate_path"` ClientCertificatePassword string `config:"client_certificate_password"` ClientSendCertificateChain bool `config:"client_send_certificate_chain"` Username string `config:"username"` Password string `config:"password"` ServicePrincipalFile string `config:"service_principal_file"` DisableInstanceDiscovery bool `config:"disable_instance_discovery"` UseMSI bool `config:"use_msi"` MSIObjectID string `config:"msi_object_id"` MSIClientID string `config:"msi_client_id"` MSIResourceID string `config:"msi_mi_res_id"` UseAZ bool `config:"use_az"` Endpoint string `config:"endpoint"` ChunkSize fs.SizeSuffix `config:"chunk_size"` CopyCutoff fs.SizeSuffix `config:"copy_cutoff"` CopyConcurrency int `config:"copy_concurrency"` UseCopyBlob bool `config:"use_copy_blob"` UploadConcurrency int `config:"upload_concurrency"` ListChunkSize uint `config:"list_chunk"` AccessTier string `config:"access_tier"` ArchiveTierDelete bool 
`config:"archive_tier_delete"` UseEmulator bool `config:"use_emulator"` DisableCheckSum bool `config:"disable_checksum"` Enc encoder.MultiEncoder `config:"encoding"` PublicAccess string `config:"public_access"` DirectoryMarkers bool `config:"directory_markers"` NoCheckContainer bool `config:"no_check_container"` NoHeadObject bool `config:"no_head_object"` DeleteSnapshots string `config:"delete_snapshots"` } // Fs represents a remote azure server type Fs struct { name string // name of this remote root string // the path we are working on if any opt Options // parsed config options ci *fs.ConfigInfo // global config features *fs.Features // optional features cntSVCcacheMu sync.Mutex // mutex to protect cntSVCcache cntSVCcache map[string]*container.Client // reference to containerClient per container svc *service.Client // client to access azblob cred azcore.TokenCredential // how to generate tokens (may be nil) sharedKeyCred *service.SharedKeyCredential // shared key credentials (may be nil) anonymous bool // if this is anonymous access rootContainer string // container part of root (if any) rootDirectory string // directory part of root (if any) isLimited bool // if limited to one container cache *bucket.Cache // cache for container creation status pacer *fs.Pacer // To pace and retry the API calls uploadToken *pacer.TokenDispenser // control concurrency publicAccess container.PublicAccessType // Container Public Access Level // user delegation cache userDelegationMu sync.Mutex userDelegation *service.UserDelegationCredential userDelegationExpiry time.Time } // Object describes an azure object type Object struct { fs *Fs // what this object is part of remote string // The remote path modTime time.Time // The modified time of the object if known md5 string // MD5 hash if known size int64 // Size of the object mimeType string // Content-Type of the object accessTier blob.AccessTier // Blob Access Tier meta map[string]string // blob metadata - take metadataMu when 
accessing tags map[string]string // blob tags } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { if f.rootContainer == "" { return "Azure root" } if f.rootDirectory == "" { return fmt.Sprintf("Azure container %s", f.rootContainer) } return fmt.Sprintf("Azure container %s path %s", f.rootContainer, f.rootDirectory) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // parsePath parses a remote 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // split returns container and containerPath from the rootRelativePath // relative to f.root func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) { containerName, containerPath = bucket.Split(bucket.Join(f.root, rootRelativePath)) if f.opt.DirectoryMarkers && strings.HasSuffix(containerPath, "//") { containerPath = containerPath[:len(containerPath)-1] } return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath) } // split returns container and containerPath from the object func (o *Object) split() (container, containerPath string) { return o.fs.split(o.remote) } // validateAccessTier checks if azureblob supports user supplied tier func validateAccessTier(tier string) bool { return strings.EqualFold(tier, string(blob.AccessTierHot)) || strings.EqualFold(tier, string(blob.AccessTierCool)) || strings.EqualFold(tier, string(blob.AccessTierCold)) || strings.EqualFold(tier, string(blob.AccessTierArchive)) } // validatePublicAccess checks if azureblob supports use supplied public access level func validatePublicAccess(publicAccess string) bool { switch publicAccess { case "", 
string(container.PublicAccessTypeBlob), string(container.PublicAccessTypeContainer): // valid cases return true default: return false } } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 401, // Unauthorized (e.g. "Token has expired") 408, // Request Timeout 429, // Rate exceeded. 500, // Get occasional 500 Internal Server Error 503, // Service Unavailable 504, // Gateway Time-out } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } var storageErr *azcore.ResponseError if errors.As(err, &storageErr) { // General errors from: // https://learn.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes // Blob specific errors from: // https://learn.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes switch storageErr.ErrorCode { case "InvalidBlobOrBlock": // These errors happen sometimes in multipart uploads // because of block concurrency issues return true, err case "InternalError": // The server encountered an internal error. Please retry the request. return true, err case "OperationTimedOut": // The operation could not be completed within the permitted time. The // operation may or may not have succeeded on the server side. Please query // the server state before retrying the operation. return true, err case "ServerBusy": // The server is currently unable to receive requests. Please retry your // request. 
return true, err } statusCode := storageErr.StatusCode if slices.Contains(retryErrorCodes, statusCode) { return true, err } } return fserrors.ShouldRetry(err), err } func checkUploadChunkSize(cs fs.SizeSuffix) error { const minChunkSize = fs.SizeSuffixBase if cs < minChunkSize { return fmt.Errorf("%s is less than %s", cs, minChunkSize) } return nil } func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadChunkSize(cs) if err == nil { old, f.opt.ChunkSize = f.opt.ChunkSize, cs } return } func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadChunkSize(cs) if err == nil { old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs } return } type servicePrincipalCredentials struct { AppID string `json:"appId"` Password string `json:"password"` Tenant string `json:"tenant"` } // parseServicePrincipalCredentials unmarshals a service principal credentials JSON file as generated by az cli. func parseServicePrincipalCredentials(ctx context.Context, credentialsData []byte) (*servicePrincipalCredentials, error) { var spCredentials servicePrincipalCredentials if err := json.Unmarshal(credentialsData, &spCredentials); err != nil { return nil, fmt.Errorf("error parsing credentials from JSON file: %w", err) } // TODO: support certificate credentials // Validate all fields present if spCredentials.AppID == "" || spCredentials.Password == "" || spCredentials.Tenant == "" { return nil, fmt.Errorf("missing fields in credentials file") } return &spCredentials, nil } // setRoot changes the root of the Fs func (f *Fs) setRoot(root string) { f.root = parsePath(root) f.rootContainer, f.rootDirectory = bucket.Split(f.root) } // Wrap the http.Transport to satisfy the Transporter interface type transporter struct { http.RoundTripper } // Make a new transporter func newTransporter(ctx context.Context) transporter { return transporter{ RoundTripper: fshttp.NewTransport(ctx), } } // Do sends the HTTP request and returns the 
HTTP response or error. func (tr transporter) Do(req *http.Request) (*http.Response, error) { return tr.RoundTripper.RoundTrip(req) } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } err = checkUploadChunkSize(opt.ChunkSize) if err != nil { return nil, fmt.Errorf("chunk size: %w", err) } if opt.ListChunkSize > maxListChunkSize { return nil, fmt.Errorf("blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize) } if opt.AccessTier == "" { opt.AccessTier = string(defaultAccessTier) } else if !validateAccessTier(opt.AccessTier) { return nil, fmt.Errorf("supported access tiers are %s, %s, %s and %s", string(blob.AccessTierHot), string(blob.AccessTierCool), string(blob.AccessTierCold), string(blob.AccessTierArchive)) } if !validatePublicAccess((opt.PublicAccess)) { return nil, fmt.Errorf("supported public access level are %s and %s", string(container.PublicAccessTypeBlob), string(container.PublicAccessTypeContainer)) } ci := fs.GetConfig(ctx) f := &Fs{ name: name, opt: *opt, ci: ci, pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), uploadToken: pacer.NewTokenDispenser(ci.Transfers), cache: bucket.NewCache(), cntSVCcache: make(map[string]*container.Client, 1), } f.publicAccess = container.PublicAccessType(opt.PublicAccess) f.setRoot(root) f.features = (&fs.Features{ ReadMimeType: true, WriteMimeType: true, ReadMetadata: true, WriteMetadata: true, UserMetadata: true, BucketBased: true, BucketBasedRootOK: true, SetTier: true, GetTier: true, ServerSideAcrossConfigs: true, DoubleSlash: true, }).Fill(ctx, f) if opt.DirectoryMarkers { f.features.CanHaveEmptyDirectories = true fs.Debugf(f, "Using directory markers") } // Client options specifying our own 
transport policyClientOptions := policy.ClientOptions{ Transport: newTransporter(ctx), } clientOpt := service.ClientOptions{ ClientOptions: policyClientOptions, } // Here we auth by setting one of f.cred, f.sharedKeyCred, f.svc or f.anonymous switch { case opt.EnvAuth: // Read account from environment if needed if opt.Account == "" { opt.Account, _ = os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") } // Read credentials from the environment options := azidentity.DefaultAzureCredentialOptions{ ClientOptions: policyClientOptions, DisableInstanceDiscovery: opt.DisableInstanceDiscovery, } f.cred, err = azidentity.NewDefaultAzureCredential(&options) if err != nil { return nil, fmt.Errorf("create azure environment credential failed: %w", err) } case opt.UseEmulator: if opt.Account == "" { opt.Account = emulatorAccount } if opt.Key == "" { opt.Key = emulatorAccountKey } if opt.Endpoint == "" { opt.Endpoint = emulatorBlobEndpoint } f.sharedKeyCred, err = service.NewSharedKeyCredential(opt.Account, opt.Key) if err != nil { return nil, fmt.Errorf("create new shared key credential for emulator failed: %w", err) } case opt.Account != "" && opt.Key != "": f.sharedKeyCred, err = service.NewSharedKeyCredential(opt.Account, opt.Key) if err != nil { return nil, fmt.Errorf("create new shared key credential failed: %w", err) } case opt.SASURL != "": parts, err := sas.ParseURL(opt.SASURL) if err != nil { return nil, fmt.Errorf("failed to parse SAS URL: %w", err) } endpoint := opt.SASURL containerName := parts.ContainerName // Check if we have container level SAS or account level SAS
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/pikpak/pikpak.go
backend/pikpak/pikpak.go
// Package pikpak provides an interface to the PikPak package pikpak // ------------------------------------------------------------ // NOTE // ------------------------------------------------------------ // md5sum is not always available, sometimes given empty. // Trashed files are not restored to the original location when using `batchUntrash` // Can't stream without `--vfs-cache-mode=full` // ------------------------------------------------------------ // TODO // ------------------------------------------------------------ // * List() with options starred-only // * user-configurable list chunk // * backend command: untrash, iscached // * api(event,task) import ( "bytes" "context" "encoding/base64" "encoding/json" "errors" "fmt" "io" "math" "net/http" "net/url" "path" "reflect" "strconv" "strings" "sync" "time" "github.com/aws/aws-sdk-go-v2/aws" awsconfig "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/rclone/rclone/backend/pikpak/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/rest" "golang.org/x/oauth2" ) // Constants const ( clientID = "YUMx5nI8ZU8Ap8pm" clientVersion = "2.0.0" packageName = "mypikpak.com" defaultUserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0" minSleep = 100 * time.Millisecond maxSleep = 2 * time.Second taskWaitTime = 500 * time.Millisecond 
decayConstant = 2 // bigger for slower decay, exponential rootURL = "https://api-drive.mypikpak.com" maxUploadParts = 10000 // Part number must be an integer between 1 and 10000, inclusive. defaultChunkSize = fs.SizeSuffix(1024 * 1024 * 5) // Part size should be in [100KB, 5GB] minChunkSize = 100 * fs.Kibi maxChunkSize = 5 * fs.Gibi defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024) maxUploadCutoff = 5 * fs.Gibi // maximum allowed size for singlepart uploads ) // Globals var ( // Description of how to auth for this app oauthConfig = &oauthutil.Config{ Scopes: nil, AuthURL: "https://user.mypikpak.com/v1/auth/signin", TokenURL: "https://user.mypikpak.com/v1/auth/token", AuthStyle: oauth2.AuthStyleInParams, ClientID: clientID, RedirectURL: oauthutil.RedirectURL, } ) // pikpakAutorize retrieves OAuth token using user/pass and save it to rclone.conf func pikpakAuthorize(ctx context.Context, opt *Options, name string, m configmap.Mapper) error { if opt.Username == "" { return errors.New("no username") } pass, err := obscure.Reveal(opt.Password) if err != nil { return fmt.Errorf("failed to decode password - did you obscure it?: %w", err) } // new device id if necessary if len(opt.DeviceID) != 32 { opt.DeviceID = genDeviceID() m.Set("device_id", opt.DeviceID) fs.Infof(nil, "Using new device id %q", opt.DeviceID) } opts := rest.Opts{ Method: "POST", RootURL: "https://user.mypikpak.com/v1/auth/signin", } req := map[string]string{ "username": opt.Username, "password": pass, "client_id": clientID, } var token api.Token rst := newPikpakClient(getClient(ctx, opt), opt).SetCaptchaTokener(ctx, m) _, err = rst.CallJSON(ctx, &opts, req, &token) if apiErr, ok := err.(*api.Error); ok { if apiErr.Reason == "captcha_invalid" && apiErr.Code == 4002 { rst.captcha.Invalidate() _, err = rst.CallJSON(ctx, &opts, req, &token) } } if err != nil { return fmt.Errorf("failed to retrieve token using username/password: %w", err) } t := &oauth2.Token{ AccessToken: token.AccessToken, TokenType: 
token.TokenType, RefreshToken: token.RefreshToken, Expiry: token.Expiry(), } return oauthutil.PutToken(name, m, t, false) } // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "pikpak", Description: "PikPak", NewFs: NewFs, CommandHelp: commandHelp, Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, fmt.Errorf("couldn't parse config into struct: %w", err) } switch config.State { case "": // Check token exists if _, err := oauthutil.GetToken(name, m); err != nil { return fs.ConfigGoto("authorize") } return fs.ConfigConfirm("authorize_ok", false, "consent_to_authorize", "Re-authorize for new token?") case "authorize_ok": if config.Result == "false" { return nil, nil } return fs.ConfigGoto("authorize") case "authorize": if err := pikpakAuthorize(ctx, opt, name, m); err != nil { return nil, err } return nil, nil } return nil, fmt.Errorf("unknown state %q", config.State) }, Options: []fs.Option{{ Name: "user", Help: "Pikpak username.", Required: true, Sensitive: true, }, { Name: "pass", Help: "Pikpak password.", Required: true, IsPassword: true, }, { Name: "device_id", Help: "Device ID used for authorization.", Advanced: true, Sensitive: true, }, { Name: "user_agent", Default: defaultUserAgent, Advanced: true, Help: fmt.Sprintf(`HTTP user agent for pikpak. Defaults to "%s" or "--pikpak-user-agent" provided on command line.`, defaultUserAgent), }, { Name: "root_folder_id", Help: `ID of the root folder. Leave blank normally. Fill in for rclone to use a non root folder as its starting point. 
`, Advanced: true, Sensitive: true, }, { Name: "use_trash", Default: true, Help: "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--pikpak-use-trash=false` to delete files permanently instead.", Advanced: true, }, { Name: "trashed_only", Default: false, Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.", Advanced: true, }, { Name: "no_media_link", Default: false, Help: "Use original file links instead of media links.\n\nThis avoids issues caused by invalid media links, but may reduce download speeds.", Advanced: true, }, { Name: "hash_memory_limit", Help: "Files bigger than this will be cached on disk to calculate hash if required.", Default: fs.SizeSuffix(10 * 1024 * 1024), Advanced: true, }, { Name: "upload_cutoff", Help: `Cutoff for switching to chunked upload. Any files larger than this will be uploaded in chunks of chunk_size. The minimum is 0 and the maximum is 5 GiB.`, Default: defaultUploadCutoff, Advanced: true, }, { Name: "chunk_size", Help: `Chunk size for multipart uploads. Large files will be uploaded in chunks of this size. Note that this is stored in memory and there may be up to "--transfers" * "--pikpak-upload-concurrency" chunks stored at once in memory. If you are transferring large files over high-speed links and you have enough memory, then increasing this will speed up the transfers. Rclone will automatically increase the chunk size when uploading a large file of known size to stay below the 10,000 chunks limit. Increasing the chunk size decreases the accuracy of the progress statistics displayed with "-P" flag.`, Default: defaultChunkSize, Advanced: true, }, { Name: "upload_concurrency", Help: `Concurrency for multipart uploads. This is the number of chunks of the same file that are uploaded concurrently for multipart uploads. 
Note that chunks are stored in memory and there may be up to "--transfers" * "--pikpak-upload-concurrency" chunks stored at once in memory. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers.`, Default: 4, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.EncodeCtl | encoder.EncodeDot | encoder.EncodeBackSlash | encoder.EncodeSlash | encoder.EncodeDoubleQuote | encoder.EncodeAsterisk | encoder.EncodeColon | encoder.EncodeLtGt | encoder.EncodeQuestion | encoder.EncodePipe | encoder.EncodeLeftSpace | encoder.EncodeRightSpace | encoder.EncodeRightPeriod | encoder.EncodeInvalidUtf8), }}, }) } // Options defines the configuration for this backend type Options struct { Username string `config:"user"` Password string `config:"pass"` UserID string `config:"user_id"` // only available during runtime DeviceID string `config:"device_id"` UserAgent string `config:"user_agent"` RootFolderID string `config:"root_folder_id"` UseTrash bool `config:"use_trash"` TrashedOnly bool `config:"trashed_only"` NoMediaLink bool `config:"no_media_link"` HashMemoryThreshold fs.SizeSuffix `config:"hash_memory_limit"` ChunkSize fs.SizeSuffix `config:"chunk_size"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` UploadConcurrency int `config:"upload_concurrency"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote pikpak type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options features *fs.Features // optional features rst *pikpakClient // the connection to the server dirCache *dircache.DirCache // Map of directory path to directory id pacer *fs.Pacer // pacer for API calls rootFolderID string // the id of the root folder client *http.Client // authorized client m configmap.Mapper tokenMu *sync.Mutex // when 
renewing tokens } // Object describes a pikpak object type Object struct { fs *Fs // what this object is part of remote string // The remote path hasMetaData bool // whether info below has been set id string // ID of the object size int64 // size of the object modTime time.Time // modification time of the object mimeType string // The object MIME type parent string // ID of the parent directories gcid string // custom hash of the object md5sum string // md5sum of the object link *api.Link // link to download the object linkMu *sync.Mutex } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("PikPak root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported // meaning that the modification times from the backend shouldn't be used for syncing // as they can't be set. } // DirCacheFlush resets the directory cache - used in testing as an // optional interface func (f *Fs) DirCacheFlush() { f.dirCache.ResetRoot() } // Hashes returns the supported hash sets. func (f *Fs) Hashes() hash.Set { return hash.Set(hash.MD5) } // parsePath parses a remote path func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // parentIDForRequest returns ParentId for api requests func parentIDForRequest(dirID string) string { if dirID == "root" { return "" } return dirID } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 
500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // reAuthorize re-authorize oAuth token during runtime func (f *Fs) reAuthorize(ctx context.Context) (err error) { f.tokenMu.Lock() defer f.tokenMu.Unlock() if err := pikpakAuthorize(ctx, &f.opt, f.name, f.m); err != nil { return err } return f.newClientWithPacer(ctx) } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } if err == nil { return false, nil } if fserrors.ShouldRetry(err) { return true, err } authRetry := false // traceback to possible api.Error wrapped in err, and re-authorize if necessary // "unauthenticated" (16): when access_token is invalid, but should be handled by oauthutil var terr *oauth2.RetrieveError if errors.As(err, &terr) { apiErr := new(api.Error) if err := json.Unmarshal(terr.Body, apiErr); err == nil { if apiErr.Reason == "invalid_grant" { // "invalid_grant" (4126): The refresh token is incorrect or expired // // Invalid refresh token. It may have been refreshed by another process. authRetry = true } } } // Once err was processed by maybeWrapOAuthError() in lib/oauthutil, // the above code is no longer sufficient to handle the 'invalid_grant' error. if strings.Contains(err.Error(), "invalid_grant") { authRetry = true } if authRetry { if authErr := f.reAuthorize(ctx); authErr != nil { return false, fserrors.FatalError(authErr) } } switch apiErr := err.(type) { case *api.Error: if apiErr.Reason == "file_rename_uncompleted" { // "file_rename_uncompleted" (9): Renaming uncompleted file or folder is not supported // This error occurs when you attempt to rename objects // right after some server-side changes, e.g. 
DirMove, Move, Copy return true, err } else if apiErr.Reason == "file_duplicated_name" { // "file_duplicated_name" (3): File name cannot be repeated // This error may occur when attempting to rename temp object (newly uploaded) // right after the old one is removed. return true, err } else if apiErr.Reason == "task_daily_create_limit_vip" { // "task_daily_create_limit_vip" (11): Sorry, you have submitted too many tasks and have exceeded the current processing capacity, please try again tomorrow return false, fserrors.FatalError(err) } else if apiErr.Reason == "file_space_not_enough" { // "file_space_not_enough" (8): Storage space is not enough return false, fserrors.FatalError(err) } else if apiErr.Reason == "captcha_invalid" && apiErr.Code == 9 { // "captcha_invalid" (9): Verification code is invalid // This error occurred on the POST:/drive/v1/files endpoint // when a zero-byte file was uploaded with an invalid captcha token f.rst.captcha.Invalidate() return true, err } else if strings.Contains(apiErr.Reason, "idx.shub.mypikpak.com") && apiErr.Code == 500 { // internal server error: Post "http://idx.shub.mypikpak.com": context deadline exceeded (Client.Timeout exceeded while awaiting headers) // This typically happens when trying to retrieve a gcid for which no record exists. // No retry is needed in this case. 
return false, err } } return authRetry || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // errorHandler parses a non 2xx error response into an error func errorHandler(resp *http.Response) error { // Decode error response errResponse := new(api.Error) err := rest.DecodeJSON(resp, &errResponse) if err != nil { fs.Debugf(nil, "Couldn't decode error response: %v", err) } if errResponse.Reason == "" { errResponse.Reason = resp.Status } if errResponse.Code == 0 { errResponse.Code = resp.StatusCode } return errResponse } // getClient makes an http client according to the options func getClient(ctx context.Context, opt *Options) *http.Client { // Override few config settings and create a client newCtx, ci := fs.AddConfig(ctx) ci.UserAgent = opt.UserAgent return fshttp.NewClient(newCtx) } // newClientWithPacer sets a new http/rest client with a pacer to Fs func (f *Fs) newClientWithPacer(ctx context.Context) (err error) { var ts *oauthutil.TokenSource f.client, ts, err = oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, getClient(ctx, &f.opt)) if err != nil { return fmt.Errorf("failed to create oauth client: %w", err) } token, err := ts.Token() if err != nil { return err } // parse user_id from oauth access token for later use if parts := strings.Split(token.AccessToken, "."); len(parts) > 1 { jsonStr, _ := base64.URLEncoding.DecodeString(parts[1] + "===") info := struct { UserID string `json:"sub,omitempty"` }{} if jsonErr := json.Unmarshal(jsonStr, &info); jsonErr == nil { f.opt.UserID = info.UserID } } f.rst = newPikpakClient(f.client, &f.opt).SetCaptchaTokener(ctx, f.m) f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))) return nil } func checkUploadChunkSize(cs fs.SizeSuffix) error { if cs < minChunkSize { return fmt.Errorf("%s is less than %s", cs, minChunkSize) } if cs > maxChunkSize { return fmt.Errorf("%s is greater than %s", cs, maxChunkSize) } return nil } 
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadChunkSize(cs) if err == nil { old, f.opt.ChunkSize = f.opt.ChunkSize, cs } return } func checkUploadCutoff(cs fs.SizeSuffix) error { if cs > maxUploadCutoff { return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff) } return nil } func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadCutoff(cs) if err == nil { old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs } return } // newFs partially constructs Fs from the path // // It constructs a valid Fs but doesn't attempt to figure out whether // it is a file or a directory. func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } err = checkUploadChunkSize(opt.ChunkSize) if err != nil { return nil, fmt.Errorf("pikpak: chunk size: %w", err) } err = checkUploadCutoff(opt.UploadCutoff) if err != nil { return nil, fmt.Errorf("pikpak: upload cutoff: %w", err) } root := parsePath(path) f := &Fs{ name: name, root: root, opt: *opt, m: m, tokenMu: new(sync.Mutex), } f.features = (&fs.Features{ ReadMimeType: true, // can read the mime type of objects CanHaveEmptyDirectories: true, // can have empty directories NoMultiThreading: true, // can't have multiple threads downloading }).Fill(ctx, f) // new device id if necessary if len(f.opt.DeviceID) != 32 { f.opt.DeviceID = genDeviceID() m.Set("device_id", f.opt.DeviceID) fs.Infof(nil, "Using new device id %q", f.opt.DeviceID) } if err := f.newClientWithPacer(ctx); err != nil { // re-authorize if necessary if strings.Contains(err.Error(), "invalid_grant") { return f, f.reAuthorize(ctx) } return nil, err } return f, nil } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, error) { f, err := newFs(ctx, name, 
path, m) if err != nil { return nil, err } // Set the root folder ID if f.opt.RootFolderID != "" { // use root_folder ID if set f.rootFolderID = f.opt.RootFolderID } else { // pseudo-root f.rootFolderID = "root" } f.dirCache = dircache.New(f.root, f.rootFolderID, f) // Find the current root err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(f.root) tempF := *f tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF) tempF.root = newRoot // Make new Fs which is the parent err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } _, err := tempF.NewObject(ctx, remote) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f return f, nil } return nil, err } f.features.Fill(ctx, &tempF) // XXX: update the old f here instead of returning tempF, since // `features` were already filled with functions having *f as a receiver. // See https://github.com/rclone/rclone/issues/2182 f.dirCache = tempF.dirCache f.root = tempF.root // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // readMetaDataForPath reads the metadata from the path func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.File, err error) { leaf, dirID, err := f.dirCache.FindPath(ctx, path, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound } return nil, err } // checking whether fileObj with name of leaf exists in dirID trashed := "false" if f.opt.TrashedOnly { trashed = "true" } found, err := f.listAll(ctx, dirID, api.KindOfFile, trashed, func(item *api.File) bool { if item.Name == leaf { info = item return true } return false }) if err != nil { return nil, err } if !found { return nil, fs.ErrorObjectNotFound } return info, nil } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) { o := &Object{ fs: f, remote: remote, linkMu: new(sync.Mutex), } var err error if info != nil { err = o.setMetaData(info) } else { err = o.readMetaData(ctx) // reads info and meta, returning an error } if err != nil { return nil, err } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(ctx, remote, nil) } // FindLeaf finds a directory of name leaf in the folder with ID pathID func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { // Find the leaf in pathID trashed := "false" if f.opt.TrashedOnly { // still need to list folders trashed = "" } found, err = f.listAll(ctx, pathID, api.KindOfFolder, trashed, func(item *api.File) bool { if item.Name == leaf { pathIDOut = item.ID return true } return false }) return pathIDOut, found, err } // list the objects into the function supplied // // If directories is set it only sends directories // User function to process a File item from listAll // // Should return true to finish processing type listAllFn func(*api.File) bool // Lists the directory required calling the user function on each item found // // If the user fn ever returns true then it early exits with found = true func (f *Fs) listAll(ctx context.Context, dirID, kind, trashed string, fn listAllFn) (found bool, err error) { // Url Parameters params := url.Values{} params.Set("thumbnail_size", api.ThumbnailSizeM) params.Set("limit", strconv.Itoa(api.ListLimit)) params.Set("with_audit", strconv.FormatBool(true)) if parentID := parentIDForRequest(dirID); parentID != "" { params.Set("parent_id", parentID) } // Construct filter string filters := &api.Filters{} filters.Set("Phase", "eq", api.PhaseTypeComplete) filters.Set("Trashed", "eq", 
trashed) filters.Set("Kind", "eq", kind) if filterStr, err := json.Marshal(filters); err == nil { params.Set("filters", string(filterStr)) } // fs.Debugf(f, "list params: %v", params) opts := rest.Opts{ Method: "GET", Path: "/drive/v1/files", Parameters: params, } pageToken := "" OUTER: for { opts.Parameters.Set("page_token", pageToken) var info api.FileList var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, nil, &info) return f.shouldRetry(ctx, resp, err) }) if err != nil { return found, fmt.Errorf("couldn't list files: %w", err) } if len(info.Files) == 0 { break } for _, item := range info.Files { item.Name = f.opt.Enc.ToStandardName(item.Name) if fn(item) { found = true break OUTER } } if info.NextPageToken == "" { break } pageToken = info.NextPageToken } return } // itemToDirEntry converts a api.File to an fs.DirEntry. // When the api.File cannot be represented as an fs.DirEntry // (nil, nil) is returned. func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *api.File) (entry fs.DirEntry, err error) { switch { case item.Kind == api.KindOfFolder: // cache the directory ID for later lookups f.dirCache.Put(remote, item.ID) d := fs.NewDir(remote, time.Time(item.ModifiedTime)).SetID(item.ID) if item.ParentID == "" { d.SetParentID("root") } else { d.SetParentID(item.ParentID) } return d, nil case f.opt.TrashedOnly && !item.Trashed: // ignore object default: entry, err = f.newObjectWithInfo(ctx, remote, item) if err == fs.ErrorObjectNotFound { return nil, nil } return entry, err } return nil, nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. 
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { // fs.Debugf(f, "List(%q)\n", dir) dirID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } var iErr error trashed := "false" if f.opt.TrashedOnly { // still need to list folders trashed = "" } _, err = f.listAll(ctx, dirID, "", trashed, func(item *api.File) bool { entry, err := f.itemToDirEntry(ctx, path.Join(dir, item.Name), item) if err != nil { iErr = err return true } if entry != nil { entries = append(entries, entry) } return false }) if err != nil { return nil, err } if iErr != nil { return nil, iErr } return entries, nil } // CreateDir makes a directory with pathID as parent and name leaf func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) req := api.RequestNewFile{ Name: f.opt.Enc.FromStandardName(leaf), Kind: api.KindOfFolder, ParentID: parentIDForRequest(pathID), } info, err := f.requestNewFile(ctx, &req) if err != nil { return "", err } return info.File.ID, nil } // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, err := f.dirCache.FindDir(ctx, dir, true) return err } // About gets quota information func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { info, err := f.getAbout(ctx) if err != nil { return nil, fmt.Errorf("failed to get drive quota: %w", err) } q := info.Quota usage = &fs.Usage{ Used: fs.NewUsageValue(q.Usage), // bytes in use // Trashed: fs.NewUsageValue(q.UsageInTrash), // bytes in trash but this seems not working } if q.Limit > 0 { usage.Total = fs.NewUsageValue(q.Limit) // quota of bytes that can be used usage.Free = fs.NewUsageValue(q.Limit - q.Usage) // bytes which can be uploaded before reaching the quota } return usage, nil } // PublicLink adds a "readable by anyone with link" permission on the given file or folder. 
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) { id, err := f.dirCache.FindDir(ctx, remote, false) if err == nil { fs.Debugf(f, "attempting to share directory '%s'", remote) } else { fs.Debugf(f, "attempting to share single file '%s'", remote) o, err := f.NewObject(ctx, remote) if err != nil { return "", err } id = o.(*Object).id } expiry := -1 if expire < fs.DurationOff { expiry = int(math.Ceil(time.Duration(expire).Hours() / 24)) } req := api.RequestShare{ FileIDs: []string{id}, ShareTo: "publiclink", ExpirationDays: expiry, PassCodeOption: "NOT_REQUIRED", } info, err := f.requestShare(ctx, &req) if err != nil { return "", err } return info.ShareURL, err } // delete a file or directory by ID w/o using trash func (f *Fs) deleteObjects(ctx context.Context, IDs []string, useTrash bool) (err error) { if len(IDs) == 0 { return nil } action := "batchDelete" if useTrash { action = "batchTrash" } req := api.RequestBatch{ IDs: IDs, } if err := f.requestBatchAction(ctx, action, &req); err != nil { return fmt.Errorf("delete object failed: %w", err) } return nil } // untrash a file or directory by ID // // If a name collision occurs in the destination folder, PikPak might automatically // rename the restored item(s) by appending a numbered suffix. 
For example, // foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists func (f *Fs) untrashObjects(ctx context.Context, IDs []string) (err error) { if len(IDs) == 0 { return nil } req := api.RequestBatch{ IDs: IDs, } if err := f.requestBatchAction(ctx, "batchUntrash", &req); err != nil { return fmt.Errorf("untrash object failed: %w", err) } return nil } // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } rootID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return err } trashedFiles := false if check { found, err := f.listAll(ctx, rootID, "", "", func(item *api.File) bool { if !item.Trashed { fs.Debugf(dir, "Rmdir: contains file: %q", item.Name) return true } fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name) trashedFiles = true return false }) if err != nil { return err } if found { return fs.ErrorDirectoryNotEmpty } } if root != "" { // trash the directory if it had trashed files // in or the user wants to trash, otherwise // delete it. 
err = f.deleteObjects(ctx, []string{rootID}, trashedFiles || f.opt.UseTrash) if err != nil { return err } } else if check { return errors.New("can't purge root directory") } f.dirCache.FlushDir(dir) if err != nil { return err } return nil } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) } // Purge deletes all the files and the container // // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, false) } // CleanUp empties the trash func (f *Fs) CleanUp(ctx context.Context) (err error) { opts := rest.Opts{ Method: "PATCH", Path: "/drive/v1/files/trash:empty", } info := struct { TaskID string `json:"task_id"` }{} var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, nil, &info) return f.shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("couldn't empty trash: %w", err) } return f.waitTask(ctx, info.TaskID) } // Move the object to a new parent folder // // Objects cannot be moved to their current folder.
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/pikpak/pikpak_test.go
backend/pikpak/pikpak_test.go
// Test PikPak filesystem interface package pikpak import ( "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestPikPak:", NilObject: (*Object)(nil), ChunkedUpload: fstests.ChunkedUploadConfig{ MinChunkSize: minChunkSize, MaxChunkSize: maxChunkSize, }, }) } func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadChunkSize(cs) } func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadCutoff(cs) } var ( _ fstests.SetUploadChunkSizer = (*Fs)(nil) _ fstests.SetUploadCutoffer = (*Fs)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/pikpak/helper.go
backend/pikpak/helper.go
package pikpak import ( "bytes" "context" "crypto/md5" "crypto/sha1" "encoding/hex" "encoding/json" "errors" "fmt" "io" "math/rand" "net/http" "net/url" "os" "strconv" "strings" "sync" "time" "github.com/rclone/rclone/backend/pikpak/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/lib/rest" ) // Globals const ( cachePrefix = "rclone-pikpak-gcid-" ) // requestDecompress requests decompress of compressed files func (f *Fs) requestDecompress(ctx context.Context, file *api.File, password string) (info *api.DecompressResult, err error) { req := &api.RequestDecompress{ Gcid: file.Hash, Password: password, FileID: file.ID, Files: []*api.FileInArchive{}, DefaultParent: true, } opts := rest.Opts{ Method: "POST", Path: "/decompress/v1/decompress", } var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, &req, &info) return f.shouldRetry(ctx, resp, err) }) return } // getUserInfo gets UserInfo from API func (f *Fs) getUserInfo(ctx context.Context) (info *api.User, err error) { opts := rest.Opts{ Method: "GET", RootURL: "https://user.mypikpak.com/v1/user/me", } var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, nil, &info) return f.shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("failed to get userinfo: %w", err) } return } // getVIPInfo gets VIPInfo from API func (f *Fs) getVIPInfo(ctx context.Context) (info *api.VIP, err error) { opts := rest.Opts{ Method: "GET", RootURL: "https://api-drive.mypikpak.com/drive/v1/privilege/vip", } var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, nil, &info) return f.shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("failed to get vip info: %w", err) } return } // requestBatchAction requests batch actions to API // // action can be one of 
batch{Copy,Delete,Trash,Untrash} func (f *Fs) requestBatchAction(ctx context.Context, action string, req *api.RequestBatch) (err error) { opts := rest.Opts{ Method: "POST", Path: "/drive/v1/files:" + action, } info := struct { TaskID string `json:"task_id"` }{} var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, &req, &info) return f.shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("batch action %q failed: %w", action, err) } return f.waitTask(ctx, info.TaskID) } // requestNewTask requests a new api.NewTask and returns api.Task func (f *Fs) requestNewTask(ctx context.Context, req *api.RequestNewTask) (info *api.Task, err error) { opts := rest.Opts{ Method: "POST", Path: "/drive/v1/files", } var newTask api.NewTask var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, &req, &newTask) return f.shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } return newTask.Task, nil } // requestNewFile requests a new api.NewFile and returns api.File func (f *Fs) requestNewFile(ctx context.Context, req *api.RequestNewFile) (info *api.NewFile, err error) { opts := rest.Opts{ Method: "POST", Path: "/drive/v1/files", } var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, &req, &info) return f.shouldRetry(ctx, resp, err) }) return } // getFile gets api.File from API for the ID passed // and returns rich information containing additional fields below // * web_content_link // * thumbnail_link // * links // * medias func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error) { opts := rest.Opts{ Method: "GET", Path: "/drive/v1/files/" + ID, } var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, nil, &info) if err == nil && !info.Links.ApplicationOctetStream.Valid() { time.Sleep(5 * time.Second) return true, errors.New("no link") } 
return f.shouldRetry(ctx, resp, err) }) if err == nil { info.Name = f.opt.Enc.ToStandardName(info.Name) } return } // patchFile updates attributes of the file by ID // // currently known patchable fields are // * name func (f *Fs) patchFile(ctx context.Context, ID string, req *api.File) (info *api.File, err error) { opts := rest.Opts{ Method: "PATCH", Path: "/drive/v1/files/" + ID, } var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, &req, &info) return f.shouldRetry(ctx, resp, err) }) return } // getTask gets api.Task from API for the ID passed func (f *Fs) getTask(ctx context.Context, ID string, checkPhase bool) (info *api.Task, err error) { opts := rest.Opts{ Method: "GET", Path: "/drive/v1/tasks/" + ID, } var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, nil, &info) if checkPhase { if err == nil && info.Phase != api.PhaseTypeComplete { // could be pending right after the task is created return true, fmt.Errorf("%s (%s) is still in %s", info.Name, info.Type, info.Phase) } } return f.shouldRetry(ctx, resp, err) }) return } // waitTask waits for async tasks to be completed func (f *Fs) waitTask(ctx context.Context, ID string) (err error) { time.Sleep(taskWaitTime) if info, err := f.getTask(ctx, ID, true); err != nil { if info == nil { return fmt.Errorf("can't verify the task is completed: %q", ID) } return fmt.Errorf("can't verify the task is completed: %#v", info) } return } // deleteTask remove a task having the specified ID func (f *Fs) deleteTask(ctx context.Context, ID string, deleteFiles bool) (err error) { params := url.Values{} params.Set("delete_files", strconv.FormatBool(deleteFiles)) params.Set("task_ids", ID) opts := rest.Opts{ Method: "DELETE", Path: "/drive/v1/tasks", Parameters: params, NoResponse: true, } var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, nil, nil) return 
f.shouldRetry(ctx, resp, err) }) return } // getAbout gets drive#quota information from server func (f *Fs) getAbout(ctx context.Context) (info *api.About, err error) { opts := rest.Opts{ Method: "GET", Path: "/drive/v1/about", } var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, nil, &info) return f.shouldRetry(ctx, resp, err) }) return } // requestShare returns information about sharable links func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api.Share, err error) { opts := rest.Opts{ Method: "POST", Path: "/drive/v1/share", } var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, &req, &info) return f.shouldRetry(ctx, resp, err) }) return } // getGcid retrieves Gcid cached in API server func (f *Fs) getGcid(ctx context.Context, src fs.ObjectInfo) (gcid string, err error) { cid, err := calcCid(ctx, src) if err != nil { return } if src.Size() == 0 { // If src is zero-length, the API will return // Error "cid and file_size is required" (400) // In this case, we can simply return cid == gcid return cid, nil } params := url.Values{} params.Set("cid", cid) params.Set("file_size", strconv.FormatInt(src.Size(), 10)) opts := rest.Opts{ Method: "GET", Path: "/drive/v1/resource/cid", Parameters: params, } info := struct { Gcid string `json:"gcid,omitempty"` }{} var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rst.CallJSON(ctx, &opts, nil, &info) return f.shouldRetry(ctx, resp, err) }) if err != nil { return "", err } return info.Gcid, nil } // Read the gcid of in returning a reader which will read the same contents // // The cleanup function should be called when out is finished with // regardless of whether this function returned an error or not. 
func readGcid(in io.Reader, size, threshold int64) (gcid string, out io.Reader, cleanup func(), err error) { // nothing to clean up by default cleanup = func() {} // don't cache small files on disk to reduce wear of the disk if size > threshold { var tempFile *os.File // create the cache file tempFile, err = os.CreateTemp("", cachePrefix) if err != nil { return } _ = os.Remove(tempFile.Name()) // Delete the file - may not work on Windows // clean up the file after we are done downloading cleanup = func() { // the file should normally already be close, but just to make sure _ = tempFile.Close() _ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already } // use the teeReader to write to the local file AND calculate the gcid while doing so teeReader := io.TeeReader(in, tempFile) // copy the ENTIRE file to disk and calculate the gcid in the process if gcid, err = calcGcid(teeReader, size); err != nil { return } // jump to the start of the local file so we can pass it along if _, err = tempFile.Seek(0, 0); err != nil { return } // replace the already read source with a reader of our cached file out = tempFile } else { buf := &bytes.Buffer{} teeReader := io.TeeReader(in, buf) if gcid, err = calcGcid(teeReader, size); err != nil { return } out = buf } return } // calcGcid calculates Gcid from reader // // Gcid is a custom hash to index a file contents func calcGcid(r io.Reader, size int64) (string, error) { calcBlockSize := func(j int64) int64 { var psize int64 = 0x40000 for float64(j)/float64(psize) > 0x200 && psize < 0x200000 { psize <<= 1 } return psize } totalHash := sha1.New() blockHash := sha1.New() readSize := calcBlockSize(size) for { blockHash.Reset() if n, err := io.CopyN(blockHash, r, readSize); err != nil && n == 0 { if err != io.EOF { return "", err } break } totalHash.Write(blockHash.Sum(nil)) } return hex.EncodeToString(totalHash.Sum(nil)), nil } // unWrapObjectInfo returns the underlying Object unwrapped as much as 
// possible or nil even if it is an OverrideRemote func unWrapObjectInfo(oi fs.ObjectInfo) fs.Object { if o, ok := oi.(fs.Object); ok { return fs.UnWrapObject(o) } else if do, ok := oi.(*fs.OverrideRemote); ok { // Unwrap if it is an operations.OverrideRemote return do.UnWrap() } return nil } // calcCid calculates Cid from source // // Cid is a simplified version of Gcid func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) { srcObj := unWrapObjectInfo(src) if srcObj == nil { return "", fmt.Errorf("failed to unwrap object from src: %s", src) } size := src.Size() hash := sha1.New() var rc io.ReadCloser readHash := func(start, length int64) (err error) { end := start + length - 1 if rc, err = srcObj.Open(ctx, &fs.RangeOption{Start: start, End: end}); err != nil { return fmt.Errorf("failed to open src with range (%d, %d): %w", start, end, err) } defer fs.CheckClose(rc, &err) _, err = io.Copy(hash, rc) return err } if size <= 0xF000 { // 61440 = 60KB err = readHash(0, size) } else { // 20KB from three different parts for _, start := range []int64{0, size / 3, size - 0x5000} { err = readHash(start, 0x5000) if err != nil { break } } } if err != nil { return "", fmt.Errorf("failed to hash: %w", err) } cid = strings.ToUpper(hex.EncodeToString(hash.Sum(nil))) return } // ------------------------------------------------------------ authorization // randomly generates device id used for request header 'x-device-id' // // original javascript implementation // // return "xxxxxxxxxxxx4xxxyxxxxxxxxxxxxxxx".replace(/[xy]/g, (e) => { // const t = (16 * Math.random()) | 0; // return ("x" == e ? 
t : (3 & t) | 8).toString(16); // }); func genDeviceID() string { base := []byte("xxxxxxxxxxxx4xxxyxxxxxxxxxxxxxxx") for i, char := range base { switch char { case 'x': base[i] = fmt.Sprintf("%x", rand.Intn(16))[0] case 'y': base[i] = fmt.Sprintf("%x", rand.Intn(16)&3|8)[0] } } return string(base) } var md5Salt = []string{ "C9qPpZLN8ucRTaTiUMWYS9cQvWOE", "+r6CQVxjzJV6LCV", "F", "pFJRC", "9WXYIDGrwTCz2OiVlgZa90qpECPD6olt", "/750aCr4lm/Sly/c", "RB+DT/gZCrbV", "", "CyLsf7hdkIRxRm215hl", "7xHvLi2tOYP0Y92b", "ZGTXXxu8E/MIWaEDB+Sm/", "1UI3", "E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO", "ihtqpG6FMt65+Xk+tWUH2", "NhXXU9rg4XXdzo7u5o", } func md5Sum(text string) string { hash := md5.Sum([]byte(text)) return hex.EncodeToString(hash[:]) } func calcCaptchaSign(deviceID string) (timestamp, sign string) { timestamp = fmt.Sprint(time.Now().UnixMilli()) str := fmt.Sprint(clientID, clientVersion, packageName, deviceID, timestamp) for _, salt := range md5Salt { str = md5Sum(str + salt) } sign = "1." + str return } func newCaptchaTokenRequest(action, oldToken string, opt *Options) (req *api.CaptchaTokenRequest) { req = &api.CaptchaTokenRequest{ Action: action, CaptchaToken: oldToken, // can be empty initially ClientID: clientID, DeviceID: opt.DeviceID, Meta: new(api.CaptchaTokenMeta), } switch action { case "POST:/v1/auth/signin": req.Meta.UserName = opt.Username default: timestamp, captchaSign := calcCaptchaSign(opt.DeviceID) req.Meta.CaptchaSign = captchaSign req.Meta.Timestamp = timestamp req.Meta.ClientVersion = clientVersion req.Meta.PackageName = packageName req.Meta.UserID = opt.UserID } return } // CaptchaTokenSource stores updated captcha tokens in the config file type CaptchaTokenSource struct { mu sync.Mutex m configmap.Mapper opt *Options token *api.CaptchaToken ctx context.Context rst *pikpakClient } // initialize CaptchaTokenSource from rclone.conf if possible func newCaptchaTokenSource(ctx context.Context, opt *Options, m configmap.Mapper) *CaptchaTokenSource { token := 
new(api.CaptchaToken) tokenString, ok := m.Get("captcha_token") if !ok || tokenString == "" { fs.Debugf(nil, "failed to read captcha token out of config file") } else { if err := json.Unmarshal([]byte(tokenString), token); err != nil { fs.Debugf(nil, "failed to parse captcha token out of config file: %v", err) } } return &CaptchaTokenSource{ m: m, opt: opt, token: token, ctx: ctx, rst: newPikpakClient(getClient(ctx, opt), opt), } } // requestToken retrieves captcha token from API func (cts *CaptchaTokenSource) requestToken(ctx context.Context, req *api.CaptchaTokenRequest) (err error) { opts := rest.Opts{ Method: "POST", RootURL: "https://user.mypikpak.com/v1/shield/captcha/init", } var info *api.CaptchaToken _, err = cts.rst.CallJSON(ctx, &opts, &req, &info) if err == nil && info.ExpiresIn != 0 { // populate to Expiry info.Expiry = time.Now().Add(time.Duration(info.ExpiresIn) * time.Second) cts.token = info // update with a new one } return } func (cts *CaptchaTokenSource) refreshToken(opts *rest.Opts) (string, error) { oldToken := "" if cts.token != nil { oldToken = cts.token.CaptchaToken } action := "GET:/drive/v1/about" if opts.RootURL == "" && opts.Path != "" { action = fmt.Sprintf("%s:%s", opts.Method, opts.Path) } else if u, err := url.Parse(opts.RootURL); err == nil { action = fmt.Sprintf("%s:%s", opts.Method, u.Path) } req := newCaptchaTokenRequest(action, oldToken, cts.opt) if err := cts.requestToken(cts.ctx, req); err != nil { return "", fmt.Errorf("failed to retrieve captcha token from api: %w", err) } // put it into rclone.conf tokenBytes, err := json.Marshal(cts.token) if err != nil { return "", fmt.Errorf("failed to marshal captcha token: %w", err) } cts.m.Set("captcha_token", string(tokenBytes)) return cts.token.CaptchaToken, nil } // Invalidate resets existing captcha token for a forced refresh func (cts *CaptchaTokenSource) Invalidate() { cts.mu.Lock() cts.token.CaptchaToken = "" cts.mu.Unlock() } // Token returns a valid captcha token func (cts 
*CaptchaTokenSource) Token(opts *rest.Opts) (string, error) { cts.mu.Lock() defer cts.mu.Unlock() if cts.token.Valid() { return cts.token.CaptchaToken, nil } return cts.refreshToken(opts) } // pikpakClient wraps rest.Client with a handle of captcha token type pikpakClient struct { opt *Options client *rest.Client captcha *CaptchaTokenSource } // newPikpakClient takes an (oauth) http.Client and makes a new api instance for pikpak with // * error handler // * root url // * default headers func newPikpakClient(c *http.Client, opt *Options) *pikpakClient { client := rest.NewClient(c).SetErrorHandler(errorHandler).SetRoot(rootURL) for key, val := range map[string]string{ "Referer": "https://mypikpak.com/", "x-client-id": clientID, "x-client-version": clientVersion, "x-device-id": opt.DeviceID, // "x-device-model": "firefox%2F129.0", // "x-device-name": "PC-Firefox", // "x-device-sign": fmt.Sprintf("wdi10.%sxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", opt.DeviceID), // "x-net-work-type": "NONE", // "x-os-version": "Win32", // "x-platform-version": "1", // "x-protocol-version": "301", // "x-provider-name": "NONE", // "x-sdk-version": "8.0.3", } { client.SetHeader(key, val) } return &pikpakClient{ client: client, opt: opt, } } // This should be called right after pikpakClient initialized func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper) *pikpakClient { c.captcha = newCaptchaTokenSource(ctx, c.opt, m) return c } func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request any, response any) (resp *http.Response, err error) { if c.captcha != nil { token, err := c.captcha.Token(opts) if err != nil || token == "" { return nil, fserrors.FatalError(fmt.Errorf("couldn't get captcha token: %v", err)) } if opts.ExtraHeaders == nil { opts.ExtraHeaders = make(map[string]string) } opts.ExtraHeaders["x-captcha-token"] = token } return c.client.CallJSON(ctx, opts, request, response) } func (c *pikpakClient) Call(ctx context.Context, opts 
*rest.Opts) (resp *http.Response, err error) { return c.client.Call(ctx, opts) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/pikpak/multipart.go
backend/pikpak/multipart.go
package pikpak import ( "context" "fmt" "io" "sort" "strings" "sync" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/rclone/rclone/backend/pikpak/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/chunksize" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pool" "golang.org/x/sync/errgroup" ) const ( bufferSize = 1024 * 1024 // default size of the pages used in the reader bufferCacheSize = 64 // max number of buffers to keep in cache bufferCacheFlushTime = 5 * time.Second // flush the cached buffers after this long ) // bufferPool is a global pool of buffers var ( bufferPool *pool.Pool bufferPoolOnce sync.Once ) // get a buffer pool func getPool() *pool.Pool { bufferPoolOnce.Do(func() { ci := fs.GetConfig(context.Background()) // Initialise the buffer pool when used bufferPool = pool.New(bufferCacheFlushTime, bufferSize, bufferCacheSize, ci.UseMmap) }) return bufferPool } // NewRW gets a pool.RW using the multipart pool func NewRW() *pool.RW { return pool.NewRW(getPool()) } // Upload does a multipart upload in parallel func (w *pikpakChunkWriter) Upload(ctx context.Context) (err error) { // make concurrency machinery tokens := pacer.NewTokenDispenser(w.con) uploadCtx, cancel := context.WithCancel(ctx) defer cancel() defer atexit.OnError(&err, func() { cancel() fs.Debugf(w.o, "multipart upload: Cancelling...") errCancel := w.Abort(ctx) if errCancel != nil { fs.Debugf(w.o, "multipart upload: failed to cancel: %v", errCancel) } })() var ( g, gCtx = errgroup.WithContext(uploadCtx) finished = false off int64 size = w.size chunkSize = w.chunkSize ) // Do the accounting manually in, acc := accounting.UnWrapAccounting(w.in) for partNum := int64(0); !finished; partNum++ { // Get a block of memory from the pool and token which limits 
concurrency. tokens.Get() rw := NewRW() if acc != nil { rw.SetAccounting(acc.AccountRead) } free := func() { // return the memory and token _ = rw.Close() // Can't return an error tokens.Put() } // Fail fast, in case an errgroup managed function returns an error // gCtx is cancelled. There is no point in uploading all the other parts. if gCtx.Err() != nil { free() break } // Read the chunk var n int64 n, err = io.CopyN(rw, in, chunkSize) if err == io.EOF { if n == 0 && partNum != 0 { // end if no data and if not first chunk free() break } finished = true } else if err != nil { free() return fmt.Errorf("multipart upload: failed to read source: %w", err) } partNum := partNum partOff := off off += n g.Go(func() (err error) { defer free() fs.Debugf(w.o, "multipart upload: starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(partOff), fs.SizeSuffix(size)) _, err = w.WriteChunk(gCtx, int32(partNum), rw) return err }) } err = g.Wait() if err != nil { return err } err = w.Close(ctx) if err != nil { return fmt.Errorf("multipart upload: failed to finalise: %w", err) } return nil } var warnStreamUpload sync.Once // state of ChunkWriter type pikpakChunkWriter struct { chunkSize int64 size int64 con int f *Fs o *Object in io.Reader mu sync.Mutex completedParts []types.CompletedPart client *s3.Client mOut *s3.CreateMultipartUploadOutput } func (f *Fs) newChunkWriter(ctx context.Context, remote string, size int64, p *api.ResumableParams, in io.Reader, options ...fs.OpenOption) (w *pikpakChunkWriter, err error) { // Temporary Object under construction o := &Object{ fs: f, remote: remote, } // calculate size of parts chunkSize := f.opt.ChunkSize // size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize // buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of // 48 GiB which seems like a not too unreasonable limit. 
if size == -1 { warnStreamUpload.Do(func() { fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v", f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(maxUploadParts))) }) } else { chunkSize = chunksize.Calculator(o, size, maxUploadParts, chunkSize) } client, err := f.newS3Client(ctx, p) if err != nil { return nil, fmt.Errorf("failed to create upload client: %w", err) } w = &pikpakChunkWriter{ chunkSize: int64(chunkSize), size: size, con: max(1, f.opt.UploadConcurrency), f: f, o: o, in: in, completedParts: make([]types.CompletedPart, 0), client: client, } req := &s3.CreateMultipartUploadInput{ Bucket: &p.Bucket, Key: &p.Key, } // Apply upload options for _, option := range options { key, value := option.Header() lowerKey := strings.ToLower(key) switch lowerKey { case "": // ignore case "cache-control": req.CacheControl = aws.String(value) case "content-disposition": req.ContentDisposition = aws.String(value) case "content-encoding": req.ContentEncoding = aws.String(value) case "content-type": req.ContentType = aws.String(value) } } err = w.f.pacer.Call(func() (bool, error) { w.mOut, err = w.client.CreateMultipartUpload(ctx, req) return w.shouldRetry(ctx, err) }) if err != nil { return nil, fmt.Errorf("create multipart upload failed: %w", err) } fs.Debugf(w.o, "multipart upload: %q initiated", *w.mOut.UploadId) return } // shouldRetry returns a boolean as to whether this err // deserve to be retried. 
It returns the err as a convenience func (w *pikpakChunkWriter) shouldRetry(ctx context.Context, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } if fserrors.ShouldRetry(err) { return true, err } return false, err } // add a part number and etag to the completed parts func (w *pikpakChunkWriter) addCompletedPart(part types.CompletedPart) { w.mu.Lock() defer w.mu.Unlock() w.completedParts = append(w.completedParts, part) } // WriteChunk will write chunk number with reader bytes, where chunk number >= 0 func (w *pikpakChunkWriter) WriteChunk(ctx context.Context, chunkNumber int32, reader io.ReadSeeker) (currentChunkSize int64, err error) { if chunkNumber < 0 { err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber) return -1, err } partNumber := chunkNumber + 1 var res *s3.UploadPartOutput err = w.f.pacer.Call(func() (bool, error) { // Discover the size by seeking to the end currentChunkSize, err = reader.Seek(0, io.SeekEnd) if err != nil { return false, err } // rewind the reader on retry and after reading md5 _, err := reader.Seek(0, io.SeekStart) if err != nil { return false, err } res, err = w.client.UploadPart(ctx, &s3.UploadPartInput{ Bucket: w.mOut.Bucket, Key: w.mOut.Key, UploadId: w.mOut.UploadId, PartNumber: &partNumber, Body: reader, }) if err != nil { if chunkNumber <= 8 { return w.shouldRetry(ctx, err) } // retry all chunks once have done the first few return true, err } return false, nil }) if err != nil { return -1, fmt.Errorf("failed to upload chunk %d with %v bytes: %w", partNumber, currentChunkSize, err) } w.addCompletedPart(types.CompletedPart{ PartNumber: &partNumber, ETag: res.ETag, }) fs.Debugf(w.o, "multipart upload: wrote chunk %d with %v bytes", partNumber, currentChunkSize) return currentChunkSize, err } // Abort the multipart upload func (w *pikpakChunkWriter) Abort(ctx context.Context) (err error) { // Abort the upload session err = w.f.pacer.Call(func() (bool, error) { _, err = 
w.client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{ Bucket: w.mOut.Bucket, Key: w.mOut.Key, UploadId: w.mOut.UploadId, }) return w.shouldRetry(ctx, err) }) if err != nil { return fmt.Errorf("failed to abort multipart upload %q: %w", *w.mOut.UploadId, err) } fs.Debugf(w.o, "multipart upload: %q aborted", *w.mOut.UploadId) return } // Close and finalise the multipart upload func (w *pikpakChunkWriter) Close(ctx context.Context) (err error) { // sort the completed parts by part number sort.Slice(w.completedParts, func(i, j int) bool { return *w.completedParts[i].PartNumber < *w.completedParts[j].PartNumber }) // Finalise the upload session err = w.f.pacer.Call(func() (bool, error) { _, err = w.client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{ Bucket: w.mOut.Bucket, Key: w.mOut.Key, UploadId: w.mOut.UploadId, MultipartUpload: &types.CompletedMultipartUpload{ Parts: w.completedParts, }, }) return w.shouldRetry(ctx, err) }) if err != nil { return fmt.Errorf("failed to complete multipart upload: %w", err) } fs.Debugf(w.o, "multipart upload: %q finished", *w.mOut.UploadId) return }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/pikpak/api/types_test.go
backend/pikpak/api/types_test.go
package api import ( "fmt" "testing" "time" ) // TestLinkValid tests the Link.Valid method for various scenarios func TestLinkValid(t *testing.T) { tests := []struct { name string link *Link expected bool desc string }{ { name: "nil link", link: nil, expected: false, desc: "nil link should be invalid", }, { name: "empty URL", link: &Link{URL: ""}, expected: false, desc: "empty URL should be invalid", }, { name: "valid URL with future expire parameter", link: &Link{ URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()), }, expected: true, desc: "URL with future expire parameter should be valid", }, { name: "expired URL with past expire parameter", link: &Link{ URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()), }, expected: false, desc: "URL with past expire parameter should be invalid", }, { name: "URL expire parameter takes precedence over Expire field", link: &Link{ URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()), Expire: Time(time.Now().Add(-time.Hour)), // Fallback is expired }, expected: true, desc: "URL expire parameter should take precedence over Expire field", }, { name: "URL expire parameter within 10 second buffer should be invalid", link: &Link{ URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(5*time.Second).Unix()), }, expected: false, desc: "URL expire parameter within 10 second buffer should be invalid", }, { name: "fallback to Expire field when no URL expire parameter", link: &Link{ URL: "https://example.com/file", Expire: Time(time.Now().Add(time.Hour)), }, expected: true, desc: "should fallback to Expire field when URL has no expire parameter", }, { name: "fallback to Expire field when URL expire parameter is invalid", link: &Link{ URL: "https://example.com/file?expire=invalid", Expire: Time(time.Now().Add(time.Hour)), }, expected: true, desc: "should fallback to Expire field when URL expire parameter is 
unparsable", }, { name: "invalid when both URL expire and Expire field are expired", link: &Link{ URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()), Expire: Time(time.Now().Add(-time.Hour)), }, expected: false, desc: "should be invalid when both URL expire and Expire field are expired", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := tt.link.Valid() if result != tt.expected { t.Errorf("Link.Valid() = %v, expected %v. %s", result, tt.expected, tt.desc) } }) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/pikpak/api/types.go
backend/pikpak/api/types.go
// Package api has type definitions for pikpak
//
// Manually obtained from the API responses using Browse Dev. Tool and https://mholt.github.io/json-to-go/
package api

import (
	"fmt"
	"net/url"
	"reflect"
	"strconv"
	"time"
)

const (
	// timeFormat is RFC3339 wrapped in JSON double quotes, e.g.
	// "2022-09-17T14:31:06.056+08:00"
	timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents date and time information for the pikpak API, by using RFC3339
type Time time.Time

// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
	timeString := (*time.Time)(t).Format(timeFormat)
	return []byte(timeString), nil
}

// UnmarshalJSON turns JSON into a Time
//
// "null" and "" are treated as absent and leave t unchanged.
func (t *Time) UnmarshalJSON(data []byte) error {
	if string(data) == "null" || string(data) == `""` {
		return nil
	}
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}

// Types of things in Item
const (
	KindOfFolder        = "drive#folder"
	KindOfFile          = "drive#file"
	KindOfFileList      = "drive#fileList"
	KindOfResumable     = "drive#resumable"
	KindOfForm          = "drive#form"
	ThumbnailSizeS      = "SIZE_SMALL"
	ThumbnailSizeM      = "SIZE_MEDIUM"
	ThumbnailSizeL      = "SIZE_LARGE"
	PhaseTypeComplete   = "PHASE_TYPE_COMPLETE"
	PhaseTypeRunning    = "PHASE_TYPE_RUNNING"
	PhaseTypeError      = "PHASE_TYPE_ERROR"
	PhaseTypePending    = "PHASE_TYPE_PENDING"
	UploadTypeForm      = "UPLOAD_TYPE_FORM"
	UploadTypeResumable = "UPLOAD_TYPE_RESUMABLE"
	ListLimit           = 500
)

// ------------------------------------------------------------

// Error details api error from pikpak
type Error struct {
	Reason  string `json:"error"` // short description of the reason, e.g. "file_name_empty" "invalid_request"
	Code    int    `json:"error_code"`
	URL     string `json:"error_url,omitempty"`
	Message string `json:"error_description,omitempty"`
	// can have either of `error_details` or `details`
	ErrorDetails []*ErrorDetails `json:"error_details,omitempty"`
	Details      []*ErrorDetails `json:"details,omitempty"`
}

// ErrorDetails contains further details of api error
type ErrorDetails struct {
	Type         string   `json:"@type,omitempty"`
	Reason       string   `json:"reason,omitempty"`
	Domain       string   `json:"domain,omitempty"`
	Metadata     struct{} `json:"metadata,omitempty"` // TODO: undiscovered yet
	Locale       string   `json:"locale,omitempty"`   // e.g. "en"
	Message      string   `json:"message,omitempty"`
	StackEntries []any    `json:"stack_entries,omitempty"` // TODO: undiscovered yet
	Detail       string   `json:"detail,omitempty"`
}

// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
	out := fmt.Sprintf("Error %q (%d)", e.Reason, e.Code)
	if e.Message != "" {
		out += ": " + e.Message
	}
	return out
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// ------------------------------------------------------------

// Filters contains parameters for filters when listing.
//
// possible operators
// * in: a list of comma-separated string
// * eq: "true" or "false"
// * gt or lt: time format string, e.g. "2023-01-28T10:56:49.757+08:00"
type Filters struct {
	Phase        map[string]string `json:"phase,omitempty"`         // "in" or "eq"
	Trashed      map[string]bool   `json:"trashed,omitempty"`       // "eq"
	Kind         map[string]string `json:"kind,omitempty"`          // "eq"
	Starred      map[string]bool   `json:"starred,omitempty"`       // "eq"
	ModifiedTime map[string]string `json:"modified_time,omitempty"` // "gt" or "lt"
}

// Set sets filter values using field name, operator and corresponding value
//
// NOTE(review): field is resolved via reflection — passing a name that is not
// a Filters field will panic; callers are expected to pass known field names.
func (f *Filters) Set(field, operator, value string) {
	if value == "" {
		// UNSET for empty values
		return
	}
	r := reflect.ValueOf(f)
	fd := reflect.Indirect(r).FieldByName(field)
	// Boolean-looking values go into the map[string]bool fields,
	// everything else into the map[string]string fields.
	if v, err := strconv.ParseBool(value); err == nil {
		fd.Set(reflect.ValueOf(map[string]bool{operator: v}))
	} else {
		fd.Set(reflect.ValueOf(map[string]string{operator: value}))
	}
}

// ------------------------------------------------------------
// Common Elements

// Link contains a download URL for opening files
type Link struct {
	URL    string `json:"url"`
	Token  string `json:"token"`
	Expire Time   `json:"expire"`
	Type   string `json:"type,omitempty"`
}

// Valid reports whether l is non-nil, has an URL, and is not expired.
// It primarily checks the URL's expire query parameter, falling back to the Expire field.
// A 10 second buffer is applied so links about to expire are treated as invalid.
func (l *Link) Valid() bool {
	if l == nil || l.URL == "" {
		return false
	}
	// Primary validation: check URL's expire query parameter
	if u, err := url.Parse(l.URL); err == nil {
		if expireStr := u.Query().Get("expire"); expireStr != "" {
			// Try parsing as Unix timestamp (seconds)
			if expireInt, err := strconv.ParseInt(expireStr, 10, 64); err == nil {
				expireTime := time.Unix(expireInt, 0)
				return time.Now().Add(10 * time.Second).Before(expireTime)
			}
		}
	}
	// Fallback validation: use the Expire field if URL parsing didn't work
	return time.Now().Add(10 * time.Second).Before(time.Time(l.Expire))
}

// URL is a basic form of URL
type URL struct {
	Kind string `json:"kind,omitempty"` // e.g. "upload#url"
	URL  string `json:"url,omitempty"`
}

// ------------------------------------------------------------
// Base Elements

// FileList contains a list of File elements
type FileList struct {
	Kind            string  `json:"kind,omitempty"` // drive#fileList
	Files           []*File `json:"files,omitempty"`
	NextPageToken   string  `json:"next_page_token"`
	Version         string  `json:"version,omitempty"`
	VersionOutdated bool    `json:"version_outdated,omitempty"`
	SyncTime        Time    `json:"sync_time"`
}

// File is a basic element representing a single file object
//
// There are two types of download links,
// 1) one from File.WebContentLink or File.Links.ApplicationOctetStream.URL and
// 2) the other from File.Medias[].Link.URL.
// Empirically, 2) is less restrictive to multiple concurrent range-requests
// for a single file, i.e. supports for higher `--multi-thread-streams=N`.
// However, it is not generally applicable as it is only for media.
type File struct {
	Apps              []*FileApp  `json:"apps,omitempty"`
	Audit             *FileAudit  `json:"audit,omitempty"`
	Collection        string      `json:"collection,omitempty"` // TODO
	CreatedTime       Time        `json:"created_time,omitempty"`
	DeleteTime        Time        `json:"delete_time,omitempty"`
	FileCategory      string      `json:"file_category,omitempty"` // "AUDIO", "VIDEO"
	FileExtension     string      `json:"file_extension,omitempty"`
	FolderType        string      `json:"folder_type,omitempty"`
	Hash              string      `json:"hash,omitempty"` // custom hash with a form of sha1sum
	IconLink          string      `json:"icon_link,omitempty"`
	ID                string      `json:"id,omitempty"`
	Kind              string      `json:"kind,omitempty"` // "drive#file"
	Links             *FileLinks  `json:"links,omitempty"`
	Md5Checksum       string      `json:"md5_checksum,omitempty"`
	Medias            []*Media    `json:"medias,omitempty"`
	MimeType          string      `json:"mime_type,omitempty"`
	ModifiedTime      Time        `json:"modified_time,omitempty"` // updated when renamed or moved
	Name              string      `json:"name,omitempty"`
	OriginalFileIndex int         `json:"original_file_index,omitempty"` // TODO
	OriginalURL       string      `json:"original_url,omitempty"`
	Params            *FileParams `json:"params,omitempty"`
	ParentID          string      `json:"parent_id,omitempty"`
	Phase             string      `json:"phase,omitempty"`
	Revision          int         `json:"revision,omitempty,string"`
	ReferenceEvents   []any       `json:"reference_events"`
	ReferenceResource any         `json:"reference_resource"`
	Size              int64       `json:"size,omitempty,string"`
	SortName          string      `json:"sort_name,omitempty"`
	Space             string      `json:"space,omitempty"`
	SpellName         []any       `json:"spell_name,omitempty"` // TODO maybe list of something?
	Starred           bool        `json:"starred,omitempty"`
	Tags              []any       `json:"tags"`
	ThumbnailLink     string      `json:"thumbnail_link,omitempty"`
	Trashed           bool        `json:"trashed,omitempty"`
	UserID            string      `json:"user_id,omitempty"`
	UserModifiedTime  Time        `json:"user_modified_time,omitempty"`
	WebContentLink    string      `json:"web_content_link,omitempty"`
	Writable          bool        `json:"writable,omitempty"`
}

// FileLinks includes links to file at backend
type FileLinks struct {
	ApplicationOctetStream *Link `json:"application/octet-stream,omitempty"`
}

// FileAudit contains audit information for the file
type FileAudit struct {
	Status  string `json:"status,omitempty"` // "STATUS_OK"
	Message string `json:"message,omitempty"`
	Title   string `json:"title,omitempty"`
}

// Media contains info about supported version of media, e.g. original, transcoded, etc
type Media struct {
	MediaID   string `json:"media_id,omitempty"`
	MediaName string `json:"media_name,omitempty"`
	Video     struct {
		Height     int    `json:"height,omitempty"`
		Width      int    `json:"width,omitempty"`
		Duration   int64  `json:"duration,omitempty"`
		BitRate    int    `json:"bit_rate,omitempty"`
		FrameRate  int    `json:"frame_rate,omitempty"`
		VideoCodec string `json:"video_codec,omitempty"` // "h264", "hevc"
		AudioCodec string `json:"audio_codec,omitempty"` // "pcm_bluray", "aac"
		VideoType  string `json:"video_type,omitempty"`  // "mpegts"
		HdrType    string `json:"hdr_type,omitempty"`
	} `json:"video,omitempty"`
	Link           *Link  `json:"link,omitempty"`
	NeedMoreQuota  bool   `json:"need_more_quota,omitempty"`
	VipTypes       []any  `json:"vip_types,omitempty"` // TODO maybe list of something?
	RedirectLink   string `json:"redirect_link,omitempty"`
	IconLink       string `json:"icon_link,omitempty"`
	IsDefault      bool   `json:"is_default,omitempty"`
	Priority       int    `json:"priority,omitempty"`
	IsOrigin       bool   `json:"is_origin,omitempty"`
	ResolutionName string `json:"resolution_name,omitempty"`
	IsVisible      bool   `json:"is_visible,omitempty"`
	Category       string `json:"category,omitempty"` // "category_origin"
	Audio          any    `json:"audio"`              // TODO: undiscovered yet
}

// FileParams includes parameters for instant open
type FileParams struct {
	DeviceID     string `json:"device_id,omitempty"`
	Duration     int64  `json:"duration,omitempty,string"` // in seconds
	Height       int    `json:"height,omitempty,string"`
	Platform     string `json:"platform,omitempty"` // "Upload"
	PlatformIcon string `json:"platform_icon,omitempty"`
	TaskID       string `json:"task_id"`
	URL          string `json:"url,omitempty"`
	Width        int    `json:"width,omitempty,string"`
}

// FileApp includes parameters for instant open
type FileApp struct {
	ID            string   `json:"id,omitempty"`   // "decompress" for rar files
	Name          string   `json:"name,omitempty"` // decompress" for rar files
	Access        []any    `json:"access,omitempty"`
	Link          string   `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
	RedirectLink  string   `json:"redirect_link,omitempty"`
	VipTypes      []any    `json:"vip_types,omitempty"`
	NeedMoreQuota bool     `json:"need_more_quota,omitempty"`
	IconLink      string   `json:"icon_link,omitempty"`
	IsDefault     bool     `json:"is_default,omitempty"`
	Params        struct{} `json:"params,omitempty"` // TODO
	CategoryIDs   []any    `json:"category_ids,omitempty"`
	AdSceneType   int      `json:"ad_scene_type,omitempty"`
	Space         string   `json:"space,omitempty"`
	Links         struct{} `json:"links,omitempty"` // TODO
}

// ------------------------------------------------------------

// TaskList contains a list of Task elements
type TaskList struct {
	Tasks         []*Task `json:"tasks,omitempty"` // "drive#task"
	NextPageToken string  `json:"next_page_token"`
	ExpiresIn     int     `json:"expires_in,omitempty"`
}

// Task is a basic element representing a single task such as offline download and upload
type Task struct {
	Kind              string      `json:"kind,omitempty"` // "drive#task"
	ID                string      `json:"id,omitempty"`   // task id?
	Name              string      `json:"name,omitempty"` // torrent name?
	Type              string      `json:"type,omitempty"` // "offline"
	UserID            string      `json:"user_id,omitempty"`
	Statuses          []any       `json:"statuses,omitempty"`    // TODO
	StatusSize        int         `json:"status_size,omitempty"` // TODO
	Params            *TaskParams `json:"params,omitempty"`      // TODO
	FileID            string      `json:"file_id,omitempty"`
	FileName          string      `json:"file_name,omitempty"`
	FileSize          string      `json:"file_size,omitempty"`
	Message           string      `json:"message,omitempty"` // e.g. "Saving"
	CreatedTime       Time        `json:"created_time,omitempty"`
	UpdatedTime       Time        `json:"updated_time,omitempty"`
	ThirdTaskID       string      `json:"third_task_id,omitempty"` // TODO
	Phase             string      `json:"phase,omitempty"`         // e.g. "PHASE_TYPE_RUNNING"
	Progress          int         `json:"progress,omitempty"`
	IconLink          string      `json:"icon_link,omitempty"`
	Callback          string      `json:"callback,omitempty"`
	ReferenceResource any         `json:"reference_resource,omitempty"` // TODO
	Space             string      `json:"space,omitempty"`
}

// TaskParams includes parameters informing status of Task
type TaskParams struct {
	Age          string `json:"age,omitempty"`
	PredictSpeed string `json:"predict_speed,omitempty"`
	PredictType  string `json:"predict_type,omitempty"`
	URL          string `json:"url,omitempty"`
}

// Form contains parameters for upload by multipart/form-data
type Form struct {
	Headers    struct{} `json:"headers"`
	Kind       string   `json:"kind"`   // "drive#form"
	Method     string   `json:"method"` // "POST"
	MultiParts struct {
		OSSAccessKeyID string `json:"OSSAccessKeyId"`
		Signature      string `json:"Signature"`
		Callback       string `json:"callback"`
		Key            string `json:"key"`
		Policy         string `json:"policy"`
		XUserData      string `json:"x:user_data"`
	} `json:"multi_parts"`
	URL string `json:"url"`
}

// Resumable contains parameters for upload by resumable
type Resumable struct {
	Kind     string           `json:"kind,omitempty"`     // "drive#resumable"
	Provider string           `json:"provider,omitempty"` // e.g. "PROVIDER_ALIYUN"
	Params   *ResumableParams `json:"params,omitempty"`
}

// ResumableParams specifies resumable paramegers
type ResumableParams struct {
	AccessKeyID     string `json:"access_key_id,omitempty"`
	AccessKeySecret string `json:"access_key_secret,omitempty"`
	Bucket          string `json:"bucket,omitempty"`
	Endpoint        string `json:"endpoint,omitempty"`
	Expiration      Time   `json:"expiration,omitempty"`
	Key             string `json:"key,omitempty"`
	SecurityToken   string `json:"security_token,omitempty"`
}

// FileInArchive is a basic element in archive
type FileInArchive struct {
	Index    int    `json:"index,omitempty"`
	Filename string `json:"filename,omitempty"`
	Filesize string `json:"filesize,omitempty"`
	MimeType string `json:"mime_type,omitempty"`
	Gcid     string `json:"gcid,omitempty"`
	Kind     string `json:"kind,omitempty"`
	IconLink string `json:"icon_link,omitempty"`
	Path     string `json:"path,omitempty"`
}

// ------------------------------------------------------------

// NewFile is a response to RequestNewFile
type NewFile struct {
	File       *File      `json:"file,omitempty"`
	Form       *Form      `json:"form,omitempty"`
	Resumable  *Resumable `json:"resumable,omitempty"`
	Task       *Task      `json:"task,omitempty"`        // null in this case
	UploadType string     `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}

// NewTask is a response to RequestNewTask
type NewTask struct {
	UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
	File       *File  `json:"file,omitempty"`        // null in this case
	Task       *Task  `json:"task,omitempty"`
	URL        *URL   `json:"url,omitempty"` // {"kind": "upload#url"}
}

// About informs drive status
type About struct {
	Kind      string   `json:"kind,omitempty"` // "drive#about"
	Quota     *Quota   `json:"quota,omitempty"`
	ExpiresAt string   `json:"expires_at,omitempty"`
	Quotas    struct{} `json:"quotas,omitempty"` // maybe []*Quota?
}

// Quota informs drive quota
type Quota struct {
	Kind           string `json:"kind,omitempty"`                   // "drive#quota"
	Limit          int64  `json:"limit,omitempty,string"`           // limit in bytes
	Usage          int64  `json:"usage,omitempty,string"`           // bytes in use
	UsageInTrash   int64  `json:"usage_in_trash,omitempty,string"`  // bytes in trash but this seems not working
	PlayTimesLimit string `json:"play_times_limit,omitempty"`       // maybe in seconds
	PlayTimesUsage string `json:"play_times_usage,omitempty"`       // maybe in seconds
	IsUnlimited    bool   `json:"is_unlimited,omitempty"`
}

// Share is a response to RequestShare
//
// used in PublicLink()
type Share struct {
	ShareID   string `json:"share_id,omitempty"`
	ShareURL  string `json:"share_url,omitempty"`
	PassCode  string `json:"pass_code,omitempty"`
	ShareText string `json:"share_text,omitempty"`
}

// User contains user account information
//
// GET https://user.mypikpak.com/v1/user/me
type User struct {
	Sub               string          `json:"sub,omitempty"`       // userid for internal use
	Name              string          `json:"name,omitempty"`      // Username
	Picture           string          `json:"picture,omitempty"`   // URL to Avatar image
	Email             string          `json:"email,omitempty"`     // redacted email address
	Providers         *[]UserProvider `json:"providers,omitempty"` // OAuth provider
	PhoneNumber       string          `json:"phone_number,omitempty"`
	Password          string          `json:"password,omitempty"` // "SET" if configured
	Status            string          `json:"status,omitempty"`   // "ACTIVE"
	CreatedAt         Time            `json:"created_at,omitempty"`
	PasswordUpdatedAt Time            `json:"password_updated_at,omitempty"`
}

// UserProvider details third-party authentication
type UserProvider struct {
	ID             string `json:"id,omitempty"` // e.g. "google.com"
	ProviderUserID string `json:"provider_user_id,omitempty"`
	Name           string `json:"name,omitempty"` // username
}

// VIP includes subscription details about premium account
//
// GET https://api-drive.mypikpak.com/drive/v1/privilege/vip
type VIP struct {
	Result      string `json:"result,omitempty"` // "ACCEPTED"
	Message     string `json:"message,omitempty"`
	RedirectURI string `json:"redirect_uri,omitempty"`
	Data        struct {
		Expire Time   `json:"expire,omitempty"`
		Status string `json:"status,omitempty"`  // "invalid" or "ok"
		Type   string `json:"type,omitempty"`    // "novip" or "platinum"
		UserID string `json:"user_id,omitempty"` // same as User.Sub
	} `json:"data,omitempty"`
}

// DecompressResult is a response to RequestDecompress
type DecompressResult struct {
	Status       string `json:"status,omitempty"` // "OK"
	StatusText   string `json:"status_text,omitempty"`
	TaskID       string `json:"task_id,omitempty"`   // same as File.Id
	FilesNum     int    `json:"files_num,omitempty"` // number of files in archive
	RedirectLink string `json:"redirect_link,omitempty"`
}

// ------------------------------------------------------------

// RequestShare is to request for file share
type RequestShare struct {
	FileIDs        []string `json:"file_ids,omitempty"`
	ShareTo        string   `json:"share_to,omitempty"`         // "publiclink",
	ExpirationDays int      `json:"expiration_days,omitempty"`  // -1 = 'forever'
	PassCodeOption string   `json:"pass_code_option,omitempty"` // "NOT_REQUIRED"
}

// RequestBatch is to request for batch actions
type RequestBatch struct {
	IDs []string          `json:"ids,omitempty"`
	To  map[string]string `json:"to,omitempty"`
}

// RequestNewFile is to request for creating a new `drive#folder` or `drive#file`
type RequestNewFile struct {
	// always required
	Kind       string `json:"kind"` // "drive#folder" or "drive#file"
	Name       string `json:"name"`
	ParentID   string `json:"parent_id"`
	FolderType string `json:"folder_type"`
	// only when uploading a new file
	Hash       string            `json:"hash,omitempty"`      // gcid
	Resumable  map[string]string `json:"resumable,omitempty"` // {"provider": "PROVIDER_ALIYUN"}
	Size       int64             `json:"size,omitempty"`
	UploadType string            `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}

// RequestNewTask is to request for creating a new task like offline downloads
//
// Name and ParentID can be left empty.
type RequestNewTask struct {
	Kind       string `json:"kind,omitempty"` // "drive#file"
	Name       string `json:"name,omitempty"`
	ParentID   string `json:"parent_id,omitempty"`
	UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
	URL        *URL   `json:"url,omitempty"`         // {"url": downloadUrl}
	FolderType string `json:"folder_type,omitempty"` // "" if parent_id else "DOWNLOAD"
}

// RequestDecompress is to request for decompress of archive files
type RequestDecompress struct {
	Gcid          string           `json:"gcid,omitempty"`     // same as File.Hash
	Password      string           `json:"password,omitempty"` // ""
	FileID        string           `json:"file_id,omitempty"`
	Files         []*FileInArchive `json:"files,omitempty"` // can request selected files to be decompressed
	DefaultParent bool             `json:"default_parent,omitempty"`
}

// ------------------------------------------------------------ authorization

// CaptchaToken is a response to requestCaptchaToken api call
type CaptchaToken struct {
	CaptchaToken string `json:"captcha_token"`
	ExpiresIn    int64  `json:"expires_in"` // currently 300s
	// API doesn't provide Expiry field and thus it should be populated from ExpiresIn on retrieval
	Expiry time.Time `json:"expiry,omitempty"`
	URL    string    `json:"url,omitempty"` // a link for users to solve captcha
}

// expired reports whether the token is expired.
// t must be non-nil.
func (t *CaptchaToken) expired() bool {
	if t.Expiry.IsZero() {
		return false
	}

	expiryDelta := time.Duration(10) * time.Second // same as oauth2's defaultExpiryDelta
	return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
}

// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
func (t *CaptchaToken) Valid() bool {
	return t != nil && t.CaptchaToken != "" && !t.expired()
}

// CaptchaTokenRequest is to request for captcha token
type CaptchaTokenRequest struct {
	Action       string            `json:"action,omitempty"`
	CaptchaToken string            `json:"captcha_token,omitempty"`
	ClientID     string            `json:"client_id,omitempty"`
	DeviceID     string            `json:"device_id,omitempty"`
	Meta         *CaptchaTokenMeta `json:"meta,omitempty"`
}

// CaptchaTokenMeta contains meta info for CaptchaTokenRequest
type CaptchaTokenMeta struct {
	CaptchaSign   string `json:"captcha_sign,omitempty"`
	ClientVersion string `json:"client_version,omitempty"`
	PackageName   string `json:"package_name,omitempty"`
	Timestamp     string `json:"timestamp,omitempty"`
	UserID        string `json:"user_id,omitempty"` // webdrive uses this instead of UserName
	UserName      string `json:"username,omitempty"`
	Email         string `json:"email,omitempty"`
	PhoneNumber   string `json:"phone_number,omitempty"`
}

// Token represents oauth2 token used for pikpak which needs to be converted to be compatible with oauth2.Token
type Token struct {
	TokenType    string `json:"token_type"`
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`
	ExpiresIn    int    `json:"expires_in"`
	Sub          string `json:"sub"`
}

// Expiry returns expiry from expires in, so it should be called on retrieval
// e must be non-nil.
func (e *Token) Expiry() (t time.Time) {
	if v := e.ExpiresIn; v != 0 {
		return time.Now().Add(time.Duration(v) * time.Second)
	}
	return
}

// ------------------------------------------------------------
// NOT implemented YET

// RequestArchiveFileList is to request for a list of files in archive
//
// POST https://api-drive.mypikpak.com/decompress/v1/list
type RequestArchiveFileList struct {
	Gcid     string `json:"gcid,omitempty"`     // same as api.File.Hash
	Path     string `json:"path,omitempty"`     // "" by default
	Password string `json:"password,omitempty"` // "" by default
	FileID   string `json:"file_id,omitempty"`
}

// ArchiveFileList is a response to RequestArchiveFileList
type ArchiveFileList struct {
	Status      string           `json:"status,omitempty"`       // "OK"
	StatusText  string           `json:"status_text,omitempty"`  // ""
	TaskID      string           `json:"task_id,omitempty"`      // ""
	CurrentPath string           `json:"current_path,omitempty"` // ""
	Title       string           `json:"title,omitempty"`
	FileSize    int64            `json:"file_size,omitempty"`
	Gcid        string           `json:"gcid,omitempty"` // same as File.Hash
	Files       []*FileInArchive `json:"files,omitempty"`
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/filelu/utils.go
backend/filelu/utils.go
package filelu

import (
	"fmt"
)

// parseStorageToBytes converts a storage string expressed in gigabytes
// (e.g. "10" or "2.5") into a byte count.
//
// Parsing uses fmt.Sscanf with %f, so a leading numeric prefix is accepted
// even when followed by other text, matching the API's storage field format.
func parseStorageToBytes(storage string) (int64, error) {
	var gigabytes float64
	if _, err := fmt.Sscanf(storage, "%f", &gigabytes); err != nil {
		return 0, fmt.Errorf("failed to parse storage: %w", err)
	}
	// 1 GiB = 1024 * 1024 * 1024 bytes; multiply in float64 then truncate,
	// exactly as the original byte conversion did.
	const bytesPerGB = 1 << 30
	return int64(gigabytes * bytesPerGB), nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/filelu/filelu_test.go
backend/filelu/filelu_test.go
package filelu_test

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests for the FileLu backend
//
// It delegates to the shared rclone backend test suite against the
// remote named "TestFileLu:" (which must be configured beforehand).
// SkipInvalidUTF8 is set, so the suite's invalid-UTF-8 filename cases
// are skipped — presumably the backend/encoder cannot round-trip them;
// confirm against the backend's encoding settings.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestFileLu:",
		NilObject:       nil,
		SkipInvalidUTF8: true,
	})
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/filelu/filelu.go
backend/filelu/filelu.go
// Package filelu provides an interface to the FileLu storage system. package filelu import ( "context" "fmt" "io" "net/http" "os" "path" "strings" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" ) // Register the backend with Rclone func init() { fs.Register(&fs.RegInfo{ Name: "filelu", Description: "FileLu Cloud Storage", NewFs: NewFs, Options: []fs.Option{{ Name: "key", Help: "Your FileLu Rclone key from My Account", Required: true, Sensitive: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot encoder.EncodeSlash | encoder.EncodeLtGt | encoder.EncodeExclamation | encoder.EncodeDoubleQuote | encoder.EncodeSingleQuote | encoder.EncodeBackQuote | encoder.EncodeQuestion | encoder.EncodeDollar | encoder.EncodeColon | encoder.EncodeAsterisk | encoder.EncodePipe | encoder.EncodeHash | encoder.EncodePercent | encoder.EncodeBackSlash | encoder.EncodeCrLf | encoder.EncodeDel | encoder.EncodeCtl | encoder.EncodeLeftSpace | encoder.EncodeLeftPeriod | encoder.EncodeLeftTilde | encoder.EncodeLeftCrLfHtVt | encoder.EncodeRightPeriod | encoder.EncodeRightCrLfHtVt | encoder.EncodeSquareBracket | encoder.EncodeSemicolon | encoder.EncodeRightSpace | encoder.EncodeInvalidUtf8 | encoder.EncodeDot), }, }}) } // Options defines the configuration for the FileLu backend type Options struct { Key string `config:"key"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents the FileLu file system type Fs struct { name string root string opt Options features *fs.Features endpoint string pacer *pacer.Pacer srv *rest.Client client *http.Client 
targetFile string } // NewFs creates a new Fs object for FileLu func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) { opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, fmt.Errorf("failed to parse config: %w", err) } if opt.Key == "" { return nil, fmt.Errorf("FileLu Rclone Key is required") } client := fshttp.NewClient(ctx) if strings.TrimSpace(root) == "" { root = "" } root = strings.Trim(root, "/") filename := "" f := &Fs{ name: name, opt: *opt, endpoint: "https://filelu.com/rclone", client: client, srv: rest.NewClient(client).SetRoot("https://filelu.com/rclone"), pacer: pacer.New(), targetFile: filename, root: root, } f.features = (&fs.Features{ CanHaveEmptyDirectories: true, WriteMetadata: false, SlowHash: true, }).Fill(ctx, f) rootContainer, rootDirectory := rootSplit(f.root) if rootContainer != "" && rootDirectory != "" { // Check to see if the (container,directory) is actually an existing file oldRoot := f.root newRoot, leaf := path.Split(oldRoot) f.root = strings.Trim(newRoot, "/") _, err := f.NewObject(ctx, leaf) if err != nil { if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile { // File doesn't exist or is a directory so return old f f.root = strings.Trim(oldRoot, "/") return f, nil } return nil, err } // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // Mkdir to create directory on remote server. 
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	// path.Clean collapses any doubled slashes introduced by the join
	fullPath := path.Clean(f.root + "/" + dir)
	_, err := f.createFolder(ctx, fullPath)
	return err
}

// About provides usage statistics for the remote
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	accountInfo, err := f.getAccountInfo(ctx)
	if err != nil {
		return nil, err
	}
	// Storage figures come back as strings and must be parsed to bytes
	totalStorage, err := parseStorageToBytes(accountInfo.Result.Storage)
	if err != nil {
		return nil, fmt.Errorf("failed to parse total storage: %w", err)
	}
	usedStorage, err := parseStorageToBytes(accountInfo.Result.StorageUsed)
	if err != nil {
		return nil, fmt.Errorf("failed to parse used storage: %w", err)
	}
	return &fs.Usage{
		Total: fs.NewUsageValue(totalStorage), // Total bytes available
		Used:  fs.NewUsageValue(usedStorage),  // Total bytes used
		Free:  fs.NewUsageValue(totalStorage - usedStorage),
	}, nil
}

// Purge deletes the directory and all its contents
func (f *Fs) Purge(ctx context.Context, dir string) error {
	fullPath := path.Join(f.root, dir)
	if fullPath != "" {
		fullPath = "/" + strings.Trim(fullPath, "/")
	}
	return f.deleteFolder(ctx, fullPath)
}

// List returns a list of files and folders
// List returns a list of files and folders for the given directory
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	// Compose full path for API call
	fullPath := path.Join(f.root, dir)
	fullPath = "/" + strings.Trim(fullPath, "/")
	if fullPath == "/" {
		fullPath = ""
	}
	var entries fs.DirEntries
	result, err := f.getFolderList(ctx, fullPath)
	if err != nil {
		return nil, err
	}
	// fldMap records folder IDs so that entries which are folders are not
	// also emitted as files in the second loop below.
	fldMap := map[string]bool{}
	for _, folder := range result.Result.Folders {
		fldMap[folder.FldID.String()] = true
		// At the remote root, skip folders nested deeper than one level
		if f.root == "" && dir == "" && strings.Contains(folder.Path, "/") {
			continue
		}
		paths := strings.Split(folder.Path, fullPath+"/")
		remote := paths[0]
		if len(paths) > 1 {
			remote = paths[1]
		}
		// Only direct children are listed, not grandchildren
		if strings.Contains(remote, "/") {
			continue
		}
		pathsWithoutRoot := strings.Split(folder.Path, "/"+f.root+"/")
		remotePathWithoutRoot := pathsWithoutRoot[0]
		if len(pathsWithoutRoot) > 1 {
			remotePathWithoutRoot = pathsWithoutRoot[1]
		}
		remotePathWithoutRoot = strings.TrimPrefix(remotePathWithoutRoot, "/")
		// NOTE(review): the listing API does not return modification times,
		// so time.Now() is used as a placeholder.
		entries = append(entries, fs.NewDir(remotePathWithoutRoot, time.Now()))
	}
	for _, file := range result.Result.Files {
		// Skip anything whose ID was already seen as a folder
		if _, ok := fldMap[file.FldID.String()]; ok {
			continue
		}
		remote := path.Join(dir, file.Name)
		// trim leading slashes
		remote = strings.TrimPrefix(remote, "/")
		obj := &Object{
			fs:      f,
			remote:  remote,
			size:    file.Size,
			modTime: time.Now(),
		}
		entries = append(entries, obj)
	}
	return entries, nil
}

// Put uploads a file directly to the destination folder in the FileLu storage system.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	if src.Size() == 0 {
		return nil, fs.ErrorCantUploadEmptyFiles
	}
	err := f.uploadFile(ctx, in, src.Remote())
	if err != nil {
		return nil, err
	}
	newObject := &Object{
		fs:      f,
		remote:  src.Remote(),
		size:    src.Size(),
		modTime: src.ModTime(ctx),
	}
	fs.Infof(f, "Put: Successfully uploaded new file %q", src.Remote())
	return newObject, nil
}

// Move moves the file to the specified location
func (f *Fs) Move(ctx context.Context, src fs.Object, destinationPath string) (fs.Object, error) {
	// NOTE(review): an absolute or Windows-style destination is treated as a
	// LOCAL filesystem path (download + delete source) — confirm this is
	// intended, since fs.Mover normally moves within the same remote, and
	// this branch returns (nil, nil) rather than an fs.Object.
	if strings.HasPrefix(destinationPath, "/") || strings.Contains(destinationPath, ":\\") {
		dir := path.Dir(destinationPath)
		if err := os.MkdirAll(dir, 0755); err != nil {
			return nil, fmt.Errorf("failed to create destination directory: %w", err)
		}
		reader, err := src.Open(ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to open source file: %w", err)
		}
		defer func() {
			if err := reader.Close(); err != nil {
				fs.Logf(nil, "Failed to close file body: %v", err)
			}
		}()
		dest, err := os.Create(destinationPath)
		if err != nil {
			return nil, fmt.Errorf("failed to create destination file: %w", err)
		}
		defer func() {
			if err := dest.Close(); err != nil {
				fs.Logf(nil, "Failed to close file body: %v", err)
			}
		}()
		if _, err := io.Copy(dest, reader); err != nil {
			return nil, fmt.Errorf("failed to copy file content: %w", err)
		}
		if err := src.Remove(ctx); err != nil {
			return nil, fmt.Errorf("failed to remove source file: %w", err)
		}
		return nil, nil
	}
	// Remote-to-remote move: stream the source down, upload it to the new
	// path, then delete the original.
	reader, err := src.Open(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to open source object: %w", err)
	}
	defer func() {
		if err := reader.Close(); err != nil {
			fs.Logf(nil, "Failed to close file body: %v", err)
		}
	}()
	err = f.uploadFile(ctx, reader, destinationPath)
	if err != nil {
		return nil, fmt.Errorf("failed to upload file to destination: %w", err)
	}
	if err := src.Remove(ctx); err != nil {
		return nil, fmt.Errorf("failed to delete source file: %w", err)
	}
	return &Object{
		fs:      f,
		remote:  destinationPath,
		size:    src.Size(),
		modTime: src.ModTime(ctx),
	}, nil
}

// Rmdir removes a directory
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	fullPath := path.Join(f.root, dir)
	if fullPath != "" {
		fullPath = "/" + strings.Trim(fullPath, "/")
	}
	// Step 1: Check if folder is empty
	listResp, err := f.getFolderList(ctx, fullPath)
	if err != nil {
		return err
	}
	// NOTE(review): returning fs.ErrorDirectoryNotEmpty here would let rclone
	// recognise this condition — confirm before changing the error type.
	if len(listResp.Result.Files) > 0 || len(listResp.Result.Folders) > 0 {
		return fmt.Errorf("Rmdir: directory %q is not empty", fullPath)
	}
	// Step 2: Delete the folder
	return f.deleteFolder(ctx, fullPath)
}

// Check the interfaces are satisfied
var (
	_ fs.Fs      = (*Fs)(nil)
	_ fs.Purger  = (*Fs)(nil)
	_ fs.Abouter = (*Fs)(nil)
	_ fs.Mover   = (*Fs)(nil)
	_ fs.Object  = (*Object)(nil)
)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/filelu/filelu_client.go
backend/filelu/filelu_client.go
package filelu

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"

	"github.com/rclone/rclone/backend/filelu/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/rest"
)

// createFolder creates a folder at the specified path.
//
// The whole HTTP round trip lives inside the pacer closure so that each
// attempt closes its own response body and HTTP 429/5xx responses are
// retried — matching getFolderList/getDirectLink in this file. (The
// previous version did the request outside the closure, so status-based
// retries never happened and retried attempts leaked their bodies.)
func (f *Fs) createFolder(ctx context.Context, dirPath string) (*api.CreateFolderResponse, error) {
	// Encode the path per the configured encoder before it goes on the wire
	encodedDir := f.fromStandardPath(dirPath)
	apiURL := fmt.Sprintf("%s/folder/create?folder_path=%s&key=%s",
		f.endpoint,
		url.QueryEscape(encodedDir),
		url.QueryEscape(f.opt.Key),
	)

	result := api.CreateFolderResponse{}
	err := f.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
		if err != nil {
			return false, fmt.Errorf("failed to create request: %w", err)
		}
		resp, err := f.client.Do(req)
		if err != nil {
			// Transport-level failure — retry if the error is retryable
			return shouldRetry(err), fmt.Errorf("request failed: %w", err)
		}
		defer func() {
			if err := resp.Body.Close(); err != nil {
				fs.Logf(nil, "Failed to close response body: %v", err)
			}
		}()
		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
			return false, fmt.Errorf("error decoding response: %w", err)
		}
		// Retry on 429 / 5xx
		return shouldRetryHTTP(resp.StatusCode), nil
	})
	if err != nil {
		return nil, err
	}
	// The API signals errors in-band via the status field
	if result.Status != 200 {
		return nil, fmt.Errorf("error: %s", result.Msg)
	}
	fs.Infof(f, "Successfully created folder %q with ID %v", dirPath, result.Result.FldID)
	return &result, nil
}

// getFolderList List both files and folders in a directory.
// getFolderList lists both files and folders at the given path. API-side
// names are decoded back to rclone's standard encoding before returning.
// Returns fs.ErrorDirNotFound when the API reports the folder missing.
func (f *Fs) getFolderList(ctx context.Context, path string) (*api.FolderListResponse, error) {
	// Encode the path per the configured encoder before it goes on the wire
	encodedDir := f.fromStandardPath(path)
	apiURL := fmt.Sprintf("%s/folder/list?folder_path=%s&key=%s",
		f.endpoint,
		url.QueryEscape(encodedDir),
		url.QueryEscape(f.opt.Key),
	)
	var body []byte
	err := f.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
		if err != nil {
			return false, fmt.Errorf("failed to create request: %w", err)
		}
		resp, err := f.client.Do(req)
		if err != nil {
			return shouldRetry(err), fmt.Errorf("failed to list directory: %w", err)
		}
		defer func() {
			if err := resp.Body.Close(); err != nil {
				fs.Logf(nil, "Failed to close response body: %v", err)
			}
		}()
		// Read the whole body inside the attempt so the connection can be reused
		body, err = io.ReadAll(resp.Body)
		if err != nil {
			return false, fmt.Errorf("error reading response body: %w", err)
		}
		// Retry on 429 / 5xx
		return shouldRetryHTTP(resp.StatusCode), nil
	})
	if err != nil {
		return nil, err
	}
	var response api.FolderListResponse
	if err := json.NewDecoder(bytes.NewReader(body)).Decode(&response); err != nil {
		return nil, fmt.Errorf("error decoding response: %w", err)
	}
	if response.Status != 200 {
		// Map the API's "Folder not found" message onto rclone's sentinel
		if strings.Contains(response.Msg, "Folder not found") {
			return nil, fs.ErrorDirNotFound
		}
		return nil, fmt.Errorf("API error: %s", response.Msg)
	}
	// Convert server-side names back to rclone's standard encoding
	for index := range response.Result.Folders {
		response.Result.Folders[index].Path = f.toStandardPath(response.Result.Folders[index].Path)
	}
	for index := range response.Result.Files {
		response.Result.Files[index].Name = f.toStandardPath(response.Result.Files[index].Name)
	}
	return &response, nil
}

// deleteFolder deletes a folder at the specified path.
func (f *Fs) deleteFolder(ctx context.Context, fullPath string) error { fullPath = f.fromStandardPath(fullPath) deleteURL := fmt.Sprintf("%s/folder/delete?folder_path=%s&key=%s", f.endpoint, url.QueryEscape(fullPath), url.QueryEscape(f.opt.Key), ) delResp := api.DeleteFolderResponse{} err := f.pacer.Call(func() (bool, error) { req, err := http.NewRequestWithContext(ctx, "GET", deleteURL, nil) if err != nil { return false, err } resp, err := f.client.Do(req) if err != nil { return fserrors.ShouldRetry(err), err } defer func() { if err := resp.Body.Close(); err != nil { fs.Logf(nil, "Failed to close response body: %v", err) } }() body, err := io.ReadAll(resp.Body) if err != nil { return false, err } if err := json.Unmarshal(body, &delResp); err != nil { return false, fmt.Errorf("error decoding delete response: %w", err) } if delResp.Status != 200 { return false, fmt.Errorf("delete error: %s", delResp.Msg) } return false, nil }) if err != nil { return err } fs.Infof(f, "Rmdir: successfully deleted %q", fullPath) return nil } // getDirectLink of file from FileLu to download. 
func (f *Fs) getDirectLink(ctx context.Context, filePath string) (string, int64, error) { filePath = f.fromStandardPath(filePath) apiURL := fmt.Sprintf("%s/file/direct_link?file_path=%s&key=%s", f.endpoint, url.QueryEscape(filePath), url.QueryEscape(f.opt.Key), ) result := api.FileDirectLinkResponse{} err := f.pacer.Call(func() (bool, error) { req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil) if err != nil { return false, fmt.Errorf("failed to create request: %w", err) } resp, err := f.client.Do(req) if err != nil { return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err) } defer func() { if err := resp.Body.Close(); err != nil { fs.Logf(nil, "Failed to close response body: %v", err) } }() if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { return false, fmt.Errorf("error decoding response: %w", err) } if result.Status != 200 { return false, fmt.Errorf("API error: %s", result.Msg) } return shouldRetryHTTP(resp.StatusCode), nil }) if err != nil { return "", 0, err } return result.Result.URL, result.Result.Size, nil } // deleteFile deletes a file based on filePath func (f *Fs) deleteFile(ctx context.Context, filePath string) error { filePath = f.fromStandardPath(filePath) apiURL := fmt.Sprintf("%s/file/remove?file_path=%s&key=%s", f.endpoint, url.QueryEscape(filePath), url.QueryEscape(f.opt.Key), ) result := api.DeleteFileResponse{} err := f.pacer.Call(func() (bool, error) { req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil) if err != nil { return false, fmt.Errorf("failed to create request: %w", err) } resp, err := f.client.Do(req) if err != nil { return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err) } defer func() { if err := resp.Body.Close(); err != nil { fs.Logf(nil, "Failed to close response body: %v", err) } }() if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { return false, fmt.Errorf("error decoding response: %w", err) } if result.Status != 200 { return false, 
fmt.Errorf("API error: %s", result.Msg) } return shouldRetryHTTP(resp.StatusCode), nil }) return err } // getAccountInfo retrieves account information func (f *Fs) getAccountInfo(ctx context.Context) (*api.AccountInfoResponse, error) { opts := rest.Opts{ Method: "GET", Path: "/account/info", Parameters: url.Values{ "key": {f.opt.Key}, }, } var result api.AccountInfoResponse err := f.pacer.Call(func() (bool, error) { _, callErr := f.srv.CallJSON(ctx, &opts, nil, &result) return fserrors.ShouldRetry(callErr), callErr }) if err != nil { return nil, err } if result.Status != 200 { return nil, fmt.Errorf("error: %s", result.Msg) } return &result, nil } // getFileInfo retrieves file information based on file code func (f *Fs) getFileInfo(ctx context.Context, fileCode string) (*api.FileInfoResponse, error) { u, _ := url.Parse(f.endpoint + "/file/info2") q := u.Query() q.Set("file_code", fileCode) // raw path — Go handles escaping properly here q.Set("key", f.opt.Key) u.RawQuery = q.Encode() apiURL := f.endpoint + "/file/info2?" + u.RawQuery var body []byte err := f.pacer.Call(func() (bool, error) { req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil) if err != nil { return false, fmt.Errorf("failed to create request: %w", err) } resp, err := f.client.Do(req) if err != nil { return shouldRetry(err), fmt.Errorf("failed to fetch file info: %w", err) } defer func() { if err := resp.Body.Close(); err != nil { fs.Logf(nil, "Failed to close response body: %v", err) } }() body, err = io.ReadAll(resp.Body) if err != nil { return false, fmt.Errorf("error reading response body: %w", err) } return shouldRetryHTTP(resp.StatusCode), nil }) if err != nil { return nil, err } result := api.FileInfoResponse{} if err := json.NewDecoder(bytes.NewReader(body)).Decode(&result); err != nil { return nil, fmt.Errorf("error decoding response: %w", err) } if result.Status != 200 || len(result.Result) == 0 { return nil, fs.ErrorObjectNotFound } return &result, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/filelu/filelu_helper.go
backend/filelu/filelu_helper.go
package filelu

import (
	"context"
	"errors"
	"fmt"
	"path"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
)

// errFileNotFound represent file not found error
var errFileNotFound = errors.New("file not found")

// getFileCode retrieves the file code for a given file path by listing the
// parent directory and matching on the reconstructed full path.
func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {
	parentDir := path.Dir(filePath)
	listing, err := f.getFolderList(ctx, parentDir)
	if err != nil {
		return "", err
	}
	for _, entry := range listing.Result.Files {
		// Rebuild the server-side full path; avoid a double slash at root
		candidate := parentDir + "/" + entry.Name
		if parentDir == "/" {
			candidate = "/" + entry.Name
		}
		if candidate == filePath {
			return entry.FileCode, nil
		}
	}
	return "", errFileNotFound
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }

// fromStandardPath converts an rclone-standard path to the wire encoding.
func (f *Fs) fromStandardPath(remote string) string {
	return f.opt.Enc.FromStandardPath(remote)
}

// toStandardPath converts a wire-encoded path back to rclone's standard form.
func (f *Fs) toStandardPath(remote string) string {
	return f.opt.Enc.ToStandardPath(remote)
}

// Hashes returns an empty hash set, indicating no hash support
func (f *Fs) Hashes() hash.Set { return hash.NewHashSet() }

// Name returns the remote name
func (f *Fs) Name() string { return f.name }

// Root returns the root path
func (f *Fs) Root() string { return f.root }

// Precision returns the precision of the remote
func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported }

// String returns a human-readable description of the Fs
func (f *Fs) String() string { return fmt.Sprintf("FileLu root '%s'", f.root) }

// isFileCode checks if a string looks like a file code:
// exactly 12 characters drawn from [a-z0-9].
func isFileCode(s string) bool {
	if len(s) != 12 {
		return false
	}
	for _, r := range s {
		lower := r >= 'a' && r <= 'z'
		digit := r >= '0' && r <= '9'
		if !lower && !digit {
			return false
		}
	}
	return true
}

// shouldRetry reports whether a transport error is worth retrying.
func shouldRetry(err error) bool { return fserrors.ShouldRetry(err) }

// shouldRetryHTTP reports whether an HTTP status code is worth retrying.
func shouldRetryHTTP(code int) bool {
	switch {
	case code == 429, code >= 500:
		return true
	default:
		return false
	}
}

// rootSplit splits an absolute path into its first segment (bucket) and
// the remainder (bucketPath).
func rootSplit(absPath string) (bucket, bucketPath string) {
	before, after, found := strings.Cut(absPath, "/")
	if !found {
		// Either empty (-> "", "") or a bare bucket name with no path
		return absPath, ""
	}
	return before, after
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/filelu/filelu_file_uploader.go
backend/filelu/filelu_file_uploader.go
package filelu

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"net/url"
	"path"
	"strings"

	"github.com/rclone/rclone/fs"
)

// uploadFile uploads a file to FileLu: it removes any existing file with the
// same remote path, fetches an upload server + session, then streams the
// content as a multipart POST.
func (f *Fs) uploadFile(ctx context.Context, fileContent io.Reader, fileFullPath string) error {
	directory := path.Dir(fileFullPath)
	fileName := path.Base(fileFullPath)
	if directory == "." {
		directory = ""
	}
	destinationFolderPath := path.Join(f.root, directory)
	if destinationFolderPath != "" {
		destinationFolderPath = "/" + strings.Trim(destinationFolderPath, "/")
	}
	// List the parent; create it if missing (existingEntries stays nil then)
	existingEntries, err := f.List(ctx, path.Dir(fileFullPath))
	if err != nil {
		if errors.Is(err, fs.ErrorDirNotFound) {
			err = f.Mkdir(ctx, path.Dir(fileFullPath))
			if err != nil {
				return fmt.Errorf("failed to create directory: %w", err)
			}
		} else {
			return fmt.Errorf("failed to list existing files: %w", err)
		}
	}
	for _, entry := range existingEntries {
		if entry.Remote() == fileFullPath {
			_, ok := entry.(fs.Object)
			if !ok {
				continue
			}
			// If the file exists but is different, remove it
			filePath := "/" + strings.Trim(destinationFolderPath+"/"+fileName, "/")
			err = f.deleteFile(ctx, filePath)
			if err != nil {
				return fmt.Errorf("failed to delete existing file: %w", err)
			}
		}
	}
	uploadURL, sessID, err := f.getUploadServer(ctx)
	if err != nil {
		return fmt.Errorf("failed to retrieve upload server: %w", err)
	}
	// Since the fileCode isn't used, just handle the error
	if _, err := f.uploadFileWithDestination(ctx, uploadURL, sessID, fileName, fileContent, destinationFolderPath); err != nil {
		return fmt.Errorf("failed to upload file: %w", err)
	}
	return nil
}

// getUploadServer gets the upload server URL with proper key authentication
func (f *Fs) getUploadServer(ctx context.Context) (string, string, error) {
	apiURL := fmt.Sprintf("%s/upload/server?key=%s", f.endpoint, url.QueryEscape(f.opt.Key))
	var result struct {
		Status int    `json:"status"`
		SessID string `json:"sess_id"`
		Result string `json:"result"` // upload server URL
		Msg    string `json:"msg"`
	}
	err := f.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
		if err != nil {
			return false, fmt.Errorf("failed to create request: %w", err)
		}
		resp, err := f.client.Do(req)
		if err != nil {
			return shouldRetry(err), fmt.Errorf("failed to get upload server: %w", err)
		}
		defer func() {
			if err := resp.Body.Close(); err != nil {
				fs.Logf(nil, "Failed to close response body: %v", err)
			}
		}()
		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
			return false, fmt.Errorf("error decoding response: %w", err)
		}
		if result.Status != 200 {
			return false, fmt.Errorf("API error: %s", result.Msg)
		}
		return shouldRetryHTTP(resp.StatusCode), nil
	})
	if err != nil {
		return "", "", err
	}
	return result.Result, result.SessID, nil
}

// uploadFileWithDestination uploads a file directly to a specified folder
// using file content reader. The multipart body is produced by a goroutine
// writing into an io.Pipe so the content is streamed, not buffered.
func (f *Fs) uploadFileWithDestination(ctx context.Context, uploadURL, sessID, fileName string, fileContent io.Reader, dirPath string) (string, error) {
	destinationPath := f.fromStandardPath(dirPath)
	encodedFileName := f.fromStandardPath(fileName)
	pr, pw := io.Pipe()
	writer := multipart.NewWriter(pw)
	// NOTE(review): isDeletionRequired is written by the writer goroutine and
	// read after pacer.Call returns — ordering relies on the pipe failure
	// propagating before Call returns; confirm there is no race here.
	isDeletionRequired := false
	go func() {
		defer func() {
			if err := pw.Close(); err != nil {
				fs.Logf(nil, "Failed to close: %v", err)
			}
		}()
		_ = writer.WriteField("sess_id", sessID)
		_ = writer.WriteField("utype", "prem")
		_ = writer.WriteField("fld_path", destinationPath)
		part, err := writer.CreateFormFile("file_0", encodedFileName)
		if err != nil {
			pw.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
			return
		}
		if _, err := io.Copy(part, fileContent); err != nil {
			// A partial body may have reached the server — flag for cleanup
			isDeletionRequired = true
			pw.CloseWithError(fmt.Errorf("failed to copy file content: %w", err))
			return
		}
		if err := writer.Close(); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to close writer: %w", err))
		}
	}()
	var fileCode string
	err := f.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "POST", uploadURL, pr)
		if err != nil {
			return false, fmt.Errorf("failed to create upload request: %w", err)
		}
		req.Header.Set("Content-Type", writer.FormDataContentType())
		resp, err := f.client.Do(req)
		if err != nil {
			return shouldRetry(err), fmt.Errorf("failed to send upload request: %w", err)
		}
		defer respBodyClose(resp.Body)
		// The upload endpoint answers with a JSON array, one element per file
		var result []struct {
			FileCode   string `json:"file_code"`
			FileStatus string `json:"file_status"`
		}
		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
			return false, fmt.Errorf("failed to parse upload response: %w", err)
		}
		if len(result) == 0 || result[0].FileStatus != "OK" {
			return false, fmt.Errorf("upload failed with status: %s", result[0].FileStatus)
		}
		fileCode = result[0].FileCode
		return shouldRetryHTTP(resp.StatusCode), nil
	})
	if err != nil && isDeletionRequired {
		// Attempt to delete the file if upload fails
		_ = f.deleteFile(ctx, destinationPath+"/"+fileName)
	}
	return fileCode, err
}

// respBodyClose to check body response.
func respBodyClose(responseBody io.Closer) {
	if cerr := responseBody.Close(); cerr != nil {
		fmt.Printf("Error closing response body: %v\n", cerr)
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/filelu/filelu_object.go
backend/filelu/filelu_object.go
package filelu

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
)

// Object describes a FileLu object
type Object struct {
	fs      *Fs       // parent Fs
	remote  string    // path of the object relative to the Fs root
	size    int64     // size in bytes
	modTime time.Time // NOTE(review): populated with time.Now(), not a server mtime
}

// NewObject creates a new Object for the given remote path
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	var filePath string
	filePath = path.Join(f.root, remote)
	filePath = "/" + strings.Trim(filePath, "/")
	// Get File code
	fileCode, err := f.getFileCode(ctx, filePath)
	if err != nil {
		// Any lookup failure is reported as object-not-found
		return nil, fs.ErrorObjectNotFound
	}
	// Get File info
	fileInfos, err := f.getFileInfo(ctx, fileCode)
	if err != nil {
		return nil, fmt.Errorf("failed to get file info: %w", err)
	}
	fileInfo := fileInfos.Result[0]
	// Size arrives as a string; parse error leaves size at 0
	size, _ := strconv.ParseInt(fileInfo.Size, 10, 64)
	returnedRemote := remote
	return &Object{
		fs:      f,
		remote:  returnedRemote,
		size:    size,
		modTime: time.Now(),
	}, nil
}

// Open opens the object for reading.
// NOTE(review): the whole file is downloaded into memory and range/seek
// options are applied by slicing the buffer — large files will be costly.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	filePath := path.Join(o.fs.root, o.remote)
	// Get direct link
	directLink, size, err := o.fs.getDirectLink(ctx, filePath)
	if err != nil {
		return nil, fmt.Errorf("failed to get direct link: %w", err)
	}
	o.size = size
	// Offset and Count for range download
	var offset int64
	var count int64
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
			if count < 0 {
				count = o.size - offset
			}
		case *fs.SeekOption:
			offset = x.Offset
			count = o.size
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	var reader io.ReadCloser
	err = o.fs.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "GET", directLink, nil)
		if err != nil {
			return false, fmt.Errorf("failed to create download request: %w", err)
		}
		resp, err := o.fs.client.Do(req)
		if err != nil {
			return shouldRetry(err), fmt.Errorf("failed to download file: %w", err)
		}
		if resp.StatusCode != http.StatusOK {
			defer func() {
				if err := resp.Body.Close(); err != nil {
					fs.Logf(nil, "Failed to close response body: %v", err)
				}
			}()
			return false, fmt.Errorf("failed to download file: HTTP %d", resp.StatusCode)
		}
		// Wrap the response body to handle offset and count
		currentContents, err := io.ReadAll(resp.Body)
		if err != nil {
			return false, fmt.Errorf("failed to read response body: %w", err)
		}
		if offset > 0 {
			if offset > int64(len(currentContents)) {
				return false, fmt.Errorf("offset %d exceeds file size %d", offset, len(currentContents))
			}
			currentContents = currentContents[offset:]
		}
		if count > 0 && count < int64(len(currentContents)) {
			currentContents = currentContents[:count]
		}
		reader = io.NopCloser(bytes.NewReader(currentContents))
		return false, nil
	})
	if err != nil {
		return nil, err
	}
	return reader, nil
}

// Update updates the object with new data
// NOTE(review): rejects src.Size() <= 0 while Put rejects only == 0 —
// confirm whether unknown-size (-1) uploads should be allowed here.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	if src.Size() <= 0 {
		return fs.ErrorCantUploadEmptyFiles
	}
	err := o.fs.uploadFile(ctx, in, o.remote)
	if err != nil {
		return fmt.Errorf("failed to upload file: %w", err)
	}
	o.size = src.Size()
	return nil
}

// Remove deletes the object from FileLu
func (o *Object) Remove(ctx context.Context) error {
	fullPath := "/" + strings.Trim(path.Join(o.fs.root, o.remote), "/")
	err := o.fs.deleteFile(ctx, fullPath)
	if err != nil {
		return err
	}
	fs.Infof(o.fs, "Successfully deleted file: %s", fullPath)
	return nil
}

// Hash returns the MD5 hash of an object.
// The file code is taken from the Fs root if it looks like one, otherwise
// extracted from a "(...)" group in the remote name.
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	var fileCode string
	if isFileCode(o.fs.root) {
		fileCode = o.fs.root
	} else {
		// NOTE(review): regexp compiled on every call — could be package-level
		matches := regexp.MustCompile(`\((.*?)\)`).FindAllStringSubmatch(o.remote, -1)
		for _, match := range matches {
			if len(match) > 1 && len(match[1]) == 12 {
				fileCode = match[1]
				break
			}
		}
	}
	if fileCode == "" {
		return "", fmt.Errorf("no valid file code found in the remote path")
	}
	apiURL := fmt.Sprintf("%s/file/info?file_code=%s&key=%s", o.fs.endpoint, url.QueryEscape(fileCode), url.QueryEscape(o.fs.opt.Key))
	var result struct {
		Status int    `json:"status"`
		Msg    string `json:"msg"`
		Result []struct {
			Hash string `json:"hash"`
		} `json:"result"`
	}
	err := o.fs.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
		if err != nil {
			return false, err
		}
		resp, err := o.fs.client.Do(req)
		if err != nil {
			return shouldRetry(err), err
		}
		defer func() {
			if err := resp.Body.Close(); err != nil {
				fs.Logf(nil, "Failed to close response body: %v", err)
			}
		}()
		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
			return false, err
		}
		return shouldRetryHTTP(resp.StatusCode), nil
	})
	if err != nil {
		return "", err
	}
	if result.Status != 200 || len(result.Result) == 0 {
		return "", fmt.Errorf("error: unable to fetch hash: %s", result.Msg)
	}
	return result.Result[0].Hash, nil
}

// String returns a string representation of the object
func (o *Object) String() string { return o.remote }

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info { return o.fs }

// Remote returns the remote path
func (o *Object) Remote() string { return o.remote }

// Size returns the size of the object
func (o *Object) Size() int64 { return o.size }

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time { return o.modTime }

// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}

// Storable indicates whether the object is storable
func (o *Object) Storable() bool { return true }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/filelu/api/types.go
backend/filelu/api/types.go
// Package api defines types for interacting with the FileLu API.
package api

import "encoding/json"

// CreateFolderResponse represents the response for creating a folder.
type CreateFolderResponse struct {
	Status int    `json:"status"`
	Msg    string `json:"msg"`
	Result struct {
		// FldID is `any` because the API may return it as number or string.
		FldID any `json:"fld_id"`
	} `json:"result"`
}

// DeleteFolderResponse represents the response for deleting a folder.
type DeleteFolderResponse struct {
	Status int    `json:"status"`
	Msg    string `json:"msg"`
}

// FolderListResponse represents the response for listing folders.
type FolderListResponse struct {
	Status int    `json:"status"`
	Msg    string `json:"msg"`
	Result struct {
		Files []struct {
			Name string `json:"name"`
			// json.Number tolerates both numeric and string IDs
			FldID    json.Number `json:"fld_id"`
			Path     string      `json:"path"`
			FileCode string      `json:"file_code"`
			Size     int64       `json:"size"`
		} `json:"files"`
		Folders []struct {
			Name  string      `json:"name"`
			FldID json.Number `json:"fld_id"`
			Path  string      `json:"path"`
		} `json:"folders"`
	} `json:"result"`
}

// FileDirectLinkResponse represents the response for a direct link to a file.
type FileDirectLinkResponse struct {
	Status int    `json:"status"`
	Msg    string `json:"msg"`
	Result struct {
		URL  string `json:"url"`
		Size int64  `json:"size"`
	} `json:"result"`
}

// FileInfoResponse represents the response for file information.
type FileInfoResponse struct {
	Status int    `json:"status"`
	Msg    string `json:"msg"`
	Result []struct {
		// Size is a decimal string, parsed by the caller
		Size     string `json:"size"`
		Name     string `json:"name"`
		FileCode string `json:"filecode"`
		Hash     string `json:"hash"`
		Status   int    `json:"status"`
	} `json:"result"`
}

// DeleteFileResponse represents the response for deleting a file.
type DeleteFileResponse struct {
	Status int    `json:"status"`
	Msg    string `json:"msg"`
}

// AccountInfoResponse represents the response for account information.
type AccountInfoResponse struct {
	Status int    `json:"status"` // HTTP status code of the response.
	Msg    string `json:"msg"`    // Message describing the response.
	Result struct {
		PremiumExpire string `json:"premium_expire"` // Expiration date of premium access.
		Email         string `json:"email"`          // User's email address.
		UType         string `json:"utype"`          // User type (e.g. premium or free).
		Storage       string `json:"storage"`        // Total storage available to the user.
		StorageUsed   string `json:"storage_used"`   // Amount of storage used.
	} `json:"result"` // Nested result structure containing account details.
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/quatrix/quatrix.go
backend/quatrix/quatrix.go
// Package quatrix provides an interface to the Quatrix by Maytech // object storage system. package quatrix // FIXME Quatrix only supports file names of 255 characters or less. Names // that will not be supported are those that contain non-printable // ascii, / or \, names with trailing spaces, and the special names // “.” and “..”. import ( "context" "errors" "fmt" "io" "net/http" "net/url" "path" "slices" "strconv" "strings" "time" "github.com/rclone/rclone/backend/quatrix/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/multipart" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" ) const ( minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential rootURL = "https://%s/api/1.0/" uploadURL = "https://%s/upload/chunked/" unlimitedUserQuota = -1 ) func init() { fs.Register(&fs.RegInfo{ Name: "quatrix", Description: "Quatrix by Maytech", NewFs: NewFs, Options: fs.Options{ { Name: "api_key", Help: "API key for accessing Quatrix account", Required: true, Sensitive: true, }, { Name: "host", Help: "Host name of Quatrix account", Required: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: encoder.Standard | encoder.EncodeBackSlash | encoder.EncodeInvalidUtf8, }, { Name: "effective_upload_time", Help: "Wanted upload time for one chunk", Advanced: true, Default: "4s", }, { Name: "minimal_chunk_size", Help: "The minimal size for one chunk", Advanced: true, Default: fs.SizeSuffix(10_000_000), }, { Name: "maximal_summary_chunk_size", Help: "The maximal summary for all chunks. 
It should not be less than 'transfers'*'minimal_chunk_size'", Advanced: true, Default: fs.SizeSuffix(100_000_000), }, { Name: "hard_delete", Help: "Delete files permanently rather than putting them into the trash", Advanced: true, Default: false, }, { Name: "skip_project_folders", Help: "Skip project folders in operations", Advanced: true, Default: false, }, }, }) } // Options defines the configuration for Quatrix backend type Options struct { APIKey string `config:"api_key"` Host string `config:"host"` Enc encoder.MultiEncoder `config:"encoding"` EffectiveUploadTime fs.Duration `config:"effective_upload_time"` MinimalChunkSize fs.SizeSuffix `config:"minimal_chunk_size"` MaximalSummaryChunkSize fs.SizeSuffix `config:"maximal_summary_chunk_size"` HardDelete bool `config:"hard_delete"` SkipProjectFolders bool `config:"skip_project_folders"` } // Fs represents remote Quatrix fs type Fs struct { name string root string description string features *fs.Features opt Options ci *fs.ConfigInfo srv *rest.Client // the connection to the quatrix server pacer *fs.Pacer // pacer for API calls dirCache *dircache.DirCache uploadMemoryManager *UploadMemoryManager } // Object describes a quatrix object type Object struct { fs *Fs remote string size int64 modTime time.Time id string hasMetaData bool obType string } // trimPath trims redundant slashes from quatrix 'url' func trimPath(path string) (root string) { root = strings.Trim(path, "/") return } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. 
It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } // http client client := fshttp.NewClient(ctx) // since transport is a global variable that is initialized only once (due to sync.Once) // we need to reset it to have correct transport per each client (with proper values extracted from rclone config) client.Transport = fshttp.NewTransportCustom(ctx, nil) root = trimPath(root) ci := fs.GetConfig(ctx) f := &Fs{ name: name, description: "Quatrix FS for account " + opt.Host, root: root, opt: *opt, ci: ci, srv: rest.NewClient(client).SetRoot(fmt.Sprintf(rootURL, opt.Host)), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } f.features = (&fs.Features{ CaseInsensitive: false, CanHaveEmptyDirectories: true, PartialUploads: true, }).Fill(ctx, f) if f.opt.APIKey != "" { f.srv.SetHeader("Authorization", "Bearer "+f.opt.APIKey) } f.uploadMemoryManager = NewUploadMemoryManager(f.ci, &f.opt) // get quatrix root(home) id rootID, found, err := f.fileID(ctx, "", "") if err != nil { return nil, err } if !found { return nil, errors.New("root not found") } f.dirCache = dircache.New(root, rootID.FileID, f) err = f.dirCache.FindRoot(ctx, false) if err != nil { fileID, found, err := f.fileID(ctx, "", root) if err != nil { return nil, fmt.Errorf("find root %s: %w", root, err) } if !found { return f, nil } if fileID.IsFile() { root, _ = dircache.SplitPath(root) f.dirCache = dircache.New(root, rootID.FileID, f) // Correct root if 
definitely pointing to a file f.root = path.Dir(f.root) if f.root == "." || f.root == "/" { f.root = "" } return f, fs.ErrorIsFile } } return f, nil } // fileID gets id, parent and type of path in given parentID func (f *Fs) fileID(ctx context.Context, parentID, path string) (result *api.FileInfo, found bool, err error) { opts := rest.Opts{ Method: "POST", Path: "file/id", IgnoreStatus: true, } payload := api.FileInfoParams{ Path: f.opt.Enc.FromStandardPath(path), ParentID: parentID, } result = &api.FileInfo{} err = f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, payload, result) if resp != nil && resp.StatusCode == http.StatusNotFound { return false, nil } return shouldRetry(ctx, resp, err) }) if err != nil { return nil, false, fmt.Errorf("failed to get file id: %w", err) } if result.FileID == "" { return nil, false, nil } return result, true, nil } // FindLeaf finds a directory of name leaf in the folder with ID pathID func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (folderID string, found bool, err error) { result, found, err := f.fileID(ctx, pathID, leaf) if err != nil { return "", false, fmt.Errorf("find leaf: %w", err) } if !found { return "", false, nil } if result.IsFile() { return "", false, nil } return result.FileID, true, nil } // createDir creates directory in pathID with name leaf // // resolve - if true will resolve name conflict on server side, if false - will return error if object with this name exists func (f *Fs) createDir(ctx context.Context, pathID, leaf string, resolve bool) (newDir *api.File, err error) { opts := rest.Opts{ Method: "POST", Path: "file/makedir", } payload := api.CreateDirParams{ Name: f.opt.Enc.FromStandardName(leaf), Target: pathID, Resolve: resolve, } newDir = &api.File{} err = f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, payload, newDir) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("failed to create directory: %w", 
err) } return } // CreateDir makes a directory with pathID as parent and name leaf func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (dirID string, err error) { dir, err := f.createDir(ctx, pathID, leaf, false) if err != nil { return "", err } return dir.ID, nil } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return f.description + " at " + f.root } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return time.Microsecond } // Hashes returns the supported hash sets. func (f *Fs) Hashes() hash.Set { return 0 } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } folder, err := f.metadata(ctx, directoryID, true) if err != nil { return nil, err } for _, file := range folder.Content { if f.skipFile(&file) { continue } remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name)) if file.IsDir() { f.dirCache.Put(remote, file.ID) d := fs.NewDir(remote, time.Time(file.Modified)).SetID(file.ID).SetItems(file.Size) // FIXME more info from dir? 
entries = append(entries, d) } else { o := &Object{ fs: f, remote: remote, } err = o.setMetaData(&file) if err != nil { fs.Debugf(file, "failed to set object metadata: %s", err) } entries = append(entries, o) } } return entries, nil } func (f *Fs) skipFile(file *api.File) bool { return f.opt.SkipProjectFolders && file.IsProjectFolder() } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(ctx, remote, nil) } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Returns the object, leaf, directoryID and error. // // Used to create new objects func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, leaf string, directoryID string, err error) { // Create the directory for the object if it doesn't exist leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true) if err != nil { return } // Temporary Object under construction o = &Object{ fs: f, remote: remote, } return o, leaf, directoryID, nil } // Put the object into the container // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() mtime := src.ModTime(ctx) o := &Object{ fs: f, remote: remote, size: size, modTime: mtime, } return o, o.Update(ctx, in, src, options...) 
} func (f *Fs) rootSlash() string { if f.root == "" { return f.root } return f.root + "/" } func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } var err error if info != nil { // Set info err = o.setMetaData(info) } else { err = o.readMetaData(ctx) // reads info and meta, returning an error } if err != nil { return nil, err } return o, nil } // setMetaData sets the metadata from info func (o *Object) setMetaData(info *api.File) (err error) { if info.IsDir() { fs.Debugf(o, "%q is %q", o.remote, info.Type) return fs.ErrorIsDir } if !info.IsFile() { fs.Debugf(o, "%q is %q", o.remote, info.Type) return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile) } o.size = info.Size o.modTime = time.Time(info.ModifiedMS) o.id = info.ID o.hasMetaData = true o.obType = info.Type return nil } func (o *Object) readMetaData(ctx context.Context) (err error) { if o.hasMetaData { return nil } leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false) if err != nil { if err == fs.ErrorDirNotFound { return fs.ErrorObjectNotFound } return err } file, found, err := o.fs.fileID(ctx, directoryID, leaf) if err != nil { return fmt.Errorf("read metadata: fileID: %w", err) } if !found { fs.Debugf(nil, "object not found: remote %s: directory %s: leaf %s", o.remote, directoryID, leaf) return fs.ErrorObjectNotFound } result, err := o.fs.metadata(ctx, file.FileID, false) if err != nil { return fmt.Errorf("get file metadata: %w", err) } return o.setMetaData(result) } // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, err := f.dirCache.FindDir(ctx, dir, true) return err } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) } // DirCacheFlush resets the directory cache - used in testing as an // optional 
interface func (f *Fs) DirCacheFlush() { f.dirCache.ResetRoot() } func (f *Fs) metadata(ctx context.Context, id string, withContent bool) (result *api.File, err error) { parameters := url.Values{} if !withContent { parameters.Add("content", "0") } opts := rest.Opts{ Method: "GET", Path: path.Join("file/metadata", id), Parameters: parameters, } result = &api.File{} var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, result) return shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { return nil, fs.ErrorObjectNotFound } return nil, fmt.Errorf("failed to get file metadata: %w", err) } return result, nil } func (f *Fs) setMTime(ctx context.Context, id string, t time.Time) (result *api.File, err error) { opts := rest.Opts{ Method: "POST", Path: "file/metadata", } params := &api.SetMTimeParams{ ID: id, MTime: api.JSONTime(t), } result = &api.File{} var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, params, result) return shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { return nil, fs.ErrorObjectNotFound } return nil, fmt.Errorf("failed to set file metadata: %w", err) } return result, nil } func (f *Fs) deleteObject(ctx context.Context, id string) error { payload := &api.DeleteParams{ IDs: []string{id}, DeletePermanently: f.opt.HardDelete, } result := &api.IDList{} opts := rest.Opts{ Method: "POST", Path: "file/delete", } err := f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, payload, result) return shouldRetry(ctx, resp, err) }) if err != nil { return err } if slices.Contains(result.IDs, id) { return nil } return fmt.Errorf("file %s was not deleted successfully", id) } func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root 
directory") } rootID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return err } if check { file, err := f.metadata(ctx, rootID, false) if err != nil { return err } if file.IsFile() { return fs.ErrorIsFile } if file.Size != 0 { return fs.ErrorDirectoryNotEmpty } } err = f.deleteObject(ctx, rootID) if err != nil { return err } f.dirCache.FlushDir(dir) return nil } // Purge deletes all the files in the directory // // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, false) } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } if srcObj.fs == f { srcPath := srcObj.rootPath() dstPath := f.rootPath(remote) if srcPath == dstPath { return nil, fmt.Errorf("can't copy %q -> %q as they are same", srcPath, dstPath) } } err := srcObj.readMetaData(ctx) if err != nil { fs.Debugf(srcObj, "read metadata for %s: %s", srcObj.rootPath(), err) return nil, err } _, _, err = srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false) if err != nil { return nil, err } dstObj, dstLeaf, directoryID, err := f.createObject(ctx, remote) if err != nil { fs.Debugf(srcObj, "create empty object for %s: %s", dstObj.rootPath(), err) return nil, err } opts := rest.Opts{ Method: "POST", Path: "file/copyone", } params := &api.FileCopyMoveOneParams{ ID: srcObj.id, Target: directoryID, Resolve: true, MTime: api.JSONTime(srcObj.ModTime(ctx)), Name: dstLeaf, ResolveMode: 
api.OverwriteMode, } result := &api.File{} var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, params, result) return shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { return nil, fs.ErrorObjectNotFound } return nil, fmt.Errorf("failed to copy: %w", err) } err = dstObj.setMetaData(result) if err != nil { return nil, err } return dstObj, nil } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } _, _, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false) if err != nil { return nil, err } // Create temporary object dstObj, dstLeaf, directoryID, err := f.createObject(ctx, remote) if err != nil { return nil, err } opts := rest.Opts{ Method: "POST", Path: "file/moveone", } params := &api.FileCopyMoveOneParams{ ID: srcObj.id, Target: directoryID, Resolve: true, MTime: api.JSONTime(srcObj.ModTime(ctx)), Name: dstLeaf, ResolveMode: api.OverwriteMode, } var resp *http.Response result := &api.File{} err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, params, result) return shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { return nil, fs.ErrorObjectNotFound } return nil, fmt.Errorf("failed to move: %w", err) } err = dstObj.setMetaData(result) if err != nil { return nil, err } return dstObj, nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) if err != nil { return err } srcInfo, err := f.metadata(ctx, srcID, false) if err != nil { return err } opts := rest.Opts{ Method: "POST", Path: "file/moveone", } params := &api.FileCopyMoveOneParams{ ID: srcID, Target: dstDirectoryID, Resolve: false, MTime: srcInfo.ModifiedMS, Name: dstLeaf, } var resp *http.Response result := &api.File{} err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, params, result) return shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { return fs.ErrorObjectNotFound } return fmt.Errorf("failed to move dir: %w", err) } srcFs.dirCache.FlushDir(srcRemote) return nil } // About gets quota information func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { opts := rest.Opts{ Method: "GET", Path: "profile/info", } var ( user api.ProfileInfo resp *http.Response ) err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &user) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("failed to read profile info: %w", err) } free := user.AccLimit - user.UserUsed if user.UserLimit > unlimitedUserQuota { free = user.UserLimit - user.UserUsed } usage = &fs.Usage{ Used: fs.NewUsageValue(user.UserUsed), // bytes in use Total: fs.NewUsageValue(user.AccLimit), // bytes total Free: fs.NewUsageValue(free), // bytes free } return usage, nil } // Fs return the parent Fs func (o *Object) Fs() fs.Info { 
return o.fs } // String returns object remote path func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // rootPath returns a path for use in server given a remote func (f *Fs) rootPath(remote string) string { return f.rootSlash() + remote } // rootPath returns a path for use in local functions func (o *Object) rootPath() string { return o.fs.rootPath(o.remote) } // Size returns the size of an object in bytes func (o *Object) Size() int64 { err := o.readMetaData(context.TODO()) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return 0 } return o.size } // ModTime returns the modification time of the object func (o *Object) ModTime(ctx context.Context) time.Time { err := o.readMetaData(ctx) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return time.Now() } return o.modTime } // Storable returns a boolean showing whether this object storable func (o *Object) Storable() bool { return true } // ID returns the ID of the Object if known, or "" if not func (o *Object) ID() string { return o.id } // Hash returns the SHA-1 of an object. Not supported yet. 
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) { return "", nil } // Remove an object func (o *Object) Remove(ctx context.Context) error { err := o.fs.deleteObject(ctx, o.id) if err != nil { return err } if o.obType != "F" { o.fs.dirCache.FlushDir(o.remote) } return nil } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { if o.id == "" { return nil, errors.New("can't download - no id") } linkID, err := o.fs.downloadLink(ctx, o.id) if err != nil { return nil, err } fs.FixRangeOption(options, o.size) opts := rest.Opts{ Method: "GET", Path: "/file/download/" + linkID, Options: options, } var resp *http.Response err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } return resp.Body, err } func (f *Fs) downloadLink(ctx context.Context, id string) (linkID string, err error) { linkParams := &api.IDList{ IDs: []string{id}, } opts := rest.Opts{ Method: "POST", Path: "file/download-link", } var resp *http.Response link := &api.DownloadLinkResponse{} err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, linkParams, &link) return shouldRetry(ctx, resp, err) }) if err != nil { return "", err } return link.ID, nil } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, t time.Time) error { file, err := o.fs.setMTime(ctx, o.id, t) if err != nil { return fmt.Errorf("set mtime: %w", err) } return o.setMetaData(file) } // Update the object with the contents of the io.Reader, modTime and size // // The new object may have been created if an error is returned func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { size := src.Size() modTime := src.ModTime(ctx) remote := o.Remote() // Create the directory for the object if it doesn't exist 
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true) if err != nil { return err } uploadSession, err := o.uploadSession(ctx, directoryID, leaf) if err != nil { return fmt.Errorf("object update: %w", err) } o.id = uploadSession.FileID defer func() { if err == nil { return } deleteErr := o.fs.deleteObject(ctx, o.id) if deleteErr != nil { fs.Logf(o.remote, "remove: %s", deleteErr) } }() return o.dynamicUpload(ctx, size, modTime, in, uploadSession, options...) } // dynamicUpload uploads object in chunks, which are being dynamically recalculated on each iteration // depending on upload speed in order to make upload faster func (o *Object) dynamicUpload(ctx context.Context, size int64, modTime time.Time, in io.Reader, uploadSession *api.UploadLinkResponse, options ...fs.OpenOption) error { var ( speed float64 localChunk int64 ) defer o.fs.uploadMemoryManager.Return(o.id) for offset := int64(0); offset < size; offset += localChunk { localChunk = o.fs.uploadMemoryManager.Consume(o.id, size-offset, speed) rw := multipart.NewRW() _, err := io.CopyN(rw, in, localChunk) if err != nil { return fmt.Errorf("read chunk with offset %d size %d: %w", offset, localChunk, err) } start := time.Now() err = o.upload(ctx, uploadSession.UploadKey, rw, size, offset, localChunk, options...) 
if err != nil { return fmt.Errorf("upload chunk with offset %d size %d: %w", offset, localChunk, err) } speed = float64(localChunk) / (float64(time.Since(start)) / 1e9) } o.fs.uploadMemoryManager.Return(o.id) finalizeResult, err := o.finalize(ctx, uploadSession.UploadKey, modTime) if err != nil { return fmt.Errorf("upload %s finalize: %w", uploadSession.UploadKey, err) } if size >= 0 && finalizeResult.FileSize != size { return fmt.Errorf("expected size %d, got %d", size, finalizeResult.FileSize) } o.size = size o.modTime = modTime return nil } func (f *Fs) uploadLink(ctx context.Context, parentID, name string) (upload *api.UploadLinkResponse, err error) { opts := rest.Opts{ Method: "POST", Path: "upload/link", } payload := api.UploadLinkParams{ Name: name, ParentID: parentID, Resolve: false, } err = f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, &payload, &upload) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("failed to get upload link: %w", err) } return upload, nil } func (f *Fs) modifyLink(ctx context.Context, fileID string) (upload *api.UploadLinkResponse, err error) { opts := rest.Opts{ Method: "POST", Path: "file/modify", } payload := api.FileModifyParams{ ID: fileID, Truncate: 0, } err = f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, &payload, &upload) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("failed to get modify link: %w", err) } return upload, nil } func (o *Object) uploadSession(ctx context.Context, parentID, name string) (upload *api.UploadLinkResponse, err error) { encName := o.fs.opt.Enc.FromStandardName(name) fileID, found, err := o.fs.fileID(ctx, parentID, encName) if err != nil { return nil, fmt.Errorf("get file_id: %w", err) } if found { return o.fs.modifyLink(ctx, fileID.FileID) } return o.fs.uploadLink(ctx, parentID, encName) } func (o *Object) upload(ctx context.Context, uploadKey string, chunk io.Reader, fullSize int64, 
offset int64, chunkSize int64, options ...fs.OpenOption) (err error) { opts := rest.Opts{ Method: "POST", RootURL: fmt.Sprintf(uploadURL, o.fs.opt.Host) + uploadKey, Body: chunk, ContentLength: &chunkSize, ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, fullSize), Options: options, } var fileID string err = o.fs.pacer.Call(func() (bool, error) { resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &fileID) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("failed to get upload chunk: %w", err) } return nil } func (o *Object) finalize(ctx context.Context, uploadKey string, mtime time.Time) (result *api.UploadFinalizeResponse, err error) { queryParams := url.Values{} queryParams.Add("mtime", strconv.FormatFloat(float64(mtime.UTC().UnixNano())/1e9, 'f', 6, 64)) opts := rest.Opts{ Method: "GET", Path: path.Join("upload/finalize", uploadKey), Parameters: queryParams, } result = &api.UploadFinalizeResponse{} err = o.fs.pacer.Call(func() (bool, error) { resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, result) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("failed to finalize: %w", err) } return result, nil } // Check the interfaces are satisfied var ( _ fs.Fs = (*Fs)(nil) _ fs.Purger = (*Fs)(nil) _ fs.Copier = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil) _ fs.Mover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil) _ dircache.DirCacher = (*Fs)(nil) _ fs.DirCacheFlusher = (*Fs)(nil) _ fs.Object = (*Object)(nil) _ fs.IDer = (*Object)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/quatrix/upload_memory.go
backend/quatrix/upload_memory.go
package quatrix

import (
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
)

// UploadMemoryManager dynamically calculates every chunk size for the transfer and increases or decreases it
// depending on the upload speed. This makes general upload time smaller, because transfers that are faster
// does not have to wait for the slower ones until they finish upload.
type UploadMemoryManager struct {
	m              sync.Mutex // guards shared and fileUsage
	useDynamicSize bool       // false when no shared pool fits; chunk size is then fixed at 'reserved'
	shared         int64      // pool of bytes available for borrowing beyond the per-file reservation
	reserved       int64      // minimal chunk size guaranteed to every file
	effectiveTime  time.Duration // target upload duration for a single chunk
	fileUsage      map[string]int64 // bytes currently borrowed from 'shared', keyed by file ID
}

// NewUploadMemoryManager is a constructor for UploadMemoryManager
func NewUploadMemoryManager(ci *fs.ConfigInfo, opt *Options) *UploadMemoryManager {
	useDynamicSize := true

	// The shared pool is what remains after reserving the minimal chunk size
	// for each of the configured parallel transfers. If nothing remains,
	// dynamic sizing is disabled entirely.
	sharedMemory := int64(opt.MaximalSummaryChunkSize) - int64(opt.MinimalChunkSize)*int64(ci.Transfers)
	if sharedMemory <= 0 {
		sharedMemory = 0
		useDynamicSize = false
	}

	return &UploadMemoryManager{
		useDynamicSize: useDynamicSize,
		shared:         sharedMemory,
		reserved:       int64(opt.MinimalChunkSize),
		effectiveTime:  time.Duration(opt.EffectiveUploadTime),
		fileUsage:      map[string]int64{},
	}
}

// Consume -- decide amount of memory to consume
func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed float64) int64 {
	// Static mode: always hand out the reserved size (or less, for the tail).
	if !u.useDynamicSize {
		if neededMemory < u.reserved {
			return neededMemory
		}

		return u.reserved
	}

	u.m.Lock()
	defer u.m.Unlock()

	// First give back whatever this file borrowed for its previous chunk, so
	// the new chunk size is computed against the full pool.
	borrowed, found := u.fileUsage[fileID]
	if found {
		u.shared += borrowed
		borrowed = 0
	}

	// Record the final borrow amount on every exit path.
	defer func() { u.fileUsage[fileID] = borrowed }()

	// Aim for a chunk that takes ~effectiveTime to upload at the observed
	// speed, clamped between the reserved minimum and what is actually needed.
	effectiveChunkSize := min(neededMemory, max(int64(speed*u.effectiveTime.Seconds()), u.reserved))

	if effectiveChunkSize <= u.reserved {
		return effectiveChunkSize
	}

	toBorrow := effectiveChunkSize - u.reserved

	if toBorrow <= u.shared {
		u.shared -= toBorrow
		borrowed = toBorrow

		return effectiveChunkSize
	}

	// Not enough shared memory for the ideal chunk - take everything left.
	borrowed = u.shared
	u.shared = 0

	return borrowed + u.reserved
}

// Return returns consumed memory for the previous chunk upload to the memory pool
func (u *UploadMemoryManager) Return(fileID string) {
	if !u.useDynamicSize {
		return
	}

	u.m.Lock()
	defer u.m.Unlock()

	// Safe to call repeatedly: after the first call the fileID entry is gone.
	borrowed, found := u.fileUsage[fileID]
	if !found {
		return
	}

	u.shared += borrowed

	delete(u.fileUsage, fileID)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/quatrix/quatrix_test.go
backend/quatrix/quatrix_test.go
// Test Quatrix filesystem interface package quatrix_test import ( "testing" "github.com/rclone/rclone/backend/quatrix" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestQuatrix:", NilObject: (*quatrix.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/quatrix/api/types.go
backend/quatrix/api/types.go
// Package api provides types used by the Quatrix API.
package api

import (
	"strconv"
	"time"
)

// OverwriteMode is a conflict resolve mode during copy or move. Files with conflicting names will be overwritten
const OverwriteMode = "overwrite"

// ProfileInfo is a profile info about quota
type ProfileInfo struct {
	UserUsed  int64 `json:"user_used"`
	UserLimit int64 `json:"user_limit"`
	AccUsed   int64 `json:"acc_used"`
	AccLimit  int64 `json:"acc_limit"`
}

// IDList is a general object that contains list of ids
type IDList struct {
	IDs []string `json:"ids"`
}

// DeleteParams is the request to delete object
type DeleteParams struct {
	IDs               []string `json:"ids"`
	DeletePermanently bool     `json:"delete_permanently"`
}

// FileInfoParams is the request to get object's (file or directory) info
type FileInfoParams struct {
	ParentID string `json:"parent_id,omitempty"`
	Path     string `json:"path"`
}

// FileInfo is the response to get object's (file or directory) info
type FileInfo struct {
	FileID   string `json:"file_id"`
	ParentID string `json:"parent_id"`
	Src      string `json:"src"`
	// Type is a single-letter object kind code - see IsFile/IsDir below.
	Type string `json:"type"`
}

// IsFile returns true if object is a file ("F" type code),
// false otherwise. Safe to call on a nil receiver.
func (fi *FileInfo) IsFile() bool {
	if fi == nil {
		return false
	}

	return fi.Type == "F"
}

// IsDir returns true if object is a directory,
// false otherwise. "D", "S" and "T" type codes all behave as directories
// ("S" is a project folder - see File.IsProjectFolder; "T" presumably a
// shared/team folder - TODO confirm against the API docs).
// Safe to call on a nil receiver.
func (fi *FileInfo) IsDir() bool {
	if fi == nil {
		return false
	}

	return fi.Type == "D" || fi.Type == "S" || fi.Type == "T"
}

// CreateDirParams is the request to create a directory
type CreateDirParams struct {
	Target  string `json:"target,omitempty"`
	Name    string `json:"name"`
	Resolve bool   `json:"resolve"`
}

// File represent metadata about object in Quatrix (file or directory)
type File struct {
	ID         string   `json:"id"`
	Created    JSONTime `json:"created"`
	Modified   JSONTime `json:"modified"`
	Name       string   `json:"name"`
	ParentID   string   `json:"parent_id"`
	Size       int64    `json:"size"`
	ModifiedMS JSONTime `json:"modified_ms"`
	Type       string   `json:"type"`
	Operations int      `json:"operations"`
	SubType    string   `json:"sub_type"`
	Content    []File   `json:"content"`
}

// IsFile returns true if object is a file ("F" type code),
// false otherwise. Safe to call on a nil receiver.
func (f *File) IsFile() bool {
	if f == nil {
		return false
	}

	return f.Type == "F"
}

// IsDir returns true if object is a directory ("D", "S" or "T" type code),
// false otherwise. Safe to call on a nil receiver.
func (f *File) IsDir() bool {
	if f == nil {
		return false
	}

	return f.Type == "D" || f.Type == "S" || f.Type == "T"
}

// IsProjectFolder returns true if object is a project folder ("S" type code),
// false otherwise. Safe to call on a nil receiver.
func (f *File) IsProjectFolder() bool {
	if f == nil {
		return false
	}

	return f.Type == "S"
}

// SetMTimeParams is the request to set modification time for object
type SetMTimeParams struct {
	ID    string   `json:"id,omitempty"`
	MTime JSONTime `json:"mtime"`
}

// JSONTime provides methods to marshal/unmarshal time.Time as Unix time
type JSONTime time.Time

// MarshalJSON returns time representation in Unix time
// (fractional seconds with microsecond precision, e.g. 1700000000.123456).
func (u JSONTime) MarshalJSON() ([]byte, error) {
	return []byte(strconv.FormatFloat(float64(time.Time(u).UTC().UnixNano())/1e9, 'f', 6, 64)), nil
}

// UnmarshalJSON sets time from Unix time representation
// (fractional seconds; sub-nanosecond precision is lost in the float conversion).
func (u *JSONTime) UnmarshalJSON(data []byte) error {
	f, err := strconv.ParseFloat(string(data), 64)
	if err != nil {
		return err
	}

	t := JSONTime(time.Unix(0, int64(f*1e9)))
	*u = t

	return nil
}

// String returns Unix time representation of time as string
// (whole seconds only, unlike MarshalJSON).
func (u JSONTime) String() string {
	return strconv.FormatInt(time.Time(u).UTC().Unix(), 10)
}

// DownloadLinkResponse is the response to download-link request
type DownloadLinkResponse struct {
	ID string `json:"id"`
}

// UploadLinkParams is the request to get upload-link
type UploadLinkParams struct {
	Name     string `json:"name"`
	ParentID string `json:"parent_id"`
	Resolve  bool   `json:"resolve"`
}

// UploadLinkResponse is the response to upload-link request
type UploadLinkResponse struct {
	Name      string `json:"name"`
	FileID    string `json:"file_id"`
	ParentID  string `json:"parent_id"`
	UploadKey string `json:"upload_key"`
}

// UploadFinalizeResponse is the response to finalize file method
type UploadFinalizeResponse struct {
	FileID   string `json:"id"`
	ParentID string `json:"parent_id"`
	Modified int64  `json:"modified"`
	FileSize int64  `json:"size"`
}

// FileModifyParams is the request to get modify file link
type FileModifyParams struct {
	ID       string `json:"id"`
	Truncate int64  `json:"truncate"`
}

// FileCopyMoveOneParams is the request to do server-side copy and move
// can be used for file or directory
type FileCopyMoveOneParams struct {
	ID          string   `json:"file_id"`
	Target      string   `json:"target_id"`
	Name        string   `json:"name"`
	MTime       JSONTime `json:"mtime"`
	Resolve     bool     `json:"resolve"`
	ResolveMode string   `json:"resolve_mode"`
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/oracleobjectstorage/oracleobjectstorage_test.go
backend/oracleobjectstorage/oracleobjectstorage_test.go
//go:build !plan9 && !solaris && !js package oracleobjectstorage import ( "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestOracleObjectStorage:", TiersToTest: []string{"standard", "archive"}, NilObject: (*Object)(nil), ChunkedUpload: fstests.ChunkedUploadConfig{ MinChunkSize: minChunkSize, }, }) } func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadChunkSize(cs) } func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadCutoff(cs) } func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setCopyCutoff(cs) } var ( _ fstests.SetUploadChunkSizer = (*Fs)(nil) _ fstests.SetUploadCutoffer = (*Fs)(nil) _ fstests.SetCopyCutoffer = (*Fs)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/oracleobjectstorage/waiter.go
backend/oracleobjectstorage/waiter.go
//go:build !plan9 && !solaris && !js

package oracleobjectstorage

import (
	"context"
	"fmt"
	"slices"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
)

// refreshGracePeriod is how long we keep listening for a late in-flight
// Refresh result after the main Timeout has fired.
var refreshGracePeriod = 30 * time.Second

// StateRefreshFunc is a function type used for StateChangeConf that is
// responsible for refreshing the item being watched for a state change.
//
// It returns three results. `result` is any object that will be returned
// as the final object after waiting for state change. This allows you to
// return the final updated object, for example an EC2 instance after refreshing
// it. A nil result represents not found.
//
// `state` is the latest state of that object. And `err` is any error that
// may have happened while refreshing the state.
type StateRefreshFunc func() (result any, state string, err error)

// StateChangeConf is the configuration struct used for `WaitForState`.
type StateChangeConf struct {
	Delay          time.Duration    // Wait this time before starting checks
	Pending        []string         // States that are "allowed" and will continue trying
	Refresh        StateRefreshFunc // Refreshes the current state
	Target         []string         // Target state
	Timeout        time.Duration    // The amount of time to wait before timeout
	MinTimeout     time.Duration    // Smallest time to wait before refreshes
	PollInterval   time.Duration    // Override MinTimeout/backoff and only poll this often
	NotFoundChecks int              // Number of times to allow not found (nil result from Refresh)

	// This is to work around inconsistent APIs
	ContinuousTargetOccurrence int // Number of times the Target state has to occur continuously
}

// WaitForStateContext watches an object and waits for it to achieve the state
// specified in the configuration using the specified Refresh() func,
// waiting the number of seconds specified in the timeout configuration.
//
// If the Refresh function returns an error, exit immediately with that error.
//
// If the Refresh function returns a state other than the Target state or one
// listed in Pending, return immediately with an error.
//
// If the Timeout is exceeded before reaching the Target state, return an
// error.
//
// Otherwise, the result is the result of the first call to the Refresh function to
// reach the target state.
//
// Cancellation from the passed in context will cancel the refresh loop
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (any, error) {
	// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)

	notfoundTick := 0
	targetOccurrence := 0

	// Set a default for times to check for not found
	if conf.NotFoundChecks == 0 {
		conf.NotFoundChecks = 20
	}

	if conf.ContinuousTargetOccurrence == 0 {
		conf.ContinuousTargetOccurrence = 1
	}

	// Result is one observation from the refresh goroutine; Done marks the
	// terminal observation (target reached or fatal error).
	type Result struct {
		Result any
		State  string
		Error  error
		Done   bool
	}

	// Read every result from the refresh loop, waiting for a positive result.Done.
	resCh := make(chan Result, 1)
	// cancellation channel for the refresh loop
	cancelCh := make(chan struct{})

	result := Result{}

	go func() {
		defer close(resCh)

		select {
		case <-time.After(conf.Delay):
		case <-cancelCh:
			return
		}

		// start with 0 delay for the first loop
		var wait time.Duration

		for {
			// store the last result
			resCh <- result

			// wait and watch for cancellation
			select {
			case <-cancelCh:
				return
			case <-time.After(wait):
				// first round had no wait
				if wait == 0 {
					wait = 100 * time.Millisecond
				}
			}

			res, currentState, err := conf.Refresh()
			result = Result{
				Result: res,
				State:  currentState,
				Error:  err,
			}

			if err != nil {
				resCh <- result
				return
			}

			// If we're waiting for the absence of a thing, then return
			if res == nil && len(conf.Target) == 0 {
				targetOccurrence++
				if conf.ContinuousTargetOccurrence == targetOccurrence {
					result.Done = true
					resCh <- result
					return
				}
				continue
			}

			if res == nil {
				// If we didn't find the resource, check if we have been
				// not finding it for a while, and if so, report an error.
				notfoundTick++
				if notfoundTick > conf.NotFoundChecks {
					result.Error = &NotFoundError{
						LastError: err,
						Retries:   notfoundTick,
					}
					resCh <- result
					return
				}
			} else {
				// Reset the counter for when a resource isn't found
				notfoundTick = 0
				found := false

				for _, allowed := range conf.Target {
					if currentState == allowed {
						found = true
						targetOccurrence++
						if conf.ContinuousTargetOccurrence == targetOccurrence {
							result.Done = true
							resCh <- result
							return
						}
						continue
					}
				}

				if slices.Contains(conf.Pending, currentState) {
					found = true
					targetOccurrence = 0
				}

				// Note: only errors out on an unknown state when Pending is
				// non-empty; with an empty Pending list any state is tolerated.
				if !found && len(conf.Pending) > 0 {
					result.Error = &UnexpectedStateError{
						LastError:     err,
						State:         result.State,
						ExpectedState: conf.Target,
					}
					resCh <- result
					return
				}
			}

			// Wait between refreshes using exponential backoff, except when
			// waiting for the target state to reoccur.
			if targetOccurrence == 0 {
				wait *= 2
			}

			// If a poll interval has been specified, choose that interval.
			// Otherwise, bound the default value.
			if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
				wait = conf.PollInterval
			} else {
				if wait < conf.MinTimeout {
					wait = conf.MinTimeout
				} else if wait > 10*time.Second {
					wait = 10 * time.Second
				}
			}

			// fs.Debugf(entityType, "[TRACE] Waiting %s before next try", wait)
		}
	}()

	// store the last value result from the refresh loop
	lastResult := Result{}

	timeout := time.After(conf.Timeout)
	for {
		select {
		case r, ok := <-resCh:
			// channel closed, so return the last result
			if !ok {
				return lastResult.Result, lastResult.Error
			}

			// we reached the intended state
			if r.Done {
				return r.Result, r.Error
			}

			// still waiting, store the last result
			lastResult = r
		case <-ctx.Done():
			close(cancelCh)
			return nil, ctx.Err()
		case <-timeout:
			// fs.Debugf(entityType, "[WARN] WaitForState timeout after %s", conf.Timeout)
			// fs.Debugf(entityType, "[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)

			// cancel the goroutine and start our grace period timer
			close(cancelCh)
			timeout := time.After(refreshGracePeriod)

			// we need a for loop and a label to break on, because we may have
			// an extra response value to read, but still want to wait for the
			// channel to close.
		forSelect:
			for {
				select {
				case r, ok := <-resCh:
					if r.Done {
						// the last refresh loop reached the desired state
						return r.Result, r.Error
					}

					if !ok {
						// the goroutine returned
						break forSelect
					}

					// target state not reached, save the result for the
					// TimeoutError and wait for the channel to close
					lastResult = r
				case <-ctx.Done():
					fs.Errorf(entityType, "Context cancellation detected, abandoning grace period")
					break forSelect
				case <-timeout:
					fs.Errorf(entityType, "WaitForState exceeded refresh grace period")
					break forSelect
				}
			}

			return nil, &TimeoutError{
				LastError:     lastResult.Error,
				LastState:     lastResult.State,
				Timeout:       conf.Timeout,
				ExpectedState: conf.Target,
			}
		}
	}
}

// NotFoundError resource not found error
type NotFoundError struct {
	LastError    error
	LastRequest  any
	LastResponse any
	Message      string
	Retries      int
}

func (e *NotFoundError) Error() string {
	if e.Message != "" {
		return e.Message
	}

	if e.Retries > 0 {
		return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
	}

	return "couldn't find resource"
}

func (e *NotFoundError) Unwrap() error {
	return e.LastError
}

// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending
type UnexpectedStateError struct {
	LastError     error
	State         string
	ExpectedState []string
}

func (e *UnexpectedStateError) Error() string {
	return fmt.Sprintf(
		"unexpected state '%s', wanted target '%s'. last error: %s",
		e.State,
		strings.Join(e.ExpectedState, ", "),
		e.LastError,
	)
}

func (e *UnexpectedStateError) Unwrap() error {
	return e.LastError
}

// TimeoutError is returned when WaitForState times out
type TimeoutError struct {
	LastError     error
	LastState     string
	Timeout       time.Duration
	ExpectedState []string
}

func (e *TimeoutError) Error() string {
	expectedState := "resource to be gone"
	if len(e.ExpectedState) > 0 {
		expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
	}

	extraInfo := make([]string, 0)
	if e.LastState != "" {
		extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState))
	}
	if e.Timeout > 0 {
		extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String()))
	}

	suffix := ""
	if len(extraInfo) > 0 {
		suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", "))
	}

	if e.LastError != nil {
		return fmt.Sprintf("timeout while waiting for %s%s: %s",
			expectedState, suffix, e.LastError)
	}

	return fmt.Sprintf("timeout while waiting for %s%s",
		expectedState, suffix)
}

func (e *TimeoutError) Unwrap() error {
	return e.LastError
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/oracleobjectstorage/client.go
backend/oracleobjectstorage/client.go
//go:build !plan9 && !solaris && !js package oracleobjectstorage import ( "context" "crypto/rsa" "errors" "net/http" "os" "path" "strings" "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/common/auth" "github.com/oracle/oci-go-sdk/v65/objectstorage" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" ) func expandPath(filepath string) (expandedPath string) { if filepath == "" { return filepath } cleanedPath := path.Clean(filepath) expandedPath = cleanedPath if strings.HasPrefix(cleanedPath, "~") { rest := cleanedPath[2:] home, err := os.UserHomeDir() if err != nil { return expandedPath } expandedPath = path.Join(home, rest) } return } func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error) { switch opt.Provider { case instancePrincipal: return auth.InstancePrincipalConfigurationProvider() case userPrincipal: expandConfigFilePath := expandPath(opt.ConfigFile) if expandConfigFilePath != "" && !fileExists(expandConfigFilePath) { fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", expandConfigFilePath) } return common.CustomProfileConfigProvider(expandConfigFilePath, opt.ConfigProfile), nil case resourcePrincipal: return auth.ResourcePrincipalConfigurationProvider() case noAuth: fs.Infof("client", "using no auth provider") return getNoAuthConfiguration() case workloadIdentity: return auth.OkeWorkloadIdentityConfigurationProvider() default: } return common.DefaultConfigProvider(), nil } func newObjectStorageClient(ctx context.Context, opt *Options) (*objectstorage.ObjectStorageClient, error) { p, err := getConfigurationProvider(opt) if err != nil { return nil, err } client, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(p) if err != nil { fs.Errorf(opt.Provider, "failed to create object storage client, %v", err) return nil, err } if opt.Region != "" { client.SetRegion(opt.Region) } if opt.Endpoint != "" { client.Host = opt.Endpoint 
} modifyClient(ctx, opt, &client.BaseClient) return &client, err } func fileExists(filePath string) bool { if _, err := os.Stat(filePath); errors.Is(err, os.ErrNotExist) { return false } return true } func modifyClient(ctx context.Context, opt *Options, client *common.BaseClient) { client.HTTPClient = getHTTPClient(ctx) if opt.Provider == noAuth { client.Signer = getNoAuthSigner() } } // getClient makes http client according to the global options // this has rclone specific options support like dump headers, body etc. func getHTTPClient(ctx context.Context) *http.Client { return fshttp.NewClient(ctx) } var retryErrorCodes = []int{ 408, // Request Timeout 429, // Rate exceeded. 500, // Get occasional 500 Internal Server Error 503, // Service Unavailable 504, // Gateway Time-out } func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } // If this is an ocierr object, try and extract more useful information to determine if we should retry if ociError, ok := err.(common.ServiceError); ok { // Simple case, check the original embedded error in case it's generically retryable if fserrors.ShouldRetry(err) { return true, err } // If it is a timeout then we want to retry that if ociError.GetCode() == "RequestTimeout" { return true, err } } // Ok, not an oci error, check for generic failure conditions return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } func getNoAuthConfiguration() (common.ConfigurationProvider, error) { return &noAuthConfigurator{}, nil } func getNoAuthSigner() common.HTTPRequestSigner { return &noAuthSigner{} } type noAuthConfigurator struct { } type noAuthSigner struct { } func (n *noAuthSigner) Sign(*http.Request) error { return nil } func (n *noAuthConfigurator) PrivateRSAKey() (*rsa.PrivateKey, error) { return nil, nil } func (n *noAuthConfigurator) KeyID() (string, error) { return "", nil } func (n *noAuthConfigurator) 
TenancyOCID() (string, error) { return "", nil } func (n *noAuthConfigurator) UserOCID() (string, error) { return "", nil } func (n *noAuthConfigurator) KeyFingerprint() (string, error) { return "", nil } func (n *noAuthConfigurator) Region() (string, error) { return "", nil } func (n *noAuthConfigurator) AuthType() (common.AuthConfig, error) { return common.AuthConfig{ AuthType: common.UnknownAuthenticationType, IsFromConfigFile: false, OboToken: nil, }, nil } // Check the interfaces are satisfied var ( _ common.ConfigurationProvider = &noAuthConfigurator{} _ common.HTTPRequestSigner = &noAuthSigner{} )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/oracleobjectstorage/copy.go
backend/oracleobjectstorage/copy.go
//go:build !plan9 && !solaris && !js package oracleobjectstorage import ( "context" "fmt" "strings" "time" "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/objectstorage" "github.com/rclone/rclone/fs" ) // ------------------------------------------------------------ // Implement Copier is an optional interfaces for Fs //------------------------------------------------------------ // Copy src to this remote using server-side copy operations. // This is stored with the remote path given // It returns the destination Object and a possible error // Will only be called if src.Fs().Name() == f.Name() // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { // fs.Debugf(f, "copying %v to %v", src.Remote(), remote) srcObj, ok := src.(*Object) if !ok { // fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } // Temporary Object under construction dstObj := &Object{ fs: f, remote: remote, } err := f.copy(ctx, dstObj, srcObj) if err != nil { return nil, err } return f.NewObject(ctx, remote) } // copy does a server-side copy from dstObj <- srcObj // // If newInfo is nil then the metadata will be copied otherwise it // will be replaced with newInfo func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object) (err error) { srcBucket, srcPath := srcObj.split() dstBucket, dstPath := dstObj.split() if dstBucket != srcBucket { exists, err := f.bucketExists(ctx, dstBucket) if err != nil { return err } if !exists { err = f.makeBucket(ctx, dstBucket) if err != nil { return err } } } copyObjectDetails := objectstorage.CopyObjectDetails{ SourceObjectName: common.String(srcPath), DestinationRegion: common.String(dstObj.fs.opt.Region), DestinationNamespace: common.String(dstObj.fs.opt.Namespace), DestinationBucket: common.String(dstBucket), DestinationObjectName: common.String(dstPath), DestinationObjectMetadata: 
metadataWithOpcPrefix(srcObj.meta), } req := objectstorage.CopyObjectRequest{ NamespaceName: common.String(srcObj.fs.opt.Namespace), BucketName: common.String(srcBucket), CopyObjectDetails: copyObjectDetails, } useBYOKCopyObject(f, &req) var resp objectstorage.CopyObjectResponse err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CopyObject(ctx, req) return shouldRetry(ctx, resp.HTTPResponse(), err) }) if err != nil { return err } workRequestID := resp.OpcWorkRequestId timeout := time.Duration(f.opt.CopyTimeout) dstName := dstObj.String() // https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/copyingobjects.htm // To enable server side copy object, customers will have to // grant policy to objectstorage service to manage object-family // Allow service objectstorage-<region_identifier> to manage object-family in tenancy // Another option to avoid the policy is to download and reupload the file. // This download upload will work for maximum file size limit of 5GB err = copyObjectWaitForWorkRequest(ctx, workRequestID, dstName, timeout, f.srv) if err != nil { return err } return err } func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType string, timeout time.Duration, client *objectstorage.ObjectStorageClient) error { stateConf := &StateChangeConf{ Pending: []string{ string(objectstorage.WorkRequestStatusAccepted), string(objectstorage.WorkRequestStatusInProgress), string(objectstorage.WorkRequestStatusCanceling), }, Target: []string{ string(objectstorage.WorkRequestSummaryStatusCompleted), string(objectstorage.WorkRequestSummaryStatusCanceled), string(objectstorage.WorkRequestStatusFailed), }, Refresh: func() (any, string, error) { getWorkRequestRequest := objectstorage.GetWorkRequestRequest{} getWorkRequestRequest.WorkRequestId = wID workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest) wr := &workRequestResponse.WorkRequest return workRequestResponse, string(wr.Status), err }, Timeout: 
timeout, } wrr, e := stateConf.WaitForStateContext(ctx, entityType) if e != nil { return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, e) } wr := wrr.(objectstorage.GetWorkRequestResponse).WorkRequest if wr.Status == objectstorage.WorkRequestStatusFailed { errorMessage, _ := getObjectStorageErrorFromWorkRequest(ctx, wID, client) return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, errorMessage) } return nil } func getObjectStorageErrorFromWorkRequest(ctx context.Context, workRequestID *string, client *objectstorage.ObjectStorageClient) (string, error) { req := objectstorage.ListWorkRequestErrorsRequest{} req.WorkRequestId = workRequestID res, err := client.ListWorkRequestErrors(ctx, req) if err != nil { return "", err } allErrs := make([]string, 0) for _, errs := range res.Items { allErrs = append(allErrs, *errs.Message) } errorMessage := strings.Join(allErrs, "\n") return errorMessage, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/oracleobjectstorage/oracleobjectstorage_unsupported.go
backend/oracleobjectstorage/oracleobjectstorage_unsupported.go
// Build for oracleobjectstorage for unsupported platforms to stop go complaining // about "no buildable Go source files " //go:build plan9 || solaris || js // Package oracleobjectstorage provides an interface to the OCI object storage system. package oracleobjectstorage
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/oracleobjectstorage/options.go
backend/oracleobjectstorage/options.go
//go:build !plan9 && !solaris && !js package oracleobjectstorage import ( "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/lib/encoder" ) const ( maxSizeForCopy = 4768 * 1024 * 1024 maxUploadParts = 10000 defaultUploadConcurrency = 10 minChunkSize = fs.SizeSuffix(5 * 1024 * 1024) defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024) maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024) minSleep = 10 * time.Millisecond defaultCopyTimeoutDuration = fs.Duration(time.Minute) ) const ( userPrincipal = "user_principal_auth" instancePrincipal = "instance_principal_auth" resourcePrincipal = "resource_principal_auth" workloadIdentity = "workload_identity_auth" environmentAuth = "env_auth" noAuth = "no_auth" userPrincipalHelpText = `use an OCI user and an API key for authentication. you’ll need to put in a config file your tenancy OCID, user OCID, region, the path, fingerprint to an API key. https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm` instancePrincipalHelpText = `use instance principals to authorize an instance to make API calls. each instance has its own identity, and authenticates using the certificates that are read from instance metadata. https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm` workloadIdentityHelpText = `use workload identity to grant OCI Container Engine for Kubernetes workloads policy-driven access to OCI resources using OCI Identity and Access Management (IAM). 
https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm` resourcePrincipalHelpText = `use resource principals to make API calls` environmentAuthHelpText = `automatically pickup the credentials from runtime(env), first one to provide auth wins` noAuthHelpText = `no credentials needed, this is typically for reading public buckets` ) // Options defines the configuration for this backend type Options struct { Provider string `config:"provider"` Compartment string `config:"compartment"` Namespace string `config:"namespace"` Region string `config:"region"` Endpoint string `config:"endpoint"` Enc encoder.MultiEncoder `config:"encoding"` ConfigFile string `config:"config_file"` ConfigProfile string `config:"config_profile"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` ChunkSize fs.SizeSuffix `config:"chunk_size"` MaxUploadParts int `config:"max_upload_parts"` UploadConcurrency int `config:"upload_concurrency"` DisableChecksum bool `config:"disable_checksum"` CopyCutoff fs.SizeSuffix `config:"copy_cutoff"` CopyTimeout fs.Duration `config:"copy_timeout"` StorageTier string `config:"storage_tier"` LeavePartsOnError bool `config:"leave_parts_on_error"` AttemptResumeUpload bool `config:"attempt_resume_upload"` NoCheckBucket bool `config:"no_check_bucket"` SSEKMSKeyID string `config:"sse_kms_key_id"` SSECustomerAlgorithm string `config:"sse_customer_algorithm"` SSECustomerKey string `config:"sse_customer_key"` SSECustomerKeyFile string `config:"sse_customer_key_file"` SSECustomerKeySha256 string `config:"sse_customer_key_sha256"` } func newOptions() []fs.Option { return []fs.Option{{ Name: fs.ConfigProvider, Help: "Choose your Auth Provider", Required: true, Default: environmentAuth, Examples: []fs.OptionExample{{ Value: environmentAuth, Help: environmentAuthHelpText, }, { Value: userPrincipal, Help: userPrincipalHelpText, }, { Value: instancePrincipal, Help: instancePrincipalHelpText, }, { Value: workloadIdentity, Help: 
workloadIdentityHelpText, }, { Value: resourcePrincipal, Help: resourcePrincipalHelpText, }, { Value: noAuth, Help: noAuthHelpText, }}, }, { Name: "namespace", Help: "Object storage namespace", Required: true, Sensitive: true, }, { Name: "compartment", Help: "Specify compartment OCID, if you need to list buckets.\n\nList objects works without compartment OCID.", Provider: "!no_auth", Required: false, Sensitive: true, }, { Name: "region", Help: "Object storage Region", Required: true, }, { Name: "endpoint", Help: "Endpoint for Object storage API.\n\nLeave blank to use the default endpoint for the region.", Required: false, }, { Name: "config_file", Help: "Path to OCI config file", Provider: userPrincipal, Default: "~/.oci/config", Examples: []fs.OptionExample{{ Value: "~/.oci/config", Help: "oci configuration file location", }}, }, { Name: "config_profile", Help: "Profile name inside the oci config file", Provider: userPrincipal, Default: "Default", Examples: []fs.OptionExample{{ Value: "Default", Help: "Use the default profile", }}, }, { // Mapping from here: https://github.com/oracle/oci-go-sdk/blob/master/objectstorage/storage_tier.go Name: "storage_tier", Help: "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", Default: "Standard", Advanced: true, Examples: []fs.OptionExample{{ Value: "Standard", Help: "Standard storage tier, this is the default tier", }, { Value: "InfrequentAccess", Help: "InfrequentAccess storage tier", }, { Value: "Archive", Help: "Archive storage tier", }}, }, { Name: "upload_cutoff", Help: `Cutoff for switching to chunked upload. Any files larger than this will be uploaded in chunks of chunk_size. The minimum is 0 and the maximum is 5 GiB.`, Default: defaultUploadCutoff, Advanced: true, }, { Name: "chunk_size", Help: `Chunk size to use for uploading. When uploading files larger than upload_cutoff or files with unknown size (e.g. 
from "rclone rcat" or uploaded with "rclone mount" they will be uploaded as multipart uploads using this chunk size. Note that "upload_concurrency" chunks of this size are buffered in memory per transfer. If you are transferring large files over high-speed links and you have enough memory, then increasing this will speed up the transfers. Rclone will automatically increase the chunk size when uploading a large file of known size to stay below the 10,000 chunks limit. Files of unknown size are uploaded with the configured chunk_size. Since the default chunk size is 5 MiB and there can be at most 10,000 chunks, this means that by default the maximum size of a file you can stream upload is 48 GiB. If you wish to stream upload larger files then you will need to increase chunk_size. Increasing the chunk size decreases the accuracy of the progress statistics displayed with "-P" flag. `, Default: minChunkSize, Advanced: true, }, { Name: "max_upload_parts", Help: `Maximum number of parts in a multipart upload. This option defines the maximum number of multipart chunks to use when doing a multipart upload. OCI has max parts limit of 10,000 chunks. Rclone will automatically increase the chunk size when uploading a large file of a known size to stay below this number of chunks limit. `, Default: maxUploadParts, Advanced: true, }, { Name: "upload_concurrency", Help: `Concurrency for multipart uploads. This is the number of chunks of the same file that are uploaded concurrently. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers.`, Default: defaultUploadConcurrency, Advanced: true, }, { Name: "copy_cutoff", Help: `Cutoff for switching to multipart copy. Any files larger than this that need to be server-side copied will be copied in chunks of this size. 
The minimum is 0 and the maximum is 5 GiB.`, Default: fs.SizeSuffix(maxSizeForCopy), Advanced: true, }, { Name: "copy_timeout", Help: `Timeout for copy. Copy is an asynchronous operation, specify timeout to wait for copy to succeed `, Default: defaultCopyTimeoutDuration, Advanced: true, }, { Name: "disable_checksum", Help: `Don't store MD5 checksum with object metadata. Normally rclone will calculate the MD5 checksum of the input before uploading it so it can add it to metadata on the object. This is great for data integrity checking but can cause long delays for large files to start uploading.`, Default: false, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // Any UTF-8 character is valid in a key, however it can't handle // invalid UTF-8 and / have a special meaning. // // The SDK can't seem to handle uploading files called '. // - initial / encoding // - doubled / encoding // - trailing / encoding // so that OSS keys are always valid file names Default: encoder.EncodeInvalidUtf8 | encoder.EncodeSlash | encoder.EncodeDot, }, { Name: "leave_parts_on_error", Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. It should be set to true for resuming uploads across different sessions. WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add additional costs if not cleaned up. `, Default: false, Advanced: true, }, { Name: "attempt_resume_upload", Help: `If true attempt to resume previously started multipart upload for the object. This will be helpful to speed up multipart transfers by resuming uploads from past session. WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is aborted and a new multipart upload is started with the new chunk size. 
The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully. `, Default: false, Advanced: true, }, { Name: "no_check_bucket", Help: `If set, don't attempt to check the bucket exists or create it. This can be useful when trying to minimise the number of transactions rclone does if you know the bucket exists already. It can also be needed if the user you are using does not have bucket creation permissions. `, Default: false, Advanced: true, }, { Name: "sse_customer_key_file", Help: `To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated with the object. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.'`, Advanced: true, Examples: []fs.OptionExample{{ Value: "", Help: "None", }}, }, { Name: "sse_customer_key", Help: `To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to encrypt or decrypt the data. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed. For more information, see Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm)`, Advanced: true, Examples: []fs.OptionExample{{ Value: "", Help: "None", }}, }, { Name: "sse_customer_key_sha256", Help: `If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption key. This value is used to check the integrity of the encryption key. 
see Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).`, Advanced: true, Examples: []fs.OptionExample{{ Value: "", Help: "None", }}, }, { Name: "sse_kms_key_id", Help: `if using your own master key in vault, this header specifies the OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`, Advanced: true, Examples: []fs.OptionExample{{ Value: "", Help: "None", }}, }, { Name: "sse_customer_algorithm", Help: `If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. Object Storage supports "AES256" as the encryption algorithm. For more information, see Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).`, Advanced: true, Examples: []fs.OptionExample{{ Value: "", Help: "None", }, { Value: sseDefaultAlgorithm, Help: sseDefaultAlgorithm, }}, }} }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/oracleobjectstorage/byok.go
backend/oracleobjectstorage/byok.go
//go:build !plan9 && !solaris && !js package oracleobjectstorage import ( "crypto/sha256" "encoding/base64" "errors" "fmt" "os" "strings" "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/objectstorage" ) const ( sseDefaultAlgorithm = "AES256" ) func getSha256(p []byte) []byte { h := sha256.New() h.Write(p) return h.Sum(nil) } func validateSSECustomerKeyOptions(opt *Options) error { if opt.SSEKMSKeyID != "" && (opt.SSECustomerKeyFile != "" || opt.SSECustomerKey != "") { return errors.New("oos: can't use vault sse_kms_key_id and local sse_customer_key at the same time") } if opt.SSECustomerKey != "" && opt.SSECustomerKeyFile != "" { return errors.New("oos: can't use sse_customer_key and sse_customer_key_file at the same time") } if opt.SSEKMSKeyID != "" { return nil } err := populateSSECustomerKeys(opt) if err != nil { return err } return nil } func populateSSECustomerKeys(opt *Options) error { if opt.SSECustomerKeyFile != "" { // Reads the base64-encoded AES key data from the specified file and computes its SHA256 checksum data, err := os.ReadFile(expandPath(opt.SSECustomerKeyFile)) if err != nil { return fmt.Errorf("oos: error reading sse_customer_key_file: %v", err) } opt.SSECustomerKey = strings.TrimSpace(string(data)) } if opt.SSECustomerKey != "" { decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKey) if err != nil { return fmt.Errorf("oos: Could not decode sse_customer_key_file: %w", err) } sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded)) if opt.SSECustomerKeySha256 == "" { opt.SSECustomerKeySha256 = sha256Checksum } else if opt.SSECustomerKeySha256 != sha256Checksum { return fmt.Errorf("the computed SHA256 checksum "+ "(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)", sha256Checksum, opt.SSECustomerKeySha256) } if opt.SSECustomerAlgorithm == "" { opt.SSECustomerAlgorithm = sseDefaultAlgorithm } } return nil } // 
https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/usingyourencryptionkeys.htm func useBYOKPutObject(fs *Fs, request *objectstorage.PutObjectRequest) { if fs.opt.SSEKMSKeyID != "" { request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID) } if fs.opt.SSECustomerAlgorithm != "" { request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm) } if fs.opt.SSECustomerKey != "" { request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey) } if fs.opt.SSECustomerKeySha256 != "" { request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256) } } func useBYOKHeadObject(fs *Fs, request *objectstorage.HeadObjectRequest) { if fs.opt.SSECustomerAlgorithm != "" { request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm) } if fs.opt.SSECustomerKey != "" { request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey) } if fs.opt.SSECustomerKeySha256 != "" { request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256) } } func useBYOKGetObject(fs *Fs, request *objectstorage.GetObjectRequest) { if fs.opt.SSECustomerAlgorithm != "" { request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm) } if fs.opt.SSECustomerKey != "" { request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey) } if fs.opt.SSECustomerKeySha256 != "" { request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256) } } func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) { if fs.opt.SSEKMSKeyID != "" { request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID) } if fs.opt.SSECustomerAlgorithm != "" { request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm) } if fs.opt.SSECustomerKey != "" { request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey) } if fs.opt.SSECustomerKeySha256 != "" { request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/oracleobjectstorage/multipart.go
backend/oracleobjectstorage/multipart.go
//go:build !plan9 && !solaris && !js package oracleobjectstorage import ( "context" "crypto/md5" "encoding/base64" "encoding/hex" "fmt" "io" "strings" "sync" "time" "github.com/ncw/swift/v2" "github.com/rclone/rclone/lib/multipart" "github.com/rclone/rclone/lib/pool" "golang.org/x/net/http/httpguts" "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/objectstorage" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/chunksize" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/hash" ) var warnStreamUpload sync.Once // Info needed for an upload type uploadInfo struct { req *objectstorage.PutObjectRequest md5sumHex string } type objectChunkWriter struct { chunkSize int64 size int64 f *Fs bucket *string key *string uploadID *string partsToCommit []objectstorage.CommitMultipartUploadPartDetails partsToCommitMu sync.Mutex existingParts map[int]objectstorage.MultipartUploadPartSummary eTag string md5sMu sync.Mutex md5s []byte ui uploadInfo o *Object } func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error { _, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{ Open: o.fs, OpenOptions: options, }) return err } // OpenChunkWriter returns the chunk size and a ChunkWriter // // Pass in the remote and the src object // You can also use options to hint at the desired chunk size func (f *Fs) OpenChunkWriter( ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) { // Temporary Object under construction o := &Object{ fs: f, remote: remote, } ui, err := o.prepareUpload(ctx, src, options) if err != nil { return info, nil, fmt.Errorf("failed to prepare upload: %w", err) } uploadParts := f.opt.MaxUploadParts if uploadParts < 1 { uploadParts = 1 } else if uploadParts > maxUploadParts { uploadParts = maxUploadParts } size := src.Size() // calculate size of 
parts chunkSize := f.opt.ChunkSize // size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize // buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of // 48 GiB which seems like a not too unreasonable limit. if size == -1 { warnStreamUpload.Do(func() { fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v", f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts))) }) } else { chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize) } uploadID, existingParts, err := o.createMultipartUpload(ctx, ui.req) if err != nil { return info, nil, fmt.Errorf("create multipart upload request failed: %w", err) } bucketName, bucketPath := o.split() chunkWriter := &objectChunkWriter{ chunkSize: int64(chunkSize), size: size, f: f, bucket: &bucketName, key: &bucketPath, uploadID: &uploadID, existingParts: existingParts, ui: ui, o: o, } info = fs.ChunkWriterInfo{ ChunkSize: int64(chunkSize), Concurrency: o.fs.opt.UploadConcurrency, LeavePartsOnError: o.fs.opt.LeavePartsOnError, } fs.Debugf(o, "open chunk writer: started multipart upload: %v", uploadID) return info, chunkWriter, err } // WriteChunk will write chunk number with reader bytes, where chunk number >= 0 func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) { if chunkNumber < 0 { err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber) return -1, err } // Only account after the checksum reads have been done if do, ok := reader.(pool.DelayAccountinger); ok { // To figure out this number, do a transfer and if the accounted size is 0 or a // multiple of what it should be, increase or decrease this number. 
do.DelayAccounting(2) } m := md5.New() currentChunkSize, err := io.Copy(m, reader) if err != nil { return -1, err } // If no data read, don't write the chunk if currentChunkSize == 0 { return 0, nil } md5sumBinary := m.Sum([]byte{}) w.addMd5(&md5sumBinary, int64(chunkNumber)) md5sum := base64.StdEncoding.EncodeToString(md5sumBinary) // Object storage requires 1 <= PartNumber <= 10000 ossPartNumber := chunkNumber + 1 if existing, ok := w.existingParts[ossPartNumber]; ok { if md5sum == *existing.Md5 { fs.Debugf(w.o, "matched uploaded part found, part num %d, skipping part, md5=%v", *existing.PartNumber, md5sum) w.addCompletedPart(existing.PartNumber, existing.Etag) return currentChunkSize, nil } } req := objectstorage.UploadPartRequest{ NamespaceName: common.String(w.f.opt.Namespace), BucketName: w.bucket, ObjectName: w.key, UploadId: w.uploadID, UploadPartNum: common.Int(ossPartNumber), ContentLength: common.Int64(currentChunkSize), ContentMD5: common.String(md5sum), } w.o.applyPartUploadOptions(w.ui.req, &req) var resp objectstorage.UploadPartResponse err = w.f.pacer.Call(func() (bool, error) { // req.UploadPartBody = io.NopCloser(bytes.NewReader(buf)) // rewind the reader on retry and after reading md5 _, err = reader.Seek(0, io.SeekStart) if err != nil { return false, err } req.UploadPartBody = io.NopCloser(reader) resp, err = w.f.srv.UploadPart(ctx, req) if err != nil { if ossPartNumber <= 8 { return shouldRetry(ctx, resp.HTTPResponse(), err) } if fserrors.ContextError(ctx, &err) { return false, err } // retry all chunks once have done the first few return true, err } return false, err }) if err != nil { fs.Errorf(w.o, "multipart upload failed to upload part:%d err: %v", ossPartNumber, err) return -1, fmt.Errorf("multipart upload failed to upload part: %w", err) } w.addCompletedPart(&ossPartNumber, resp.ETag) return currentChunkSize, err } // add a part number and etag to the completed parts func (w *objectChunkWriter) addCompletedPart(partNum *int, eTag 
*string) { w.partsToCommitMu.Lock() defer w.partsToCommitMu.Unlock() w.partsToCommit = append(w.partsToCommit, objectstorage.CommitMultipartUploadPartDetails{ PartNum: partNum, Etag: eTag, }) } func (w *objectChunkWriter) Close(ctx context.Context) (err error) { req := objectstorage.CommitMultipartUploadRequest{ NamespaceName: common.String(w.f.opt.Namespace), BucketName: w.bucket, ObjectName: w.key, UploadId: w.uploadID, } req.PartsToCommit = w.partsToCommit var resp objectstorage.CommitMultipartUploadResponse err = w.f.pacer.Call(func() (bool, error) { resp, err = w.f.srv.CommitMultipartUpload(ctx, req) // if multipart is corrupted, we will abort the uploadId if isMultiPartUploadCorrupted(err) { fs.Debugf(w.o, "multipart uploadId %v is corrupted, aborting...", *w.uploadID) _ = w.Abort(ctx) return false, err } return shouldRetry(ctx, resp.HTTPResponse(), err) }) if err != nil { return err } w.eTag = *resp.ETag hashOfHashes := md5.Sum(w.md5s) wantMultipartMd5 := fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hashOfHashes[:]), len(w.partsToCommit)) gotMultipartMd5 := *resp.OpcMultipartMd5 if wantMultipartMd5 != gotMultipartMd5 { fs.Errorf(w.o, "multipart upload corrupted: multipart md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5) return fmt.Errorf("multipart upload corrupted: md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5) } fs.Debugf(w.o, "multipart upload %v md5 matched: expecting %s and got %s", *w.uploadID, wantMultipartMd5, gotMultipartMd5) return nil } func isMultiPartUploadCorrupted(err error) bool { if err == nil { return false } // Check if this oci-err object, and if it is multipart commit error if ociError, ok := err.(common.ServiceError); ok { // If it is a timeout then we want to retry that if ociError.GetCode() == "InvalidUploadPart" { return true } } return false } func (w *objectChunkWriter) Abort(ctx context.Context) error { fs.Debugf(w.o, "Cancelling multipart upload") err := 
w.o.fs.abortMultiPartUpload( ctx, w.bucket, w.key, w.uploadID) if err != nil { fs.Debugf(w.o, "Failed to cancel multipart upload: %v", err) } else { fs.Debugf(w.o, "canceled and aborted multipart upload: %v", *w.uploadID) } return err } // addMd5 adds a binary md5 to the md5 calculated so far func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) { w.md5sMu.Lock() defer w.md5sMu.Unlock() start := chunkNumber * md5.Size end := start + md5.Size if extend := end - int64(len(w.md5s)); extend > 0 { w.md5s = append(w.md5s, make([]byte, extend)...) } copy(w.md5s[start:end], (*md5binary)) } func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) { bucket, bucketPath := o.split() ui.req = &objectstorage.PutObjectRequest{ NamespaceName: common.String(o.fs.opt.Namespace), BucketName: common.String(bucket), ObjectName: common.String(bucketPath), } // Set the mtime in the metadata modTime := src.ModTime(ctx) // Fetch metadata if --metadata is in use meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options) if err != nil { return ui, fmt.Errorf("failed to read metadata from source object: %w", err) } ui.req.OpcMeta = make(map[string]string, len(meta)+2) // merge metadata into request and user metadata for k, v := range meta { pv := common.String(v) k = strings.ToLower(k) switch k { case "cache-control": ui.req.CacheControl = pv case "content-disposition": ui.req.ContentDisposition = pv case "content-encoding": ui.req.ContentEncoding = pv case "content-language": ui.req.ContentLanguage = pv case "content-type": ui.req.ContentType = pv case "tier": // ignore case "mtime": // mtime in meta overrides source ModTime metaModTime, err := time.Parse(time.RFC3339Nano, v) if err != nil { fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err) } else { modTime = metaModTime } case "btime": // write as metadata since we can't set it ui.req.OpcMeta[k] = v default: ui.req.OpcMeta[k] = v } } // Set 
the mtime in the metadata ui.req.OpcMeta[metaMtime] = swift.TimeToFloatString(modTime) // read the md5sum if available // - for non-multipart // - so we can add a ContentMD5 // - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C // - for multipart provided checksums aren't disabled // - so we can add the md5sum in the metadata as metaMD5Hash size := src.Size() isMultipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff) var md5sumBase64 string if !isMultipart || !o.fs.opt.DisableChecksum { ui.md5sumHex, err = src.Hash(ctx, hash.MD5) if err == nil && matchMd5.MatchString(ui.md5sumHex) { hashBytes, err := hex.DecodeString(ui.md5sumHex) if err == nil { md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes) if isMultipart && !o.fs.opt.DisableChecksum { // Set the md5sum as metadata on the object if // - a multipart upload // - the ETag is not an MD5, e.g. when using SSE/SSE-C // provided checksums aren't disabled ui.req.OpcMeta[metaMD5Hash] = md5sumBase64 } } } } // Set the content type if it isn't set already if ui.req.ContentType == nil { ui.req.ContentType = common.String(fs.MimeType(ctx, src)) } if size >= 0 { ui.req.ContentLength = common.Int64(size) } if md5sumBase64 != "" { ui.req.ContentMD5 = &md5sumBase64 } o.applyPutOptions(ui.req, options...) 
useBYOKPutObject(o.fs, ui.req) if o.fs.opt.StorageTier != "" { storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier) if !ok { return ui, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier) } ui.req.StorageTier = storageTier } // Check metadata keys and values are valid for key, value := range ui.req.OpcMeta { if !httpguts.ValidHeaderFieldName(key) { fs.Errorf(o, "Dropping invalid metadata key %q", key) delete(ui.req.OpcMeta, key) } else if value == "" { fs.Errorf(o, "Dropping nil metadata value for key %q", key) delete(ui.req.OpcMeta, key) } else if !httpguts.ValidHeaderFieldValue(value) { fs.Errorf(o, "Dropping invalid metadata value %q for key %q", value, key) delete(ui.req.OpcMeta, key) } } return ui, nil } func (o *Object) createMultipartUpload(ctx context.Context, putReq *objectstorage.PutObjectRequest) ( uploadID string, existingParts map[int]objectstorage.MultipartUploadPartSummary, err error) { bucketName, bucketPath := o.split() err = o.fs.makeBucket(ctx, bucketName) if err != nil { fs.Errorf(o, "failed to create bucket: %v, err: %v", bucketName, err) return uploadID, existingParts, err } if o.fs.opt.AttemptResumeUpload { fs.Debugf(o, "attempting to resume upload for %v (if any)", o.remote) resumeUploads, err := o.fs.findLatestMultipartUpload(ctx, bucketName, bucketPath) if err == nil && len(resumeUploads) > 0 { uploadID = *resumeUploads[0].UploadId existingParts, err = o.fs.listMultipartUploadParts(ctx, bucketName, bucketPath, uploadID) if err == nil { fs.Debugf(o, "resuming with existing upload id: %v", uploadID) return uploadID, existingParts, err } } } req := objectstorage.CreateMultipartUploadRequest{ NamespaceName: common.String(o.fs.opt.Namespace), BucketName: common.String(bucketName), } req.Object = common.String(bucketPath) if o.fs.opt.StorageTier != "" { storageTier, ok := objectstorage.GetMappingStorageTierEnum(o.fs.opt.StorageTier) if !ok { return "", nil, fmt.Errorf("not a valid storage tier: 
%v", o.fs.opt.StorageTier) } req.StorageTier = storageTier } o.applyMultipartUploadOptions(putReq, &req) var resp objectstorage.CreateMultipartUploadResponse err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.CreateMultipartUpload(ctx, req) return shouldRetry(ctx, resp.HTTPResponse(), err) }) if err != nil { return "", existingParts, err } existingParts = make(map[int]objectstorage.MultipartUploadPartSummary) uploadID = *resp.UploadId fs.Debugf(o, "created new upload id: %v", uploadID) return uploadID, existingParts, err }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/oracleobjectstorage/object.go
backend/oracleobjectstorage/object.go
//go:build !plan9 && !solaris && !js package oracleobjectstorage import ( "bytes" "context" "encoding/base64" "encoding/hex" "fmt" "io" "net/http" "os" "regexp" "strconv" "strings" "time" "github.com/ncw/swift/v2" "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/objectstorage" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" ) // ------------------------------------------------------------ // Object Interface Implementation // ------------------------------------------------------------ const ( metaMtime = "mtime" // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime metaMD5Hash = "md5chksum" // the meta key to store md5hash in // StandardTier object storage tier ociMetaPrefix = "opc-meta-" ) var archive = "archive" var infrequentAccess = "infrequentaccess" var standard = "standard" var storageTierMap = map[string]*string{ archive: &archive, infrequentAccess: &infrequentAccess, standard: &standard, } var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`) // Object describes a oci bucket object type Object struct { fs *Fs // what this object is part of remote string // The remote path md5 string // MD5 hash if known bytes int64 // Size of the object lastModified time.Time // The modified time of the object if known meta map[string]string // The object metadata if known - may be nil mimeType string // Content-Type of the object // Metadata as pointers to strings as they often won't be present storageTier *string // e.g. 
Standard } // split returns bucket and bucketPath from the object func (o *Object) split() (bucket, bucketPath string) { return o.fs.split(o.remote) } // readMetaData gets the metadata if it hasn't already been fetched func (o *Object) readMetaData(ctx context.Context) (err error) { fs.Debugf(o, "trying to read metadata %v", o.remote) if o.meta != nil { return nil } info, err := o.headObject(ctx) if err != nil { return err } return o.decodeMetaDataHead(info) } // headObject gets the metadata from the object unconditionally func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObjectResponse, err error) { bucketName, objectPath := o.split() req := objectstorage.HeadObjectRequest{ NamespaceName: common.String(o.fs.opt.Namespace), BucketName: common.String(bucketName), ObjectName: common.String(objectPath), } useBYOKHeadObject(o.fs, &req) var response objectstorage.HeadObjectResponse err = o.fs.pacer.Call(func() (bool, error) { var err error response, err = o.fs.srv.HeadObject(ctx, req) return shouldRetry(ctx, response.HTTPResponse(), err) }) if err != nil { if svcErr, ok := err.(common.ServiceError); ok { if svcErr.GetHTTPStatusCode() == http.StatusNotFound { return nil, fs.ErrorObjectNotFound } } fs.Errorf(o, "Failed to head object: %v", err) return nil, err } o.fs.cache.MarkOK(bucketName) return &response, err } func (o *Object) decodeMetaDataHead(info *objectstorage.HeadObjectResponse) (err error) { return o.setMetaData( info.ContentLength, info.ContentMd5, info.ContentType, info.LastModified, info.StorageTier, info.OpcMeta) } func (o *Object) decodeMetaDataObject(info *objectstorage.GetObjectResponse) (err error) { return o.setMetaData( info.ContentLength, info.ContentMd5, info.ContentType, info.LastModified, info.StorageTier, info.OpcMeta) } func (o *Object) setMetaData( contentLength *int64, contentMd5 *string, contentType *string, lastModified *common.SDKTime, storageTier any, meta map[string]string) error { if contentLength != nil { 
o.bytes = *contentLength } if contentMd5 != nil { md5, err := o.base64ToMd5(*contentMd5) if err == nil { o.md5 = md5 } } o.meta = meta if o.meta == nil { o.meta = map[string]string{} } // Read MD5 from metadata if present if md5sumBase64, ok := o.meta[metaMD5Hash]; ok { md5, err := o.base64ToMd5(md5sumBase64) if err != nil { o.md5 = md5 } } if lastModified == nil { o.lastModified = time.Now() fs.Logf(o, "Failed to read last modified") } else { o.lastModified = lastModified.Time } if contentType != nil { o.mimeType = *contentType } if storageTier == nil || storageTier == "" { o.storageTier = storageTierMap[standard] } else { tier := strings.ToLower(fmt.Sprintf("%v", storageTier)) o.storageTier = storageTierMap[tier] } return nil } func (o *Object) base64ToMd5(md5sumBase64 string) (md5 string, err error) { md5sumBytes, err := base64.StdEncoding.DecodeString(md5sumBase64) if err != nil { fs.Debugf(o, "Failed to read md5sum from metadata %q: %v", md5sumBase64, err) return "", err } else if len(md5sumBytes) != 16 { fs.Debugf(o, "failed to read md5sum from metadata %q: wrong length", md5sumBase64) return "", fmt.Errorf("failed to read md5sum from metadata %q: wrong length", md5sumBase64) } return hex.EncodeToString(md5sumBytes), nil } // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Size returns the size of an object in bytes func (o *Object) Size() int64 { return o.bytes } // GetTier returns storage class as string func (o *Object) GetTier() string { if o.storageTier == nil || *o.storageTier == "" { return standard } return *o.storageTier } // SetTier performs changing storage class func (o *Object) SetTier(tier string) (err error) { ctx := context.TODO() tier = strings.ToLower(tier) bucketName, bucketPath := o.split() tierEnum, ok := 
objectstorage.GetMappingStorageTierEnum(tier) if !ok { return fmt.Errorf("not a valid storage tier %v ", tier) } req := objectstorage.UpdateObjectStorageTierRequest{ NamespaceName: common.String(o.fs.opt.Namespace), BucketName: common.String(bucketName), UpdateObjectStorageTierDetails: objectstorage.UpdateObjectStorageTierDetails{ ObjectName: common.String(bucketPath), StorageTier: tierEnum, }, } _, err = o.fs.srv.UpdateObjectStorageTier(ctx, req) if err != nil { return err } o.storageTier = storageTierMap[tier] return err } // MimeType of an Object if known, "" otherwise func (o *Object) MimeType(ctx context.Context) string { err := o.readMetaData(ctx) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return "" } return o.mimeType } // Hash returns the MD5 of an object returning a lowercase hex string func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } // Convert base64 encoded md5 into lower case hex if o.md5 == "" { err := o.readMetaData(ctx) if err != nil { return "", err } } return o.md5, nil } // ModTime returns the modification time of the object // // It attempts to read the objects mtime and if that isn't present the // LastModified returned to the http headers func (o *Object) ModTime(ctx context.Context) (result time.Time) { if o.fs.ci.UseServerModTime { return o.lastModified } err := o.readMetaData(ctx) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return time.Now() } // read mtime out of metadata if available d, ok := o.meta[metaMtime] if !ok || d == "" { return o.lastModified } modTime, err := swift.FloatStringToTime(d) if err != nil { fs.Logf(o, "Failed to read mtime from object: %v", err) return o.lastModified } return modTime } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { err := o.readMetaData(ctx) if err != nil { return err } o.meta[metaMtime] = 
swift.TimeToFloatString(modTime) _, err = o.fs.Copy(ctx, o, o.remote) return err } // Storable returns if this object is storable func (o *Object) Storable() bool { return true } // Remove an object func (o *Object) Remove(ctx context.Context) error { bucketName, bucketPath := o.split() req := objectstorage.DeleteObjectRequest{ NamespaceName: common.String(o.fs.opt.Namespace), BucketName: common.String(bucketName), ObjectName: common.String(bucketPath), } err := o.fs.pacer.Call(func() (bool, error) { resp, err := o.fs.srv.DeleteObject(ctx, req) return shouldRetry(ctx, resp.HTTPResponse(), err) }) return err } // Open object file func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { bucketName, bucketPath := o.split() req := objectstorage.GetObjectRequest{ NamespaceName: common.String(o.fs.opt.Namespace), BucketName: common.String(bucketName), ObjectName: common.String(bucketPath), } o.applyGetObjectOptions(&req, options...) useBYOKGetObject(o.fs, &req) var resp objectstorage.GetObjectResponse err := o.fs.pacer.Call(func() (bool, error) { var err error resp, err = o.fs.srv.GetObject(ctx, req) return shouldRetry(ctx, resp.HTTPResponse(), err) }) if err != nil { return nil, err } // read size from ContentLength or ContentRange bytes := resp.ContentLength if resp.ContentRange != nil { var contentRange = *resp.ContentRange slash := strings.IndexRune(contentRange, '/') if slash >= 0 { i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64) if err == nil { bytes = &i } else { fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err) } } else { fs.Debugf(o, "Failed to find length in %q", contentRange) } } err = o.decodeMetaDataObject(&resp) if err != nil { return nil, err } o.bytes = *bytes return resp.HTTPResponse().Body, nil } func isZeroLength(streamReader io.Reader) bool { switch v := streamReader.(type) { case *bytes.Buffer: return v.Len() == 0 case *bytes.Reader: return v.Len() == 0 case *strings.Reader: 
return v.Len() == 0 case *os.File: fi, err := v.Stat() if err != nil { return false } return fi.Size() == 0 default: return false } } // Update an object if it has changed func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { bucketName, _ := o.split() err = o.fs.makeBucket(ctx, bucketName) if err != nil { return err } // determine if we like upload single or multipart. size := src.Size() multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff) if isZeroLength(in) { multipart = false } if multipart { err = o.uploadMultipart(ctx, src, in, options...) if err != nil { return err } } else { ui, err := o.prepareUpload(ctx, src, options) if err != nil { return fmt.Errorf("failed to prepare upload: %w", err) } var resp objectstorage.PutObjectResponse err = o.fs.pacer.CallNoRetry(func() (bool, error) { ui.req.PutObjectBody = io.NopCloser(in) resp, err = o.fs.srv.PutObject(ctx, *ui.req) return shouldRetry(ctx, resp.HTTPResponse(), err) }) if err != nil { fs.Errorf(o, "put object failed %v", err) return err } } // Read the metadata from the newly created object o.meta = nil // wipe old metadata return o.readMetaData(ctx) } func (o *Object) applyPutOptions(req *objectstorage.PutObjectRequest, options ...fs.OpenOption) { // Apply upload options for _, option := range options { key, value := option.Header() lowerKey := strings.ToLower(key) switch lowerKey { case "": // ignore case "cache-control": req.CacheControl = common.String(value) case "content-disposition": req.ContentDisposition = common.String(value) case "content-encoding": req.ContentEncoding = common.String(value) case "content-language": req.ContentLanguage = common.String(value) case "content-type": req.ContentType = common.String(value) default: if strings.HasPrefix(lowerKey, ociMetaPrefix) { req.OpcMeta[lowerKey] = value } else { fs.Errorf(o, "Don't know how to set key %q on upload", key) } } } } func (o *Object) applyGetObjectOptions(req 
*objectstorage.GetObjectRequest, options ...fs.OpenOption) { fs.FixRangeOption(options, o.bytes) for _, option := range options { switch option.(type) { case *fs.RangeOption, *fs.SeekOption: _, value := option.Header() req.Range = &value default: if option.Mandatory() { fs.Logf(o, "Unsupported mandatory option: %v", option) } } } // Apply upload options for _, option := range options { key, value := option.Header() lowerKey := strings.ToLower(key) switch lowerKey { case "": // ignore case "cache-control": req.HttpResponseCacheControl = common.String(value) case "content-disposition": req.HttpResponseContentDisposition = common.String(value) case "content-encoding": req.HttpResponseContentEncoding = common.String(value) case "content-language": req.HttpResponseContentLanguage = common.String(value) case "content-type": req.HttpResponseContentType = common.String(value) case "range": // do nothing default: fs.Errorf(o, "Don't know how to set key %q on upload", key) } } } func (o *Object) applyMultipartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.CreateMultipartUploadRequest) { req.ContentType = putReq.ContentType req.ContentLanguage = putReq.ContentLanguage req.ContentEncoding = putReq.ContentEncoding req.ContentDisposition = putReq.ContentDisposition req.CacheControl = putReq.CacheControl req.Metadata = metadataWithOpcPrefix(putReq.OpcMeta) req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm req.OpcSseCustomerKey = putReq.OpcSseCustomerKey req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256 req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId } func (o *Object) applyPartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.UploadPartRequest) { req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm req.OpcSseCustomerKey = putReq.OpcSseCustomerKey req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256 req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId } func metadataWithOpcPrefix(src map[string]string) 
map[string]string { dst := make(map[string]string) for lowerKey, value := range src { if !strings.HasPrefix(lowerKey, ociMetaPrefix) { dst[ociMetaPrefix+lowerKey] = value } } return dst }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/oracleobjectstorage/oracleobjectstorage.go
backend/oracleobjectstorage/oracleobjectstorage.go
//go:build !plan9 && !solaris && !js // Package oracleobjectstorage provides an interface to the OCI object storage system. package oracleobjectstorage import ( "context" "fmt" "io" "net/http" "path" "strings" "time" "github.com/ncw/swift/v2" "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/objectstorage" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/pacer" ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "oracleobjectstorage", Description: "Oracle Cloud Infrastructure Object Storage", Prefix: "oos", NewFs: NewFs, CommandHelp: commandHelp, Options: newOptions(), MetadataInfo: &fs.MetadataInfo{ System: systemMetadataInfo, Help: `User metadata is stored as opc-meta- keys.`, }, }) } var systemMetadataInfo = map[string]fs.MetadataHelp{ "opc-meta-mode": { Help: "File type and mode", Type: "octal, unix style", Example: "0100664", }, "opc-meta-uid": { Help: "User ID of owner", Type: "decimal number", Example: "500", }, "opc-meta-gid": { Help: "Group ID of owner", Type: "decimal number", Example: "500", }, "opc-meta-atime": { Help: "Time of last access", Type: "ISO 8601", Example: "2025-06-30T22:27:43-04:00", }, "opc-meta-mtime": { Help: "Time of last modification", Type: "ISO 8601", Example: "2025-06-30T22:27:43-04:00", }, "opc-meta-btime": { Help: "Time of file birth (creation)", Type: "ISO 8601", Example: "2025-06-30T22:27:43-04:00", }, } // Fs represents a remote object storage server type Fs struct { name string // name of this remote root string // the path we are working on if any opt Options // parsed config options ci *fs.ConfigInfo // global config features *fs.Features // optional features srv *objectstorage.ObjectStorageClient // the connection to the object 
storage rootBucket string // bucket part of root (if any) rootDirectory string // directory part of root (if any) cache *bucket.Cache // cache for bucket creation status pacer *fs.Pacer // To pace the API calls } // NewFs Initialize backend func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } err = validateSSECustomerKeyOptions(opt) if err != nil { return nil, err } ci := fs.GetConfig(ctx) objectStorageClient, err := newObjectStorageClient(ctx, opt) if err != nil { return nil, err } pc := fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))) // Set pacer retries to 2 (1 try and 1 retry) because we are // relying on SDK retry mechanism, but we allow 2 attempts to // retry directory listings after XMLSyntaxError pc.SetRetries(2) f := &Fs{ name: name, opt: *opt, ci: ci, srv: objectStorageClient, cache: bucket.NewCache(), pacer: pc, } f.setRoot(root) f.features = (&fs.Features{ ReadMetadata: true, ReadMimeType: true, WriteMimeType: true, BucketBased: true, BucketBasedRootOK: true, SetTier: true, GetTier: true, SlowModTime: true, }).Fill(ctx, f) if f.rootBucket != "" && f.rootDirectory != "" && !strings.HasSuffix(root, "/") { // Check to see if the (bucket,directory) is actually an existing file oldRoot := f.root newRoot, leaf := path.Split(oldRoot) f.setRoot(newRoot) _, err := f.NewObject(ctx, leaf) if err != nil { // File doesn't exist or is a directory so return old f f.setRoot(oldRoot) return f, nil } // return an error with fs which points to the parent return f, fs.ErrorIsFile } return f, err } func checkUploadChunkSize(cs fs.SizeSuffix) error { if cs < minChunkSize { return fmt.Errorf("%s is less than %s", cs, minChunkSize) } return nil } func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadChunkSize(cs) if err == nil { old, f.opt.ChunkSize = f.opt.ChunkSize, 
cs } return } func checkUploadCutoff(cs fs.SizeSuffix) error { if cs > maxUploadCutoff { return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff) } return nil } func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadCutoff(cs) if err == nil { old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs } return } func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadChunkSize(cs) if err == nil { old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs } return } // ------------------------------------------------------------ // Implement backed that represents a remote object storage server // Fs is the interface a cloud storage system must provide // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { if f.rootBucket == "" { return "oos:root" } if f.rootDirectory == "" { return fmt.Sprintf("oos:bucket %s", f.rootBucket) } return fmt.Sprintf("oos:bucket %s, path %s", f.rootBucket, f.rootDirectory) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // Precision of the remote func (f *Fs) Precision() time.Duration { return time.Millisecond } // Hashes returns the supported hash sets. 
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.MD5) } // setRoot changes the root of the Fs func (f *Fs) setRoot(root string) { f.root = parsePath(root) f.rootBucket, f.rootDirectory = bucket.Split(f.root) } // parsePath parses a remote 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // split returns bucket and bucketPath from the rootRelativePath // relative to f.root func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) { bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath)) return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath) } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { return list.WithListP(ctx, dir, f) } // ListP lists the objects and directories of the Fs starting // from dir non recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. 
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error { list := list.NewHelper(callback) bucketName, directory := f.split(dir) fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir) if bucketName == "" { if directory != "" { return fs.ErrorListBucketRequired } entries, err := f.listBuckets(ctx) if err != nil { return err } for _, entry := range entries { err = list.Add(entry) if err != nil { return err } } } else { err := f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "", list.Add) if err != nil { return err } } return list.Flush() } // listFn is called from list to handle an object. type listFn func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error // list the objects into the function supplied from // the bucket and root supplied // (bucket, directory) is the starting directory // If prefix is set then it is removed from all file names // If addBucket is set then it adds the bucket to the start of the remotes generated // If recurse is set the function will recursively list // If limit is > 0 then it limits to that many files (must be less than 1000) // If hidden is set then it will list the hidden (deleted) files too. 
// if findFile is set it will look for files called (bucket, directory) func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int, fn listFn) (err error) { if prefix != "" { prefix += "/" } if directory != "" { directory += "/" } delimiter := "" if !recurse { delimiter = "/" } chunkSize := 1000 if limit > 0 { chunkSize = limit } var request = objectstorage.ListObjectsRequest{ NamespaceName: common.String(f.opt.Namespace), BucketName: common.String(bucket), Prefix: common.String(directory), Limit: common.Int(chunkSize), Fields: common.String("name,size,etag,timeCreated,md5,timeModified,storageTier,archivalState"), } if delimiter != "" { request.Delimiter = common.String(delimiter) } for { var resp objectstorage.ListObjectsResponse err = f.pacer.Call(func() (bool, error) { var err error resp, err = f.srv.ListObjects(ctx, request) return shouldRetry(ctx, resp.HTTPResponse(), err) }) if err != nil { if ociError, ok := err.(common.ServiceError); ok { // If it is a timeout then we want to retry that if ociError.GetHTTPStatusCode() == http.StatusNotFound { err = fs.ErrorDirNotFound } } if f.rootBucket == "" { // if listing from the root ignore wrong region requests returning // empty directory if reqErr, ok := err.(common.ServiceError); ok { // 301 if wrong region for bucket if reqErr.GetHTTPStatusCode() == http.StatusMovedPermanently { fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket) return nil } } } return err } if !recurse { for _, commonPrefix := range resp.ListObjects.Prefixes { if commonPrefix == "" { fs.Logf(f, "Nil common prefix received") continue } remote := commonPrefix remote = f.opt.Enc.ToStandardPath(remote) if !strings.HasPrefix(remote, prefix) { fs.Logf(f, "Odd name received %q", remote) continue } remote = remote[len(prefix):] if addBucket { remote = path.Join(bucket, remote) } remote = strings.TrimSuffix(remote, "/") err = fn(remote, 
&objectstorage.ObjectSummary{Name: &remote}, true) if err != nil { return err } } } for i := range resp.Objects { object := &resp.Objects[i] // Finish if file name no longer has prefix //if prefix != "" && !strings.HasPrefix(file.Name, prefix) { // return nil //} remote := *object.Name remote = f.opt.Enc.ToStandardPath(remote) if !strings.HasPrefix(remote, prefix) { continue } remote = remote[len(prefix):] // Check for directory isDirectory := remote == "" || strings.HasSuffix(remote, "/") if addBucket { remote = path.Join(bucket, remote) } // is this a directory marker? if isDirectory && object.Size != nil && *object.Size == 0 { continue // skip directory marker } if isDirectory && len(remote) > 1 { remote = remote[:len(remote)-1] } err = fn(remote, object, isDirectory) if err != nil { return err } } // end if no NextFileName if resp.NextStartWith == nil { break } request.Start = resp.NextStartWith } return nil } // Convert a list item into a DirEntry func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectstorage.ObjectSummary, isDirectory bool) (fs.DirEntry, error) { if isDirectory { size := int64(0) if object.Size != nil { size = *object.Size } d := fs.NewDir(remote, time.Time{}).SetSize(size) return d, nil } o, err := f.newObjectWithInfo(ctx, remote, object) if err != nil { return nil, err } return o, nil } // listDir lists a single directory func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) { fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error { entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory) if err != nil { return err } if entry != nil { return callback(entry) } return nil } err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn) if err != nil { return err } // bucket must be present if listing succeeded f.cache.MarkOK(bucket) return nil } // listBuckets returns all the buckets to out 
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) { if f.opt.Provider == noAuth { return nil, fmt.Errorf("can't list buckets with %v provider, use a valid auth provider in config file", noAuth) } var request = objectstorage.ListBucketsRequest{ NamespaceName: common.String(f.opt.Namespace), CompartmentId: common.String(f.opt.Compartment), } var resp objectstorage.ListBucketsResponse for { err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.ListBuckets(ctx, request) return shouldRetry(ctx, resp.HTTPResponse(), err) }) if err != nil { return nil, err } for _, item := range resp.Items { bucketName := f.opt.Enc.ToStandardName(*item.Name) f.cache.MarkOK(bucketName) d := fs.NewDir(bucketName, item.TimeCreated.Time) entries = append(entries, d) } if resp.OpcNextPage == nil { break } request.Page = resp.OpcNextPage } return entries, nil } // Return an Object from a path // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *objectstorage.ObjectSummary) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } if info != nil { // Set info but not meta if info.TimeModified == nil { fs.Logf(o, "Failed to read last modified") o.lastModified = time.Now() } else { o.lastModified = info.TimeModified.Time } if info.Md5 != nil { md5, err := o.base64ToMd5(*info.Md5) if err != nil { o.md5 = md5 } } o.bytes = *info.Size o.storageTier = storageTierMap[strings.ToLower(string(info.StorageTier))] } else { err := o.readMetaData(ctx) // reads info and headers, returning an error if err != nil { return nil, err } } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(ctx, remote, nil) } // Put the object into the bucket // Copy the reader in to the new object which is returned // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // Temporary Object under construction o := &Object{ fs: f, remote: src.Remote(), } return o, o.Update(ctx, in, src, options...) } // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) } // Mkdir creates the bucket if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { bucketName, _ := f.split(dir) return f.makeBucket(ctx, bucketName) } // makeBucket creates the bucket if it doesn't exist func (f *Fs) makeBucket(ctx context.Context, bucketName string) error { if f.opt.NoCheckBucket { return nil } return f.cache.Create(bucketName, func() error { details := objectstorage.CreateBucketDetails{ Name: common.String(bucketName), CompartmentId: common.String(f.opt.Compartment), PublicAccessType: objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess, } req := objectstorage.CreateBucketRequest{ NamespaceName: common.String(f.opt.Namespace), CreateBucketDetails: details, } err := f.pacer.Call(func() (bool, error) { resp, err := f.srv.CreateBucket(ctx, req) return shouldRetry(ctx, resp.HTTPResponse(), err) }) if err == nil { fs.Infof(f, "Bucket %q created with accessType %q", bucketName, objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess) } if svcErr, ok := err.(common.ServiceError); ok { if code := svcErr.GetCode(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" { err = nil } } return err }, func() (bool, error) { return 
f.bucketExists(ctx, bucketName) }) } // Check if the bucket exists // // NB this can return incorrect results if called immediately after bucket deletion func (f *Fs) bucketExists(ctx context.Context, bucketName string) (bool, error) { req := objectstorage.HeadBucketRequest{ NamespaceName: common.String(f.opt.Namespace), BucketName: common.String(bucketName), } err := f.pacer.Call(func() (bool, error) { resp, err := f.srv.HeadBucket(ctx, req) return shouldRetry(ctx, resp.HTTPResponse(), err) }) if err == nil { return true, nil } if err, ok := err.(common.ServiceError); ok { if err.GetHTTPStatusCode() == http.StatusNotFound { return false, nil } } return false, err } // Rmdir delete an empty bucket. if bucket is not empty this is will fail with appropriate error func (f *Fs) Rmdir(ctx context.Context, dir string) error { bucketName, directory := f.split(dir) if bucketName == "" || directory != "" { return nil } return f.cache.Remove(bucketName, func() error { req := objectstorage.DeleteBucketRequest{ NamespaceName: common.String(f.opt.Namespace), BucketName: common.String(bucketName), } err := f.pacer.Call(func() (bool, error) { resp, err := f.srv.DeleteBucket(ctx, req) return shouldRetry(ctx, resp.HTTPResponse(), err) }) if err == nil { fs.Infof(f, "Bucket %q deleted", bucketName) } return err }) } func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID *string) (err error) { if uploadID == nil || *uploadID == "" { return nil } request := objectstorage.AbortMultipartUploadRequest{ NamespaceName: common.String(f.opt.Namespace), BucketName: bucketName, ObjectName: bucketPath, UploadId: uploadID, } err = f.pacer.Call(func() (bool, error) { resp, err := f.srv.AbortMultipartUpload(ctx, request) return shouldRetry(ctx, resp.HTTPResponse(), err) }) return err } // cleanUpBucket removes all pending multipart uploads for a given bucket over the age of maxAge func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Duration, 
uploads []*objectstorage.MultipartUpload) (err error) { fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge) for _, upload := range uploads { if upload.TimeCreated != nil && upload.Object != nil && upload.UploadId != nil { age := time.Since(upload.TimeCreated.Time) what := fmt.Sprintf("pending multipart upload for bucket %q key %q dated %v (%v ago)", bucket, *upload.Object, upload.TimeCreated, age) if age > maxAge { fs.Infof(f, "removing %s", what) if operations.SkipDestructive(ctx, what, "remove pending upload") { continue } _ = f.abortMultiPartUpload(ctx, upload.Bucket, upload.Object, upload.UploadId) } } else { fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.") } } return err } // CleanUp removes all pending multipart uploads func (f *Fs) cleanUp(ctx context.Context, maxAge time.Duration) (err error) { uploadsMap, err := f.listMultipartUploadsAll(ctx) if err != nil { return err } for bucketName, uploads := range uploadsMap { cleanErr := f.cleanUpBucket(ctx, bucketName, maxAge, uploads) if err != nil { fs.Errorf(f, "Failed to cleanup bucket %q: %v", bucketName, cleanErr) err = cleanErr } } return err } // CleanUp removes all pending multipart uploads older than 24 hours func (f *Fs) CleanUp(ctx context.Context) (err error) { return f.cleanUp(ctx, 24*time.Hour) } // ------------------------------------------------------------ // Implement ListRer is an optional interfaces for Fs //------------------------------------------------------------ /* ListR lists the objects and directories of the Fs starting from dir recursively into out. dir should be "" to start from the root, and should not have trailing slashes. This should return ErrDirNotFound if the directory isn't found. It should call callback for each tranche of entries read. These need not be returned in any particular order. If callback returns an error then the listing will stop immediately. 
Don't implement this unless you have a more efficient way of listing recursively that doing a directory traversal. */ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { bucketName, directory := f.split(dir) list := list.NewHelper(callback) listR := func(bucket, directory, prefix string, addBucket bool) error { return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error { entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory) if err != nil { return err } return list.Add(entry) }) } if bucketName == "" { entries, err := f.listBuckets(ctx) if err != nil { return err } for _, entry := range entries { err = list.Add(entry) if err != nil { return err } bucketName := entry.Remote() err = listR(bucketName, "", f.rootDirectory, true) if err != nil { return err } // bucket must be present if listing succeeded f.cache.MarkOK(bucketName) } } else { err = listR(bucketName, directory, f.rootDirectory, f.rootBucket == "") if err != nil { return err } // bucket must be present if listing succeeded f.cache.MarkOK(bucketName) } return list.Flush() } // Metadata returns metadata for an object // // It should return nil if there is no Metadata func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) { err = o.readMetaData(ctx) if err != nil { return nil, err } metadata = make(fs.Metadata, len(o.meta)+7) for k, v := range o.meta { switch k { case metaMtime: if modTime, err := swift.FloatStringToTime(v); err == nil { metadata["mtime"] = modTime.Format(time.RFC3339Nano) } case metaMD5Hash: // don't write hash metadata default: metadata[k] = v } } if o.mimeType != "" { metadata["content-type"] = o.mimeType } if !o.lastModified.IsZero() { metadata["btime"] = o.lastModified.Format(time.RFC3339Nano) } return metadata, nil } // Check the interfaces are satisfied var ( _ fs.Fs = &Fs{} _ fs.Copier = &Fs{} _ fs.PutStreamer = &Fs{} _ 
fs.ListRer = &Fs{} _ fs.ListPer = &Fs{} _ fs.Commander = &Fs{} _ fs.CleanUpper = &Fs{} _ fs.OpenChunkWriter = &Fs{} _ fs.Object = &Object{} _ fs.MimeTyper = &Object{} _ fs.GetTierer = &Object{} _ fs.SetTierer = &Object{} )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/oracleobjectstorage/command.go
backend/oracleobjectstorage/command.go
//go:build !plan9 && !solaris && !js package oracleobjectstorage import ( "context" "fmt" "sort" "strconv" "strings" "sync" "time" "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/objectstorage" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/operations" ) // ------------------------------------------------------------ // Command Interface Implementation // ------------------------------------------------------------ const ( operationRename = "rename" operationListMultiPart = "list-multipart-uploads" operationCleanup = "cleanup" operationRestore = "restore" ) var commandHelp = []fs.CommandHelp{{ Name: operationRename, Short: "change the name of an object.", Long: `This command can be used to rename a object. Usage example: ` + "```console" + ` rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name ` + "```", Opts: nil, }, { Name: operationListMultiPart, Short: "List the unfinished multipart uploads.", Long: `This command lists the unfinished multipart uploads in JSON format. Usage example: ` + "```console" + ` rclone backend list-multipart-uploads oos:bucket/path/to/object ` + "```" + ` It returns a dictionary of buckets with values as lists of unfinished multipart uploads. You can call it with no bucket in which case it lists all bucket, with a bucket or with a bucket and path. ` + "```json" + ` { "test-bucket": [ { "namespace": "test-namespace", "bucket": "test-bucket", "object": "600m.bin", "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8", "timeCreated": "2022-07-29T06:21:16.595Z", "storageTier": "Standard" } ] }`, }, { Name: operationCleanup, Short: "Remove unfinished multipart uploads.", Long: `This command removes unfinished multipart uploads of age greater than max-age which defaults to 24 hours. Note that you can use --interactive/-i or --dry-run with this command to see what it would do. 
Usage examples: ` + "```console" + ` rclone backend cleanup oos:bucket/path/to/object rclone backend cleanup -o max-age=7w oos:bucket/path/to/object ` + "```" + ` Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.`, Opts: map[string]string{ "max-age": "Max age of upload to delete.", }, }, { Name: operationRestore, Short: "Restore objects from Archive to Standard storage.", Long: `This command can be used to restore one or more objects from Archive to Standard storage. Usage examples: ` + "```console" + ` rclone backend restore oos:bucket/path/to/directory -o hours=HOURS rclone backend restore oos:bucket -o hours=HOURS ` + "```" + ` This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags ` + "```console" + ` rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72 ` + "```" + ` All the objects shown will be marked for restore, then: ` + "```console" + ` rclone backend restore --include "*.txt" oos:bucket/path -o hours=72 ` + "```" + ` It returns a list of status dictionaries with Object Name and Status keys. The Status will be "RESTORED"" if it was successful or an error message if not. ` + "```json" + ` [ { "Object": "test.txt" "Status": "RESTORED", }, { "Object": "test/file4.txt" "Status": "RESTORED", } ] ` + "```", Opts: map[string]string{ "hours": `The number of hours for which this object will be restored. 
Default is 24 hrs.`, }, }, } /* Command the backend to run a named command The command run is name args may be used to read arguments from opts may be used to read optional arguments from The result should be capable of being JSON encoded If it is a string or a []string it will be shown to the user otherwise it will be JSON encoded and shown to the user like that */ func (f *Fs) Command(ctx context.Context, commandName string, args []string, opt map[string]string) (result any, err error) { // fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt) switch commandName { case operationRename: if len(args) < 2 { return nil, fmt.Errorf("path to object or its new name to rename is empty") } remote := args[0] newName := args[1] return f.rename(ctx, remote, newName) case operationListMultiPart: return f.listMultipartUploadsAll(ctx) case operationCleanup: maxAge := 24 * time.Hour if opt["max-age"] != "" { maxAge, err = fs.ParseDuration(opt["max-age"]) if err != nil { return nil, fmt.Errorf("bad max-age: %w", err) } } return nil, f.cleanUp(ctx, maxAge) case operationRestore: return f.restore(ctx, opt) default: return nil, fs.ErrorCommandNotFound } } func (f *Fs) rename(ctx context.Context, remote, newName string) (any, error) { if remote == "" { return nil, fmt.Errorf("path to object file cannot be empty") } if newName == "" { return nil, fmt.Errorf("the object's new name cannot be empty") } o := &Object{ fs: f, remote: remote, } bucketName, objectPath := o.split() err := o.readMetaData(ctx) if err != nil { fs.Errorf(f, "failed to read object:%v %v ", objectPath, err) if strings.HasPrefix(objectPath, bucketName) { fs.Errorf(f, "warn: ensure object path: %v is relative to bucket:%v and doesn't include the bucket name", objectPath, bucketName) } return nil, fs.ErrorNotAFile } details := objectstorage.RenameObjectDetails{ SourceName: common.String(objectPath), NewName: common.String(newName), } request := objectstorage.RenameObjectRequest{ NamespaceName: 
common.String(f.opt.Namespace), BucketName: common.String(bucketName), RenameObjectDetails: details, OpcClientRequestId: nil, RequestMetadata: common.RequestMetadata{}, } var response objectstorage.RenameObjectResponse err = f.pacer.Call(func() (bool, error) { response, err = f.srv.RenameObject(ctx, request) return shouldRetry(ctx, response.HTTPResponse(), err) }) if err != nil { return nil, err } fs.Infof(f, "success: renamed object-path: %v to %v", objectPath, newName) return "renamed successfully", nil } func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*objectstorage.MultipartUpload, err error) { uploadsMap = make(map[string][]*objectstorage.MultipartUpload) bucket, directory := f.split("") if bucket != "" { uploads, err := f.listMultipartUploads(ctx, bucket, directory) if err != nil { return uploadsMap, err } uploadsMap[bucket] = uploads return uploadsMap, nil } entries, err := f.listBuckets(ctx) if err != nil { return uploadsMap, err } for _, entry := range entries { bucket := entry.Remote() uploads, listErr := f.listMultipartUploads(ctx, bucket, "") if listErr != nil { err = listErr fs.Errorf(f, "%v", err) } uploadsMap[bucket] = uploads } return uploadsMap, err } // listMultipartUploads lists all outstanding multipart uploads for (bucket, key) // // Note that rather lazily we treat key as a prefix, so it matches // directories and objects. This could surprise the user if they ask // for "dir" and it returns "dirKey" func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) ( uploads []*objectstorage.MultipartUpload, err error) { return f.listMultipartUploadsObject(ctx, bucketName, directory, false) } // listMultipartUploads finds first outstanding multipart uploads for (bucket, key) // // Note that rather lazily we treat key as a prefix, so it matches // directories and objects. 
This could surprise the user if they ask // for "dir" and it returns "dirKey" func (f *Fs) findLatestMultipartUpload(ctx context.Context, bucketName, directory string) ( uploads []*objectstorage.MultipartUpload, err error) { pastUploads, err := f.listMultipartUploadsObject(ctx, bucketName, directory, true) if err != nil { return nil, err } if len(pastUploads) > 0 { sort.Slice(pastUploads, func(i, j int) bool { return pastUploads[i].TimeCreated.After(pastUploads[j].TimeCreated.Time) }) return pastUploads[:1], nil } return nil, err } func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directory string, exact bool) ( uploads []*objectstorage.MultipartUpload, err error) { uploads = []*objectstorage.MultipartUpload{} req := objectstorage.ListMultipartUploadsRequest{ NamespaceName: common.String(f.opt.Namespace), BucketName: common.String(bucketName), } var response objectstorage.ListMultipartUploadsResponse for { err = f.pacer.Call(func() (bool, error) { response, err = f.srv.ListMultipartUploads(ctx, req) return shouldRetry(ctx, response.HTTPResponse(), err) }) if err != nil { // fs.Debugf(f, "failed to list multi part uploads %v", err) return uploads, err } for index, item := range response.Items { if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) { continue } if exact { if *item.Object == directory { uploads = append(uploads, &response.Items[index]) } } else { uploads = append(uploads, &response.Items[index]) } } if response.OpcNextPage == nil { break } req.Page = response.OpcNextPage } return uploads, nil } func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPath string, uploadID string) ( uploadedParts map[int]objectstorage.MultipartUploadPartSummary, err error) { uploadedParts = make(map[int]objectstorage.MultipartUploadPartSummary) req := objectstorage.ListMultipartUploadPartsRequest{ NamespaceName: common.String(f.opt.Namespace), BucketName: common.String(bucketName), ObjectName: 
common.String(bucketPath), UploadId: common.String(uploadID), Limit: common.Int(1000), } var response objectstorage.ListMultipartUploadPartsResponse for { err = f.pacer.Call(func() (bool, error) { response, err = f.srv.ListMultipartUploadParts(ctx, req) return shouldRetry(ctx, response.HTTPResponse(), err) }) if err != nil { return uploadedParts, err } for _, item := range response.Items { uploadedParts[*item.PartNumber] = item } if response.OpcNextPage == nil { break } req.Page = response.OpcNextPage } return uploadedParts, nil } func (f *Fs) restore(ctx context.Context, opt map[string]string) (any, error) { req := objectstorage.RestoreObjectsRequest{ NamespaceName: common.String(f.opt.Namespace), RestoreObjectsDetails: objectstorage.RestoreObjectsDetails{}, } if hours := opt["hours"]; hours != "" { ihours, err := strconv.Atoi(hours) if err != nil { return nil, fmt.Errorf("bad value for hours: %w", err) } req.RestoreObjectsDetails.Hours = &ihours } type status struct { Object string Status string } var ( outMu sync.Mutex out = []status{} err error ) err = operations.ListFn(ctx, f, func(obj fs.Object) { // Remember this is run --checkers times concurrently o, ok := obj.(*Object) st := status{Object: obj.Remote(), Status: "RESTORED"} defer func() { outMu.Lock() out = append(out, st) outMu.Unlock() }() if !ok { st.Status = "Not an OCI Object Storage object" return } if o.storageTier == nil || (*o.storageTier != "archive") { st.Status = "Object not in Archive storage tier" return } if operations.SkipDestructive(ctx, obj, "restore") { return } bucket, bucketPath := o.split() reqCopy := req reqCopy.BucketName = &bucket reqCopy.ObjectName = &bucketPath var response objectstorage.RestoreObjectsResponse err = f.pacer.Call(func() (bool, error) { response, err = f.srv.RestoreObjects(ctx, reqCopy) return shouldRetry(ctx, response.HTTPResponse(), err) }) if err != nil { st.Status = err.Error() } }) if err != nil { return out, err } return out, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/yandex/yandex.go
backend/yandex/yandex.go
// Package yandex provides an interface to the Yandex storage system. package yandex import ( "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "path" "strconv" "strings" "time" "github.com/rclone/rclone/backend/yandex/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/rest" ) // oAuth const ( rcloneClientID = "ac39b43b9eba4cae8ffb788c06d816a8" rcloneEncryptedClientSecret = "EfyyNZ3YUEwXM5yAhi72G9YwKn2mkFrYwJNS7cY0TJAhFlX9K-uJFbGlpO-RYjrJ" rootURL = "https://cloud-api.yandex.com/v1/disk" minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second // may needs to be increased, testing needed decayConstant = 2 // bigger for slower decay, exponential userAgentTemplae = `Yandex.Disk {"os":"windows","dtype":"ydisk3","vsn":"3.2.37.4977","id":"6BD01244C7A94456BBCEE7EEC990AEAD","id2":"0F370CD40C594A4783BC839C846B999C","session_id":"%s"}` ) // Globals var ( // Description of how to auth for this app oauthConfig = &oauthutil.Config{ AuthURL: "https://oauth.yandex.com/authorize", //same as https://oauth.yandex.ru/authorize TokenURL: "https://oauth.yandex.com/token", //same as https://oauth.yandex.ru/token ClientID: rcloneClientID, ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), RedirectURL: oauthutil.RedirectURL, } ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "yandex", Description: "Yandex Disk", NewFs: NewFs, Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { return 
oauthutil.ConfigOut("", &oauthutil.Options{ OAuth2Config: oauthConfig, }) }, Options: append(oauthutil.SharedOptions, []fs.Option{{ Name: "hard_delete", Help: "Delete files permanently rather than putting them into the trash.", Default: false, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // Of the control characters \t \n \r are allowed // it doesn't seem worth making an exception for this Default: (encoder.Display | encoder.EncodeInvalidUtf8), }, { Name: "spoof_ua", Help: "Set the user agent to match an official version of the yandex disk client. May help with upload performance.", Default: true, Advanced: true, Hide: fs.OptionHideConfigurator, }}...), }) } // Options defines the configuration for this backend type Options struct { Token string `config:"token"` HardDelete bool `config:"hard_delete"` Enc encoder.MultiEncoder `config:"encoding"` SpoofUserAgent bool `config:"spoof_ua"` } // Fs represents a remote yandex type Fs struct { name string root string // root path opt Options // parsed options ci *fs.ConfigInfo // global config features *fs.Features // optional features srv *rest.Client // the connection to the yandex server pacer *fs.Pacer // pacer for API calls diskRoot string // root path with "disk:/" container name } // Object describes a swift object type Object struct { fs *Fs // what this object is part of remote string // The remote path hasMetaData bool // whether info below has been set md5sum string // The MD5Sum of the object size int64 // Bytes in the object modTime time.Time // Modified time of the object mimeType string // Content type according to the server } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return 
fmt.Sprintf("Yandex %s", f.root) } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return time.Nanosecond } // Hashes returns the supported hash sets. func (f *Fs) Hashes() hash.Set { return hash.Set(hash.MD5) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // errorHandler parses a non 2xx error response into an error func errorHandler(resp *http.Response) error { // Decode error response errResponse := new(api.ErrorResponse) err := rest.DecodeJSON(resp, &errResponse) if err != nil { fs.Debugf(nil, "Couldn't decode error response: %v", err) } if errResponse.Message == "" { errResponse.Message = resp.Status } if errResponse.StatusCode == 0 { errResponse.StatusCode = resp.StatusCode } return errResponse } // Sets root in f func (f *Fs) setRoot(root string) { //Set root path f.root = strings.Trim(root, "/") //Set disk root path. 
//Adding "disk:" to root path as all paths on disk start with it var diskRoot string if f.root == "" { diskRoot = "disk:/" } else { diskRoot = "disk:/" + f.root + "/" } f.diskRoot = diskRoot } // filePath returns an escaped file path (f.root, file) func (f *Fs) filePath(file string) string { return path.Join(f.diskRoot, file) } // dirPath returns an escaped file path (f.root, file) ending with '/' func (f *Fs) dirPath(file string) string { return path.Join(f.diskRoot, file) + "/" } func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.ResourceInfoRequestOptions) (*api.ResourceInfoResponse, error) { opts := rest.Opts{ Method: "GET", Path: "/resources", Parameters: url.Values{}, } opts.Parameters.Set("path", f.opt.Enc.FromStandardPath(path)) if options.SortMode != nil { opts.Parameters.Set("sort", options.SortMode.String()) } if options.Limit != 0 { opts.Parameters.Set("limit", strconv.FormatUint(options.Limit, 10)) } if options.Offset != 0 { opts.Parameters.Set("offset", strconv.FormatUint(options.Offset, 10)) } if options.Fields != nil { opts.Parameters.Set("fields", strings.Join(options.Fields, ",")) } var err error var info api.ResourceInfoResponse var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &info) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } info.Name = f.opt.Enc.ToStandardName(info.Name) return &info, nil } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } ctx, ci := fs.AddConfig(ctx) if fs.ConfigOptionsInfo.Get("user_agent").IsDefault() && opt.SpoofUserAgent { randomSessionID, _ := random.Password(128) ci.UserAgent = fmt.Sprintf(userAgentTemplae, randomSessionID) } token, err := oauthutil.GetToken(name, m) if err != nil { return 
nil, fmt.Errorf("couldn't read OAuth token: %w", err) } if token.RefreshToken == "" { return nil, errors.New("unable to get RefreshToken. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend") } if token.TokenType != "OAuth" { token.TokenType = "OAuth" err = oauthutil.PutToken(name, m, token, false) if err != nil { return nil, fmt.Errorf("couldn't save OAuth token: %w", err) } fs.Logf(nil, "Automatically upgraded OAuth config.") } oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig) if err != nil { return nil, fmt.Errorf("failed to configure Yandex: %w", err) } f := &Fs{ name: name, opt: *opt, ci: ci, srv: rest.NewClient(oAuthClient).SetRoot(rootURL), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } f.setRoot(root) f.features = (&fs.Features{ ReadMimeType: true, WriteMimeType: false, // Yandex ignores the mime type we send CanHaveEmptyDirectories: true, }).Fill(ctx, f) f.srv.SetErrorHandler(errorHandler) // Check to see if the object exists and is a file //request object meta info // Check to see if the object exists and is a file //request object meta info if info, err := f.readMetaDataForPath(ctx, f.diskRoot, &api.ResourceInfoRequestOptions{}); err != nil { } else if info.ResourceType == "file" { rootDir := path.Dir(root) if rootDir == "." 
{ rootDir = "" } f.setRoot(rootDir) // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // Convert a list item into a DirEntry func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.ResourceInfoResponse) (fs.DirEntry, error) { switch object.ResourceType { case "dir": t, err := time.Parse(time.RFC3339Nano, object.Modified) if err != nil { return nil, fmt.Errorf("error parsing time in directory item: %w", err) } d := fs.NewDir(remote, t).SetSize(object.Size) return d, nil case "file": o, err := f.newObjectWithInfo(ctx, remote, object) if err != nil { return nil, err } return o, nil default: fs.Debugf(f, "Unknown resource type %q", object.ResourceType) } return nil, nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. 
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { root := f.dirPath(dir) var limit uint64 = 1000 // max number of objects per request var itemsCount uint64 // number of items per page in response var offset uint64 // for the next page of requests for { opts := &api.ResourceInfoRequestOptions{ Limit: limit, Offset: offset, } info, err := f.readMetaDataForPath(ctx, root, opts) if err != nil { if apiErr, ok := err.(*api.ErrorResponse); ok { // does not exist if apiErr.ErrorName == "DiskNotFoundError" { return nil, fs.ErrorDirNotFound } } return nil, err } itemsCount = uint64(len(info.Embedded.Items)) if info.ResourceType == "dir" { //list all subdirs for _, element := range info.Embedded.Items { element.Name = f.opt.Enc.ToStandardName(element.Name) remote := path.Join(dir, element.Name) entry, err := f.itemToDirEntry(ctx, remote, &element) if err != nil { return nil, err } if entry != nil { entries = append(entries, entry) } } } else if info.ResourceType == "file" { return nil, fs.ErrorIsFile } //offset for the next page of items offset += itemsCount //check if we reached end of list if itemsCount < limit { break } } return entries, nil } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.ResourceInfoResponse) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } var err error if info != nil { err = o.setMetaData(info) } else { err = o.readMetaData(ctx) if apiErr, ok := err.(*api.ErrorResponse); ok { // does not exist if apiErr.ErrorName == "DiskNotFoundError" { return nil, fs.ErrorObjectNotFound } } } if err != nil { return nil, err } return o, nil } // NewObject finds the Object at remote. If it can't be found it // returns the error fs.ErrorObjectNotFound. 
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(ctx, remote, nil) } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Used to create new objects func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object) { // Temporary Object under construction o = &Object{ fs: f, remote: remote, size: size, modTime: modTime, } return o } // Put the object // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size()) return o, o.Update(ctx, in, src, options...) } // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) } // CreateDir makes a directory func (f *Fs) CreateDir(ctx context.Context, path string) (err error) { //fmt.Printf("CreateDir: %s\n", path) var resp *http.Response opts := rest.Opts{ Method: "PUT", Path: "/resources", Parameters: url.Values{}, NoResponse: true, } // If creating a directory with a : use (undocumented) disk: prefix if strings.ContainsRune(path, ':') { path = "disk:" + path } opts.Parameters.Set("path", f.opt.Enc.FromStandardPath(path)) err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.Call(ctx, &opts) return shouldRetry(ctx, resp, err) }) if err != nil { // fmt.Printf("CreateDir %q Error: %s\n", path, err.Error()) return err } // fmt.Printf("...Id %q\n", *info.Id) return nil } // This really needs improvement and especially proper error checking // but Yandex does not publish a List of possible errors and when they're // expected to occur. 
// mkDirs makes the directory in path and, on failure, each of its
// parents from the root down (ignoring per-directory errors).
func (f *Fs) mkDirs(ctx context.Context, path string) (err error) {
	//trim filename from path
	//dirString := strings.TrimSuffix(path, filepath.Base(path))
	//trim "disk:" from path
	dirString := strings.TrimPrefix(path, "disk:")
	if dirString == "" {
		return nil
	}

	if err = f.CreateDir(ctx, dirString); err != nil {
		if apiErr, ok := err.(*api.ErrorResponse); ok {
			// already exists
			if apiErr.ErrorName != "DiskPathPointsToExistentDirectoryError" {
				// 2 if it fails then create all directories in the path from root.
				dirs := strings.Split(dirString, "/") //path separator
				var mkdirpath strings.Builder
				mkdirpath.WriteString("/") //path separator /
				for _, element := range dirs {
					if element != "" {
						mkdirpath.WriteString(element + "/")     //path separator /
						_ = f.CreateDir(ctx, mkdirpath.String()) // ignore errors while creating dirs
					}
				}
			}
			// any API error ends up here as success - deliberate best effort
			return nil
		}
	}
	return err
}

// mkParentDirs makes all the parent directories of resPath.
func (f *Fs) mkParentDirs(ctx context.Context, resPath string) error {
	// defer log.Trace(dirPath, "")("")
	// chop off trailing / if it exists
	parent := path.Dir(strings.TrimSuffix(resPath, "/"))
	if parent == "." {
		parent = ""
	}
	return f.mkDirs(ctx, parent)
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	path := f.filePath(dir)
	return f.mkDirs(ctx, path)
}

// waitForJob waits for the job with status in url to complete
//
// Polls location once a second until success, failure or the
// configured timeout elapses.
func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
	opts := rest.Opts{
		RootURL: location,
		Method:  "GET",
	}
	deadline := time.Now().Add(f.ci.TimeoutOrInfinite())
	for time.Now().Before(deadline) {
		var resp *http.Response
		var body []byte
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.Call(ctx, &opts)
			if fserrors.ContextError(ctx, &err) {
				return false, err
			}
			if err != nil {
				return fserrors.ShouldRetry(err), err
			}
			body, err = rest.ReadBody(resp)
			return fserrors.ShouldRetry(err), err
		})
		if err != nil {
			return err
		}
		// Try to decode the body first as an api.AsyncOperationStatus
		var status api.AsyncStatus
		err = json.Unmarshal(body, &status)
		if err != nil {
			return fmt.Errorf("async status result not JSON: %q: %w", body, err)
		}

		switch status.Status {
		case "failure":
			return fmt.Errorf("async operation returned %q", status.Status)
		case "success":
			return nil
		}

		time.Sleep(1 * time.Second)
	}
	return fmt.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
}

// delete removes the object or directory at path, permanently if
// hardDelete is set, otherwise into the trash.
func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err error) {
	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/resources",
		Parameters: url.Values{},
	}
	opts.Parameters.Set("path", f.opt.Enc.FromStandardPath(path))
	opts.Parameters.Set("permanently", strconv.FormatBool(hardDelete))

	var resp *http.Response
	var body []byte
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		if fserrors.ContextError(ctx, &err) {
			return false, err
		}
		if err != nil {
			return fserrors.ShouldRetry(err), err
		}
		body, err = rest.ReadBody(resp)
		return fserrors.ShouldRetry(err), err
	})
	if err != nil {
		return err
	}

	// if 202 Accepted it's an async operation we have to wait for it complete before returning
	if resp.StatusCode == 202 {
		var info api.AsyncInfo
		err = json.Unmarshal(body, &info)
		if err != nil {
			return fmt.Errorf("async info result not JSON: %q: %w", body, err)
		}
		return f.waitForJob(ctx, info.HRef)
	}
	return nil
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	root := f.filePath(dir)
	if check {
		//to comply with rclone logic we check if the directory is empty before delete.
		//send request to get list of objects in this directory.
		info, err := f.readMetaDataForPath(ctx, root, &api.ResourceInfoRequestOptions{})
		if err != nil {
			return fmt.Errorf("rmdir failed: %w", err)
		}
		if len(info.Embedded.Items) != 0 {
			return fs.ErrorDirectoryNotEmpty
		}
	}
	//delete directory
	return f.delete(ctx, root, f.opt.HardDelete)
}

// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, true)
}

// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, false)
}

// copyOrMove copies or moves directories or files depending on the method parameter
//
// method is "copy" or "move" (the API endpoint suffix).
func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite bool) (err error) {
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/resources/" + method,
		Parameters: url.Values{},
	}
	opts.Parameters.Set("from", f.opt.Enc.FromStandardPath(src))
	opts.Parameters.Set("path", f.opt.Enc.FromStandardPath(dst))
	opts.Parameters.Set("overwrite", strconv.FormatBool(overwrite))

	var resp *http.Response
	var body []byte
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		if fserrors.ContextError(ctx, &err) {
			return false, err
		}
		if err != nil {
			return fserrors.ShouldRetry(err), err
		}
		body, err = rest.ReadBody(resp)
		return fserrors.ShouldRetry(err), err
	})
	if err != nil {
		return err
	}

	// if 202 Accepted it's an async operation we have to wait for it complete before returning
	if resp.StatusCode == 202 {
		var info api.AsyncInfo
		err = json.Unmarshal(body, &info)
		if err != nil {
			return fmt.Errorf("async info result not JSON: %q: %w", body, err)
		}
		return f.waitForJob(ctx, info.HRef)
	}
	return nil
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	dstPath := f.filePath(remote)
	err = f.mkParentDirs(ctx, dstPath)
	if err != nil {
		return nil, err
	}

	// Find and remove existing object
	//
	// Note that the overwrite flag doesn't seem to work for server side copy
	cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
	if err != nil {
		return nil, err
	}
	defer cleanup(&err)

	err = f.copyOrMove(ctx, "copy", srcObj.filePath(), dstPath, false)
	if err != nil {
		return nil, fmt.Errorf("couldn't copy file: %w", err)
	}

	return f.NewObject(ctx, remote)
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	dstPath := f.filePath(remote)
	err := f.mkParentDirs(ctx, dstPath)
	if err != nil {
		return nil, err
	}
	err = f.copyOrMove(ctx, "move", srcObj.filePath(), dstPath, false)
	if err != nil {
		return nil, fmt.Errorf("couldn't move file: %w", err)
	}

	return f.NewObject(ctx, remote)
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.diskRoot, srcRemote)
	dstPath := f.dirPath(dstRemote)
	//fmt.Printf("Move src: %s (FullPath: %s), dst: %s (FullPath: %s)\n", srcRemote, srcPath, dstRemote, dstPath)

	// Refuse to move to or from the root
	if srcPath == "disk:/" || dstPath == "disk:/" {
		fs.Debugf(src, "DirMove error: Can't move root")
		return errors.New("can't move root directory")
	}
	err := f.mkParentDirs(ctx, dstPath)
	if err != nil {
		return err
	}

	// refuse to overwrite an existing destination directory
	_, err = f.readMetaDataForPath(ctx, dstPath, &api.ResourceInfoRequestOptions{})
	if apiErr, ok := err.(*api.ErrorResponse); ok {
		// does not exist
		if apiErr.ErrorName != "DiskNotFoundError" {
			return err
		}
	} else if err != nil {
		return err
	} else {
		return fs.ErrorDirExists
	}

	err = f.copyOrMove(ctx, "move", srcPath, dstPath, false)
	if err != nil {
		return fmt.Errorf("couldn't move directory: %w", err)
	}
	return nil
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
	var path string
	if unlink {
		path = "/resources/unpublish"
	} else {
		path = "/resources/publish"
	}
	opts := rest.Opts{
		Method:     "PUT",
		Path:       f.opt.Enc.FromStandardPath(path),
		Parameters: url.Values{},
		NoResponse: true,
	}
	opts.Parameters.Set("path", f.opt.Enc.FromStandardPath(f.filePath(remote)))

	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return shouldRetry(ctx, resp, err)
	})

	if apiErr, ok := err.(*api.ErrorResponse); ok {
		// does not exist
		if apiErr.ErrorName == "DiskNotFoundError" {
			return "", fs.ErrorObjectNotFound
		}
	}
	if err != nil {
		if unlink {
			return "", fmt.Errorf("couldn't remove public link: %w", err)
		}
		return "", fmt.Errorf("couldn't create public link: %w", err)
	}

	// re-read the metadata to pick up the newly assigned public URL
	info, err := f.readMetaDataForPath(ctx, f.filePath(remote), &api.ResourceInfoRequestOptions{})
	if err != nil {
		return "", err
	}

	if info.PublicURL == "" {
		return "", errors.New("couldn't create public link - no link path received")
	}
	return info.PublicURL, nil
}

// CleanUp permanently deletes all trashed files/folders
func (f *Fs) CleanUp(ctx context.Context) (err error) {
	var resp *http.Response
	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/trash/resources",
		NoResponse: true,
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return shouldRetry(ctx, resp, err)
	})
	return err
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "/",
	}
	var resp *http.Response
	var info api.DiskInfo
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	usage := &fs.Usage{
		Total: fs.NewUsageValue(info.TotalSpace),
		Used:  fs.NewUsageValue(info.UsedSpace),
		Free:  fs.NewUsageValue(info.TotalSpace - info.UsedSpace),
	}
	return usage, nil
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Returns the full remote path for the object
func (o *Object) filePath() string {
	return o.fs.filePath(o.remote)
}

// setMetaData sets the fs data from a storage.Object
func (o *Object) setMetaData(info *api.ResourceInfoResponse) (err error) {
	o.hasMetaData = true
	o.size = info.Size
	o.md5sum = info.Md5
	o.mimeType = info.MimeType

	var modTimeString string
	modTimeObj, ok := info.CustomProperties["rclone_modified"]
	if ok {
		// read modTime from rclone_modified custom_property of object
		modTimeString, ok = modTimeObj.(string)
	}
	if !ok {
		// read modTime from Modified property of object as a fallback
		modTimeString = info.Modified
	}
	t, err := time.Parse(time.RFC3339Nano, modTimeString)
	if err != nil {
		return fmt.Errorf("failed to parse modtime from %q: %w", modTimeString, err)
	}
	o.modTime = t
	return nil
}

// readMetaData reads and sets the new metadata for a storage.Object
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if o.hasMetaData {
		return nil
	}
	info, err := o.fs.readMetaDataForPath(ctx, o.filePath(), &api.ResourceInfoRequestOptions{})
	if err != nil {
		return err
	}
	if info.ResourceType == "dir" {
		return fs.ErrorIsDir
	} else if info.ResourceType != "file" {
		return fs.ErrorNotAFile
	}
	return o.setMetaData(info)
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
	}
	return o.modTime
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	ctx := context.TODO()
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return 0
	}
	return o.size
}

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5sum, nil
}

// Storable returns whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// setCustomProperty PATCHes the named custom property of the object to value.
func (o *Object) setCustomProperty(ctx context.Context, property string, value string) (err error) {
	var resp *http.Response
	opts := rest.Opts{
		Method:     "PATCH",
		Path:       "/resources",
		Parameters: url.Values{},
		NoResponse: true,
	}
	opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))
	rcm := map[string]any{
		property: value,
	}
	cpr := api.CustomPropertyResponse{CustomProperties: rcm}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, &cpr, nil)
		return shouldRetry(ctx, resp, err)
	})
	return err
}

// SetModTime sets the modification time of the local fs object
//
// Commits the datastore
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	// set custom_property 'rclone_modified' of object to modTime
	err := o.setCustomProperty(ctx, "rclone_modified", modTime.Format(time.RFC3339Nano))
	if err != nil {
		return err
	}
	o.modTime = modTime
	return nil
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// prepare download
	var resp *http.Response
	var dl api.AsyncInfo
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/resources/download",
		Parameters: url.Values{},
	}
	opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}

	// perform the download
	opts = rest.Opts{
		RootURL: dl.HRef,
		Method:  "GET",
		Options: options,
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	// caller is responsible for closing resp.Body
	return resp.Body, err
}

// upload streams in to the object's remote path, requesting an upload
// URL first and then PUTting the data to it.
func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeType string, options ...fs.OpenOption) (err error) {
	// prepare upload
	var resp *http.Response
	var ur api.AsyncInfo
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/resources/upload",
		Parameters: url.Values{},
		Options:    options,
	}
	opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))
	opts.Parameters.Set("overwrite", strconv.FormatBool(overwrite))

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &ur)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err
	}

	// perform the actual upload - no retry as the body can't be rewound
	opts = rest.Opts{
		RootURL:     ur.HRef,
		Method:      "PUT",
		ContentType: mimeType,
		Body:        in,
		NoResponse:  true,
	}
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(ctx, resp, err)
	})
	return err
}

// Update the already existing object
//
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	in1 := readers.NewCountingReader(in)
	modTime := src.ModTime(ctx)
	remote := o.filePath()

	//create full path to file before upload.
	err := o.fs.mkParentDirs(ctx, remote)
	if err != nil {
		return err
	}

	//upload file
	err = o.upload(ctx, in1, true, fs.MimeType(ctx, src), options...)
	if err != nil {
		return err
	}

	//if file uploaded successfully then return metadata
	o.modTime = modTime
	o.md5sum = ""                   // according to unit tests after put the md5 is empty.
	o.size = int64(in1.BytesRead()) // better solution o.readMetaData() ?
	//and set modTime of uploaded file
	err = o.SetModTime(ctx, modTime)
	return err
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.delete(ctx, o.filePath(), o.fs.opt.HardDelete)
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/yandex/yandex_test.go
backend/yandex/yandex_test.go
// Test Yandex filesystem interface
package yandex_test

import (
	"testing"

	"github.com/rclone/rclone/backend/yandex"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestYandex:",
		NilObject:  (*yandex.Object)(nil),
	})
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/yandex/api/types.go
backend/yandex/api/types.go
// Package api provides types used by the Yandex API.
package api

import (
	"fmt"
	"strings"
)

// DiskInfo contains disk metadata
type DiskInfo struct {
	TotalSpace int64 `json:"total_space"`
	UsedSpace  int64 `json:"used_space"`
	TrashSize  int64 `json:"trash_size"`
}

// ResourceInfoRequestOptions struct holds the optional parameters for
// a metadata request.
type ResourceInfoRequestOptions struct {
	SortMode *SortMode
	Limit    uint64
	Offset   uint64
	Fields   []string
}

// ResourceInfoResponse struct is returned by the API for metadata requests.
type ResourceInfoResponse struct {
	PublicKey        string                `json:"public_key"`
	Name             string                `json:"name"`
	Created          string                `json:"created"`
	CustomProperties map[string]any        `json:"custom_properties"`
	Preview          string                `json:"preview"`
	PublicURL        string                `json:"public_url"`
	OriginPath       string                `json:"origin_path"`
	Modified         string                `json:"modified"`
	Path             string                `json:"path"`
	Md5              string                `json:"md5"`
	ResourceType     string                `json:"type"`
	MimeType         string                `json:"mime_type"`
	Size             int64                 `json:"size"`
	Embedded         *ResourceListResponse `json:"_embedded"`
}

// ResourceListResponse struct is the directory listing part of a
// metadata response.
type ResourceListResponse struct {
	Sort      *SortMode              `json:"sort"`
	PublicKey string                 `json:"public_key"`
	Items     []ResourceInfoResponse `json:"items"`
	Path      string                 `json:"path"`
	Limit     *uint64                `json:"limit"`
	Offset    *uint64                `json:"offset"`
	Total     *uint64                `json:"total"`
}

// AsyncInfo struct is returned by the API for various async operations.
type AsyncInfo struct {
	HRef      string `json:"href"`
	Method    string `json:"method"`
	Templated bool   `json:"templated"`
}

// AsyncStatus is returned when requesting the status of an async operations. Possible values in-progress, success, failure
type AsyncStatus struct {
	Status string `json:"status"`
}

// CustomPropertyResponse struct we send and is returned by the API for CustomProperty request.
type CustomPropertyResponse struct {
	CustomProperties map[string]any `json:"custom_properties"`
}

// SortMode struct - sort mode
type SortMode struct {
	mode string
}

// Default - sort mode
func (m *SortMode) Default() *SortMode {
	return &SortMode{
		mode: "",
	}
}

// ByName - sort mode
func (m *SortMode) ByName() *SortMode {
	return &SortMode{
		mode: "name",
	}
}

// ByPath - sort mode
func (m *SortMode) ByPath() *SortMode {
	return &SortMode{
		mode: "path",
	}
}

// ByCreated - sort mode
func (m *SortMode) ByCreated() *SortMode {
	return &SortMode{
		mode: "created",
	}
}

// ByModified - sort mode
func (m *SortMode) ByModified() *SortMode {
	return &SortMode{
		mode: "modified",
	}
}

// BySize - sort mode
func (m *SortMode) BySize() *SortMode {
	return &SortMode{
		mode: "size",
	}
}

// Reverse - sort mode, toggles the leading "-" on the mode string
func (m *SortMode) Reverse() *SortMode {
	if strings.HasPrefix(m.mode, "-") {
		return &SortMode{
			mode: m.mode[1:],
		}
	}
	return &SortMode{
		mode: "-" + m.mode,
	}
}

// String returns the sort mode as a string
func (m *SortMode) String() string {
	return m.mode
}

// UnmarshalJSON sort mode - strips surrounding quotes if present
func (m *SortMode) UnmarshalJSON(value []byte) error {
	if len(value) == 0 {
		m.mode = ""
		return nil
	}
	m.mode = string(value)
	if strings.HasPrefix(m.mode, "\"") && strings.HasSuffix(m.mode, "\"") {
		m.mode = m.mode[1 : len(m.mode)-1]
	}
	return nil
}

// ErrorResponse represents erroneous API response.
// Implements go's built in `error`.
type ErrorResponse struct {
	ErrorName   string `json:"error"`
	Description string `json:"description"`
	Message     string `json:"message"`
	// NOTE(review): empty json tag serialises as "StatusCode" - was
	// `json:"-"` intended here? Confirm before changing.
	StatusCode int `json:""`
}

func (e *ErrorResponse) Error() string {
	return fmt.Sprintf("[%d - %s] %s (%s)", e.StatusCode, e.ErrorName, e.Description, e.Message)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/combine/combine_test.go
backend/combine/combine_test.go
// Test Combine filesystem interface
package combine_test

import (
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	_ "github.com/rclone/rclone/backend/memory"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

var (
	unimplementableFsMethods     = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
	unimplementableObjectMethods = []string{}
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
		t.Skip("Skipping as -remote not set")
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName:                   *fstest.RemoteName,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

// TestLocal runs the integration tests over three local directory upstreams
func TestLocal(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	dirs := MakeTestDirs(t, 3)
	upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=" + dirs[2]
	name := "TestCombineLocal"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":dir1",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "combine"},
			{Name: name, Key: "upstreams", Value: upstreams},
		},
		QuickTestOK:                  true,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

// TestMemory runs the integration tests over three :memory: upstreams
func TestMemory(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	upstreams := "dir1=:memory:dir1 dir2=:memory:dir2 dir3=:memory:dir3"
	name := "TestCombineMemory"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":dir1",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "combine"},
			{Name: name, Key: "upstreams", Value: upstreams},
		},
		QuickTestOK:                  true,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

// TestMixed runs the integration tests over a mix of local and memory upstreams
func TestMixed(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	dirs := MakeTestDirs(t, 2)
	upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=:memory:dir3"
	name := "TestCombineMixed"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":dir1",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "combine"},
			{Name: name, Key: "upstreams", Value: upstreams},
		},
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

// MakeTestDirs makes directories in /tmp for testing
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
	for i := 1; i <= n; i++ {
		dir := t.TempDir()
		dirs = append(dirs, dir)
	}
	return dirs
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/combine/combine.go
backend/combine/combine.go
// Package combine implements a backend to combine multiple remotes in a directory tree package combine /* Have API to add/remove branches in the combine */ import ( "context" "errors" "fmt" "io" "path" "strings" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/walk" "golang.org/x/sync/errgroup" ) // Register with Fs func init() { fsi := &fs.RegInfo{ Name: "combine", Description: "Combine several remotes into one", NewFs: NewFs, MetadataInfo: &fs.MetadataInfo{ Help: `Any metadata supported by the underlying remote is read and written.`, }, Options: []fs.Option{{ Name: "upstreams", Help: `Upstreams for combining These should be in the form dir=remote:path dir2=remote2:path Where before the = is specified the root directory and after is the remote to put there. 
Embedded spaces can be added using quotes "dir=remote:path with space" "dir2=remote2:path with space" `, Required: true, Default: fs.SpaceSepList(nil), }}, } fs.Register(fsi) } // Options defines the configuration for this backend type Options struct { Upstreams fs.SpaceSepList `config:"upstreams"` } // Fs represents a combine of upstreams type Fs struct { name string // name of this remote features *fs.Features // optional features opt Options // options for this Fs root string // the path we are working on hashSet hash.Set // common hashes when time.Time // directory times upstreams map[string]*upstream // map of upstreams } // adjustment stores the info to add a prefix to a path or chop characters off type adjustment struct { root string rootSlash string mountpoint string mountpointSlash string } // newAdjustment makes a new path adjustment adjusting between mountpoint and root // // mountpoint is the point the upstream is mounted and root is the combine root func newAdjustment(root, mountpoint string) (a adjustment) { return adjustment{ root: root, rootSlash: root + "/", mountpoint: mountpoint, mountpointSlash: mountpoint + "/", } } var errNotUnderRoot = errors.New("file not under root") // do makes the adjustment on s, mapping an upstream path into a combine path func (a *adjustment) do(s string) (string, error) { absPath := join(a.mountpoint, s) if a.root == "" { return absPath, nil } if absPath == a.root { return "", nil } if !strings.HasPrefix(absPath, a.rootSlash) { return "", errNotUnderRoot } return absPath[len(a.rootSlash):], nil } // undo makes the adjustment on s, mapping a combine path into an upstream path func (a *adjustment) undo(s string) (string, error) { absPath := join(a.root, s) if absPath == a.mountpoint { return "", nil } if !strings.HasPrefix(absPath, a.mountpointSlash) { return "", errNotUnderRoot } return absPath[len(a.mountpointSlash):], nil } // upstream represents an upstream Fs type upstream struct { f fs.Fs parent *Fs dir string // 
directory the upstream is mounted pathAdjustment adjustment // how to fiddle with the path } // Create an upstream from the directory it is mounted on and the remote func (f *Fs) newUpstream(ctx context.Context, dir, remote string) (*upstream, error) { uFs, err := cache.Get(ctx, remote) if err == fs.ErrorIsFile { return nil, fmt.Errorf("can't combine files yet, only directories %q: %w", remote, err) } if err != nil { return nil, fmt.Errorf("failed to create upstream %q: %w", remote, err) } u := &upstream{ f: uFs, parent: f, dir: dir, pathAdjustment: newAdjustment(f.root, dir), } cache.PinUntilFinalized(u.f, u) return u, nil } // NewFs constructs an Fs from the path. // // The returned Fs is the actual Fs, referenced by remote in the config func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) { // defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err) // Parse config into Options struct opt := new(Options) err = configstruct.Set(m, opt) if err != nil { return nil, err } // Backward compatible to old config if len(opt.Upstreams) == 0 { return nil, errors.New("combine can't point to an empty upstream - check the value of the upstreams setting") } for _, u := range opt.Upstreams { if strings.HasPrefix(u, name+":") { return nil, errors.New("can't point combine remote at itself - check the value of the upstreams setting") } } isDir := false for strings.HasSuffix(root, "/") { root = root[:len(root)-1] isDir = true } f := &Fs{ name: name, root: root, opt: *opt, upstreams: make(map[string]*upstream, len(opt.Upstreams)), when: time.Now(), } g, gCtx := errgroup.WithContext(ctx) var mu sync.Mutex for _, upstream := range opt.Upstreams { g.Go(func() (err error) { equal := strings.IndexRune(upstream, '=') if equal < 0 { return fmt.Errorf("no \"=\" in upstream definition %q", upstream) } dir, remote := upstream[:equal], upstream[equal+1:] if dir == "" { return fmt.Errorf("empty dir in upstream 
definition %q", upstream) } if remote == "" { return fmt.Errorf("empty remote in upstream definition %q", upstream) } if strings.ContainsRune(dir, '/') { return fmt.Errorf("dirs can't contain / (yet): %q", dir) } u, err := f.newUpstream(gCtx, dir, remote) if err != nil { return err } mu.Lock() if _, found := f.upstreams[dir]; found { err = fmt.Errorf("duplicate directory name %q", dir) } else { f.upstreams[dir] = u } mu.Unlock() return err }) } err = g.Wait() if err != nil { return nil, err } // check features var features = (&fs.Features{ CaseInsensitive: true, DuplicateFiles: false, ReadMimeType: true, WriteMimeType: true, CanHaveEmptyDirectories: true, BucketBased: true, SetTier: true, GetTier: true, ReadMetadata: true, WriteMetadata: true, UserMetadata: true, ReadDirMetadata: true, WriteDirMetadata: true, WriteDirSetModTime: true, UserDirMetadata: true, DirModTimeUpdatesOnWrite: true, PartialUploads: true, }).Fill(ctx, f) canMove, slowHash := true, false for _, u := range f.upstreams { features = features.Mask(ctx, u.f) // Mask all upstream fs if !operations.CanServerSideMove(u.f) { canMove = false } slowHash = slowHash || u.f.Features().SlowHash } // We can move if all remotes support Move or Copy if canMove { features.Move = f.Move } // If any of upstreams are SlowHash, propagate it features.SlowHash = slowHash // Enable ListR when upstreams either support ListR or is local // But not when all upstreams are local if features.ListR == nil { for _, u := range f.upstreams { if u.f.Features().ListR != nil { features.ListR = f.ListR } else if !u.f.Features().IsLocal { features.ListR = nil break } } } // Enable ListP always features.ListP = f.ListP // Enable Purge when any upstreams support it if features.Purge == nil { for _, u := range f.upstreams { if u.f.Features().Purge != nil { features.Purge = f.Purge break } } } // Enable Shutdown when any upstreams support it if features.Shutdown == nil { for _, u := range f.upstreams { if u.f.Features().Shutdown != nil { 
features.Shutdown = f.Shutdown break } } } // Enable DirCacheFlush when any upstreams support it if features.DirCacheFlush == nil { for _, u := range f.upstreams { if u.f.Features().DirCacheFlush != nil { features.DirCacheFlush = f.DirCacheFlush break } } } // Enable CleanUp when any upstreams support it if features.CleanUp == nil { for _, u := range f.upstreams { if u.f.Features().CleanUp != nil { features.CleanUp = f.CleanUp break } } } // Enable ChangeNotify when any upstreams support it if features.ChangeNotify == nil { for _, u := range f.upstreams { if u.f.Features().ChangeNotify != nil { features.ChangeNotify = f.ChangeNotify break } } } // show that we wrap other backends features.Overlay = true f.features = features // Get common intersection of hashes var hashSet hash.Set var first = true for _, u := range f.upstreams { if first { hashSet = u.f.Hashes() first = false } else { hashSet = hashSet.Overlap(u.f.Hashes()) } } f.hashSet = hashSet // Check to see if the root is actually a file if f.root != "" && !isDir { _, err := f.NewObject(ctx, "") if err != nil { if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile || err == fs.ErrorIsDir { // File doesn't exist or is a directory so return old f return f, nil } return nil, err } // Check to see if the root path is actually an existing file f.root = path.Dir(f.root) if f.root == "." { f.root = "" } // Adjust path adjustment to remove leaf for _, u := range f.upstreams { u.pathAdjustment = newAdjustment(f.root, u.dir) } return f, fs.ErrorIsFile } return f, nil } // Run a function over all the upstreams in parallel func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error { g, gCtx := errgroup.WithContext(ctx) for _, u := range f.upstreams { g.Go(func() (err error) { return fn(gCtx, u) }) } return g.Wait() } // join the elements together but unlike path.Join return empty string func join(elem ...string) string { result := path.Join(elem...) if result == "." 
{ return "" } if len(result) > 0 && result[0] == '/' { result = result[1:] } return result } // find the upstream for the remote passed in, returning the upstream and the adjusted path func (f *Fs) findUpstream(remote string) (u *upstream, uRemote string, err error) { // defer log.Trace(remote, "")("f=%v, uRemote=%q, err=%v", &u, &uRemote, &err) for _, u := range f.upstreams { uRemote, err = u.pathAdjustment.undo(remote) if err == nil { return u, uRemote, nil } } return nil, "", fmt.Errorf("combine for remote %q: %w", remote, fs.ErrorDirNotFound) } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("combine root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // Rmdir removes the root directory of the Fs object func (f *Fs) Rmdir(ctx context.Context, dir string) error { // The root always exists if f.root == "" && dir == "" { return nil } u, uRemote, err := f.findUpstream(dir) if err != nil { return err } return u.f.Rmdir(ctx, uRemote) } // Hashes returns hash.HashNone to indicate remote hashing is unavailable func (f *Fs) Hashes() hash.Set { return f.hashSet } // Mkdir makes the root directory of the Fs object func (f *Fs) Mkdir(ctx context.Context, dir string) error { // The root always exists if f.root == "" && dir == "" { return nil } u, uRemote, err := f.findUpstream(dir) if err != nil { return err } return u.f.Mkdir(ctx, uRemote) } // MkdirMetadata makes the root directory of the Fs object func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) { u, uRemote, err := f.findUpstream(dir) if err != nil { return nil, err } do := u.f.Features().MkdirMetadata if do == nil { return nil, fs.ErrorNotImplemented } newDir, err := 
do(ctx, uRemote, metadata) if err != nil { return nil, err } entries := fs.DirEntries{newDir} entries, err = u.wrapEntries(ctx, entries) if err != nil { return nil, err } newDir, ok := entries[0].(fs.Directory) if !ok { return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0]) } return newDir, nil } // purge the upstream or fallback to a slow way func (u *upstream) purge(ctx context.Context, dir string) (err error) { if do := u.f.Features().Purge; do != nil { err = do(ctx, dir) } else { err = operations.Purge(ctx, u.f, dir) } return err } // Purge all files in the directory // // Implement this if you have a way of deleting all the files // quicker than just running Remove() on the result of List() // // Return an error if it doesn't exist func (f *Fs) Purge(ctx context.Context, dir string) error { if f.root == "" && dir == "" { return f.multithread(ctx, func(ctx context.Context, u *upstream) error { return u.purge(ctx, "") }) } u, uRemote, err := f.findUpstream(dir) if err != nil { return err } return u.purge(ctx, uRemote) } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } dstU, dstRemote, err := f.findUpstream(remote) if err != nil { return nil, err } do := dstU.f.Features().Copy if do == nil { return nil, fs.ErrorCantCopy } o, err := do(ctx, srcObj.Object, dstRemote) if err != nil { return nil, err } return dstU.newObject(o), nil } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. 
// // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } dstU, dstRemote, err := f.findUpstream(remote) if err != nil { return nil, err } do := dstU.f.Features().Move useCopy := false if do == nil { do = dstU.f.Features().Copy if do == nil { return nil, fs.ErrorCantMove } useCopy = true } o, err := do(ctx, srcObj.Object, dstRemote) if err != nil { return nil, err } // If did Copy then remove the source object if useCopy { err = srcObj.Remove(ctx) if err != nil { return nil, err } } return dstU.newObject(o), nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) { // defer log.Trace(f, "src=%v, srcRemote=%q, dstRemote=%q", src, srcRemote, dstRemote)("err=%v", &err) srcFs, ok := src.(*Fs) if !ok { fs.Debugf(src, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } dstU, dstURemote, err := f.findUpstream(dstRemote) if err != nil { return err } srcU, srcURemote, err := srcFs.findUpstream(srcRemote) if err != nil { return err } do := dstU.f.Features().DirMove if do == nil { return fs.ErrorCantDirMove } fs.Logf(dstU.f, "srcU.f=%v, srcURemote=%q, dstURemote=%q", srcU.f, srcURemote, dstURemote) return do(ctx, srcU.f, srcURemote, dstURemote) } // ChangeNotify calls the passed function with a path // that has had changes. If the implementation // uses polling, it should adhere to the given interval. 
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
	// One polling-interval channel per upstream that supports ChangeNotify.
	var uChans []chan time.Duration
	for _, u := range f.upstreams {
		if do := u.f.Features().ChangeNotify; do != nil {
			ch := make(chan time.Duration)
			uChans = append(uChans, ch)
			// Translate upstream-relative paths into the combined
			// namespace before notifying the caller; paths that do not
			// map (shouldn't happen) are logged and dropped.
			wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
				newPath, err := u.pathAdjustment.do(path)
				if err != nil {
					fs.Logf(f, "ChangeNotify: unable to process %q: %s", path, err)
					return
				}
				fs.Debugf(f, "ChangeNotify: path %q entryType %d", newPath, entryType)
				notifyFunc(newPath, entryType)
			}
			do(ctx, wrappedNotifyFunc, ch)
		}
	}
	// Fan each interval update out to every upstream; when the caller
	// closes ch, close all upstream channels so they stop polling.
	go func() {
		for i := range ch {
			for _, c := range uChans {
				c <- i
			}
		}
		for _, c := range uChans {
			close(c)
		}
	}()
}

// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
	ctx := context.Background()
	// Best effort: the per-upstream closure never returns an error.
	_ = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
		if do := u.f.Features().DirCacheFlush; do != nil {
			do()
		}
		return nil
	})
}

// put uploads src to the upstream that owns src.Remote(), using
// PutStream when stream is true (indeterminate size) and Put otherwise.
// The returned object is wrapped so its Remote() is in the combined
// namespace.
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
	srcPath := src.Remote()
	u, uRemote, err := f.findUpstream(srcPath)
	if err != nil {
		return nil, err
	}
	// Present the upstream with a source whose remote is upstream-relative.
	uSrc := fs.NewOverrideRemote(src, uRemote)
	var o fs.Object
	if stream {
		o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
	} else {
		o, err = u.f.Put(ctx, in, uSrc, options...)
	}
	if err != nil {
		return nil, err
	}
	return u.newObject(o), nil
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Update an existing object in place, otherwise create a new one.
	o, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return o, o.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		return f.put(ctx, in, src, false, options...)
	default:
		return nil, err
	}
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Same logic as Put but routed through the streaming upload path.
	o, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return o, o.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		return f.put(ctx, in, src, true, options...)
	default:
		return nil, err
	}
}

// About gets quota information from the Fs
//
// Usage is summed across upstreams; any field missing from one
// upstream's answer makes the combined field unknown (nil).
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	usage := &fs.Usage{
		Total:   new(int64),
		Used:    new(int64),
		Trashed: new(int64),
		Other:   new(int64),
		Free:    new(int64),
		Objects: new(int64),
	}
	for _, u := range f.upstreams {
		doAbout := u.f.Features().About
		if doAbout == nil {
			continue
		}
		usg, err := doAbout(ctx)
		if errors.Is(err, fs.ErrorDirNotFound) {
			// Upstream directory missing - skip rather than fail About.
			continue
		}
		if err != nil {
			return nil, err
		}
		if usg.Total != nil && usage.Total != nil {
			*usage.Total += *usg.Total
		} else {
			usage.Total = nil
		}
		if usg.Used != nil && usage.Used != nil {
			*usage.Used += *usg.Used
		} else {
			usage.Used = nil
		}
		if usg.Trashed != nil && usage.Trashed != nil {
			*usage.Trashed += *usg.Trashed
		} else {
			usage.Trashed = nil
		}
		if usg.Other != nil && usage.Other != nil {
			*usage.Other += *usg.Other
		} else {
			usage.Other = nil
		}
		if usg.Free != nil && usage.Free != nil {
			*usage.Free += *usg.Free
		} else {
			usage.Free = nil
		}
		if usg.Objects != nil && usage.Objects != nil {
			*usage.Objects += *usg.Objects
		} else {
			usage.Objects = nil
		}
	}
	return usage, nil
}

// Wraps entries for this upstream
//
// Rewrites each entry in place so its remote path is in the combined
// namespace; the input slice is mutated and returned.
func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.DirEntries, error) {
	for i, entry := range entries {
		switch x := entry.(type) {
		case fs.Object:
			entries[i] = u.newObject(x)
		case fs.Directory:
			newPath, err := u.pathAdjustment.do(x.Remote())
			if err != nil {
				return nil, err
			}
			newDir := fs.NewDirWrapper(newPath, x)
			entries[i] = newDir
		default:
			return nil, fmt.Errorf("unknown entry type %T", entry)
		}
	}
	return entries, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// Delegate to ListP via the common paging adaptor.
	return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
	// Listing the root of the combine: synthesise one directory entry
	// per configured upstream.
	if f.root == "" && dir == "" {
		rootEntries := make(fs.DirEntries, 0, len(f.upstreams))
		for mount := range f.upstreams {
			rootEntries = append(rootEntries, fs.NewLimitedDirWrapper(mount, fs.NewDir(mount, f.when)))
		}
		return callback(rootEntries)
	}
	u, uDir, err := f.findUpstream(dir)
	if err != nil {
		return err
	}
	// Re-prefix every entry coming back from the upstream before it
	// reaches the caller.
	adjusted := func(entries fs.DirEntries) error {
		wrapped, wrapErr := u.wrapEntries(ctx, entries)
		if wrapErr != nil {
			return wrapErr
		}
		return callback(wrapped)
	}
	if do := u.f.Features().ListP; do != nil {
		return do(ctx, uDir, adjusted)
	}
	// Upstream has no paged listing - fall back to a one-shot List.
	entries, err := u.f.List(ctx, uDir)
	if err != nil {
		return err
	}
	return adjusted(entries)
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { // defer log.Trace(f, "dir=%q, callback=%v", dir, callback)("err=%v", &err) if f.root == "" && dir == "" { rootEntries, err := f.List(ctx, "") if err != nil { return err } err = callback(rootEntries) if err != nil { return err } var mu sync.Mutex syncCallback := func(entries fs.DirEntries) error { mu.Lock() defer mu.Unlock() return callback(entries) } err = f.multithread(ctx, func(ctx context.Context, u *upstream) error { return f.ListR(ctx, u.dir, syncCallback) }) if err != nil { return err } return nil } u, uRemote, err := f.findUpstream(dir) if err != nil { return err } wrapCallback := func(entries fs.DirEntries) error { entries, err := u.wrapEntries(ctx, entries) if err != nil { return err } return callback(entries) } if do := u.f.Features().ListR; do != nil { err = do(ctx, uRemote, wrapCallback) } else { err = walk.ListR(ctx, u.f, uRemote, true, -1, walk.ListAll, wrapCallback) } if err == fs.ErrorDirNotFound { err = nil } return err } // NewObject creates a new remote combine file object func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { u, uRemote, err := f.findUpstream(remote) if err != nil { return nil, err } if uRemote == "" || strings.HasSuffix(uRemote, "/") { return nil, fs.ErrorIsDir } o, err := u.f.NewObject(ctx, uRemote) if err != nil { return nil, err } return u.newObject(o), nil } // Precision is the greatest Precision of all upstreams func (f *Fs) Precision() time.Duration { var greatestPrecision time.Duration for _, u := range f.upstreams { uPrecision := u.f.Precision() if uPrecision > greatestPrecision { greatestPrecision = uPrecision } } return greatestPrecision } // Shutdown the backend, closing any background tasks and any // cached connections. 
func (f *Fs) Shutdown(ctx context.Context) error { return f.multithread(ctx, func(ctx context.Context, u *upstream) error { if do := u.f.Features().Shutdown; do != nil { return do(ctx) } return nil }) } // PublicLink generates a public link to the remote path (usually readable by anyone) func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) { u, uRemote, err := f.findUpstream(remote) if err != nil { return "", err } do := u.f.Features().PublicLink if do == nil { return "", fs.ErrorNotImplemented } return do(ctx, uRemote, expire, unlink) } // PutUnchecked in to the remote path with the modTime given of the given size // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error // // May create duplicates or return errors if src already // exists. func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { srcPath := src.Remote() u, uRemote, err := f.findUpstream(srcPath) if err != nil { return nil, err } do := u.f.Features().PutUnchecked if do == nil { return nil, fs.ErrorNotImplemented } uSrc := fs.NewOverrideRemote(src, uRemote) return do(ctx, in, uSrc, options...) } // MergeDirs merges the contents of all the directories passed // in into the first one and rmdirs the other directories. 
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { if len(dirs) == 0 { return nil } var ( u *upstream uDirs []fs.Directory ) for _, dir := range dirs { uNew, uDir, err := f.findUpstream(dir.Remote()) if err != nil { return err } if u == nil { u = uNew } else if u != uNew { return fmt.Errorf("can't merge directories from different upstreams") } uDirs = append(uDirs, fs.NewOverrideDirectory(dir, uDir)) } do := u.f.Features().MergeDirs if do == nil { return fs.ErrorNotImplemented } return do(ctx, uDirs) } // DirSetModTime sets the directory modtime for dir func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error { u, uDir, err := f.findUpstream(dir) if err != nil { return err } if uDir == "" { fs.Debugf(dir, "Can't set modtime on upstream root. skipping.") return nil } if do := u.f.Features().DirSetModTime; do != nil { return do(ctx, uDir, modTime) } return fs.ErrorNotImplemented } // CleanUp the trash in the Fs // // Implement this if you have a way of emptying the trash or // otherwise cleaning up old versions of files. func (f *Fs) CleanUp(ctx context.Context) error { return f.multithread(ctx, func(ctx context.Context, u *upstream) error { if do := u.f.Features().CleanUp; do != nil { return do(ctx) } return nil }) } // OpenWriterAt opens with a handle for random access writes // // Pass in the remote desired and the size if known. 
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
	u, uRemote, err := f.findUpstream(remote)
	if err != nil {
		return nil, err
	}
	do := u.f.Features().OpenWriterAt
	if do == nil {
		return nil, fs.ErrorNotImplemented
	}
	return do(ctx, uRemote, size)
}

// Object describes a wrapped Object
//
// This is a wrapped Object which knows its path prefix
type Object struct {
	fs.Object
	u *upstream
}

// newObject wraps an upstream object so that its Remote() is reported
// in the combined namespace rather than upstream-relative.
func (u *upstream) newObject(o fs.Object) *Object {
	return &Object{
		Object: o,
		u:      u,
	}
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.u.parent
}

// String returns the remote path
func (o *Object) String() string {
	return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
	newPath, err := o.u.pathAdjustment.do(o.Object.String())
	if err != nil {
		// Shouldn't happen for a well-formed object; log and surface the
		// problem via the returned string rather than panicking.
		fs.Errorf(o.Object, "Bad object: %v", err)
		return err.Error()
	}
	return newPath
}

// MimeType returns the content type of the Object if known
func (o *Object) MimeType(ctx context.Context) (mimeType string) {
	if do, ok := o.Object.(fs.MimeTyper); ok {
		mimeType = do.MimeType(ctx)
	}
	return mimeType
}

// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
	return o.Object
}

// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
	do, ok := o.Object.(fs.GetTierer)
	if !ok {
		return ""
	}
	return do.GetTier()
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	do, ok := o.Object.(fs.IDer)
	if !ok {
		return ""
	}
	return do.ID()
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
	do, ok := o.Object.(fs.Metadataer)
	if !ok {
		return nil, nil
	}
	return do.Metadata(ctx)
}

// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
	do, ok := o.Object.(fs.SetMetadataer)
	if !ok {
		return fs.ErrorNotImplemented
	}
	return do.SetMetadata(ctx, metadata)
}

// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
	do, ok := o.Object.(fs.SetTierer)
	if !ok {
		return errors.New("underlying remote does not support SetTier")
	}
	return do.SetTier(tier)
}

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.ListRer         = (*Fs)(nil)
	_ fs.Shutdowner      = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.DirSetModTimer  = (*Fs)(nil)
	_ fs.MkdirMetadataer = (*Fs)(nil)
	_ fs.CleanUpper      = (*Fs)(nil)
	_ fs.OpenWriterAter  = (*Fs)(nil)
	_ fs.FullObject      = (*Object)(nil)
)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/combine/combine_internal_test.go
backend/combine/combine_internal_test.go
package combine

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestAdjustmentDo checks adjustment.do, which maps an upstream-relative
// path into the combined-Fs namespace, over several root/mountpoint pairs.
func TestAdjustmentDo(t *testing.T) {
	cases := []struct {
		root       string
		mountpoint string
		in         string
		want       string
		wantErr    error
	}{
		{root: "", mountpoint: "mountpoint", in: "path/to/file.txt", want: "mountpoint/path/to/file.txt"},
		{root: "mountpoint", mountpoint: "mountpoint", in: "path/to/file.txt", want: "path/to/file.txt"},
		{root: "mountpoint/path", mountpoint: "mountpoint", in: "path/to/file.txt", want: "to/file.txt"},
		{root: "mountpoint/path", mountpoint: "mountpoint", in: "wrongpath/to/file.txt", want: "", wantErr: errNotUnderRoot},
	}
	for _, tc := range cases {
		desc := fmt.Sprintf("%+v", tc)
		adj := newAdjustment(tc.root, tc.mountpoint)
		got, gotErr := adj.do(tc.in)
		assert.Equal(t, tc.wantErr, gotErr)
		assert.Equal(t, tc.want, got, desc)
	}
}

// TestAdjustmentUndo checks adjustment.undo, the inverse mapping from the
// combined namespace back to an upstream-relative path.
func TestAdjustmentUndo(t *testing.T) {
	cases := []struct {
		root       string
		mountpoint string
		in         string
		want       string
		wantErr    error
	}{
		{root: "", mountpoint: "mountpoint", in: "mountpoint/path/to/file.txt", want: "path/to/file.txt"},
		{root: "mountpoint", mountpoint: "mountpoint", in: "path/to/file.txt", want: "path/to/file.txt"},
		{root: "mountpoint/path", mountpoint: "mountpoint", in: "to/file.txt", want: "path/to/file.txt"},
		{root: "wrongmountpoint/path", mountpoint: "mountpoint", in: "to/file.txt", want: "", wantErr: errNotUnderRoot},
	}
	for _, tc := range cases {
		desc := fmt.Sprintf("%+v", tc)
		adj := newAdjustment(tc.root, tc.mountpoint)
		got, gotErr := adj.undo(tc.in)
		assert.Equal(t, tc.wantErr, gotErr)
		assert.Equal(t, tc.want, got, desc)
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sftp/sftp_test.go
backend/sftp/sftp_test.go
// Test Sftp filesystem interface //go:build !plan9 package sftp_test import ( "testing" "github.com/rclone/rclone/backend/sftp" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestSFTPOpenssh:", NilObject: (*sftp.Object)(nil), }) } func TestIntegration2(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("skipping as -remote is set") } fstests.Run(t, &fstests.Opt{ RemoteName: "TestSFTPRclone:", NilObject: (*sftp.Object)(nil), }) } func TestIntegration3(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("skipping as -remote is set") } fstests.Run(t, &fstests.Opt{ RemoteName: "TestSFTPRcloneSSH:", NilObject: (*sftp.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sftp/ssh_external_test.go
backend/sftp/ssh_external_test.go
//go:build !plan9

package sftp

import (
	"testing"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/stretchr/testify/assert"
)

// TestSSHExternalWaitMultipleCalls verifies that calling Wait() multiple times
// doesn't cause zombie processes
func TestSSHExternalWaitMultipleCalls(t *testing.T) {
	// Create a minimal Fs object for testing
	opt := &Options{
		SSH: fs.SpaceSepList{"echo", "test"},
	}
	f := &Fs{
		opt: *opt,
	}

	// Create a new SSH session
	session := f.newSSHSessionExternal()

	// Start a simple command that exits quickly
	err := session.Start("exit 0")
	assert.NoError(t, err)

	// Give the command time to complete
	time.Sleep(100 * time.Millisecond)

	// Call Wait() multiple times - this should not cause issues
	err1 := session.Wait()
	err2 := session.Wait()
	err3 := session.Wait()

	// All calls should return the same result (no error in this case)
	assert.NoError(t, err1)
	assert.NoError(t, err2)
	assert.NoError(t, err3)

	// Verify the process has exited
	assert.True(t, session.exited())
}

// TestSSHExternalCloseMultipleCalls verifies that calling Close() multiple times
// followed by Wait() calls doesn't cause zombie processes
func TestSSHExternalCloseMultipleCalls(t *testing.T) {
	// Create a minimal Fs object for testing
	opt := &Options{
		SSH: fs.SpaceSepList{"sleep", "10"},
	}
	f := &Fs{
		opt: *opt,
	}

	// Create a new SSH session
	session := f.newSSHSessionExternal()

	// Start a long-running command
	err := session.Start("sleep 10")
	if err != nil {
		t.Skip("Cannot start sleep command:", err)
	}

	// Close should cancel and wait for the process; its error is
	// deliberately discarded - it could be nil or an error depending on
	// how the process was killed.
	_ = session.Close()

	// Additional Wait() calls should return the same error
	err2 := session.Wait()
	err3 := session.Wait()

	// All should complete without panicking;
	// err2 and err3 should be the same
	assert.Equal(t, err2, err3, "Subsequent Wait() calls should return same result")

	// Verify the process has exited
	assert.True(t, session.exited())
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sftp/ssh_internal.go
backend/sftp/ssh_internal.go
//go:build !plan9 package sftp import ( "context" "io" "net" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/lib/proxy" "golang.org/x/crypto/ssh" ) // Internal ssh connections with "golang.org/x/crypto/ssh" type sshClientInternal struct { srv *ssh.Client } // newSSHClientInternal starts a client connection to the given SSH server. It is a // convenience function that connects to the given network address, // initiates the SSH handshake, and then sets up a Client. func (f *Fs) newSSHClientInternal(ctx context.Context, network, addr string, sshConfig *ssh.ClientConfig) (sshClient, error) { baseDialer := fshttp.NewDialer(ctx) var ( conn net.Conn err error ) if f.opt.SocksProxy != "" { conn, err = proxy.SOCKS5Dial(network, addr, f.opt.SocksProxy, baseDialer) } else if f.proxyURL != nil { conn, err = proxy.HTTPConnectDial(network, addr, f.proxyURL, baseDialer) } else { conn, err = baseDialer.Dial(network, addr) } if err != nil { return nil, err } c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshConfig) if err != nil { return nil, err } fs.Debugf(f, "New connection %s->%s to %q", c.LocalAddr(), c.RemoteAddr(), c.ServerVersion()) srv := ssh.NewClient(c, chans, reqs) return sshClientInternal{srv}, nil } // Wait for connection to close func (s sshClientInternal) Wait() error { return s.srv.Conn.Wait() } // Send a keepalive over the ssh connection func (s sshClientInternal) SendKeepAlive() { _, _, err := s.srv.SendRequest("keepalive@openssh.com", true, nil) if err != nil { fs.Debugf(nil, "Failed to send keep alive: %v", err) } } // Close the connection func (s sshClientInternal) Close() error { return s.srv.Close() } // CanReuse indicates if this client can be reused func (s sshClientInternal) CanReuse() bool { return true } // Check interfaces var _ sshClient = sshClientInternal{} // Thin wrapper for *ssh.Session to implement sshSession interface type sshSessionInternal struct { *ssh.Session } // Set the stdout func (s 
sshSessionInternal) SetStdout(wr io.Writer) { s.Session.Stdout = wr } // Set the stderr func (s sshSessionInternal) SetStderr(wr io.Writer) { s.Session.Stderr = wr } // NewSession makes an sshSession from an sshClient func (s sshClientInternal) NewSession() (sshSession, error) { session, err := s.srv.NewSession() if err != nil { return nil, err } return sshSessionInternal{Session: session}, nil } // Check interfaces var _ sshSession = sshSessionInternal{}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sftp/sftp_unsupported.go
backend/sftp/sftp_unsupported.go
// Build stub for sftp on unsupported platforms (plan9), to stop go
// complaining about "no buildable Go source files" when the real
// implementation is excluded by its build constraint.

//go:build plan9

// Package sftp provides a filesystem interface using github.com/pkg/sftp
package sftp
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sftp/sftp.go
backend/sftp/sftp.go
//go:build !plan9 // Package sftp provides a filesystem interface using github.com/pkg/sftp package sftp import ( "bytes" "context" "errors" "fmt" "io" iofs "io/fs" "net/url" "os" "path" "regexp" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/pkg/sftp" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/readers" sshagent "github.com/xanzy/ssh-agent" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/knownhosts" ) const ( defaultShellType = "unix" shellTypeNotSupported = "none" hashCommandNotSupported = "none" minSleep = 100 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential keepAliveInterval = time.Minute // send keepalives every this long while running commands ) var ( currentUser = env.CurrentUser() posixWinAbsPathRegex = regexp.MustCompile(`^/[a-zA-Z]\:($|/)`) // E.g. "/C:" or anything starting with "/C:/" unixShellEscapeRegex = regexp.MustCompile("[^A-Za-z0-9_.,:/\\@\u0080-\uFFFFFFFF\n-]") ) func init() { fsi := &fs.RegInfo{ Name: "sftp", Description: "SSH/SFTP", NewFs: NewFs, Options: []fs.Option{{ Name: "host", Help: "SSH host to connect to.\n\nE.g. \"example.com\".", Required: true, Sensitive: true, }, { Name: "user", Help: "SSH username.", Default: currentUser, Sensitive: true, }, { Name: "port", Help: "SSH port number.", Default: 22, }, { Name: "pass", Help: "SSH password, leave blank to use ssh-agent.", IsPassword: true, }, { Name: "key_pem", Help: `Raw PEM-encoded private key. 
Note that this should be on a single line with line endings replaced with '\n', eg key_pem = -----BEGIN RSA PRIVATE KEY-----\nMaMbaIXtE\n0gAMbMbaSsd\nMbaass\n-----END RSA PRIVATE KEY----- This will generate the single line correctly: awk '{printf "%s\\n", $0}' < ~/.ssh/id_rsa If specified, it will override the key_file parameter.`, Sensitive: true, }, { Name: "key_file", Help: "Path to PEM-encoded private key file.\n\nLeave blank or set key-use-agent to use ssh-agent." + env.ShellExpandHelp, }, { Name: "key_file_pass", Help: `The passphrase to decrypt the PEM-encoded private key file. Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys in the new OpenSSH format can't be used.`, IsPassword: true, Sensitive: true, }, { Name: "pubkey", Help: `SSH public certificate for public certificate based authentication. Set this if you have a signed certificate you want to use for authentication. If specified will override pubkey_file.`, }, { Name: "pubkey_file", Help: `Optional path to public key file. Set this if you have a signed certificate you want to use for authentication.` + env.ShellExpandHelp, }, { Name: "known_hosts_file", Help: `Optional path to known_hosts file. Set this value to enable server host key validation.` + env.ShellExpandHelp, Advanced: true, Examples: []fs.OptionExample{{ Value: "~/.ssh/known_hosts", Help: "Use OpenSSH's known_hosts file.", }}, }, { Name: "key_use_agent", Help: `When set forces the usage of the ssh-agent. When key-file is also set, the ".pub" file of the specified key-file is read and only the associated key is requested from the ssh-agent. This allows to avoid ` + "`Too many authentication failures for *username*`" + ` errors when the ssh-agent contains many keys.`, Default: false, }, { Name: "use_insecure_cipher", Help: `Enable the use of insecure ciphers and key exchange methods. 
This enables the use of the following insecure ciphers and key exchange methods: - aes128-cbc - aes192-cbc - aes256-cbc - 3des-cbc - diffie-hellman-group-exchange-sha256 - diffie-hellman-group-exchange-sha1 Those algorithms are insecure and may allow plaintext data to be recovered by an attacker. This must be false if you use either ciphers or key_exchange advanced options. `, Default: false, Examples: []fs.OptionExample{ { Value: "false", Help: "Use default Cipher list.", }, { Value: "true", Help: "Enables the use of the aes128-cbc cipher and diffie-hellman-group-exchange-sha256, diffie-hellman-group-exchange-sha1 key exchange.", }, }, }, { Name: "disable_hashcheck", Default: false, Help: "Disable the execution of SSH commands to determine if remote file hashing is available.\n\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.", }, { Name: "ask_password", Default: false, Help: `Allow asking for SFTP password when needed. If this is set and no password is supplied then rclone will: - ask for a password - not contact the ssh agent `, Advanced: true, }, { Name: "path_override", Default: "", Help: `Override path used by SSH shell commands. This allows checksum calculation when SFTP and SSH paths are different. This issue affects among others Synology NAS boxes. E.g. if shared folders can be found in directories representing volumes: rclone sync /home/local/directory remote:/directory --sftp-path-override /volume2/directory E.g. if home directory can be found in a shared folder called "home": rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory To specify only the path to the SFTP remote's root, and allow rclone to add any relative subpaths automatically (including unwrapping/decrypting remotes as necessary), add the '@' character to the beginning of the path. E.g. 
the first example above could be rewritten as: rclone sync /home/local/directory remote:/directory --sftp-path-override @/volume2 Note that when using this method with Synology "home" folders, the full "/homes/USER" path should be specified instead of "/home". E.g. the second example above should be rewritten as: rclone sync /home/local/directory remote:/homes/USER/directory --sftp-path-override @/volume1`, Advanced: true, }, { Name: "set_modtime", Default: true, Help: "Set the modified time on the remote if set.", Advanced: true, }, { Name: "shell_type", Default: "", Help: "The type of SSH shell on remote server, if any.\n\nLeave blank for autodetect.", Advanced: true, Examples: []fs.OptionExample{ { Value: shellTypeNotSupported, Help: "No shell access", }, { Value: "unix", Help: "Unix shell", }, { Value: "powershell", Help: "PowerShell", }, { Value: "cmd", Help: "Windows Command Prompt", }, }, }, { Name: "hashes", Help: `Comma separated list of supported checksum types.`, Default: fs.CommaSepList{}, Advanced: true, }, { Name: "md5sum_command", Default: "", Help: "The command used to read MD5 hashes.\n\nLeave blank for autodetect.", Advanced: true, }, { Name: "sha1sum_command", Default: "", Help: "The command used to read SHA-1 hashes.\n\nLeave blank for autodetect.", Advanced: true, }, { Name: "crc32sum_command", Default: "", Help: "The command used to read CRC-32 hashes.\n\nLeave blank for autodetect.", Advanced: true, }, { Name: "sha256sum_command", Default: "", Help: "The command used to read SHA-256 hashes.\n\nLeave blank for autodetect.", Advanced: true, }, { Name: "blake3sum_command", Default: "", Help: "The command used to read BLAKE3 hashes.\n\nLeave blank for autodetect.", Advanced: true, }, { Name: "xxh3sum_command", Default: "", Help: "The command used to read XXH3 hashes.\n\nLeave blank for autodetect.", Advanced: true, }, { Name: "xxh128sum_command", Default: "", Help: "The command used to read XXH128 hashes.\n\nLeave blank for autodetect.", 
Advanced: true, }, { Name: "skip_links", Default: false, Help: "Set to skip any symlinks and any other non regular files.", Advanced: true, }, { Name: "subsystem", Default: "sftp", Help: "Specifies the SSH2 subsystem on the remote host.", Advanced: true, }, { Name: "server_command", Default: "", Help: `Specifies the path or command to run a sftp server on the remote host. The subsystem option is ignored when server_command is defined. If adding server_command to the configuration file please note that it should not be enclosed in quotes, since that will make rclone fail. A working example is: [remote_name] type = sftp server_command = sudo /usr/libexec/openssh/sftp-server`, Advanced: true, }, { Name: "use_fstat", Default: false, Help: `If set use fstat instead of stat. Some servers limit the amount of open files and calling Stat after opening the file will throw an error from the server. Setting this flag will call Fstat instead of Stat which is called on an already open file handle. It has been found that this helps with IBM Sterling SFTP servers which have "extractability" level set to 1 which means only 1 file can be opened at any given time. `, Advanced: true, }, { Name: "disable_concurrent_reads", Default: false, Help: `If set don't use concurrent reads. Normally concurrent reads are safe to use and not using them will degrade performance, so this option is disabled by default. Some servers limit the amount number of times a file can be downloaded. Using concurrent reads can trigger this limit, so if you have a server which returns Failed to copy: file does not exist Then you may need to enable this flag. If concurrent reads are disabled, the use_fstat option is ignored. `, Advanced: true, }, { Name: "disable_concurrent_writes", Default: false, Help: `If set don't use concurrent writes. Normally rclone uses concurrent writes to upload files. This improves the performance greatly, especially for distant servers. 
This option disables concurrent writes should that be necessary. `, Advanced: true, }, { Name: "idle_timeout", Default: fs.Duration(60 * time.Second), Help: `Max time before closing idle connections. If no connections have been returned to the connection pool in the time given, rclone will empty the connection pool. Set to 0 to keep connections indefinitely. `, Advanced: true, }, { Name: "chunk_size", Help: `Upload and download chunk size. This controls the maximum size of payload in SFTP protocol packets. The RFC limits this to 32768 bytes (32k), which is the default. However, a lot of servers support larger sizes, typically limited to a maximum total package size of 256k, and setting it larger will increase transfer speed dramatically on high latency links. This includes OpenSSH, and, for example, using the value of 255k works well, leaving plenty of room for overhead while still being within a total packet size of 256k. Make sure to test thoroughly before using a value higher than 32k, and only use it if you always connect to the same server or after sufficiently broad testing. If you get errors such as "failed to send packet payload: EOF", lots of "connection lost", or "corrupted on transfer", when copying a larger file, try lowering the value. The server run by [rclone serve sftp](/commands/rclone_serve_sftp) sends packets with standard 32k maximum payload so you must not set a different chunk_size when downloading files, but it accepts packets up to the 256k total size, so for uploads the chunk_size can be set as for the OpenSSH example above. `, Default: 32 * fs.Kibi, Advanced: true, }, { Name: "concurrency", Help: `The maximum number of outstanding requests for one file This controls the maximum number of outstanding requests for one file. Increasing it will increase throughput on high latency links at the cost of using more memory. 
`, Default: 64, Advanced: true, }, { Name: "connections", Help: strings.ReplaceAll(`Maximum number of SFTP simultaneous connections, 0 for unlimited. Note that setting this is very likely to cause deadlocks so it should be used with care. If you are doing a sync or copy then make sure connections is one more than the sum of |--transfers| and |--checkers|. If you use |--check-first| then it just needs to be one more than the maximum of |--checkers| and |--transfers|. So for |connections 3| you'd use |--checkers 2 --transfers 2 --check-first| or |--checkers 1 --transfers 1|. `, "|", "`"), Default: 0, Advanced: true, }, { Name: "set_env", Default: fs.SpaceSepList{}, Help: `Environment variables to pass to sftp and commands Set environment variables in the form: VAR=value to be passed to the sftp client and to any commands run (eg md5sum). Pass multiple variables space separated, eg VAR1=value VAR2=value and pass variables with spaces in quotes, eg "VAR3=value with space" "VAR4=value with space" VAR5=nospacehere `, Advanced: true, }, { Name: "ciphers", Default: fs.SpaceSepList{}, Help: `Space separated list of ciphers to be used for session encryption, ordered by preference. At least one must match with server configuration. This can be checked for example using ssh -Q cipher. This must not be set if use_insecure_cipher is true. Example: aes128-ctr aes192-ctr aes256-ctr aes128-gcm@openssh.com aes256-gcm@openssh.com `, Advanced: true, }, { Name: "key_exchange", Default: fs.SpaceSepList{}, Help: `Space separated list of key exchange algorithms, ordered by preference. At least one must match with server configuration. This can be checked for example using ssh -Q kex. This must not be set if use_insecure_cipher is true. 
Example: sntrup761x25519-sha512@openssh.com curve25519-sha256 curve25519-sha256@libssh.org ecdh-sha2-nistp256 `, Advanced: true, }, { Name: "macs", Default: fs.SpaceSepList{}, Help: `Space separated list of MACs (message authentication code) algorithms, ordered by preference. At least one must match with server configuration. This can be checked for example using ssh -Q mac. Example: umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com `, Advanced: true, }, { Name: "host_key_algorithms", Default: fs.SpaceSepList{}, Help: `Space separated list of host key algorithms, ordered by preference. At least one must match with server configuration. This can be checked for example using ssh -Q HostKeyAlgorithms. Note: This can affect the outcome of key negotiation with the server even if server host key validation is not enabled. Example: ssh-ed25519 ssh-rsa ssh-dss `, Advanced: true, }, { Name: "ssh", Default: fs.SpaceSepList{}, Help: `Path and arguments to external ssh binary. Normally rclone will use its internal ssh library to connect to the SFTP server. However it does not implement all possible ssh options so it may be desirable to use an external ssh binary. Rclone ignores all the internal config if you use this option and expects you to configure the ssh binary with the user/host/port and any other options you need. **Important** The ssh command must log in without asking for a password so needs to be configured with keys or certificates. Rclone will run the command supplied either with the additional arguments "-s sftp" to access the SFTP subsystem or with commands such as "md5sum /path/to/file" appended to read checksums. Any arguments with spaces in should be surrounded by "double quotes". An example setting might be: ssh -o ServerAliveInterval=20 user@example.com Note that when using an external ssh binary rclone makes a new ssh connection for every hash it calculates. `, }, { Name: "socks_proxy", Default: "", Help: `Socks 5 proxy host. 
Supports the format user:pass@host:port, user@host:port, host:port. Example: myUser:myPass@localhost:9005 `, Advanced: true, }, { Name: "http_proxy", Default: "", Help: `URL for HTTP CONNECT proxy Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb. `, Advanced: true, }, { Name: "copy_is_hardlink", Default: false, Help: `Set to enable server side copies using hardlinks. The SFTP protocol does not define a copy command so normally server side copies are not allowed with the sftp backend. However the SFTP protocol does support hardlinking, and if you enable this flag then the sftp backend will support server side copies. These will be implemented by doing a hardlink from the source to the destination. Not all sftp servers support this. Note that hardlinking two files together will use no additional space as the source and the destination will be the same file. This feature may be useful backups made with --copy-dest.`, Advanced: true, }}, } fs.Register(fsi) } // Options defines the configuration for this backend type Options struct { Host string `config:"host"` User string `config:"user"` Port string `config:"port"` Pass string `config:"pass"` KeyPem string `config:"key_pem"` KeyFile string `config:"key_file"` KeyFilePass string `config:"key_file_pass"` PubKey string `config:"pubkey"` PubKeyFile string `config:"pubkey_file"` KnownHostsFile string `config:"known_hosts_file"` KeyUseAgent bool `config:"key_use_agent"` UseInsecureCipher bool `config:"use_insecure_cipher"` DisableHashCheck bool `config:"disable_hashcheck"` AskPassword bool `config:"ask_password"` PathOverride string `config:"path_override"` SetModTime bool `config:"set_modtime"` ShellType string `config:"shell_type"` Hashes fs.CommaSepList `config:"hashes"` Md5sumCommand string `config:"md5sum_command"` Sha1sumCommand string `config:"sha1sum_command"` Crc32sumCommand string `config:"crc32sum_command"` Sha256sumCommand string `config:"sha256sum_command"` Blake3sumCommand string 
`config:"blake3sum_command"` Xxh3sumCommand string `config:"xxh3sum_command"` Xxh128sumCommand string `config:"xxh128sum_command"` SkipLinks bool `config:"skip_links"` Subsystem string `config:"subsystem"` ServerCommand string `config:"server_command"` UseFstat bool `config:"use_fstat"` DisableConcurrentReads bool `config:"disable_concurrent_reads"` DisableConcurrentWrites bool `config:"disable_concurrent_writes"` IdleTimeout fs.Duration `config:"idle_timeout"` ChunkSize fs.SizeSuffix `config:"chunk_size"` Concurrency int `config:"concurrency"` Connections int `config:"connections"` SetEnv fs.SpaceSepList `config:"set_env"` Ciphers fs.SpaceSepList `config:"ciphers"` KeyExchange fs.SpaceSepList `config:"key_exchange"` MACs fs.SpaceSepList `config:"macs"` HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"` SSH fs.SpaceSepList `config:"ssh"` SocksProxy string `config:"socks_proxy"` HTTPProxy string `config:"http_proxy"` CopyIsHardlink bool `config:"copy_is_hardlink"` } // Fs stores the interface to the remote SFTP files type Fs struct { name string root string absRoot string shellRoot string shellType string opt Options // parsed options ci *fs.ConfigInfo // global config m configmap.Mapper // config features *fs.Features // optional features config *ssh.ClientConfig url string mkdirLock *stringLock cachedHashes *hash.Set poolMu sync.Mutex pool []*conn drain *time.Timer // used to drain the pool when we stop using the connections pacer *fs.Pacer // pacer for operations savedpswd string sessions atomic.Int32 // count in use sessions tokens *pacer.TokenDispenser proxyURL *url.URL // address of HTTP proxy read from environment } // Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading) type Object struct { fs *Fs remote string size int64 // size of the object modTime uint32 // modification time of the object as unix time mode os.FileMode // mode bits from the file md5sum *string // Cached MD5 checksum 
sha1sum *string // Cached SHA-1 checksum crc32sum *string // Cached CRC-32 checksum sha256sum *string // Cached SHA-256 checksum blake3sum *string // Cached BLAKE3 checksum xxh3sum *string // Cached XXH3 checksum xxh128sum *string // Cached XXH128 checksum } // conn encapsulates an ssh client and corresponding sftp client type conn struct { sshClient sshClient sftpClient *sftp.Client err chan error } // Wait for connection to close func (c *conn) wait() { c.err <- c.sshClient.Wait() } // Send keepalives every interval over the ssh connection until done is closed func (c *conn) sendKeepAlives(interval time.Duration) (done chan struct{}) { done = make(chan struct{}) go func() { t := time.NewTicker(interval) defer t.Stop() for { select { case <-t.C: c.sshClient.SendKeepAlive() case <-done: return } } }() return done } // Closes the connection func (c *conn) close() error { sftpErr := c.sftpClient.Close() sshErr := c.sshClient.Close() if sftpErr != nil { return sftpErr } return sshErr } // Returns an error if closed func (c *conn) closed() error { select { case err := <-c.err: return err default: } return nil } // Show that we are using an ssh session // // Call removeSession() when done func (f *Fs) addSession() { f.sessions.Add(1) } // Show the ssh session is no longer in use func (f *Fs) removeSession() { f.sessions.Add(-1) } // getSessions shows whether there are any sessions in use func (f *Fs) getSessions() int32 { return f.sessions.Load() } // Open a new connection to the SFTP server. 
func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) { // Rate limit rate of new connections c = &conn{ err: make(chan error, 1), } if len(f.opt.SSH) == 0 { c.sshClient, err = f.newSSHClientInternal(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config) } else { c.sshClient, err = f.newSSHClientExternal() } if err != nil { return nil, fmt.Errorf("couldn't connect SSH: %w", err) } c.sftpClient, err = f.newSftpClient(c.sshClient) if err != nil { _ = c.sshClient.Close() return nil, fmt.Errorf("couldn't initialise SFTP: %w", err) } go c.wait() return c, nil } // Set any environment variables on the ssh.Session func (f *Fs) setEnv(s sshSession) error { for _, env := range f.opt.SetEnv { equal := strings.IndexRune(env, '=') if equal < 0 { return fmt.Errorf("no = found in env var %q", env) } // fs.Debugf(f, "Setting env %q = %q", env[:equal], env[equal+1:]) err := s.Setenv(env[:equal], env[equal+1:]) if err != nil { return fmt.Errorf("failed to set env var %q: %w", env[:equal], err) } } return nil } // Creates a new SFTP client on conn, using the specified subsystem // or sftp server, and zero or more option functions func (f *Fs) newSftpClient(client sshClient, opts ...sftp.ClientOption) (*sftp.Client, error) { s, err := client.NewSession() if err != nil { return nil, err } err = f.setEnv(s) if err != nil { return nil, err } pw, err := s.StdinPipe() if err != nil { return nil, err } pr, err := s.StdoutPipe() if err != nil { return nil, err } if f.opt.ServerCommand != "" { if err := s.Start(f.opt.ServerCommand); err != nil { return nil, err } } else { if err := s.RequestSubsystem(f.opt.Subsystem); err != nil { return nil, err } } opts = opts[:len(opts):len(opts)] // make sure we don't overwrite the callers opts opts = append(opts, sftp.UseFstat(f.opt.UseFstat), sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads), sftp.UseConcurrentWrites(!f.opt.DisableConcurrentWrites), sftp.MaxPacketUnchecked(int(f.opt.ChunkSize)), 
sftp.MaxConcurrentRequestsPerFile(f.opt.Concurrency), ) return sftp.NewClientPipe(pr, pw, opts...) } // Get an SFTP connection from the pool, or open a new one func (f *Fs) getSftpConnection(ctx context.Context) (c *conn, err error) { accounting.LimitTPS(ctx) if f.opt.Connections > 0 { f.tokens.Get() } f.poolMu.Lock() for len(f.pool) > 0 { c = f.pool[0] f.pool = f.pool[1:] err := c.closed() if err == nil { break } fs.Errorf(f, "Discarding closed SSH connection: %v", err) c = nil } f.poolMu.Unlock() if c != nil { return c, nil } err = f.pacer.Call(func() (bool, error) { c, err = f.sftpConnection(ctx) if err != nil { return true, err } return false, nil }) if f.opt.Connections > 0 && c == nil { f.tokens.Put() } return c, err } // Return an SFTP connection to the pool // // It nils the pointed to connection out so it can't be reused // // if err is not nil then it checks the connection is alive using a // Getwd request func (f *Fs) putSftpConnection(pc **conn, err error) { if f.opt.Connections > 0 { defer f.tokens.Put() } c := *pc if !c.sshClient.CanReuse() { return } *pc = nil if err != nil { // work out if this is an expected error isRegularError := false var statusErr *sftp.StatusError var pathErr *os.PathError switch { case errors.Is(err, os.ErrNotExist): isRegularError = true case errors.As(err, &statusErr): isRegularError = true case errors.As(err, &pathErr): isRegularError = true } // If not a regular SFTP error code then check the connection if !isRegularError { _, nopErr := c.sftpClient.Getwd() if nopErr != nil { fs.Debugf(f, "Connection failed, closing: %v", nopErr) _ = c.close() return } fs.Debugf(f, "Connection OK after error: %v", err) } } f.poolMu.Lock() f.pool = append(f.pool, c) if f.opt.IdleTimeout > 0 { f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer } f.poolMu.Unlock() } // Drain the pool of any connections func (f *Fs) drainPool(ctx context.Context) (err error) { f.poolMu.Lock() defer f.poolMu.Unlock() if 
sessions := f.getSessions(); sessions != 0 { fs.Debugf(f, "Not closing %d unused connections as %d sessions active", len(f.pool), sessions) if f.opt.IdleTimeout > 0 { f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer } return nil } if f.opt.IdleTimeout > 0 { f.drain.Stop() } if len(f.pool) != 0 { fs.Debugf(f, "Closing %d unused connections", len(f.pool)) } for i, c := range f.pool { if cErr := c.closed(); cErr == nil { cErr = c.close() if cErr != nil { fs.Debugf(f, "Ignoring error closing connection: %v", cErr) } } f.pool[i] = nil } f.pool = nil return nil } // NewFs creates a new Fs object from the name and root. It connects to // the host specified in the config file. func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // This will hold the Fs object. We need to create it here // so we can refer to it in the SSH callback, but it's populated // in NewFsWithConnection f := &Fs{ ci: fs.GetConfig(ctx), } // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } if len(opt.SSH) != 0 && ((opt.User != currentUser && opt.User != "") || opt.Host != "" || (opt.Port != "22" && opt.Port != "")) { fs.Logf(name, "--sftp-ssh is in use - ignoring user/host/port from config - set in the parameters to --sftp-ssh (remove them from the config to silence this warning)") } f.tokens = pacer.NewTokenDispenser(opt.Connections) if opt.User == "" { opt.User = currentUser } if opt.Port == "" { opt.Port = "22" } // get proxy URL if set if opt.HTTPProxy != "" { proxyURL, err := url.Parse(opt.HTTPProxy) if err != nil { return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err) } f.proxyURL = proxyURL } sshConfig := &ssh.ClientConfig{ User: opt.User, Auth: []ssh.AuthMethod{}, HostKeyCallback: ssh.InsecureIgnoreHostKey(), Timeout: time.Duration(f.ci.ConnectTimeout), ClientVersion: "SSH-2.0-" + f.ci.UserAgent, } if len(opt.HostKeyAlgorithms) != 0 { 
sshConfig.HostKeyAlgorithms = []string(opt.HostKeyAlgorithms) } if opt.KnownHostsFile != "" { hostcallback, err := knownhosts.New(env.ShellExpand(opt.KnownHostsFile)) if err != nil { return nil, fmt.Errorf("couldn't parse known_hosts_file: %w", err) } sshConfig.HostKeyCallback = hostcallback } if opt.UseInsecureCipher && (opt.Ciphers != nil || opt.KeyExchange != nil) { return nil, fmt.Errorf("use_insecure_cipher must be false if ciphers or key_exchange are set in advanced configuration") } sshConfig.Config.SetDefaults() if opt.UseInsecureCipher { sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc", "aes192-cbc", "aes256-cbc", "3des-cbc") sshConfig.Config.KeyExchanges = append(sshConfig.Config.KeyExchanges, "diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256") } else { if opt.Ciphers != nil { sshConfig.Config.Ciphers = opt.Ciphers } if opt.KeyExchange != nil { sshConfig.Config.KeyExchanges = opt.KeyExchange } } if opt.MACs != nil { sshConfig.Config.MACs = opt.MACs } keyFile := env.ShellExpand(opt.KeyFile) pubkeyFile := env.ShellExpand(opt.PubKeyFile) //keyPem := env.ShellExpand(opt.KeyPem) // Add ssh agent-auth if no password or file or key PEM specified if (len(opt.SSH) == 0 && opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent { sshAgentClient, _, err := sshagent.New() if err != nil { return nil, fmt.Errorf("couldn't connect to ssh-agent: %w", err) } signers, err := sshAgentClient.Signers() if err != nil { return nil, fmt.Errorf("couldn't read ssh agent signers: %w", err) } if keyFile != "" { // If `opt.KeyUseAgent` is false, then it's expected that `opt.KeyFile` contains the private key // and `${opt.KeyFile}.pub` contains the public key. // // If `opt.KeyUseAgent` is true, then it's expected that `opt.KeyFile` contains the public key. // This is how it works with openssh; the `IdentityFile` in openssh config points to the public key. 
// It's not necessary to specify the public key explicitly when using ssh-agent, since openssh and rclone // will try all the keys they find in the ssh-agent until they find one that works. But just like // `IdentityFile` is used in openssh config to limit the search to one specific key, so does // `opt.KeyFile` in rclone config limit the search to that specific key. // // However, previous versions of rclone would always expect to find the public key in // `${opt.KeyFile}.pub` even if `opt.KeyUseAgent` was true. So for the sake of backward compatibility // we still first attempt to read the public key from `${opt.KeyFile}.pub`. But if it fails with // an `fs.ErrNotExist` then we also try to read the public key from `opt.KeyFile`. pubBytes, err := os.ReadFile(keyFile + ".pub") if err != nil { if errors.Is(err, iofs.ErrNotExist) && opt.KeyUseAgent { pubBytes, err = os.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("failed to read public key file: %w", err) } } else { return nil, fmt.Errorf("failed to read public key file: %w", err) } } pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes) if err != nil { return nil, fmt.Errorf("failed to parse public key file: %w", err) } pubM := pub.Marshal() found := false
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sftp/stringlock_test.go
backend/sftp/stringlock_test.go
//go:build !plan9

package sftp

import (
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// TestStringLock checks that stringLock gives mutual exclusion per ID:
// goroutines locking the same ID must serialise, so the racy
// read-sleep-write increments below only produce the exact totals if
// the lock works.
func TestStringLock(t *testing.T) {
	var wg sync.WaitGroup
	counter := [3]int{}
	lock := newStringLock()
	const (
		outer = 10
		inner = 100
		total = outer * inner
	)
	// Spawn outer goroutines per counter slot; all goroutines for
	// slot j contend on the same string ID.
	for range outer {
		for j := range counter {
			wg.Add(1)
			go func(j int) {
				defer wg.Done()
				ID := fmt.Sprintf("%d", j)
				for range inner {
					lock.Lock(ID)
					// Deliberately racy read-modify-write with a sleep in
					// the middle: lost updates would occur here unless the
					// lock truly excludes other holders of this ID.
					n := counter[j]
					time.Sleep(1 * time.Millisecond)
					counter[j] = n + 1
					lock.Unlock(ID)
				}
			}(j)
		}
	}
	wg.Wait()
	// Every slot must see all outer*inner increments - no lost updates.
	assert.Equal(t, [3]int{total, total, total}, counter)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sftp/sftp_internal_test.go
backend/sftp/sftp_internal_test.go
//go:build !plan9 package sftp import ( "fmt" "testing" "github.com/stretchr/testify/assert" ) func TestShellEscapeUnix(t *testing.T) { for i, test := range []struct { unescaped, escaped string }{ {"", ""}, {"/this/is/harmless", "/this/is/harmless"}, {"$(rm -rf /)", "\\$\\(rm\\ -rf\\ /\\)"}, {"/test/\n", "/test/'\n'"}, {":\"'", ":\\\"\\'"}, } { got, err := quoteOrEscapeShellPath("unix", test.unescaped) assert.NoError(t, err) assert.Equal(t, test.escaped, got, fmt.Sprintf("Test %d unescaped = %q", i, test.unescaped)) } } func TestShellEscapeCmd(t *testing.T) { for i, test := range []struct { unescaped, escaped string ok bool }{ {"", "\"\"", true}, {"c:/this/is/harmless", "\"c:/this/is/harmless\"", true}, {"c:/test&notepad", "\"c:/test&notepad\"", true}, {"c:/test\"&\"notepad", "", false}, } { got, err := quoteOrEscapeShellPath("cmd", test.unescaped) if test.ok { assert.NoError(t, err) assert.Equal(t, test.escaped, got, fmt.Sprintf("Test %d unescaped = %q", i, test.unescaped)) } else { assert.Error(t, err) } } } func TestShellEscapePowerShell(t *testing.T) { for i, test := range []struct { unescaped, escaped string }{ {"", "''"}, {"c:/this/is/harmless", "'c:/this/is/harmless'"}, {"c:/test&notepad", "'c:/test&notepad'"}, {"c:/test\"&\"notepad", "'c:/test\"&\"notepad'"}, {"c:/test'&'notepad", "'c:/test''&''notepad'"}, } { got, err := quoteOrEscapeShellPath("powershell", test.unescaped) assert.NoError(t, err) assert.Equal(t, test.escaped, got, fmt.Sprintf("Test %d unescaped = %q", i, test.unescaped)) } } func TestParseHash(t *testing.T) { for i, test := range []struct { sshOutput, checksum string }{ {"8dbc7733dbd10d2efc5c0a0d8dad90f958581821 RELEASE.md\n", "8dbc7733dbd10d2efc5c0a0d8dad90f958581821"}, {"03cfd743661f07975fa2f1220c5194cbaff48451 -\n", "03cfd743661f07975fa2f1220c5194cbaff48451"}, } { got := parseHash([]byte(test.sshOutput)) assert.Equal(t, test.checksum, got, fmt.Sprintf("Test %d sshOutput = %q", i, test.sshOutput)) } } func TestParseUsage(t *testing.T) { 
for i, test := range []struct { sshOutput string usage [3]int64 }{ {"Filesystem 1K-blocks Used Available Use% Mounted on\n/dev/root 91283092 81111888 10154820 89% /", [3]int64{93473886208, 83058573312, 10398535680}}, {"Filesystem 1K-blocks Used Available Use% Mounted on\ntmpfs 818256 1636 816620 1% /run", [3]int64{837894144, 1675264, 836218880}}, {"Filesystem 1024-blocks Used Available Capacity iused ifree %iused Mounted on\n/dev/disk0s2 244277768 94454848 149566920 39% 997820 4293969459 0% /", [3]int64{250140434432, 96721764352, 153156526080}}, } { gotSpaceTotal, gotSpaceUsed, gotSpaceAvail := parseUsage([]byte(test.sshOutput)) assert.Equal(t, test.usage, [3]int64{gotSpaceTotal, gotSpaceUsed, gotSpaceAvail}, fmt.Sprintf("Test %d sshOutput = %q", i, test.sshOutput)) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sftp/ssh.go
backend/sftp/ssh.go
//go:build !plan9 package sftp import "io" // Interfaces for ssh client and session implemented in ssh_internal.go and ssh_external.go // An interface for an ssh client to abstract over internal ssh library and external binary type sshClient interface { // Wait blocks until the connection has shut down, and returns the // error causing the shutdown. Wait() error // SendKeepAlive sends a keepalive message to keep the connection open SendKeepAlive() // Close the connection Close() error // NewSession opens a new sshSession for this sshClient. (A // session is a remote execution of a program.) NewSession() (sshSession, error) // CanReuse indicates if this client can be reused CanReuse() bool } // An interface for an ssh session to abstract over internal ssh library and external binary type sshSession interface { // Setenv sets an environment variable that will be applied to any // command executed by Shell or Run. Setenv(name, value string) error // Start runs cmd on the remote host. Typically, the remote // server passes cmd to the shell for interpretation. // A Session only accepts one call to Run, Start or Shell. Start(cmd string) error // StdinPipe returns a pipe that will be connected to the // remote command's standard input when the command starts. StdinPipe() (io.WriteCloser, error) // StdoutPipe returns a pipe that will be connected to the // remote command's standard output when the command starts. // There is a fixed amount of buffering that is shared between // stdout and stderr streams. If the StdoutPipe reader is // not serviced fast enough it may eventually cause the // remote command to block. StdoutPipe() (io.Reader, error) // RequestSubsystem requests the association of a subsystem // with the session on the remote host. A subsystem is a // predefined command that runs in the background when the ssh // session is initiated RequestSubsystem(subsystem string) error // Run runs cmd on the remote host. 
Typically, the remote // server passes cmd to the shell for interpretation. // A Session only accepts one call to Run, Start, Shell, Output, // or CombinedOutput. Run(cmd string) error // Close the session Close() error // Set the stdout SetStdout(io.Writer) // Set the stderr SetStderr(io.Writer) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sftp/ssh_external.go
backend/sftp/ssh_external.go
//go:build !plan9 package sftp import ( "context" "errors" "fmt" "io" "os/exec" "slices" "strings" "sync" "time" "github.com/rclone/rclone/fs" ) // Implement the sshClient interface for external ssh programs type sshClientExternal struct { f *Fs session *sshSessionExternal } func (f *Fs) newSSHClientExternal() (sshClient, error) { return &sshClientExternal{f: f}, nil } // Wait for connection to close func (s *sshClientExternal) Wait() error { if s.session == nil { return nil } return s.session.Wait() } // Send a keepalive over the ssh connection func (s *sshClientExternal) SendKeepAlive() { // Up to the user to configure -o ServerAliveInterval=20 on their ssh connections } // Close the connection func (s *sshClientExternal) Close() error { if s.session == nil { return nil } return s.session.Close() } // NewSession makes a new external SSH connection func (s *sshClientExternal) NewSession() (sshSession, error) { session := s.f.newSSHSessionExternal() if s.session == nil { // Store the first session so Wait() and Close() can use it s.session = session } else { fs.Debugf(s.f, "ssh external: creating additional session") } return session, nil } // CanReuse indicates if this client can be reused func (s *sshClientExternal) CanReuse() bool { if s.session == nil { return true } exited := s.session.exited() canReuse := !exited && s.session.runningSFTP // fs.Debugf(s.f, "ssh external: CanReuse %v, exited=%v runningSFTP=%v", canReuse, exited, s.session.runningSFTP) return canReuse } // Check interfaces var _ sshClient = &sshClientExternal{} // implement the sshSession interface for external ssh binary type sshSessionExternal struct { f *Fs cmd *exec.Cmd cancel func() startCalled bool runningSFTP bool waitOnce sync.Once // ensure Wait() is only called once waitErr error // result of the Wait() call } func (f *Fs) newSSHSessionExternal() *sshSessionExternal { s := &sshSessionExternal{ f: f, } // Make a cancellation function for this to call in Close() ctx, cancel := 
context.WithCancel(context.Background()) s.cancel = cancel // Connect to a remote host and request the sftp subsystem via // the 'ssh' command. This assumes that passwordless login is // correctly configured. ssh := slices.Clone(s.f.opt.SSH) s.cmd = exec.CommandContext(ctx, ssh[0], ssh[1:]...) // Allow the command a short time only to shut down s.cmd.WaitDelay = time.Second return s } // Setenv sets an environment variable that will be applied to any // command executed by Shell or Run. func (s *sshSessionExternal) Setenv(name, value string) error { return errors.New("ssh external: can't set environment variables") } const requestSubsystem = "***Subsystem***:" // Start runs cmd on the remote host. Typically, the remote // server passes cmd to the shell for interpretation. // A Session only accepts one call to Run, Start or Shell. func (s *sshSessionExternal) Start(cmd string) error { if s.startCalled { return errors.New("internal error: ssh external: command already running") } s.startCalled = true // Adjust the args if strings.HasPrefix(cmd, requestSubsystem) { s.cmd.Args = append(s.cmd.Args, "-s", cmd[len(requestSubsystem):]) s.runningSFTP = true } else { s.cmd.Args = append(s.cmd.Args, cmd) s.runningSFTP = false } fs.Debugf(s.f, "ssh external: running: %v", fs.SpaceSepList(s.cmd.Args)) // start the process err := s.cmd.Start() if err != nil { return fmt.Errorf("ssh external: start process: %w", err) } return nil } // RequestSubsystem requests the association of a subsystem // with the session on the remote host. A subsystem is a // predefined command that runs in the background when the ssh // session is initiated func (s *sshSessionExternal) RequestSubsystem(subsystem string) error { return s.Start(requestSubsystem + subsystem) } // StdinPipe returns a pipe that will be connected to the // remote command's standard input when the command starts. 
func (s *sshSessionExternal) StdinPipe() (io.WriteCloser, error) { rd, err := s.cmd.StdinPipe() if err != nil { return nil, fmt.Errorf("ssh external: stdin pipe: %w", err) } return rd, nil } // StdoutPipe returns a pipe that will be connected to the // remote command's standard output when the command starts. // There is a fixed amount of buffering that is shared between // stdout and stderr streams. If the StdoutPipe reader is // not serviced fast enough it may eventually cause the // remote command to block. func (s *sshSessionExternal) StdoutPipe() (io.Reader, error) { wr, err := s.cmd.StdoutPipe() if err != nil { return nil, fmt.Errorf("ssh external: stdout pipe: %w", err) } return wr, nil } // Return whether the command has finished or not func (s *sshSessionExternal) exited() bool { return s.cmd.ProcessState != nil } // Wait for the command to exit func (s *sshSessionExternal) Wait() error { // Use sync.Once to ensure we only wait for the process once. // This is safe even if Wait() is called from multiple goroutines. s.waitOnce.Do(func() { s.waitErr = s.cmd.Wait() if s.waitErr == nil { fs.Debugf(s.f, "ssh external: command exited OK") } else { fs.Debugf(s.f, "ssh external: command exited with error: %v", s.waitErr) } }) return s.waitErr } // Run runs cmd on the remote host. Typically, the remote // server passes cmd to the shell for interpretation. // A Session only accepts one call to Run, Start, Shell, Output, // or CombinedOutput. 
func (s *sshSessionExternal) Run(cmd string) error { err := s.Start(cmd) if err != nil { return err } return s.Wait() } // Close the external ssh func (s *sshSessionExternal) Close() error { fs.Debugf(s.f, "ssh external: close") // Cancel the context which kills the process s.cancel() // Wait for it to finish _ = s.Wait() return nil } // Set the stdout func (s *sshSessionExternal) SetStdout(wr io.Writer) { s.cmd.Stdout = wr } // Set the stderr func (s *sshSessionExternal) SetStderr(wr io.Writer) { s.cmd.Stderr = wr } // Check interfaces var _ sshSession = &sshSessionExternal{}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sftp/stringlock.go
backend/sftp/stringlock.go
//go:build !plan9 package sftp import "sync" // stringLock locks for string IDs passed in type stringLock struct { mu sync.Mutex // mutex to protect below locks map[string]chan struct{} // map of locks } // newStringLock creates a stringLock func newStringLock() *stringLock { return &stringLock{ locks: make(map[string]chan struct{}), } } // Lock locks on the id passed in func (l *stringLock) Lock(ID string) { l.mu.Lock() for { ch, ok := l.locks[ID] if !ok { break } // Wait for the channel to be closed l.mu.Unlock() // fs.Logf(nil, "Waiting for stringLock on %q", ID) <-ch l.mu.Lock() } l.locks[ID] = make(chan struct{}) l.mu.Unlock() } // Unlock unlocks on the id passed in. Will panic if Lock with the // given id wasn't called first. func (l *stringLock) Unlock(ID string) { l.mu.Lock() ch, ok := l.locks[ID] if !ok { panic("stringLock: Unlock before Lock") } close(ch) delete(l.locks, ID) l.mu.Unlock() }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/pcloud/writer_at.go
backend/pcloud/writer_at.go
package pcloud import ( "bytes" "context" "crypto/sha1" "encoding/hex" "fmt" "net/url" "strconv" "time" "github.com/rclone/rclone/backend/pcloud/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/lib/rest" ) // writerAt implements fs.WriterAtCloser, adding the OpenWrtierAt feature to pcloud. type writerAt struct { ctx context.Context fs *Fs size int64 remote string fileID int64 } // Close implements WriterAt.Close. func (c *writerAt) Close() error { // Avoiding race conditions: Depending on the tcp connection, there might be // caching issues when checking the size immediately after write. // Hence we try avoiding them by checking the resulting size on a different connection. if c.size < 0 { // Without knowing the size, we cannot do size checks. // Falling back to a sleep of 1s for sake of hope. time.Sleep(1 * time.Second) return nil } sizeOk := false sizeLastSeen := int64(0) for retry := range 5 { fs.Debugf(c.remote, "checking file size: try %d/5", retry) obj, err := c.fs.NewObject(c.ctx, c.remote) if err != nil { return fmt.Errorf("get uploaded obj: %w", err) } sizeLastSeen = obj.Size() if obj.Size() == c.size { sizeOk = true break } time.Sleep(1 * time.Second) } if !sizeOk { return fmt.Errorf("incorrect size after upload: got %d, want %d", sizeLastSeen, c.size) } return nil } // WriteAt implements fs.WriteAt. 
func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) { contentLength := len(buffer) inSHA1Bytes := sha1.Sum(buffer) inSHA1 := hex.EncodeToString(inSHA1Bytes[:]) client, err := c.fs.newSingleConnClient(c.ctx) if err != nil { return 0, fmt.Errorf("create client: %w", err) } openResult, err := fileOpen(c.ctx, client, c.fs, c.fileID) if err != nil { return 0, fmt.Errorf("open file: %w", err) } // get target hash outChecksum, err := fileChecksum(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, int64(contentLength)) if err != nil { return 0, err } outSHA1 := outChecksum.SHA1 if outSHA1 == "" || inSHA1 == "" { return 0, fmt.Errorf("expect both hashes to be filled: src: %q, target: %q", inSHA1, outSHA1) } // check hash of buffer, skip if fits if inSHA1 == outSHA1 { return contentLength, nil } // upload buffer with offset if necessary if _, err := filePWrite(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, buffer); err != nil { return 0, err } // close fd if _, err := fileClose(c.ctx, client, c.fs.pacer, openResult.FileDescriptor); err != nil { return contentLength, fmt.Errorf("close fd: %w", err) } return contentLength, nil } // Call pcloud file_open using folderid and name with O_CREAT and O_WRITE flags, see [API Doc.] 
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, filename string) (*api.FileOpenResponse, error) { opts := rest.Opts{ Method: "PUT", Path: "/file_open", Parameters: url.Values{}, TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding ExtraHeaders: map[string]string{ "Connection": "keep-alive", }, } filename = srcFs.opt.Enc.FromStandardName(filename) opts.Parameters.Set("name", filename) opts.Parameters.Set("folderid", dirIDtoNumber(directoryID)) opts.Parameters.Set("flags", "0x0042") // O_CREAT, O_WRITE result := &api.FileOpenResponse{} err := srcFs.pacer.CallNoRetry(func() (bool, error) { resp, err := c.CallJSON(ctx, &opts, nil, result) err = result.Error.Update(err) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("open new file descriptor: %w", err) } return result, nil } // Call pcloud file_open using fileid with O_WRITE flags, see [API Doc.] // [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html func fileOpen(ctx context.Context, c *rest.Client, srcFs *Fs, fileID int64) (*api.FileOpenResponse, error) { opts := rest.Opts{ Method: "PUT", Path: "/file_open", Parameters: url.Values{}, TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding ExtraHeaders: map[string]string{ "Connection": "keep-alive", }, } opts.Parameters.Set("fileid", strconv.FormatInt(fileID, 10)) opts.Parameters.Set("flags", "0x0002") // O_WRITE result := &api.FileOpenResponse{} err := srcFs.pacer.CallNoRetry(func() (bool, error) { resp, err := c.CallJSON(ctx, &opts, nil, result) err = result.Error.Update(err) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("open new file descriptor: %w", err) } return result, nil } // Call pcloud file_checksum, see [API Doc.] 
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html func fileChecksum( ctx context.Context, client *rest.Client, pacer *fs.Pacer, fd, offset, count int64, ) (*api.FileChecksumResponse, error) { opts := rest.Opts{ Method: "PUT", Path: "/file_checksum", Parameters: url.Values{}, TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding ExtraHeaders: map[string]string{ "Connection": "keep-alive", }, } opts.Parameters.Set("fd", strconv.FormatInt(fd, 10)) opts.Parameters.Set("offset", strconv.FormatInt(offset, 10)) opts.Parameters.Set("count", strconv.FormatInt(count, 10)) result := &api.FileChecksumResponse{} err := pacer.CallNoRetry(func() (bool, error) { resp, err := client.CallJSON(ctx, &opts, nil, result) err = result.Error.Update(err) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", fd, offset, count, err) } return result, nil } // Call pcloud file_pwrite, see [API Doc.] 
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html func filePWrite( ctx context.Context, client *rest.Client, pacer *fs.Pacer, fd int64, offset int64, buf []byte, ) (*api.FilePWriteResponse, error) { contentLength := int64(len(buf)) opts := rest.Opts{ Method: "PUT", Path: "/file_pwrite", Body: bytes.NewReader(buf), ContentLength: &contentLength, Parameters: url.Values{}, TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding Close: false, ExtraHeaders: map[string]string{ "Connection": "keep-alive", }, } opts.Parameters.Set("fd", strconv.FormatInt(fd, 10)) opts.Parameters.Set("offset", strconv.FormatInt(offset, 10)) result := &api.FilePWriteResponse{} err := pacer.CallNoRetry(func() (bool, error) { resp, err := client.CallJSON(ctx, &opts, nil, result) err = result.Error.Update(err) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, fd, offset, err) } return result, nil } // Call pcloud file_close, see [API Doc.] // [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html func fileClose( ctx context.Context, client *rest.Client, pacer *fs.Pacer, fd int64, ) (*api.FileCloseResponse, error) { opts := rest.Opts{ Method: "PUT", Path: "/file_close", Parameters: url.Values{}, TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding Close: true, } opts.Parameters.Set("fd", strconv.FormatInt(fd, 10)) result := &api.FileCloseResponse{} err := pacer.CallNoRetry(func() (bool, error) { resp, err := client.CallJSON(ctx, &opts, nil, result) err = result.Error.Update(err) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("close file descriptor: %w", err) } return result, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/pcloud/pcloud.go
backend/pcloud/pcloud.go
// Package pcloud provides an interface to the Pcloud // object storage system. package pcloud // FIXME cleanup returns login required? // FIXME mime type? Fix overview if implement. import ( "context" "errors" "fmt" "io" "net/http" "net/url" "path" "strconv" "strings" "time" "github.com/rclone/rclone/backend/pcloud/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" "golang.org/x/oauth2" ) const ( rcloneClientID = "DnONSzyJXpm" rcloneEncryptedClientSecret = "ej1OIF39VOQQ0PXaSdK9ztkLw3tdLNscW2157TKNQdQKkICR4uU7aFg4eFM" minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential defaultHostname = "api.pcloud.com" ) // Globals var ( // Description of how to auth for this app oauthConfig = &oauthutil.Config{ Scopes: nil, AuthURL: "https://my.pcloud.com/oauth2/authorize", // TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL ClientID: rcloneClientID, ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), RedirectURL: oauthutil.RedirectLocalhostURL, } ) // Update the TokenURL with the actual hostname func updateTokenURL(oauthConfig *oauthutil.Config, hostname string) { oauthConfig.TokenURL = "https://" + hostname + "/oauth2_token" } // Register with Fs func init() { updateTokenURL(oauthConfig, defaultHostname) fs.Register(&fs.RegInfo{ Name: "pcloud", Description: "Pcloud", NewFs: NewFs, Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) 
{ optc := new(Options) err := configstruct.Set(m, optc) if err != nil { fs.Errorf(nil, "Failed to read config: %v", err) } updateTokenURL(oauthConfig, optc.Hostname) checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error { if auth == nil || auth.Form == nil { return errors.New("form not found in response") } hostname := auth.Form.Get("hostname") if hostname == "" { hostname = defaultHostname } // Save the hostname in the config m.Set("hostname", hostname) // Update the token URL updateTokenURL(oauthConfig, hostname) fs.Debugf(nil, "pcloud: got hostname %q", hostname) return nil } return oauthutil.ConfigOut("", &oauthutil.Options{ OAuth2Config: oauthConfig, CheckAuth: checkAuth, StateBlankOK: true, // pCloud seems to drop the state parameter now - see #4210 }) }, Options: append(oauthutil.SharedOptions, []fs.Option{{ Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // Encode invalid UTF-8 bytes as json doesn't handle them properly. // // TODO: Investigate Unicode simplification (\ gets converted to \ server-side) Default: (encoder.Display | encoder.EncodeBackSlash | encoder.EncodeInvalidUtf8), }, { Name: "root_folder_id", Help: "Fill in for rclone to use a non root folder as its starting point.", Default: "d0", Advanced: true, Sensitive: true, }, { Name: "hostname", Help: `Hostname to connect to. This is normally set when rclone initially does the oauth connection, however you will need to set it by hand if you are using remote config with rclone authorize. `, Default: defaultHostname, Advanced: true, Examples: []fs.OptionExample{{ Value: defaultHostname, Help: "Original/US region", }, { Value: "eapi.pcloud.com", Help: "EU region", }}, }, { Name: "username", Help: `Your pcloud username. This is only required when you want to use the cleanup command. 
Due to a bug in the pcloud API the required API does not support OAuth authentication so we have to rely on user password authentication for it.`, Advanced: true, Sensitive: true, }, { Name: "password", Help: "Your pcloud password.", IsPassword: true, Advanced: true, }, }...), }) } // Options defines the configuration for this backend type Options struct { Enc encoder.MultiEncoder `config:"encoding"` RootFolderID string `config:"root_folder_id"` Hostname string `config:"hostname"` Username string `config:"username"` Password string `config:"password"` } // Fs represents a remote pcloud type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options features *fs.Features // optional features ts *oauthutil.TokenSource // the token source, used to create new clients srv *rest.Client // the connection to the server cleanupSrv *rest.Client // the connection used for the cleanup method dirCache *dircache.DirCache // Map of directory path to directory id pacer *fs.Pacer // pacer for API calls tokenRenewer *oauthutil.Renew // renew the token on expiry lastDiffID int64 // change tracking state for diff long-polling } // Object describes a pcloud object // // Will definitely have info but maybe not meta type Object struct { fs *Fs // what this object is part of remote string // The remote path hasMetaData bool // whether info below has been set size int64 // size of the object modTime time.Time // modification time of the object id string // ID of the object md5 string // MD5 if known sha1 string // SHA1 if known sha256 string // SHA256 if known link *api.GetFileLinkResult } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("pcloud root 
'%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // parsePath parses a pcloud 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } doRetry := false // Check if it is an api.Error if apiErr, ok := err.(*api.Error); ok { // See https://docs.pcloud.com/errors/ for error treatment // Errors are classified as 1xxx, 2xxx, etc. switch apiErr.Result / 1000 { case 4: // 4xxx: rate limiting doRetry = true case 5: // 5xxx: internal errors doRetry = true } } if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") { doRetry = true fs.Debugf(nil, "Should retry: %v", err) } return doRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // readMetaDataForPath reads the metadata from the path func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) { // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound } return nil, err } found, err := f.listAll(ctx, directoryID, false, true, false, func(item *api.Item) bool { if item.Name == leaf { info = item return true } return false }) if err != nil { return nil, err } if 
!found { return nil, fs.ErrorObjectNotFound } return info, nil } // errorHandler parses a non 2xx error response into an error func errorHandler(resp *http.Response) error { // Decode error response errResponse := new(api.Error) err := rest.DecodeJSON(resp, &errResponse) if err != nil { fs.Debugf(nil, "Couldn't decode error response: %v", err) } if errResponse.ErrorString == "" { errResponse.ErrorString = resp.Status } if errResponse.Result == 0 { errResponse.Result = resp.StatusCode } return errResponse } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } root = parsePath(root) oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig) if err != nil { return nil, fmt.Errorf("failed to configure Pcloud: %w", err) } updateTokenURL(oauthConfig, opt.Hostname) canCleanup := opt.Username != "" && opt.Password != "" f := &Fs{ name: name, root: root, opt: *opt, ts: ts, srv: rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } if canCleanup { f.cleanupSrv = rest.NewClient(fshttp.NewClient(ctx)).SetRoot("https://" + opt.Hostname) } f.features = (&fs.Features{ CaseInsensitive: false, CanHaveEmptyDirectories: true, PartialUploads: true, }).Fill(ctx, f) if !canCleanup { f.features.CleanUp = nil } f.srv.SetErrorHandler(errorHandler) // Renew the token in the background f.tokenRenewer = oauthutil.NewRenew(f.String(), f.ts, func() error { _, err := f.readMetaDataForPath(ctx, "") return err }) // Get rootFolderID rootID := f.opt.RootFolderID f.dirCache = dircache.New(root, rootID, f) // Find the current root err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := 
dircache.SplitPath(root) tempF := *f tempF.dirCache = dircache.New(newRoot, rootID, &tempF) tempF.root = newRoot // Make new Fs which is the parent err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } _, err := tempF.newObjectWithInfo(ctx, remote, nil) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f return f, nil } return nil, err } // XXX: update the old f here instead of returning tempF, since // `features` were already filled with functions having *f as a receiver. // See https://github.com/rclone/rclone/issues/2182 f.dirCache = tempF.dirCache f.root = tempF.root // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // XOpenWriterAt opens with a handle for random access writes // // Pass in the remote desired and the size if known. // // It truncates any existing object. // // OpenWriterAt disabled because it seems to have been disabled at pcloud // PUT /file_open?flags=XXX&folderid=XXX&name=XXX HTTP/1.1 // // { // "result": 2003, // "error": "Access denied. You do not have permissions to perform this operation." 
// } func (f *Fs) XOpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) { client, err := f.newSingleConnClient(ctx) if err != nil { return nil, fmt.Errorf("create client: %w", err) } // init an empty file leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true) if err != nil { return nil, fmt.Errorf("resolve src: %w", err) } openResult, err := fileOpenNew(ctx, client, f, directoryID, leaf) if err != nil { return nil, fmt.Errorf("open file: %w", err) } if _, err := fileClose(ctx, client, f.pacer, openResult.FileDescriptor); err != nil { return nil, fmt.Errorf("close file: %w", err) } writer := &writerAt{ ctx: ctx, fs: f, size: size, remote: remote, fileID: openResult.Fileid, } return writer, nil } // Create a new http client, accepting keep-alive headers, limited to single connection. // Necessary for pcloud fileops API, as it binds the session to the underlying TCP connection. // File descriptors are only valid within the same connection and auto-closed when the connection is closed, // hence we need a separate client (with single connection) for each fd to avoid all sorts of errors and race conditions. func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) { baseClient := fshttp.NewClient(ctx) baseClient.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) { t.MaxConnsPerHost = 1 t.DisableKeepAlives = false }) // Set our own http client in the context ctx = oauthutil.Context(ctx, baseClient) // create a new oauth client, reuse the token source oAuthClient := oauth2.NewClient(ctx, f.ts) return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	var err error
	if info != nil {
		// Set info
		err = o.setMetaData(info)
	} else {
		err = o.readMetaData(ctx) // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	// Find the leaf in pathID — directories only, first name match wins
	found, err = f.listAll(ctx, pathID, true, false, false, func(item *api.Item) bool {
		if item.Name == leaf {
			pathIDOut = item.ID
			return true
		}
		return false
	})
	return pathIDOut, found, err
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
	// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
	var resp *http.Response
	var result api.ItemResult
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/createfolder",
		Parameters: url.Values{},
	}
	opts.Parameters.Set("name", f.opt.Enc.FromStandardName(leaf))
	opts.Parameters.Set("folderid", dirIDtoNumber(pathID))
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		// promote an API-level error (result != 0) to a Go error
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		//fmt.Printf("...Error %v\n", err)
		return "", err
	}
	// fmt.Printf("...Id %q\n", *info.Id)
	return result.Metadata.ID, nil
}

// Converts a dirID which is usually 'd' followed by digits into just
// the digits
func dirIDtoNumber(dirID string) string {
	if len(dirID) > 0 && dirID[0] == 'd' {
		return dirID[1:]
	}
	fs.Debugf(nil, "Invalid directory id %q", dirID)
	return dirID
}

// Converts a fileID which is usually 'f' followed by digits into just
// the digits
func fileIDtoNumber(fileID string) string {
	if len(fileID) > 0 && fileID[0] == 'f' {
		return fileID[1:]
	}
	fs.Debugf(nil, "Invalid file id %q", fileID)
	return fileID
}

// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*api.Item) bool

// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, recursive bool, fn listAllFn) (found bool, err error) {
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/listfolder",
		Parameters: url.Values{},
	}
	if recursive {
		// server-side recursion: one call returns the whole subtree
		opts.Parameters.Set("recursive", "1")
	}
	opts.Parameters.Set("folderid", dirIDtoNumber(dirID))
	var result api.ItemResult
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return found, fmt.Errorf("couldn't list files: %w", err)
	}
	// Walk the (possibly nested) Contents, filtering by type and
	// rewriting each Name to a decoded path relative to dirID.
	var recursiveContents func(is []api.Item, path string)
	recursiveContents = func(is []api.Item, path string) {
		for i := range is {
			item := &is[i] // pointer so the Name rewrite sticks
			if item.IsFolder {
				if filesOnly {
					continue
				}
			} else {
				if directoriesOnly {
					continue
				}
			}
			item.Name = path + f.opt.Enc.ToStandardName(item.Name)
			if fn(item) {
				found = true
				break
			}
			if recursive {
				recursiveContents(item.Contents, item.Name+"/")
			}
		}
	}
	recursiveContents(result.Metadata.Contents, "")
	return
}

// listHelper iterates over all items from the directory
// and calls the callback for each element.
func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callback func(entries fs.DirEntry) error) (err error) { directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return err } var iErr error _, err = f.listAll(ctx, directoryID, false, false, recursive, func(info *api.Item) bool { remote := path.Join(dir, info.Name) if info.IsFolder { // cache the directory ID for later lookups f.dirCache.Put(remote, info.ID) d := fs.NewDir(remote, info.ModTime()).SetID(info.ID) // FIXME more info from dir? iErr = callback(d) } else { o, err := f.newObjectWithInfo(ctx, remote, info) if err != nil { iErr = err return true } iErr = callback(o) } if iErr != nil { return true } return false }) if err != nil { return err } if iErr != nil { return iErr } return nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { return list.WithListP(ctx, dir, f) } // ListP lists the objects and directories of the Fs starting // from dir non recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. 
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { list := list.NewHelper(callback) err = f.listHelper(ctx, dir, false, func(o fs.DirEntry) error { return list.Add(o) }) if err != nil { return err } return list.Flush() } // ListR lists the objects and directories of the Fs starting // from dir recursively into out. func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { list := list.NewHelper(callback) err = f.listHelper(ctx, dir, true, func(o fs.DirEntry) error { return list.Add(o) }) if err != nil { return err } return list.Flush() } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Returns the object, leaf, directoryID and error. // // Used to create new objects func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { // Create the directory for the object if it doesn't exist leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true) if err != nil { return } // Temporary Object under construction o = &Object{ fs: f, remote: remote, } return o, leaf, directoryID, nil } // Put the object into the container // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() modTime := src.ModTime(ctx) o, _, _, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } return o, o.Update(ctx, in, src, options...) 
} // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, err := f.dirCache.FindDir(ctx, dir, true) return err } // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } dc := f.dirCache rootID, err := dc.FindDir(ctx, dir, false) if err != nil { return err } opts := rest.Opts{ Method: "POST", Path: "/deletefolder", Parameters: url.Values{}, } opts.Parameters.Set("folderid", dirIDtoNumber(rootID)) if !check { opts.Path = "/deletefolderrecursive" } var resp *http.Response var result api.ItemResult err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) err = result.Error.Update(err) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("rmdir failed: %w", err) } f.dirCache.FlushDir(dir) if err != nil { return err } return nil } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return time.Second } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	// make sure modTime/size/id are populated before we use them below
	err := srcObj.readMetaData(ctx)
	if err != nil {
		return nil, err
	}
	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
	if err != nil {
		return nil, err
	}
	// Copy the object
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/copyfile",
		Parameters: url.Values{},
	}
	opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
	opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
	opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
	opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(srcObj.modTime.Unix())))
	var resp *http.Response
	var result api.ItemResult
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	err = dstObj.setMetaData(&result.Metadata)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}

// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, false)
}

// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
	rootID, err := f.dirCache.RootID(ctx, false)
	if err != nil {
		return err
	}
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/trash_clear",
		Parameters: url.Values{},
	}
	opts.Parameters.Set("folderid", dirIDtoNumber(rootID))
	// trash_clear goes via the separate cleanup server and needs credentials
	opts.Parameters.Set("username", f.opt.Username)
	opts.Parameters.Set("password", obscure.MustReveal(f.opt.Password))
	var resp *http.Response
	var result api.Error
	return f.pacer.Call(func() (bool, error) {
		resp, err = f.cleanupSrv.CallJSON(ctx, &opts, nil, &result)
		err = result.Update(err)
		return shouldRetry(ctx, resp, err)
	})
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
	if err != nil {
		return nil, err
	}
	// Do the move
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/renamefile",
		Parameters: url.Values{},
	}
	opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
	opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
	opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
	var resp *http.Response
	var result api.ItemResult
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	err = dstObj.setMetaData(&result.Metadata)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
	if err != nil {
		return err
	}
	// Do the move
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/renamefolder",
		Parameters: url.Values{},
	}
	opts.Parameters.Set("folderid", dirIDtoNumber(srcID))
	opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(dstLeaf))
	opts.Parameters.Set("tofolderid", dirIDtoNumber(dstDirectoryID))
	var resp *http.Response
	var result api.ItemResult
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err
	}
	// drop the moved subtree from the source's directory cache
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}

// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}

// linkDir gets a public link for the folder with the given ID.
// NOTE(review): expire is accepted but never sent to the API — confirm
// whether /getfolderpublink supports an expiry parameter.
func (f *Fs) linkDir(ctx context.Context, dirID string, expire fs.Duration) (string, error) {
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/getfolderpublink",
		Parameters: url.Values{},
	}
	var result api.PubLinkResult
	opts.Parameters.Set("folderid", dirIDtoNumber(dirID))
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return "", err
	}
	return result.Link, err // err is nil at this point
}

// linkFile gets a public link for the file at path.
// NOTE(review): expire is accepted but never sent to the API — see linkDir.
func (f *Fs) linkFile(ctx context.Context, path string, expire fs.Duration) (string, error) {
	obj, err := f.NewObject(ctx, path)
	if err != nil {
		return "", err
	}
	o := obj.(*Object)
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/getfilepublink",
		Parameters: url.Values{},
	}
	var result api.PubLinkResult
	opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return "", err
	}
	return result.Link, nil
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	// try as a directory first; fall back to a file link if not found
	dirID, err := f.dirCache.FindDir(ctx, remote, false)
	if err == fs.ErrorDirNotFound {
		return f.linkFile(ctx, remote, expire)
	}
	if err != nil {
		return "", err
	}
	return f.linkDir(ctx, dirID, expire)
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
	opts := rest.Opts{
		Method: "POST",
		Path:   "/userinfo",
	}
	var resp *http.Response
	var q api.UserInfo
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &q)
		err = q.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	// clamp to zero in case usedquota exceeds quota
	free := max(q.Quota-q.UsedQuota, 0)
	usage = &fs.Usage{
		Total: fs.NewUsageValue(q.Quota),     // quota of bytes that can be used
		Used:  fs.NewUsageValue(q.UsedQuota), // bytes in use
		Free:  fs.NewUsageValue(free),        // bytes which can be uploaded before reaching the quota
	}
	return usage, nil
}

// Shutdown shutdown the fs
func (f *Fs) Shutdown(ctx context.Context) error {
	f.tokenRenewer.Shutdown()
	return nil
}

// ChangeNotify implements fs.Features.ChangeNotify
func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
	// Start long-poll loop in background
	go f.changeNotifyLoop(ctx, notify, ch)
}

// changeNotifyLoop contains the blocking long-poll logic.
func (f *Fs) changeNotifyLoop(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) { // Standard polling interval interval := 30 * time.Second // Start with diffID = 0 to get the current state var diffID int64 // Helper to process changes from the diff API handleChanges := func(entries []map[string]any) { notifiedPaths := make(map[string]bool) for _, entry := range entries { meta, ok := entry["metadata"].(map[string]any) if !ok { continue } // Robust extraction of ParentFolderID var pid int64 if val, ok := meta["parentfolderid"]; ok { switch v := val.(type) { case float64: pid = int64(v) case int64: pid = v case int: pid = int64(v) } } // Resolve the path using dirCache.GetInv // pCloud uses "d" prefix for directory IDs in cache, but API returns numbers dirID := fmt.Sprintf("d%d", pid) parentPath, ok := f.dirCache.GetInv(dirID) if !ok { // Parent not in cache, so we can ignore this change as it is outside // of what the mount has seen or cares about. continue } name, _ := meta["name"].(string) fullPath := path.Join(parentPath, name) // Determine EntryType (File or Directory) entryType := fs.EntryObject if isFolder, ok := meta["isfolder"].(bool); ok && isFolder { entryType = fs.EntryDirectory } // Deduplicate notifications for this batch if !notifiedPaths[fullPath] { fs.Debugf(f, "ChangeNotify: detected change in %q (type: %v)", fullPath, entryType) notify(fullPath, entryType) notifiedPaths[fullPath] = true } } } for { // Check context and channel select { case <-ctx.Done(): return case newInterval, ok := <-ch: if !ok { return } interval = newInterval default: } // Setup /diff Request opts := rest.Opts{ Method: "GET", Path: "/diff", Parameters: url.Values{}, } if diffID != 0 {
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/pcloud/pcloud_test.go
backend/pcloud/pcloud_test.go
// Test Pcloud filesystem interface
package pcloud_test

import (
	"testing"

	"github.com/rclone/rclone/backend/pcloud"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
// configured as "TestPcloud:" (see fstest docs for configuring remotes).
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestPcloud:",
		// NilObject lets the test framework check typed-nil handling
		NilObject: (*pcloud.Object)(nil),
	})
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/pcloud/api/types.go
backend/pcloud/api/types.go
// Package api has type definitions for pcloud
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api

import (
	"fmt"
	"time"
)

const (
	// Sun, 16 Mar 2014 17:26:04 +0000
	timeFormat = `"` + time.RFC1123Z + `"`
)

// Time represents date and time information for the
// pcloud API, by using RFC1123Z
type Time time.Time

// MarshalJSON turns a Time into JSON
// NOTE(review): no explicit conversion to UTC is applied here — the time is
// formatted in whatever location it carries.
func (t *Time) MarshalJSON() (out []byte, err error) {
	timeString := (*time.Time)(t).Format(timeFormat)
	return []byte(timeString), nil
}

// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}

// Error is returned from pcloud when things go wrong
//
// If result is 0 then everything is OK
type Error struct {
	Result      int    `json:"result"`
	ErrorString string `json:"error"`
}

// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
	return fmt.Sprintf("pcloud error: %s (%d)", e.ErrorString, e.Result)
}

// Update returns err directly if it was != nil, otherwise it returns
// an Error or nil if no error was detected
func (e *Error) Update(err error) error {
	if err != nil {
		return err
	}
	if e.Result == 0 {
		return nil
	}
	return e
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
	Path           string `json:"path"`
	Name           string `json:"name"`
	Created        Time   `json:"created"`
	IsMine         bool   `json:"ismine"`
	Thumb          bool   `json:"thumb"`
	Modified       Time   `json:"modified"`
	Comments       int    `json:"comments"`
	ID             string `json:"id"`
	IsShared       bool   `json:"isshared"`
	IsDeleted      bool   `json:"isdeleted"`
	Icon           string `json:"icon"`
	IsFolder       bool   `json:"isfolder"`
	ParentFolderID int64  `json:"parentfolderid"`
	FolderID       int64  `json:"folderid,omitempty"`
	Height         int    `json:"height,omitempty"`
	FileID         int64  `json:"fileid,omitempty"`
	Width          int    `json:"width,omitempty"`
	Hash           uint64 `json:"hash,omitempty"`
	Category       int    `json:"category,omitempty"`
	Size           int64  `json:"size,omitempty"`
	ContentType    string `json:"contenttype,omitempty"`
	Contents       []Item `json:"contents"`
}

// ModTime returns the modification time of the item,
// falling back to the creation time if Modified is unset.
func (i *Item) ModTime() (t time.Time) {
	t = time.Time(i.Modified)
	if t.IsZero() {
		t = time.Time(i.Created)
	}
	return t
}

// ItemResult is returned from the /listfolder, /createfolder, /deletefolder, /deletefile, etc. methods
type ItemResult struct {
	Error
	Metadata Item `json:"metadata"`
}

// Hashes contains the supported hashes
type Hashes struct {
	SHA1   string `json:"sha1"`
	MD5    string `json:"md5"`
	SHA256 string `json:"sha256"`
}

// FileTruncateResponse is the response from /file_truncate
type FileTruncateResponse struct {
	Error
}

// FileCloseResponse is the response from /file_close
type FileCloseResponse struct {
	Error
}

// FileOpenResponse is the response from /file_open
type FileOpenResponse struct {
	Error
	Fileid         int64 `json:"fileid"`
	FileDescriptor int64 `json:"fd"`
}

// FileChecksumResponse is the response from /file_checksum
type FileChecksumResponse struct {
	Error
	MD5    string `json:"md5"`
	SHA1   string `json:"sha1"`
	SHA256 string `json:"sha256"`
}

// FilePWriteResponse is the response from /file_pwrite
type FilePWriteResponse struct {
	Error
	Bytes int64 `json:"bytes"`
}

// UploadFileResponse is the response from /uploadfile
type UploadFileResponse struct {
	Error
	Items     []Item   `json:"metadata"`
	Checksums []Hashes `json:"checksums"`
	Fileids   []int64  `json:"fileids"`
}

// GetFileLinkResult is returned from /getfilelink
type GetFileLinkResult struct {
	Error
	Dwltag  string   `json:"dwltag"`
	Hash    uint64   `json:"hash"`
	Size    int64    `json:"size"`
	Expires Time     `json:"expires"`
	Path    string   `json:"path"`
	Hosts   []string `json:"hosts"`
}

// IsValid returns whether the link is valid and has not expired
// (requires at least one host and more than 30s before expiry).
func (g *GetFileLinkResult) IsValid() bool {
	if g == nil {
		return false
	}
	if len(g.Hosts) == 0 {
		return false
	}
	return time.Until(time.Time(g.Expires)) > 30*time.Second
}

// URL returns a URL from the Path and Hosts.  Check with IsValid
// before calling.
func (g *GetFileLinkResult) URL() string {
	// FIXME rotate the hosts?
	return "https://" + g.Hosts[0] + g.Path
}

// ChecksumFileResult is returned from /checksumfile
type ChecksumFileResult struct {
	Error
	Hashes
	Metadata Item `json:"metadata"`
}

// PubLinkResult is returned from /getfilepublink and /getfolderpublink
type PubLinkResult struct {
	Error
	LinkID   int    `json:"linkid"`
	Link     string `json:"link"`
	LinkCode string `json:"code"`
}

// UserInfo is returned from /userinfo
type UserInfo struct {
	Error
	Cryptosetup           bool   `json:"cryptosetup"`
	Plan                  int    `json:"plan"`
	CryptoSubscription    bool   `json:"cryptosubscription"`
	PublicLinkQuota       int64  `json:"publiclinkquota"`
	Email                 string `json:"email"`
	UserID                int    `json:"userid"`
	Quota                 int64  `json:"quota"`
	TrashRevretentionDays int    `json:"trashrevretentiondays"`
	Premium               bool   `json:"premium"`
	PremiumLifetime       bool   `json:"premiumlifetime"`
	EmailVerified         bool   `json:"emailverified"`
	UsedQuota             int64  `json:"usedquota"`
	Language              string `json:"language"`
	Business              bool   `json:"business"`
	CryptoLifetime        bool   `json:"cryptolifetime"`
	Registered            string `json:"registered"`
	Journey               struct {
		Claimed bool `json:"claimed"`
		Steps   struct {
			VerifyMail    bool `json:"verifymail"`
			UploadFile    bool `json:"uploadfile"`
			AutoUpload    bool `json:"autoupload"`
			DownloadApp   bool `json:"downloadapp"`
			DownloadDrive bool `json:"downloaddrive"`
		} `json:"steps"`
	} `json:"journey"`
}

// DiffResult is the response from /diff
// NOTE(review): unlike the other result types this carries a plain string
// Error field rather than embedding the Error struct — matches the /diff
// endpoint's response shape.
type DiffResult struct {
	Result  int              `json:"result"`
	DiffID  int64            `json:"diffid"`
	Entries []map[string]any `json:"entries"`
	Error   string           `json:"error"`
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/onedrive/onedrive.go
backend/onedrive/onedrive.go
// Package onedrive provides an interface to the Microsoft OneDrive // object storage system. package onedrive import ( "context" _ "embed" "encoding/base64" "encoding/hex" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "path" "regexp" "strconv" "strings" "sync" "time" "github.com/rclone/rclone/backend/onedrive/api" "github.com/rclone/rclone/backend/onedrive/quickxorhash" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/rest" ) const ( rcloneClientID = "b15665d9-eda6-4092-8539-0eec376afd59" rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R" minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential configDriveID = "drive_id" configDriveType = "drive_type" driveTypePersonal = "personal" driveTypeBusiness = "business" driveTypeSharepoint = "documentLibrary" defaultChunkSize = 10 * fs.Mebi chunkSizeMultiple = 320 * fs.Kibi maxSinglePartSize = 4 * fs.Mebi regionGlobal = "global" regionUS = "us" regionDE = "de" regionCN = "cn" ) // Globals var ( // Define the paths used for token operations commonPathPrefix = "/common" // prefix for the paths if tenant isn't known authPath = "/oauth2/v2.0/authorize" tokenPath = "/oauth2/v2.0/token" scopeAccess = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", 
"Files.Read.All", "Files.ReadWrite.All", "Sites.Read.All", "offline_access"} scopeAccessWithoutSites = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"} // When using client credential OAuth flow, scope of .default is required in order // to use the permissions configured for the application within the tenant scopeAccessClientCred = fs.SpaceSepList{".default"} // Base config for how to auth oauthConfig = &oauthutil.Config{ Scopes: scopeAccess, ClientID: rcloneClientID, ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), RedirectURL: oauthutil.RedirectLocalhostURL, } graphAPIEndpoint = map[string]string{ "global": "https://graph.microsoft.com", "us": "https://graph.microsoft.us", "de": "https://graph.microsoft.de", "cn": "https://microsoftgraph.chinacloudapi.cn", } authEndpoint = map[string]string{ "global": "https://login.microsoftonline.com", "us": "https://login.microsoftonline.us", "de": "https://login.microsoftonline.de", "cn": "https://login.chinacloudapi.cn", } // QuickXorHashType is the hash.Type for OneDrive QuickXorHashType hash.Type //go:embed metadata.md metadataHelp string ) // Register with Fs func init() { QuickXorHashType = hash.RegisterHash("quickxor", "QuickXorHash", 40, quickxorhash.New) fs.Register(&fs.RegInfo{ Name: "onedrive", Description: "Microsoft OneDrive", NewFs: NewFs, Config: Config, MetadataInfo: &fs.MetadataInfo{ System: systemMetadataInfo, Help: metadataHelp, }, Options: append(oauthutil.SharedOptions, []fs.Option{{ Name: "region", Help: "Choose national cloud region for OneDrive.", Default: "global", Examples: []fs.OptionExample{ { Value: regionGlobal, Help: "Microsoft Cloud Global", }, { Value: regionUS, Help: "Microsoft Cloud for US Government", }, { Value: regionDE, Help: "Microsoft Cloud Germany (deprecated - try " + regionGlobal + " region first).", }, { Value: regionCN, Help: "Azure and Office 365 operated by Vnet Group in China", }, }, }, { Name: 
"upload_cutoff", Help: `Cutoff for switching to chunked upload. Any files larger than this will be uploaded in chunks of chunk_size. This is disabled by default as uploading using single part uploads causes rclone to use twice the storage on Onedrive business as when rclone sets the modification time after the upload Onedrive creates a new version. See: https://github.com/rclone/rclone/issues/1716 `, Default: fs.SizeSuffix(-1), Advanced: true, }, { Name: "chunk_size", Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes). Above this size files will be chunked - must be multiple of 320k (327,680 bytes) and should not exceed 250M (262,144,000 bytes) else you may encounter \"Microsoft.SharePoint.Client.InvalidClientQueryException: The request message is too big.\" Note that the chunks will be buffered into memory.`, Default: defaultChunkSize, Advanced: true, }, { Name: "drive_id", Help: "The ID of the drive to use.", Default: "", Advanced: true, Sensitive: true, }, { Name: "drive_type", Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").", Default: "", Advanced: true, }, { Name: "root_folder_id", Help: `ID of the root folder. This isn't normally needed, but in special circumstances you might know the folder ID that you wish to access but not be able to get there through a path traversal. `, Advanced: true, Sensitive: true, }, { Name: "access_scopes", Help: `Set scopes to be requested by rclone. Choose or manually enter a custom space separated list with all scopes, that rclone should request. 
`, Default: scopeAccess, Advanced: true, Examples: []fs.OptionExample{ { Value: "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access", Help: "Read and write access to all resources", }, { Value: "Files.Read Files.Read.All Sites.Read.All offline_access", Help: "Read only access to all resources", }, { Value: "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All offline_access", Help: "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true", }, }, }, { Name: "tenant", Help: `ID of the service principal's tenant. Also called its directory ID. Set this if using - Client Credential flow `, Sensitive: true, }, { Name: "disable_site_permission", Help: `Disable the request for Sites.Read.All permission. If set to true, you will no longer be able to search for a SharePoint site when configuring drive ID, because rclone will not request Sites.Read.All permission. Set it to true if your organization didn't assign Sites.Read.All permission to the application, and your organization disallows users to consent app permission request on their own.`, Default: false, Advanced: true, Hide: fs.OptionHideBoth, }, { Name: "expose_onenote_files", Help: `Set to make OneNote files show up in directory listings. By default, rclone will hide OneNote files in directory listings because operations like "Open" and "Update" won't work on them. But this behaviour may also prevent you from deleting them. If you want to delete OneNote files or otherwise want them to show up in directory listing, set this option.`, Default: false, Advanced: true, }, { Name: "server_side_across_configs", Default: false, Help: `Deprecated: use --server-side-across-configs instead. Allow server-side operations (e.g. copy) to work across different onedrive configs. 
This will work if you are copying between two OneDrive *Personal* drives AND the files to copy are already shared between them. Additionally, it should also function for a user who has access permissions both between Onedrive for *business* and *SharePoint* under the *same tenant*, and between *SharePoint* and another *SharePoint* under the *same tenant*. In other cases, rclone will fall back to normal copy (which will be slightly slower).`, Advanced: true, }, { Name: "list_chunk", Help: "Size of listing chunk.", Default: 1000, Advanced: true, }, { Name: "no_versions", Default: false, Help: `Remove all versions on modifying operations. Onedrive for business creates versions when rclone uploads new files overwriting an existing one and when it sets the modification time. These versions take up space out of the quota. This flag checks for versions after file upload and setting modification time and removes all but the last version. **NB** Onedrive personal can't currently delete versions so don't use this flag there. `, Advanced: true, }, { Name: "hard_delete", Help: `Permanently delete files on removal. Normally files will get sent to the recycle bin on deletion. Setting this flag causes them to be permanently deleted. Use with care. OneDrive personal accounts do not support the permanentDelete API, it only applies to OneDrive for Business and SharePoint document libraries. 
`, Advanced: true, Default: false, }, { Name: "link_scope", Default: "anonymous", Help: `Set the scope of the links created by the link command.`, Advanced: true, Examples: []fs.OptionExample{{ Value: "anonymous", Help: "Anyone with the link has access, without needing to sign in.\nThis may include people outside of your organization.\nAnonymous link support may be disabled by an administrator.", }, { Value: "organization", Help: "Anyone signed into your organization (tenant) can use the link to get access.\nOnly available in OneDrive for Business and SharePoint.", }}, }, { Name: "link_type", Default: "view", Help: `Set the type of the links created by the link command.`, Advanced: true, Examples: []fs.OptionExample{{ Value: "view", Help: "Creates a read-only link to the item.", }, { Value: "edit", Help: "Creates a read-write link to the item.", }, { Value: "embed", Help: "Creates an embeddable link to the item.", }}, }, { Name: "link_password", Default: "", Help: `Set the password for links created by the link command. At the time of writing this only works with OneDrive personal paid accounts. `, Advanced: true, Sensitive: true, }, { Name: "hash_type", Default: "auto", Help: `Specify the hash in use for the backend. This specifies the hash type in use. If set to "auto" it will use the default hash which is QuickXorHash. Before rclone 1.62 an SHA1 hash was used by default for Onedrive Personal. For 1.62 and later the default is to use a QuickXorHash for all onedrive types. If an SHA1 hash is desired then set this option accordingly. From July 2023 QuickXorHash will be the only available hash for both OneDrive for Business and OneDrive Personal. This can be set to "none" to not use any hashes. If the hash requested does not exist on the object, it will be returned as an empty string which is treated as a missing hash by rclone. 
`, Examples: []fs.OptionExample{{ Value: "auto", Help: "Rclone chooses the best hash", }, { Value: "quickxor", Help: "QuickXor", }, { Value: "sha1", Help: "SHA1", }, { Value: "sha256", Help: "SHA256", }, { Value: "crc32", Help: "CRC32", }, { Value: "none", Help: "None - don't use any hashes", }}, Advanced: true, }, { Name: "av_override", Default: false, Help: `Allows download of files the server thinks has a virus. The onedrive/sharepoint server may check files uploaded with an Anti Virus checker. If it detects any potential viruses or malware it will block download of the file. In this case you will see a message like this server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden: If you are 100% sure you want to download this file anyway then use the --onedrive-av-override flag, or av_override = true in the config file. `, Advanced: true, }, { Name: "delta", Default: false, Help: strings.ReplaceAll(`If set rclone will use delta listing to implement recursive listings. If this flag is set the onedrive backend will advertise |ListR| support for recursive listings. Setting this flag speeds up these things greatly: rclone lsf -R onedrive: rclone size onedrive: rclone rc vfs/refresh recursive=true **However** the delta listing API **only** works at the root of the drive. If you use it not at the root then it recurses from the root and discards all the data that is not under the directory you asked for. So it will be correct but may not be very efficient. This is why this flag is not set as the default. As a rule of thumb if nearly all of your data is under rclone's root directory (the |root/directory| in |onedrive:root/directory|) then using this flag will be be a big performance win. If your data is mostly not under the root then using this flag will be a big performance loss. 
It is recommended if you are mounting your onedrive at the root (or near the root when using crypt) and using rclone |rc vfs/refresh|. `, "|", "`"), Advanced: true, }, { Name: "metadata_permissions", Help: `Control whether permissions should be read or written in metadata. Reading permissions metadata from files can be done quickly, but it isn't always desirable to set the permissions from the metadata. `, Advanced: true, Default: rwOff, Examples: rwExamples, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // List of replaced characters: // < (less than) -> '<' // FULLWIDTH LESS-THAN SIGN // > (greater than) -> '>' // FULLWIDTH GREATER-THAN SIGN // : (colon) -> ':' // FULLWIDTH COLON // " (double quote) -> '"' // FULLWIDTH QUOTATION MARK // \ (backslash) -> '\' // FULLWIDTH REVERSE SOLIDUS // | (vertical line) -> '|' // FULLWIDTH VERTICAL LINE // ? (question mark) -> '?' // FULLWIDTH QUESTION MARK // * (asterisk) -> '*' // FULLWIDTH ASTERISK // // Folder names cannot begin with a tilde ('~') // List of replaced characters: // ~ (tilde) -> '~' // FULLWIDTH TILDE // // Additionally names can't begin with a space ( ) or end with a period (.) or space ( ). // List of replaced characters: // . (period) -> '.' // FULLWIDTH FULL STOP // (space) -> '␠' // SYMBOL FOR SPACE // // Also encode invalid UTF-8 bytes as json doesn't handle them. // // The OneDrive API documentation lists the set of reserved characters, but // testing showed this list is incomplete. This are the differences: // - " (double quote) is rejected, but missing in the documentation // - space at the end of file and folder names is rejected, but missing in the documentation // - period at the end of file names is rejected, but missing in the documentation // // Adding these restrictions to the OneDrive API documentation yields exactly // the same rules as the Windows naming conventions. 
// // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/addressing-driveitems?view=odsp-graph-online#path-encoding Default: (encoder.Display | encoder.EncodeBackSlash | encoder.EncodeLeftSpace | encoder.EncodeLeftTilde | encoder.EncodeRightPeriod | encoder.EncodeRightSpace | encoder.EncodeWin | encoder.EncodeInvalidUtf8), }}...), }) } // Get the region and graphURL from the config func getRegionURL(m configmap.Mapper) (region, graphURL string) { region, _ = m.Get("region") graphURL = graphAPIEndpoint[region] + "/v1.0" return region, graphURL } // Config for chooseDrive type chooseDriveOpt struct { opts rest.Opts finalDriveID string siteID string relativePath string } // chooseDrive returns a query to choose which drive the user is interested in func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest.Client, opt chooseDriveOpt) (*fs.ConfigOut, error) { _, graphURL := getRegionURL(m) // if we use server-relative URL for finding the drive if opt.relativePath != "" { opt.opts = rest.Opts{ Method: "GET", RootURL: graphURL, Path: "/sites/root:" + opt.relativePath, } site := api.SiteResource{} _, err := srv.CallJSON(ctx, &opt.opts, nil, &site) if err != nil { return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available site by relative path: %v", err)) } opt.siteID = site.SiteID } // if we have a siteID we need to ask for the drives if opt.siteID != "" { opt.opts = rest.Opts{ Method: "GET", RootURL: graphURL, Path: "/sites/" + opt.siteID + "/drives", } } drives := api.DrivesResponse{} // We don't have the final ID yet? 
// query Microsoft Graph if opt.finalDriveID == "" { _, err := srv.CallJSON(ctx, &opt.opts, nil, &drives) if err != nil { return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err)) } // Also call /me/drive as sometimes /me/drives doesn't return it #4068 if opt.opts.Path == "/me/drives" { opt.opts.Path = "/me/drive" meDrive := api.DriveResource{} _, err := srv.CallJSON(ctx, &opt.opts, nil, &meDrive) if err != nil { return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err)) } found := false for _, drive := range drives.Drives { if drive.DriveID == meDrive.DriveID { found = true break } } // add the me drive if not found already if !found { fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive) drives.Drives = append(drives.Drives, meDrive) } } } else { drives.Drives = append(drives.Drives, api.DriveResource{ DriveID: opt.finalDriveID, DriveName: "Chosen Drive ID", DriveType: "drive", }) } if len(drives.Drives) == 0 { return fs.ConfigError("choose_type", "No drives found") } return fs.ConfigChoose("driveid_final", "config_driveid", "Select drive you want to use", len(drives.Drives), func(i int) (string, string) { drive := drives.Drives[i] return drive.DriveID, fmt.Sprintf("%s (%s)", drive.DriveName, drive.DriveType) }) } // Make the oauth config for the backend func makeOauthConfig(ctx context.Context, opt *Options) (*oauthutil.Config, error) { // Copy the default oauthConfig oauthConfig := *oauthConfig // Set the scopes oauthConfig.Scopes = opt.AccessScopes if opt.DisableSitePermission { oauthConfig.Scopes = scopeAccessWithoutSites } // Construct the auth URLs prefix := commonPathPrefix if opt.Tenant != "" { prefix = "/" + opt.Tenant } oauthConfig.TokenURL = authEndpoint[opt.Region] + prefix + tokenPath oauthConfig.AuthURL = authEndpoint[opt.Region] + prefix + authPath // Check to see if we are using client credentials flow if opt.ClientCredentials { // Override scope to .default 
oauthConfig.Scopes = scopeAccessClientCred if opt.Tenant == "" { return nil, fmt.Errorf("tenant parameter must be set when using %s", config.ConfigClientCredentials) } } return &oauthConfig, nil } // Config the backend func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) { opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } _, graphURL := getRegionURL(m) // Check to see if this is the start of the state machine execution if conf.State == "" { conf, err := makeOauthConfig(ctx, opt) if err != nil { return nil, err } return oauthutil.ConfigOut("choose_type", &oauthutil.Options{ OAuth2Config: conf, }) } oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig) if err != nil { return nil, fmt.Errorf("failed to configure OneDrive: %w", err) } // Create a REST client, build on the OAuth client created above srv := rest.NewClient(oAuthClient) switch conf.State { case "choose_type": return fs.ConfigChooseExclusiveFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{ Value: "onedrive", Help: "OneDrive Personal or Business", }, { Value: "sharepoint", Help: "Root Sharepoint site", }, { Value: "url", Help: "Sharepoint site name or URL\nE.g. mysite or https://contoso.sharepoint.com/sites/mysite", }, { Value: "search", Help: "Search for a Sharepoint site", }, { Value: "driveid", Help: "Type in driveID (advanced)", }, { Value: "siteid", Help: "Type in SiteID (advanced)", }, { Value: "path", Help: "Sharepoint server-relative path (advanced)\nE.g. 
/teams/hr", }}) case "choose_type_done": // Jump to next state according to config chosen return fs.ConfigGoto(conf.Result) case "onedrive": return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ opts: rest.Opts{ Method: "GET", RootURL: graphURL, Path: "/me/drives", }, }) case "sharepoint": return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ opts: rest.Opts{ Method: "GET", RootURL: graphURL, Path: "/sites/root/drives", }, }) case "driveid": out, err := fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID") if err != nil { return out, err } // Default the drive_id to the previous version in the config out.Option.Default, _ = m.Get("drive_id") return out, nil case "driveid_end": return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ finalDriveID: conf.Result, }) case "siteid": return fs.ConfigInput("siteid_end", "config_siteid", "Site ID") case "siteid_end": return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ siteID: conf.Result, }) case "url": return fs.ConfigInput("url_end", "config_site_url", `Site URL Examples: - "mysite" - "https://XXX.sharepoint.com/sites/mysite" - "https://XXX.sharepoint.com/teams/ID" `) case "url_end": siteURL := conf.Result re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`) match := re.FindStringSubmatch(siteURL) if len(match) == 2 { return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ relativePath: match[1], }) } return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ relativePath: "/sites/" + siteURL, }) case "path": return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`) case "path_end": return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ relativePath: conf.Result, }) case "search": return fs.ConfigInput("search_end", "config_search_term", `Search term`) case "search_end": searchTerm := conf.Result opts := rest.Opts{ Method: "GET", RootURL: graphURL, Path: "/sites?search=" + searchTerm, } sites := api.SiteResponse{} _, err := srv.CallJSON(ctx, &opts, nil, &sites) if err != nil { return 
fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available sites: %v", err)) } if len(sites.Sites) == 0 { return fs.ConfigError("choose_type", fmt.Sprintf("search for %q returned no results", searchTerm)) } return fs.ConfigChoose("search_sites", "config_site", `Select the Site you want to use`, len(sites.Sites), func(i int) (string, string) { site := sites.Sites[i] return site.SiteID, fmt.Sprintf("%s (%s)", site.SiteName, site.SiteURL) }) case "search_sites": return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ siteID: conf.Result, }) case "driveid_final": finalDriveID := conf.Result // Test the driveID and get drive type opts := rest.Opts{ Method: "GET", RootURL: graphURL, Path: "/drives/" + finalDriveID + "/root", } var rootItem api.Item _, err = srv.CallJSON(ctx, &opts, nil, &rootItem) if err != nil { return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query root for drive %q: %v", finalDriveID, err)) } m.Set(configDriveID, finalDriveID) m.Set(configDriveType, rootItem.ParentReference.DriveType) return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)) case "driveid_final_end": if conf.Result == "true" { return nil, nil } return fs.ConfigGoto("choose_type") } return nil, fmt.Errorf("unknown state %q", conf.State) } // Options defines the configuration for this backend type Options struct { Region string `config:"region"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` ChunkSize fs.SizeSuffix `config:"chunk_size"` DriveID string `config:"drive_id"` DriveType string `config:"drive_type"` RootFolderID string `config:"root_folder_id"` DisableSitePermission bool `config:"disable_site_permission"` ClientCredentials bool `config:"client_credentials"` AccessScopes fs.SpaceSepList `config:"access_scopes"` Tenant string `config:"tenant"` ExposeOneNoteFiles bool `config:"expose_onenote_files"` 
ServerSideAcrossConfigs bool `config:"server_side_across_configs"` ListChunk int64 `config:"list_chunk"` NoVersions bool `config:"no_versions"` HardDelete bool `config:"hard_delete"` LinkScope string `config:"link_scope"` LinkType string `config:"link_type"` LinkPassword string `config:"link_password"` HashType string `config:"hash_type"` AVOverride bool `config:"av_override"` Delta bool `config:"delta"` Enc encoder.MultiEncoder `config:"encoding"` MetadataPermissions rwChoice `config:"metadata_permissions"` } // Fs represents a remote OneDrive type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options ci *fs.ConfigInfo // global config features *fs.Features // optional features srv *rest.Client // the connection to the OneDrive server unAuth *rest.Client // no authentication connection to the OneDrive server dirCache *dircache.DirCache // Map of directory path to directory id pacer *fs.Pacer // pacer for API calls tokenRenewer *oauthutil.Renew // renew the token on expiry driveID string // ID to use for querying Microsoft Graph driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive hashType hash.Type // type of the hash we are using } // Object describes a OneDrive object // // Will definitely have info but maybe not meta type Object struct { fs *Fs // what this object is part of remote string // The remote path hasMetaData bool // whether info below has been set isOneNoteFile bool // Whether the object is a OneNote file size int64 // size of the object modTime time.Time // modification time of the object id string // ID of the object hash string // Hash of the content, usually QuickXorHash but set as hash_type mimeType string // Content-Type of object from server (may not be as uploaded) meta *Metadata // metadata properties } // Directory describes a OneDrive directory type Directory struct { fs *Fs // what this object is part of remote string // The 
remote path size int64 // size of directory and contents or -1 if unknown items int64 // number of objects or -1 for unknown id string // dir ID meta *Metadata // metadata properties } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("OneDrive root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // parsePath parses a OneDrive 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } var ( gatewayTimeoutError sync.Once errAsyncJobAccessDenied = errors.New("async job failed - access denied") ) // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } retry := false if resp != nil { switch resp.StatusCode { case 400: if apiErr, ok := err.(*api.Error); ok { if apiErr.ErrorInfo.InnerError.Code == "pathIsTooLong" { return false, fserrors.NoRetryError(err) } } case 401: if len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") { retry = true fs.Debugf(nil, "Should retry: %v", err) } else if err != nil && strings.Contains(err.Error(), "Unable to initialize RPS") { retry = true fs.Debugf(nil, "HTTP 401: Unable to initialize RPS. 
Trying again.") } case 429, 503: // Too Many Requests, Server Too Busy // see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online if values := resp.Header["Retry-After"]; len(values) == 1 && values[0] != "" { retryAfter, parseErr := strconv.Atoi(values[0]) if parseErr != nil { fs.Debugf(nil, "Failed to parse Retry-After: %q: %v", values[0], parseErr) } else { duration := time.Second * time.Duration(retryAfter) retry = true err = pacer.RetryAfterError(err, duration) fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", retryAfter) } } case 504: // Gateway timeout gatewayTimeoutError.Do(func() { fs.Errorf(nil, "%v: upload chunks may be taking too long - try reducing --onedrive-chunk-size or decreasing --transfers", err) }) case 507: // Insufficient Storage return false, fserrors.FatalError(err) } } return retry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID. // if `relPath` == "", it reads the metadata for the item with that ID. 
// // We address items using the pattern `drives/driveID/items/itemID:/relativePath` // instead of simply using `drives/driveID/root:/itemPath` because it works for // "shared with me" folders in OneDrive Personal (See #2536, #2778) // This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480 // // If `relPath` == ”, do not append the slash (See #3664) func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) { opts, _ := f.newOptsCallWithIDPath(normalizedID, relPath, true, "GET", "") err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &info) return shouldRetry(ctx, resp, err) }) return info, resp, err } // readMetaDataForPath reads the metadata from the path (relative to the absolute root)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/onedrive/onedrive_test.go
backend/onedrive/onedrive_test.go
// Test OneDrive filesystem interface package onedrive import ( "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestOneDrive:", NilObject: (*Object)(nil), ChunkedUpload: fstests.ChunkedUploadConfig{ CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple), }, }) } // TestIntegrationCn runs integration tests against the remote func TestIntegrationCn(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("skipping as -remote is set") } fstests.Run(t, &fstests.Opt{ RemoteName: "TestOneDriveCn:", NilObject: (*Object)(nil), ChunkedUpload: fstests.ChunkedUploadConfig{ CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple), }, }) } func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadChunkSize(cs) } var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/onedrive/metadata.go
backend/onedrive/metadata.go
package onedrive import ( "context" "encoding/json" "errors" "fmt" "net/http" "slices" "strings" "time" "github.com/rclone/rclone/backend/onedrive/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/errcount" ) const ( dirMimeType = "inode/directory" timeFormatIn = time.RFC3339 timeFormatOut = "2006-01-02T15:04:05.999Z" // mS for OneDrive Personal, otherwise only S ) // system metadata keys which this backend owns var systemMetadataInfo = map[string]fs.MetadataHelp{ "content-type": { Help: "The MIME type of the file.", Type: "string", Example: "text/plain", ReadOnly: true, }, "mtime": { Help: "Time of last modification with S accuracy (mS for OneDrive Personal).", Type: "RFC 3339", Example: "2006-01-02T15:04:05Z", }, "btime": { Help: "Time of file birth (creation) with S accuracy (mS for OneDrive Personal).", Type: "RFC 3339", Example: "2006-01-02T15:04:05Z", }, "utime": { Help: "Time of upload with S accuracy (mS for OneDrive Personal).", Type: "RFC 3339", Example: "2006-01-02T15:04:05Z", ReadOnly: true, }, "created-by-display-name": { Help: "Display name of the user that created the item.", Type: "string", Example: "John Doe", ReadOnly: true, }, "created-by-id": { Help: "ID of the user that created the item.", Type: "string", Example: "48d31887-5fad-4d73-a9f5-3c356e68a038", ReadOnly: true, }, "description": { Help: "A short description of the file. Max 1024 characters. 
Only supported for OneDrive Personal.", Type: "string", Example: "Contract for signing", }, "id": { Help: "The unique identifier of the item within OneDrive.", Type: "string", Example: "01BYE5RZ6QN3ZWBTUFOFD3GSPGOHDJD36K", ReadOnly: true, }, "last-modified-by-display-name": { Help: "Display name of the user that last modified the item.", Type: "string", Example: "John Doe", ReadOnly: true, }, "last-modified-by-id": { Help: "ID of the user that last modified the item.", Type: "string", Example: "48d31887-5fad-4d73-a9f5-3c356e68a038", ReadOnly: true, }, "malware-detected": { Help: "Whether OneDrive has detected that the item contains malware.", Type: "boolean", Example: "true", ReadOnly: true, }, "package-type": { Help: "If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others.", Type: "string", Example: "oneNote", ReadOnly: true, }, "shared-owner-id": { Help: "ID of the owner of the shared item (if shared).", Type: "string", Example: "48d31887-5fad-4d73-a9f5-3c356e68a038", ReadOnly: true, }, "shared-by-id": { Help: "ID of the user that shared the item (if shared).", Type: "string", Example: "48d31887-5fad-4d73-a9f5-3c356e68a038", ReadOnly: true, }, "shared-scope": { Help: "If shared, indicates the scope of how the item is shared: anonymous, organization, or users.", Type: "string", Example: "users", ReadOnly: true, }, "shared-time": { Help: "Time when the item was shared, with S accuracy (mS for OneDrive Personal).", Type: "RFC 3339", Example: "2006-01-02T15:04:05Z", ReadOnly: true, }, "permissions": { Help: "Permissions in a JSON dump of OneDrive format. Enable with --onedrive-metadata-permissions. 
Properties: id, grantedTo, grantedToIdentities, invitation, inheritedFrom, link, roles, shareId", Type: "JSON", Example: "{}", }, } // rwChoices type for fs.Bits type rwChoices struct{} func (rwChoices) Choices() []fs.BitsChoicesInfo { return []fs.BitsChoicesInfo{ {Bit: uint64(rwOff), Name: "off"}, {Bit: uint64(rwRead), Name: "read"}, {Bit: uint64(rwWrite), Name: "write"}, {Bit: uint64(rwFailOK), Name: "failok"}, } } // rwChoice type alias type rwChoice = fs.Bits[rwChoices] const ( rwRead rwChoice = 1 << iota rwWrite rwFailOK rwOff rwChoice = 0 ) // Examples for the options var rwExamples = fs.OptionExamples{{ Value: rwOff.String(), Help: "Do not read or write the value", }, { Value: rwRead.String(), Help: "Read the value only", }, { Value: rwWrite.String(), Help: "Write the value only", }, { Value: (rwRead | rwWrite).String(), Help: "Read and Write the value.", }, { Value: rwFailOK.String(), Help: "If writing fails log errors only, don't fail the transfer", }} // Metadata describes metadata properties shared by both Objects and Directories type Metadata struct { fs *Fs // what this object/dir is part of remote string // remote, for convenience when obj/dir not in scope mimeType string // Content-Type of object from server (may not be as uploaded) description string // Provides a user-visible description of the item. Read-write. Only on OneDrive Personal mtime time.Time // Time of last modification with S accuracy. btime time.Time // Time of file birth (creation) with S accuracy. utime time.Time // Time of upload with S accuracy. createdBy api.IdentitySet // user that created the item lastModifiedBy api.IdentitySet // user that last modified the item malwareDetected bool // Whether OneDrive has detected that the item contains malware. packageType string // If present, indicates that this item is a package instead of a folder or file. 
shared *api.SharedType // information about the shared state of the item, if shared normalizedID string // the normalized ID of the object or dir permissions []*api.PermissionsType // The current set of permissions for the item. Note that to save API calls, this is not guaranteed to be cached on the object. Use m.Get() to refresh. queuedPermissions []*api.PermissionsType // The set of permissions queued to be updated. permsAddOnly bool // Whether to disable "update" and "remove" (for example, during server-side copy when the dst will have new IDs) } // Get retrieves the cached metadata and converts it to fs.Metadata. // This is most typically used when OneDrive is the source (as opposed to the dest). // If m.fs.opt.MetadataPermissions includes "read" then this will also include permissions, which requires an API call. // Get does not use an API call otherwise. func (m *Metadata) Get(ctx context.Context) (metadata fs.Metadata, err error) { metadata = make(fs.Metadata, 17) metadata["content-type"] = m.mimeType metadata["mtime"] = m.mtime.Format(timeFormatOut) metadata["btime"] = m.btime.Format(timeFormatOut) metadata["utime"] = m.utime.Format(timeFormatOut) metadata["created-by-display-name"] = m.createdBy.User.DisplayName metadata["created-by-id"] = m.createdBy.User.ID if m.description != "" { metadata["description"] = m.description } metadata["id"] = m.normalizedID metadata["last-modified-by-display-name"] = m.lastModifiedBy.User.DisplayName metadata["last-modified-by-id"] = m.lastModifiedBy.User.ID metadata["malware-detected"] = fmt.Sprint(m.malwareDetected) if m.packageType != "" { metadata["package-type"] = m.packageType } if m.shared != nil { metadata["shared-owner-id"] = m.shared.Owner.User.ID metadata["shared-by-id"] = m.shared.SharedBy.User.ID metadata["shared-scope"] = m.shared.Scope metadata["shared-time"] = time.Time(m.shared.SharedDateTime).Format(timeFormatOut) } if m.fs.opt.MetadataPermissions.IsSet(rwRead) { p, _, err := m.fs.getPermissions(ctx, 
m.normalizedID) if err != nil { return nil, fmt.Errorf("failed to get permissions: %w", err) } m.permissions = p if len(p) > 0 { fs.PrettyPrint(m.permissions, "perms", fs.LogLevelDebug) buf, err := json.Marshal(m.permissions) if err != nil { return nil, fmt.Errorf("failed to marshal permissions: %w", err) } metadata["permissions"] = string(buf) } } return metadata, nil } // Set takes fs.Metadata and parses/converts it to cached Metadata. // This is most typically used when OneDrive is the destination (as opposed to the source). // It does not actually update the remote (use Write for that.) // It sets only the writeable metadata properties (i.e. read-only properties are skipped.) // Permissions are included if m.fs.opt.MetadataPermissions includes "write". // It returns errors if writeable properties can't be parsed. // It does not return errors for unsupported properties that may be passed in. // It returns the number of writeable properties set (if it is 0, we can skip the Write API call.) 
func (m *Metadata) Set(ctx context.Context, metadata fs.Metadata) (numSet int, err error) { numSet = 0 for k, v := range metadata { switch k { case "mtime": t, err := time.Parse(timeFormatIn, v) if err != nil { return numSet, fmt.Errorf("failed to parse metadata %q = %q: %w", k, v, err) } m.mtime = t numSet++ case "btime": t, err := time.Parse(timeFormatIn, v) if err != nil { return numSet, fmt.Errorf("failed to parse metadata %q = %q: %w", k, v, err) } m.btime = t numSet++ case "description": if m.fs.driveType != driveTypePersonal { fs.Debugf(m.remote, "metadata description is only supported for OneDrive Personal -- skipping: %s", v) continue } m.description = v numSet++ case "permissions": if !m.fs.opt.MetadataPermissions.IsSet(rwWrite) { continue } var perms []*api.PermissionsType err := json.Unmarshal([]byte(v), &perms) if err != nil { return numSet, fmt.Errorf("failed to unmarshal permissions: %w", err) } m.queuedPermissions = perms numSet++ default: fs.Debugf(m.remote, "skipping unsupported metadata item: %s: %s", k, v) } } if numSet == 0 { fs.Infof(m.remote, "no writeable metadata found: %v", metadata) } return numSet, nil } // toAPIMetadata converts object/dir Metadata to api.Metadata for API calls. // If btime is missing but mtime is present, mtime is also used as the btime, as otherwise it would get overwritten. func (m *Metadata) toAPIMetadata() api.Metadata { update := api.Metadata{ FileSystemInfo: &api.FileSystemInfoFacet{}, } if m.description != "" && m.fs.driveType == driveTypePersonal { update.Description = m.description } if !m.mtime.IsZero() { update.FileSystemInfo.LastModifiedDateTime = api.Timestamp(m.mtime) } if !m.btime.IsZero() { update.FileSystemInfo.CreatedDateTime = api.Timestamp(m.btime) } if m.btime.IsZero() && !m.mtime.IsZero() { // use mtime as btime if missing m.btime = m.mtime update.FileSystemInfo.CreatedDateTime = api.Timestamp(m.btime) } return update } // Write takes the cached Metadata and sets it on the remote, using API calls. 
// If m.fs.opt.MetadataPermissions includes "write" and updatePermissions == true, permissions are also set. // Calling Write without any writeable metadata will result in an error. func (m *Metadata) Write(ctx context.Context, updatePermissions bool) (*api.Item, error) { update := m.toAPIMetadata() if update.IsEmpty() { return nil, fmt.Errorf("%v: no writeable metadata found: %v", m.remote, m) } opts := m.fs.newOptsCallWithPath(ctx, m.remote, "PATCH", "") var info *api.Item err := m.fs.pacer.Call(func() (bool, error) { resp, err := m.fs.srv.CallJSON(ctx, &opts, &update, &info) return shouldRetry(ctx, resp, err) }) if err != nil { fs.Debugf(m.remote, "errored metadata: %v", m) return nil, fmt.Errorf("%v: error updating metadata: %v", m.remote, err) } if m.fs.opt.MetadataPermissions.IsSet(rwWrite) && updatePermissions { m.normalizedID = info.GetID() err = m.WritePermissions(ctx) if err != nil { fs.Errorf(m.remote, "error writing permissions: %v", err) return info, err } } // update the struct since we have fresh info m.fs.setSystemMetadata(info, m, m.remote, m.mimeType) return info, err } // RefreshPermissions fetches the current permissions from the remote and caches them as Metadata func (m *Metadata) RefreshPermissions(ctx context.Context) (err error) { if m.normalizedID == "" { return errors.New("internal error: normalizedID is missing") } p, _, err := m.fs.getPermissions(ctx, m.normalizedID) if err != nil { return fmt.Errorf("failed to refresh permissions: %w", err) } m.permissions = p return nil } // WritePermissions sets the permissions (and no other metadata) on the remote. // m.permissions (the existing perms) and m.queuedPermissions (the new perms to be set) must be set correctly before calling this. // m.permissions == nil will not error, as it is valid to add permissions when there were previously none. // If successful, m.permissions will be set with the new current permissions and m.queuedPermissions will be nil. 
func (m *Metadata) WritePermissions(ctx context.Context) (err error) { if !m.fs.opt.MetadataPermissions.IsSet(rwWrite) { return errors.New("can't write permissions without --onedrive-metadata-permissions write") } if m.normalizedID == "" { return errors.New("internal error: normalizedID is missing") } if m.fs.opt.MetadataPermissions.IsSet(rwFailOK) { // If failok is set, allow the permissions setting to fail and only log an ERROR defer func() { if err != nil { fs.Errorf(m.fs, "Ignoring error as failok is set: %v", err) err = nil } }() } // compare current to queued and sort into add/update/remove queues add, update, remove := m.sortPermissions() fs.Debugf(m.remote, "metadata permissions: to add: %d to update: %d to remove: %d", len(add), len(update), len(remove)) _, err = m.processPermissions(ctx, add, update, remove) if err != nil { return fmt.Errorf("failed to process permissions: %w", err) } err = m.RefreshPermissions(ctx) fs.Debugf(m.remote, "updated permissions (now has %d permissions)", len(m.permissions)) if err != nil { return fmt.Errorf("failed to get permissions: %w", err) } m.queuedPermissions = nil return nil } // Order the permissions so that any with users come first. // // This is to work around a quirk with Graph: // // 1. You are adding permissions for both a group and a user. // 2. The user is a member of the group. // 3. The permissions for the group and user are the same. // 4. You are adding the group permission before the user permission. 
// // When all of the above are true, Graph indicates it has added the // user permission, but it immediately drops it // // See: https://github.com/rclone/rclone/issues/8465 func (m *Metadata) orderPermissions(xs []*api.PermissionsType) { // Return true if identity has any user permissions hasUserIdentity := func(identity *api.IdentitySet) bool { if identity == nil { return false } return identity.User.ID != "" || identity.User.DisplayName != "" || identity.User.Email != "" || identity.User.LoginName != "" } // Return true if p has any user permissions hasUser := func(p *api.PermissionsType) bool { if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) { return true } return slices.ContainsFunc(p.GetGrantedToIdentities(m.fs.driveType), hasUserIdentity) } // Put Permissions with a user first, leaving unsorted otherwise slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int { aHasUser := hasUser(a) bHasUser := hasUser(b) if aHasUser && !bHasUser { return -1 } else if !aHasUser && bHasUser { return 1 } return 0 }) } // sortPermissions sorts the permissions (to be written) into add, update, and remove queues func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType) { new, old := m.queuedPermissions, m.permissions if len(old) == 0 || m.permsAddOnly { m.orderPermissions(new) return new, nil, nil // they must all be "add" } for _, n := range new { if n == nil { continue } if n.ID != "" { // sanity check: ensure there's a matching "old" id with a non-matching role if !slices.ContainsFunc(old, func(o *api.PermissionsType) bool { return o.ID == n.ID && slices.Compare(o.Roles, n.Roles) != 0 && len(o.Roles) > 0 && len(n.Roles) > 0 && !slices.Contains(o.Roles, api.OwnerRole) }) { fs.Debugf(m.remote, "skipping update for invalid roles: %v (perm ID: %v)", n.Roles, n.ID) continue } if m.fs.driveType != driveTypePersonal && n.Link != nil && n.Link.WebURL != "" { // special case to work around API limitation -- can't update a sharing link perm so need to 
remove + add instead // https://learn.microsoft.com/en-us/answers/questions/986279/why-is-update-permission-graph-api-for-files-not-w // https://github.com/microsoftgraph/msgraph-sdk-dotnet/issues/1135 fs.Debugf(m.remote, "sortPermissions: can't update due to API limitation, will remove + add instead: %v", n.Roles) remove = append(remove, n) add = append(add, n) continue } fs.Debugf(m.remote, "sortPermissions: will update role to %v", n.Roles) update = append(update, n) } else { fs.Debugf(m.remote, "sortPermissions: will add permission: %v %v", n, n.Roles) add = append(add, n) } } for _, o := range old { if slices.Contains(o.Roles, api.OwnerRole) { fs.Debugf(m.remote, "skipping remove permission -- can't remove 'owner' role") continue } newHasOld := slices.ContainsFunc(new, func(n *api.PermissionsType) bool { if n == nil || n.ID == "" { return false // can't remove perms without an ID } return n.ID == o.ID }) if !newHasOld && o.ID != "" && !slices.Contains(add, o) && !slices.Contains(update, o) { fs.Debugf(m.remote, "sortPermissions: will remove permission: %v %v (perm ID: %v)", o, o.Roles, o.ID) remove = append(remove, o) } } m.orderPermissions(add) m.orderPermissions(update) m.orderPermissions(remove) return add, update, remove } // processPermissions executes the add, update, and remove queues for writing permissions func (m *Metadata) processPermissions(ctx context.Context, add, update, remove []*api.PermissionsType) (newPermissions []*api.PermissionsType, err error) { errs := errcount.New() for _, p := range remove { // remove (need to do these first because of remove + add workaround) _, err := m.removePermission(ctx, p) if err != nil { fs.Errorf(m.remote, "Failed to remove permission: %v", err) errs.Add(err) } } for _, p := range add { // add newPs, _, err := m.addPermission(ctx, p) if err != nil { fs.Errorf(m.remote, "Failed to add permission: %v", err) errs.Add(err) continue } newPermissions = append(newPermissions, newPs...) 
} for _, p := range update { // update newP, _, err := m.updatePermission(ctx, p) if err != nil { fs.Errorf(m.remote, "Failed to update permission: %v", err) errs.Add(err) continue } newPermissions = append(newPermissions, newP) } err = errs.Err("failed to set permissions") if err != nil { err = fserrors.NoRetryError(err) } return newPermissions, err } // fillRecipients looks for recipients to add from the permission passed in. // It looks for an email address in identity.User.Email, ID, and DisplayName, otherwise it uses the identity.User.ID as r.ObjectID. // It considers both "GrantedTo" and "GrantedToIdentities". func fillRecipients(p *api.PermissionsType, driveType string) (recipients []api.DriveRecipient) { if p == nil { return recipients } ids := make(map[string]struct{}, len(p.GetGrantedToIdentities(driveType))+1) isUnique := func(s string) bool { _, ok := ids[s] return !ok && s != "" } addRecipient := func(identity *api.IdentitySet) { r := api.DriveRecipient{} id := "" if strings.ContainsRune(identity.User.Email, '@') { id = identity.User.Email r.Email = id } else if strings.ContainsRune(identity.User.ID, '@') { id = identity.User.ID r.Email = id } else if strings.ContainsRune(identity.User.DisplayName, '@') { id = identity.User.DisplayName r.Email = id } else { id = identity.User.ID r.ObjectID = id } if !isUnique(id) { return } ids[id] = struct{}{} recipients = append(recipients, r) } forIdentitySet := func(iSet *api.IdentitySet) { if iSet == nil { return } iS := *iSet forIdentity := func(i api.Identity) { if i != (api.Identity{}) { iS.User = i addRecipient(&iS) } } forIdentity(iS.User) forIdentity(iS.SiteUser) forIdentity(iS.Group) forIdentity(iS.SiteGroup) forIdentity(iS.Application) forIdentity(iS.Device) } for _, identitySet := range p.GetGrantedToIdentities(driveType) { forIdentitySet(identitySet) } forIdentitySet(p.GetGrantedTo(driveType)) return recipients } // addPermission adds new permissions to an object or dir. 
// if p.Link.Scope == "anonymous" then it will also create a Public Link. func (m *Metadata) addPermission(ctx context.Context, p *api.PermissionsType) (newPs []*api.PermissionsType, resp *http.Response, err error) { opts := m.fs.newOptsCall(m.normalizedID, "POST", "/invite") req := &api.AddPermissionsRequest{ Recipients: fillRecipients(p, m.fs.driveType), RequireSignIn: m.fs.driveType != driveTypePersonal, // personal and business have conflicting requirements Roles: p.Roles, } if m.fs.driveType != driveTypePersonal { req.RetainInheritedPermissions = false // not supported for personal } if p.Link != nil && p.Link.Scope == api.AnonymousScope { link, err := m.fs.PublicLink(ctx, m.remote, fs.DurationOff, false) if err != nil { return nil, nil, err } p.Link.WebURL = link newPs = append(newPs, p) if len(req.Recipients) == 0 { return newPs, nil, nil } } if len(req.Recipients) == 0 { fs.Debugf(m.remote, "skipping add permission -- at least one valid recipient is required") return nil, nil, nil } if len(req.Roles) == 0 { return nil, nil, errors.New("at least one role is required to add a permission (choices: read, write, owner, member)") } if slices.Contains(req.Roles, api.OwnerRole) { fs.Debugf(m.remote, "skipping add permission -- can't invite a user with 'owner' role") return nil, nil, nil } newP := &api.PermissionsResponse{} err = m.fs.pacer.Call(func() (bool, error) { resp, err = m.fs.srv.CallJSON(ctx, &opts, &req, &newP) return shouldRetry(ctx, resp, err) }) return newP.Value, resp, err } // updatePermission updates an existing permission on an object or dir. // This requires the permission ID and a role to update (which will error if it is the same as the existing role.) // Role is the only property that can be updated. 
func (m *Metadata) updatePermission(ctx context.Context, p *api.PermissionsType) (newP *api.PermissionsType, resp *http.Response, err error) { opts := m.fs.newOptsCall(m.normalizedID, "PATCH", "/permissions/"+p.ID) req := api.UpdatePermissionsRequest{Roles: p.Roles} // roles is the only property that can be updated if len(req.Roles) == 0 { return nil, nil, errors.New("at least one role is required to update a permission (choices: read, write, owner, member)") } newP = &api.PermissionsType{} err = m.fs.pacer.Call(func() (bool, error) { resp, err = m.fs.srv.CallJSON(ctx, &opts, &req, &newP) return shouldRetry(ctx, resp, err) }) return newP, resp, err } // removePermission removes an existing permission on an object or dir. // This requires the permission ID. func (m *Metadata) removePermission(ctx context.Context, p *api.PermissionsType) (resp *http.Response, err error) { opts := m.fs.newOptsCall(m.normalizedID, "DELETE", "/permissions/"+p.ID) opts.NoResponse = true err = m.fs.pacer.Call(func() (bool, error) { resp, err = m.fs.srv.CallJSON(ctx, &opts, nil, nil) return shouldRetry(ctx, resp, err) }) return resp, err } // getPermissions gets the current permissions for an object or dir, from the API. func (f *Fs) getPermissions(ctx context.Context, normalizedID string) (p []*api.PermissionsType, resp *http.Response, err error) { opts := f.newOptsCall(normalizedID, "GET", "/permissions") permResp := &api.PermissionsResponse{} err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &permResp) return shouldRetry(ctx, resp, err) }) return permResp.Value, resp, err } func (f *Fs) newMetadata(remote string) *Metadata { return &Metadata{fs: f, remote: remote} } // returns true if metadata includes a "permissions" key and f.opt.MetadataPermissions includes "write". 
func (f *Fs) needsUpdatePermissions(metadata fs.Metadata) bool { _, ok := metadata["permissions"] return ok && f.opt.MetadataPermissions.IsSet(rwWrite) } // returns a non-zero btime if we have one // otherwise falls back to mtime func (o *Object) tryGetBtime(modTime time.Time) time.Time { if o.meta != nil && !o.meta.btime.IsZero() { return o.meta.btime } return modTime } // adds metadata (except permissions) if --metadata is in use func (o *Object) fetchMetadataForCreate(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, modTime time.Time) (createRequest api.CreateUploadRequest, metadata fs.Metadata, err error) { createRequest = api.CreateUploadRequest{ // we set mtime no matter what Item: api.Metadata{ FileSystemInfo: &api.FileSystemInfoFacet{ CreatedDateTime: api.Timestamp(o.tryGetBtime(modTime)), LastModifiedDateTime: api.Timestamp(modTime), }, }, } meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options) if err != nil { return createRequest, nil, fmt.Errorf("failed to read metadata from source object: %w", err) } if meta == nil { return createRequest, nil, nil // no metadata or --metadata not in use, so just return mtime } if o.meta == nil { o.meta = o.fs.newMetadata(o.Remote()) } o.meta.mtime = modTime numSet, err := o.meta.Set(ctx, meta) if err != nil { return createRequest, meta, err } if numSet == 0 { return createRequest, meta, nil } createRequest.Item = o.meta.toAPIMetadata() return createRequest, meta, nil } // Fetch metadata and update updateInfo if --metadata is in use // modtime will still be set when there is no metadata to set // // May return info=nil and err=nil if there was no metadata to update. 
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *Object) (info *api.Item, err error) { meta, err := fs.GetMetadataOptions(ctx, f, src, options) if err != nil { return nil, fmt.Errorf("failed to read metadata from source object: %w", err) } if meta == nil { return updateInfo.setModTime(ctx, src.ModTime(ctx)) // no metadata or --metadata not in use, so just set modtime } if updateInfo.meta == nil { updateInfo.meta = f.newMetadata(updateInfo.Remote()) } newInfo, err := updateInfo.updateMetadata(ctx, meta) if newInfo == nil { return info, err } return newInfo, err } // updateMetadata calls Get, Set, and Write // // May return info=nil and err=nil if there was no metadata to update. func (o *Object) updateMetadata(ctx context.Context, meta fs.Metadata) (info *api.Item, err error) { _, err = o.meta.Get(ctx) // refresh permissions if err != nil { return nil, err } numSet, err := o.meta.Set(ctx, meta) if err != nil { return nil, err } if numSet == 0 { return nil, nil } info, err = o.meta.Write(ctx, o.fs.needsUpdatePermissions(meta)) if err != nil { return info, err } err = o.setMetaData(info) if err != nil { return info, err } // Remove versions if required if o.fs.opt.NoVersions { err := o.deleteVersions(ctx) if err != nil { return info, fmt.Errorf("%v: Failed to remove versions: %v", o, err) } } return info, nil } // MkdirMetadata makes the directory passed in as dir. // // It shouldn't return an error if it already exists. // // If the metadata is not nil it is set. // // It returns the directory that was created. 
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) { var info *api.Item var meta *Metadata dirID, err := f.dirCache.FindDir(ctx, dir, false) if err == fs.ErrorDirNotFound { // Directory does not exist so create it var leaf, parentID string leaf, parentID, err = f.dirCache.FindPath(ctx, dir, true) if err != nil { return nil, err } info, meta, err = f.createDir(ctx, parentID, dir, leaf, metadata) if err != nil { return nil, err } if f.driveType != driveTypePersonal { // for some reason, OneDrive Business needs this extra step to set modtime, while Personal does not. Seems like a bug... fs.Debugf(dir, "setting time %v", meta.mtime) info, err = meta.Write(ctx, false) } } else if err == nil { // Directory exists and needs updating info, meta, err = f.updateDir(ctx, dirID, dir, metadata) } if err != nil { return nil, err } // Convert the info into a directory entry parent, _ := dircache.SplitPath(dir) entry, err := f.itemToDirEntry(ctx, parent, info) if err != nil { return nil, err } directory, ok := entry.(*Directory) if !ok { return nil, fmt.Errorf("internal error: expecting %T to be a *Directory", entry) } directory.meta = meta f.setSystemMetadata(info, directory.meta, entry.Remote(), dirMimeType) dirEntry, ok := entry.(fs.Directory) if !ok { return nil, fmt.Errorf("internal error: expecting %T to be an fs.Directory", entry) } return dirEntry, nil } // createDir makes a directory with pathID as parent and name leaf with optional metadata func (f *Fs) createDir(ctx context.Context, pathID, dirWithLeaf, leaf string, metadata fs.Metadata) (info *api.Item, meta *Metadata, err error) { // fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf) var resp *http.Response opts := f.newOptsCall(pathID, "POST", "/children") mkdir := api.CreateItemWithMetadataRequest{ CreateItemRequest: api.CreateItemRequest{ Name: f.opt.Enc.FromStandardName(leaf), ConflictBehavior: "fail", }, } m := f.newMetadata(dirWithLeaf) m.mimeType = dirMimeType 
numSet := 0 if len(metadata) > 0 { numSet, err = m.Set(ctx, metadata) if err != nil { return nil, m, err } if numSet > 0 { mkdir.Metadata = m.toAPIMetadata() } } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, m, err } if f.needsUpdatePermissions(metadata) && numSet > 0 { // permissions must be done as a separate step m.normalizedID = info.GetID() err = m.RefreshPermissions(ctx) if err != nil { return info, m, err } err = m.WritePermissions(ctx) if err != nil { fs.Errorf(m.remote, "error writing permissions: %v", err) return info, m, err } } return info, m, nil } // updateDir updates an existing a directory with the metadata passed in func (f *Fs) updateDir(ctx context.Context, dirID, remote string, metadata fs.Metadata) (info *api.Item, meta *Metadata, err error) { d := f.newDir(dirID, remote) _, err = d.meta.Set(ctx, metadata) if err != nil { return nil, nil, err } info, err = d.meta.Write(ctx, f.needsUpdatePermissions(metadata)) return info, d.meta, err } func (f *Fs) newDir(dirID, remote string) (d *Directory) { d = &Directory{ fs: f, remote: remote, size: -1, items: -1, id: dirID, meta: f.newMetadata(remote), } d.meta.normalizedID = dirID return d } // Metadata returns metadata for a DirEntry // // It should return nil if there is no Metadata func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) { err = o.readMetaData(ctx) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return nil, err } return o.meta.Get(ctx) } // DirSetModTime sets the directory modtime for dir func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error { dirID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return err } d := f.newDir(dirID, dir) return d.SetModTime(ctx, modTime) } // SetModTime sets the metadata on the DirEntry to set the modification date // // If there is any other metadata it does 
not overwrite it. func (d *Directory) SetModTime(ctx context.Context, t time.Time) error { btime := t if d.meta != nil && !d.meta.btime.IsZero() { btime = d.meta.btime // if we already have a non-zero btime, preserve it } d.meta = d.fs.newMetadata(d.remote) // set only the mtime and btime d.meta.mtime = t d.meta.btime = btime _, err := d.meta.Write(ctx, false) return err } // Metadata returns metadata for a DirEntry // // It should return nil if there is no Metadata func (d *Directory) Metadata(ctx context.Context) (metadata fs.Metadata, err error) { return d.meta.Get(ctx) } // SetMetadata sets metadata for a Directory //
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/onedrive/metadata_test.go
backend/onedrive/metadata_test.go
package onedrive import ( "encoding/json" "testing" "github.com/rclone/rclone/backend/onedrive/api" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestOrderPermissions(t *testing.T) { tests := []struct { name string input []*api.PermissionsType expected []string }{ { name: "empty", input: []*api.PermissionsType{}, expected: []string(nil), }, { name: "users first, then group, then none", input: []*api.PermissionsType{ {ID: "1", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group1"}}}, {ID: "2", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{DisplayName: "Alice"}}}}, {ID: "3", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Alice"}}}, {ID: "4"}, }, expected: []string{"2", "3", "1", "4"}, }, { name: "same type unsorted", input: []*api.PermissionsType{ {ID: "b", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group B"}}}, {ID: "a", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group A"}}}, {ID: "c", GrantedToIdentities: []*api.IdentitySet{{Group: api.Identity{DisplayName: "Group A"}}, {User: api.Identity{DisplayName: "Alice"}}}}, }, expected: []string{"c", "b", "a"}, }, { name: "all user identities", input: []*api.PermissionsType{ {ID: "c", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Bob"}}}, {ID: "a", GrantedTo: &api.IdentitySet{User: api.Identity{Email: "alice@example.com"}}}, {ID: "b", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{LoginName: "user3"}}}}, }, expected: []string{"c", "a", "b"}, }, { name: "no user or group info", input: []*api.PermissionsType{ {ID: "z"}, {ID: "x"}, {ID: "y"}, }, expected: []string{"z", "x", "y"}, }, } for _, driveType := range []string{driveTypePersonal, driveTypeBusiness} { t.Run(driveType, func(t *testing.T) { for _, tt := range tests { m := &Metadata{fs: &Fs{driveType: driveType}} t.Run(tt.name, func(t *testing.T) { if driveType == driveTypeBusiness { for i := range tt.input { 
tt.input[i].GrantedToV2 = tt.input[i].GrantedTo tt.input[i].GrantedTo = nil tt.input[i].GrantedToIdentitiesV2 = tt.input[i].GrantedToIdentities tt.input[i].GrantedToIdentities = nil } } m.orderPermissions(tt.input) var gotIDs []string for _, p := range tt.input { gotIDs = append(gotIDs, p.ID) } assert.Equal(t, tt.expected, gotIDs) }) } }) } } func TestOrderPermissionsJSON(t *testing.T) { testJSON := `[ { "id": "1", "grantedToV2": { "group": { "id": "group@example.com" } }, "roles": [ "write" ] }, { "id": "2", "grantedToV2": { "user": { "id": "user@example.com" } }, "roles": [ "write" ] } ]` var testPerms []*api.PermissionsType err := json.Unmarshal([]byte(testJSON), &testPerms) require.NoError(t, err) m := &Metadata{fs: &Fs{driveType: driveTypeBusiness}} m.orderPermissions(testPerms) var gotIDs []string for _, p := range testPerms { gotIDs = append(gotIDs, p.ID) } assert.Equal(t, []string{"2", "1"}, gotIDs) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/onedrive/onedrive_internal_test.go
backend/onedrive/onedrive_internal_test.go
package onedrive import ( "context" "encoding/json" "fmt" "slices" "testing" "time" _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/backend/onedrive/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/lib/random" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // go test -timeout 30m -run ^TestIntegration/FsMkdir/FsPutFiles/Internal$ github.com/rclone/rclone/backend/onedrive -remote TestOneDrive:meta -v // go test -timeout 30m -run ^TestIntegration/FsMkdir/FsPutFiles/Internal$ github.com/rclone/rclone/backend/onedrive -remote TestOneDriveBusiness:meta -v // go run ./fstest/test_all -remotes TestOneDriveBusiness:meta,TestOneDrive:meta -verbose -maxtries 1 var ( t1 = fstest.Time("2023-08-26T23:13:06.499999999Z") t2 = fstest.Time("2020-02-29T12:34:56.789Z") t3 = time.Date(1994, time.December, 24, 9+12, 0, 0, 525600, time.FixedZone("Eastern Standard Time", -5)) ctx = context.Background() content = "hello" ) const ( testUserID = "ryan@contoso.com" // demo user from doc examples (can't share files with yourself) // https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_invite?view=odsp-graph-online#http-request-1 ) // TestMain drives the tests func TestMain(m *testing.M) { fstest.TestMain(m) } // TestWritePermissions tests reading and writing permissions func (f *Fs) TestWritePermissions(t *testing.T, r *fstest.Run) { // setup ctx, ci := fs.AddConfig(ctx) ci.Metadata = true _ = f.opt.MetadataPermissions.Set("read,write") file1 := r.WriteFile(randomFilename(), content, t2) // add a permission with "read" role permissions := defaultPermissions(f.driveType) permissions[0].Roles[0] = api.ReadRole expectedMeta, actualMeta := f.putWithMeta(ctx, t, &file1, permissions) f.compareMeta(t, expectedMeta, actualMeta, false) expectedP, actualP := unmarshalPerms(t, 
expectedMeta["permissions"]), unmarshalPerms(t, actualMeta["permissions"]) found, num := false, 0 foundCount := 0 for i, p := range actualP { for _, identity := range p.GetGrantedToIdentities(f.driveType) { if identity.User.DisplayName == testUserID { // note: expected will always be element 0 here, but actual may be variable based on org settings assert.Equal(t, expectedP[0].Roles, p.Roles) found, num = true, i foundCount++ } } if f.driveType == driveTypePersonal { if p.GetGrantedTo(f.driveType) != nil && p.GetGrantedTo(f.driveType).User != (api.Identity{}) && p.GetGrantedTo(f.driveType).User.ID == testUserID { // shows up in a different place on biz vs. personal assert.Equal(t, expectedP[0].Roles, p.Roles) found, num = true, i foundCount++ } } } assert.True(t, found, fmt.Sprintf("no permission found with expected role (want: \n\n%v \n\ngot: \n\n%v\n\n)", indent(t, expectedMeta["permissions"]), indent(t, actualMeta["permissions"]))) assert.Equal(t, 1, foundCount, "expected to find exactly 1 match") // update it to "write" permissions = actualP permissions[num].Roles[0] = api.WriteRole expectedMeta, actualMeta = f.putWithMeta(ctx, t, &file1, permissions) f.compareMeta(t, expectedMeta, actualMeta, false) if f.driveType != driveTypePersonal { // zero out some things we expect to be different expectedP, actualP = unmarshalPerms(t, expectedMeta["permissions"]), unmarshalPerms(t, actualMeta["permissions"]) normalize(expectedP) normalize(actualP) expectedMeta.Set("permissions", marshalPerms(t, expectedP)) actualMeta.Set("permissions", marshalPerms(t, actualP)) } assert.JSONEq(t, expectedMeta["permissions"], actualMeta["permissions"]) // remove it permissions[num] = nil _, actualMeta = f.putWithMeta(ctx, t, &file1, permissions) if f.driveType == driveTypePersonal { perms, ok := actualMeta["permissions"] assert.False(t, ok, fmt.Sprintf("permissions metadata key was unexpectedly found: %v", perms)) return } _, actualP = unmarshalPerms(t, expectedMeta["permissions"]), 
unmarshalPerms(t, actualMeta["permissions"]) found = false var foundP *api.PermissionsType for _, p := range actualP { if p.GetGrantedTo(f.driveType) == nil || p.GetGrantedTo(f.driveType).User == (api.Identity{}) || p.GetGrantedTo(f.driveType).User.ID != testUserID { continue } found = true foundP = p } assert.False(t, found, fmt.Sprintf("permission was found but expected to be removed: %v", foundP)) } // TestUploadSinglePart tests reading/writing permissions using uploadSinglepart() // This is only used when file size is exactly 0. func (f *Fs) TestUploadSinglePart(t *testing.T, r *fstest.Run) { content = "" f.TestWritePermissions(t, r) content = "hello" } // TestReadPermissions tests that no permissions are written when --onedrive-metadata-permissions has "read" but not "write" func (f *Fs) TestReadPermissions(t *testing.T, r *fstest.Run) { // setup ctx, ci := fs.AddConfig(ctx) ci.Metadata = true file1 := r.WriteFile(randomFilename(), "hello", t2) // try adding a permission without --onedrive-metadata-permissions -- should fail // test that what we got before vs. 
after is the same _ = f.opt.MetadataPermissions.Set("read") _, expectedMeta := f.putWithMeta(ctx, t, &file1, []*api.PermissionsType{}) // return var intentionally switched here permissions := defaultPermissions(f.driveType) _, actualMeta := f.putWithMeta(ctx, t, &file1, permissions) if f.driveType == driveTypePersonal { perms, ok := actualMeta["permissions"] assert.False(t, ok, fmt.Sprintf("permissions metadata key was unexpectedly found: %v", perms)) return } assert.JSONEq(t, expectedMeta["permissions"], actualMeta["permissions"]) } // TestReadMetadata tests that all the read-only system properties are present and non-blank func (f *Fs) TestReadMetadata(t *testing.T, r *fstest.Run) { // setup ctx, ci := fs.AddConfig(ctx) ci.Metadata = true file1 := r.WriteFile(randomFilename(), "hello", t2) permissions := defaultPermissions(f.driveType) _ = f.opt.MetadataPermissions.Set("read,write") _, actualMeta := f.putWithMeta(ctx, t, &file1, permissions) optionals := []string{"package-type", "shared-by-id", "shared-scope", "shared-time", "shared-owner-id"} // not always present for k := range systemMetadataInfo { if slices.Contains(optionals, k) { continue } if k == "description" && f.driveType != driveTypePersonal { continue // not supported } gotV, ok := actualMeta[k] assert.True(t, ok, fmt.Sprintf("property is missing: %v", k)) assert.NotEmpty(t, gotV, fmt.Sprintf("property is blank: %v", k)) } } // TestDirectoryMetadata tests reading and writing modtime and other metadata and permissions for directories func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) { // setup ctx, ci := fs.AddConfig(ctx) ci.Metadata = true _ = f.opt.MetadataPermissions.Set("read,write") permissions := defaultPermissions(f.driveType) permissions[0].Roles[0] = api.ReadRole expectedMeta := fs.Metadata{ "mtime": t1.Format(timeFormatOut), "btime": t2.Format(timeFormatOut), "content-type": dirMimeType, "description": "that is so meta!", } b, err := json.MarshalIndent(permissions, "", "\t") 
assert.NoError(t, err) expectedMeta.Set("permissions", string(b)) compareDirMeta := func(expectedMeta, actualMeta fs.Metadata, ignoreID bool) { f.compareMeta(t, expectedMeta, actualMeta, ignoreID) // check that all required system properties are present optionals := []string{"package-type", "shared-by-id", "shared-scope", "shared-time", "shared-owner-id"} // not always present for k := range systemMetadataInfo { if slices.Contains(optionals, k) { continue } if k == "description" && f.driveType != driveTypePersonal { continue // not supported } gotV, ok := actualMeta[k] assert.True(t, ok, fmt.Sprintf("property is missing: %v", k)) assert.NotEmpty(t, gotV, fmt.Sprintf("property is blank: %v", k)) } } newDst, err := operations.MkdirMetadata(ctx, f, "subdir", expectedMeta) assert.NoError(t, err) require.NotNil(t, newDst) assert.Equal(t, "subdir", newDst.Remote()) actualMeta, err := fs.GetMetadata(ctx, newDst) assert.NoError(t, err) assert.NotNil(t, actualMeta) compareDirMeta(expectedMeta, actualMeta, false) // modtime fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t1, newDst.ModTime(ctx), f.Precision()) // try changing it and re-check it newDst, err = operations.SetDirModTime(ctx, f, newDst, "", t2) assert.NoError(t, err) fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t2, newDst.ModTime(ctx), f.Precision()) // ensure that f.DirSetModTime also works err = f.DirSetModTime(ctx, "subdir", t3) assert.NoError(t, err) entries, err := f.List(ctx, "") assert.NoError(t, err) entries.ForDir(func(dir fs.Directory) { if dir.Remote() == "subdir" { fstest.AssertTimeEqualWithPrecision(t, dir.Remote(), t3, dir.ModTime(ctx), f.Precision()) } }) // test updating metadata on existing dir actualMeta, err = fs.GetMetadata(ctx, newDst) // get fresh info as we've been changing modtimes assert.NoError(t, err) expectedMeta = actualMeta expectedMeta.Set("description", "metadata is fun!") expectedMeta.Set("btime", t3.Format(timeFormatOut)) expectedMeta.Set("mtime", 
t1.Format(timeFormatOut)) expectedMeta.Set("content-type", dirMimeType) perms := unmarshalPerms(t, expectedMeta["permissions"]) perms[0].Roles[0] = api.WriteRole b, err = json.MarshalIndent(perms, "", "\t") assert.NoError(t, err) expectedMeta.Set("permissions", string(b)) newDst, err = operations.MkdirMetadata(ctx, f, "subdir", expectedMeta) assert.NoError(t, err) require.NotNil(t, newDst) assert.Equal(t, "subdir", newDst.Remote()) actualMeta, err = fs.GetMetadata(ctx, newDst) assert.NoError(t, err) assert.NotNil(t, actualMeta) compareDirMeta(expectedMeta, actualMeta, false) // test copying metadata from one dir to another copiedDir, err := operations.CopyDirMetadata(ctx, f, nil, "subdir2", newDst) assert.NoError(t, err) require.NotNil(t, copiedDir) assert.Equal(t, "subdir2", copiedDir.Remote()) actualMeta, err = fs.GetMetadata(ctx, copiedDir) assert.NoError(t, err) assert.NotNil(t, actualMeta) compareDirMeta(expectedMeta, actualMeta, true) // test DirModTimeUpdatesOnWrite expectedTime := copiedDir.ModTime(ctx) assert.True(t, !expectedTime.IsZero()) r.WriteObject(ctx, copiedDir.Remote()+"/"+randomFilename(), "hi there", t3) entries, err = f.List(ctx, "") assert.NoError(t, err) entries.ForDir(func(dir fs.Directory) { if dir.Remote() == copiedDir.Remote() { assert.True(t, expectedTime.Equal(dir.ModTime(ctx)), fmt.Sprintf("want %v got %v", expectedTime, dir.ModTime(ctx))) } }) } // TestServerSideCopyMove tests server-side Copy and Move func (f *Fs) TestServerSideCopyMove(t *testing.T, r *fstest.Run) { // setup ctx, ci := fs.AddConfig(ctx) ci.Metadata = true _ = f.opt.MetadataPermissions.Set("read,write") file1 := r.WriteFile(randomFilename(), content, t2) // add a permission with "read" role permissions := defaultPermissions(f.driveType) permissions[0].Roles[0] = api.ReadRole expectedMeta, actualMeta := f.putWithMeta(ctx, t, &file1, permissions) f.compareMeta(t, expectedMeta, actualMeta, false) comparePerms := func(expectedMeta, actualMeta fs.Metadata) 
(newExpectedMeta, newActualMeta fs.Metadata) { expectedP, actualP := unmarshalPerms(t, expectedMeta["permissions"]), unmarshalPerms(t, actualMeta["permissions"]) normalize(expectedP) normalize(actualP) expectedMeta.Set("permissions", marshalPerms(t, expectedP)) actualMeta.Set("permissions", marshalPerms(t, actualP)) assert.JSONEq(t, expectedMeta["permissions"], actualMeta["permissions"]) return expectedMeta, actualMeta } // Copy obj1, err := f.NewObject(ctx, file1.Path) assert.NoError(t, err) originalMeta := actualMeta obj2, err := f.Copy(ctx, obj1, randomFilename()) assert.NoError(t, err) actualMeta, err = fs.GetMetadata(ctx, obj2) assert.NoError(t, err) expectedMeta, actualMeta = comparePerms(originalMeta, actualMeta) f.compareMeta(t, expectedMeta, actualMeta, true) // Move obj3, err := f.Move(ctx, obj1, randomFilename()) assert.NoError(t, err) actualMeta, err = fs.GetMetadata(ctx, obj3) assert.NoError(t, err) expectedMeta, actualMeta = comparePerms(originalMeta, actualMeta) f.compareMeta(t, expectedMeta, actualMeta, true) } // TestMetadataMapper tests adding permissions with the --metadata-mapper func (f *Fs) TestMetadataMapper(t *testing.T, r *fstest.Run) { // setup ctx, ci := fs.AddConfig(ctx) ci.Metadata = true _ = f.opt.MetadataPermissions.Set("read,write") file1 := r.WriteFile(randomFilename(), content, t2) blob := `{"Metadata":{"permissions":"[{\"grantedToIdentities\":[{\"user\":{\"id\":\"ryan@contoso.com\"}}],\"roles\":[\"read\"]}]"}}` if f.driveType != driveTypePersonal { blob = `{"Metadata":{"permissions":"[{\"grantedToIdentitiesV2\":[{\"user\":{\"id\":\"ryan@contoso.com\"}}],\"roles\":[\"read\"]}]"}}` } // Copy ci.MetadataMapper = []string{"echo", blob} require.NoError(t, ci.Dump.Set("mapper")) obj1, err := r.Flocal.NewObject(ctx, file1.Path) assert.NoError(t, err) obj2, err := operations.Copy(ctx, f, nil, randomFilename(), obj1) assert.NoError(t, err) actualMeta, err := fs.GetMetadata(ctx, obj2) assert.NoError(t, err) actualP := unmarshalPerms(t, 
actualMeta["permissions"]) found := false foundCount := 0 for _, p := range actualP { for _, identity := range p.GetGrantedToIdentities(f.driveType) { if identity.User.DisplayName == testUserID { assert.Equal(t, []api.Role{api.ReadRole}, p.Roles) found = true foundCount++ } } if f.driveType == driveTypePersonal { if p.GetGrantedTo(f.driveType) != nil && p.GetGrantedTo(f.driveType).User != (api.Identity{}) && p.GetGrantedTo(f.driveType).User.ID == testUserID { // shows up in a different place on biz vs. personal assert.Equal(t, []api.Role{api.ReadRole}, p.Roles) found = true foundCount++ } } } assert.True(t, found, fmt.Sprintf("no permission found with expected role (want: \n\n%v \n\ngot: \n\n%v\n\n)", blob, actualMeta)) assert.Equal(t, 1, foundCount, "expected to find exactly 1 match") } // helper function to put an object with metadata and permissions func (f *Fs) putWithMeta(ctx context.Context, t *testing.T, file *fstest.Item, perms []*api.PermissionsType) (expectedMeta, actualMeta fs.Metadata) { t.Helper() expectedMeta = fs.Metadata{ "mtime": t1.Format(timeFormatOut), "btime": t2.Format(timeFormatOut), "description": "that is so meta!", } expectedMeta.Set("permissions", marshalPerms(t, perms)) obj := fstests.PutTestContentsMetadata(ctx, t, f, file, false, content, true, "plain/text", expectedMeta) do, ok := obj.(fs.Metadataer) require.True(t, ok) actualMeta, err := do.Metadata(ctx) require.NoError(t, err) return expectedMeta, actualMeta } func randomFilename() string { return "some file-" + random.String(8) + ".txt" } func (f *Fs) compareMeta(t *testing.T, expectedMeta, actualMeta fs.Metadata, ignoreID bool) { t.Helper() for k, v := range expectedMeta { gotV, ok := actualMeta[k] switch k { case "shared-owner-id", "shared-time", "shared-by-id", "shared-scope": continue case "permissions": continue case "utime": assert.True(t, ok, fmt.Sprintf("expected metadata key is missing: %v", k)) if f.driveType == driveTypePersonal { compareTimeStrings(t, k, v, gotV, 
time.Minute) // read-only upload time, so slight difference expected -- use larger precision continue } compareTimeStrings(t, k, expectedMeta["btime"], gotV, time.Minute) // another bizarre difference between personal and business... continue case "id": if ignoreID { continue // different id is expected when copying meta from one item to another } case "mtime", "btime": assert.True(t, ok, fmt.Sprintf("expected metadata key is missing: %v", k)) compareTimeStrings(t, k, v, gotV, time.Second) continue case "description": if f.driveType != driveTypePersonal { continue // not supported } } assert.True(t, ok, fmt.Sprintf("expected metadata key is missing: %v", k)) assert.Equal(t, v, gotV, actualMeta) } } func compareTimeStrings(t *testing.T, remote, want, got string, precision time.Duration) { wantT, err := time.Parse(timeFormatIn, want) assert.NoError(t, err) gotT, err := time.Parse(timeFormatIn, got) assert.NoError(t, err) fstest.AssertTimeEqualWithPrecision(t, remote, wantT, gotT, precision) } func marshalPerms(t *testing.T, p []*api.PermissionsType) string { b, err := json.MarshalIndent(p, "", "\t") assert.NoError(t, err) return string(b) } func unmarshalPerms(t *testing.T, perms string) (p []*api.PermissionsType) { t.Helper() err := json.Unmarshal([]byte(perms), &p) assert.NoError(t, err) return p } func indent(t *testing.T, s string) string { p := unmarshalPerms(t, s) return marshalPerms(t, p) } func defaultPermissions(driveType string) []*api.PermissionsType { if driveType == driveTypePersonal { return []*api.PermissionsType{{ GrantedTo: &api.IdentitySet{User: api.Identity{}}, GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{ID: testUserID}}}, Roles: []api.Role{api.WriteRole}, }} } return []*api.PermissionsType{{ GrantedToV2: &api.IdentitySet{User: api.Identity{}}, GrantedToIdentitiesV2: []*api.IdentitySet{{User: api.Identity{ID: testUserID}}}, Roles: []api.Role{api.WriteRole}, }} } // zeroes out some things we expect to be different when copying/moving 
between objects func normalize(Ps []*api.PermissionsType) { for _, ep := range Ps { ep.ID = "" ep.Link = nil ep.ShareID = "" } } func (f *Fs) resetTestDefaults(r *fstest.Run) { ci := fs.GetConfig(ctx) ci.Metadata = false _ = f.opt.MetadataPermissions.Set("off") r.Finalise() } // InternalTest dispatches all internal tests func (f *Fs) InternalTest(t *testing.T) { newTestF := func() (*Fs, *fstest.Run) { r := fstest.NewRunIndividual(t) testF, ok := r.Fremote.(*Fs) if !ok { t.FailNow() } return testF, r } testF, r := newTestF() t.Run("TestWritePermissions", func(t *testing.T) { testF.TestWritePermissions(t, r) }) testF.resetTestDefaults(r) testF, r = newTestF() t.Run("TestUploadSinglePart", func(t *testing.T) { testF.TestUploadSinglePart(t, r) }) testF.resetTestDefaults(r) testF, r = newTestF() t.Run("TestReadPermissions", func(t *testing.T) { testF.TestReadPermissions(t, r) }) testF.resetTestDefaults(r) testF, r = newTestF() t.Run("TestReadMetadata", func(t *testing.T) { testF.TestReadMetadata(t, r) }) testF.resetTestDefaults(r) testF, r = newTestF() t.Run("TestDirectoryMetadata", func(t *testing.T) { testF.TestDirectoryMetadata(t, r) }) testF.resetTestDefaults(r) testF, r = newTestF() t.Run("TestServerSideCopyMove", func(t *testing.T) { testF.TestServerSideCopyMove(t, r) }) testF.resetTestDefaults(r) t.Run("TestMetadataMapper", func(t *testing.T) { testF.TestMetadataMapper(t, r) }) testF.resetTestDefaults(r) } var _ fstests.InternalTester = (*Fs)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/onedrive/quickxorhash/quickxorhash_test.go
backend/onedrive/quickxorhash/quickxorhash_test.go
package quickxorhash import ( "crypto/rand" "encoding/base64" "fmt" "hash" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var testVectors = []struct { size int in string out string }{ {0, ``, "AAAAAAAAAAAAAAAAAAAAAAAAAAA="}, {1, `Sg==`, "SgAAAAAAAAAAAAAAAQAAAAAAAAA="}, {2, `tbQ=`, "taAFAAAAAAAAAAAAAgAAAAAAAAA="}, {3, `0pZP`, "0rDEEwAAAAAAAAAAAwAAAAAAAAA="}, {4, `jRRDVA==`, "jaDAEKgAAAAAAAAABAAAAAAAAAA="}, {5, `eAV52qE=`, "eChAHrQRCgAAAAAABQAAAAAAAAA="}, {6, `luBZlaT6`, "lgBHFipBCn0AAAAABgAAAAAAAAA="}, {7, `qaApEj66lw==`, "qQBFCiTgA11cAgAABwAAAAAAAAA="}, {8, `/aNzzCFPS/A=`, "/RjFHJgRgicsAR4ACAAAAAAAAAA="}, {9, `n6Neh7p6fFgm`, "nxiFFw6hCz3wAQsmCQAAAAAAAAA="}, {10, `J9iPGCbfZSTNyw==`, "J8DGIzBggm+UgQTNUgYAAAAAAAA="}, {11, `i+UZyUGJKh+ISbk=`, "iyhHBpIRhESo4AOIQ0IuAAAAAAA="}, {12, `h490d57Pqz5q2rtT`, "h3gEHe7giWeswgdq3MYupgAAAAA="}, {13, `vPgoDjOfO6fm71RxLw==`, "vMAHChwwg0/s4BTmdQcV4vACAAA="}, {14, `XoJ1AsoR4fDYJrDqYs4=`, "XhBEHQSgjAiEAx7YPgEs1CEGZwA="}, {15, `gQaybEqS/4UlDc8e4IJm`, "gDCALNigBEn8oxAlZ8AzPAAOQZg="}, {16, `2fuxhBJXtpWFe8dOfdGeHw==`, "O9tHLAghgSvYohKFyMMxnNCHaHg="}, {17, `XBV6YKU9V7yMakZnFIxIkuU=`, "HbplHsBQih5cgReMQYMRzkABRiA="}, {18, `XJZSOiNO2bmfKnTKD7fztcQX`, "/6ZArHQwAidkIxefQgEdlPGAW8w="}, {19, `g8VtAh+2Kf4k0kY5tzji2i2zmA==`, "wDNrgwHWAVukwB8kg4YRcnALHIg="}, {20, `T6LYJIfDh81JrAK309H2JMJTXis=`, "zBTHrspn3mEcohlJdIUAbjGNaNg="}, {21, `DWAAX5/CIfrmErgZa8ot6ZraeSbu`, "LR2Z0PjuRYGKQB/mhQAuMrAGZbQ="}, {22, `N9abi3qy/mC1THZuVLHPpx7SgwtLOA==`, "1KTYttCBEen8Hwy1doId3ECFWDw="}, {23, `LlUe7wHerLqEtbSZLZgZa9u0m7hbiFs=`, "TqVZpxs3cN61BnuFvwUtMtECTGQ="}, {24, `bU2j/0XYdgfPFD4691jV0AOUEUPR4Z5E`, "bnLBiLpVgnxVkXhNsIAPdHAPLFQ="}, {25, `lScPwPsyUsH2T1Qsr31wXtP55Wqbe47Uyg==`, "VDMSy8eI26nBHCB0e8gVWPCKPsA="}, {26, `rJaKh1dLR1k+4hynliTZMGf8Nd4qKKoZiAM=`, "r7bjwkl8OYQeNaMcCY8fTmEJEmQ="}, {27, `pPsT0CPmHrd3Frsnva1pB/z1ytARLeHEYRCo`, "Rdg7rCcDomL59pL0s6GuTvqLVqQ="}, {28, `wSRChaqmrsnMrfB2yqI43eRWbro+f9kBvh+01w==`, 
"YTtloIi6frI7HX3vdLvE7I2iUOA="}, {29, `apL67KMIRxQeE9k1/RuW09ppPjbF1WeQpTjSWtI=`, "CIpedls+ZlSQ654fl+X26+Q7LVU="}, {30, `53yx0/QgMTVb7OOzHRHbkS7ghyRc+sIXxi7XHKgT`, "zfJtLGFgR9DB3Q64fAFIp+S5iOY="}, {31, `PwXNnutoLLmxD8TTog52k8cQkukmT87TTnDipKLHQw==`, "PTaGs7yV3FUyBy/SfU6xJRlCJlI="}, {32, `NbYXsp5/K6mR+NmHwExjvWeWDJFnXTKWVlzYHoesp2E=`, "wjuAuWDiq04qDt1R8hHWDDcwVoQ="}, {33, `qQ70RB++JAR5ljNv3lJt1PpqETPsckopfonItu18Cr3E`, "FkJaeg/0Z5+euShYlLpE2tJh+Lo="}, {34, `RhzSatQTQ9/RFvpHyQa1WLdkr3nIk6MjJUma998YRtp44A==`, "SPN2D29reImAqJezlqV2DLbi8tk="}, {35, `DND1u1uZ5SqZVpRUk6NxSUdVo7IjjL9zs4A1evDNCDLcXWc=`, "S6lBk2hxI2SWBfn7nbEl7D19UUs="}, {36, `jEi62utFz69JMYHjg1iXy7oO6ZpZSLcVd2B+pjm6BGsv/CWi`, "s0lYU9tr/bp9xsnrrjYgRS5EvV8="}, {37, `hfS3DZZnhy0hv7nJdXLv/oJOtIgAuP9SInt/v8KeuO4/IvVh4A==`, "CV+HQCdd2A/e/vdi12f2UU55GLA="}, {38, `EkPQAC6ymuRrYjIXD/LT/4Vb+7aTjYVZOHzC8GPCEtYDP0+T3Nc=`, "kE9H9sEmr3vHBYUiPbvsrcDgSEo="}, {39, `vtBOGIENG7yQ/N7xNWPNIgy66Gk/I2Ur/ZhdFNUK9/1FCZuu/KeS`, "+Fgp3HBimtCzUAyiinj3pkarYTk="}, {40, `YnF4smoy9hox2jBlJ3VUa4qyCRhOZbWcmFGIiszTT4zAdYHsqJazyg==`, "arkIn+ELddmE8N34J9ydyFKW+9w="}, {41, `0n7nl3YJtipy6yeUbVPWtc2h45WbF9u8hTz5tNwj3dZZwfXWkk+GN3g=`, "YJLNK7JR64j9aODWfqDvEe/u6NU="}, {42, `FnIIPHayc1pHkY4Lh8+zhWwG8xk6Knk/D3cZU1/fOUmRAoJ6CeztvMOL`, "22RPOylMtdk7xO/QEQiMli4ql0k="}, {43, `J82VT7ND0Eg1MorSfJMUhn+qocF7PsUpdQAMrDiHJ2JcPZAHZ2nyuwjoKg==`, "pOR5eYfwCLRJbJsidpc1rIJYwtM="}, {44, `Zbu+78+e35ZIymV5KTDdub5McyI3FEO8fDxs62uWHQ9U3Oh3ZqgaZ30SnmQ=`, "DbvbTkgNTgWRqRidA9r1jhtUjro="}, {45, `lgybK3Da7LEeY5aeeNrqcdHvv6mD1W4cuQ3/rUj2C/CNcSI0cAMw6vtpVY3y`, "700RQByn1lRQSSme9npQB/Ye+bY="}, {46, `jStZgKHv4QyJLvF2bYbIUZi/FscHALfKHAssTXkrV1byVR9eACwW9DNZQRHQwg==`, "uwN55He8xgE4g93dH9163xPew4U="}, {47, `V1PSud3giF5WW72JB/bgtltsWtEB5V+a+wUALOJOGuqztzVXUZYrvoP3XV++gM0=`, "U+3ZfUF/6mwOoHJcSHkQkckfTDA="}, {48, `VXs4t4tfXGiWAL6dlhEMm0YQF0f2w9rzX0CvIVeuW56o6/ec2auMpKeU2VeteEK5`, "sq24lSf7wXLH8eigHl07X+qPTps="}, {49, 
`bLUn3jLH+HFUsG3ptWTHgNvtr3eEv9lfKBf0jm6uhpqhRwtbEQ7Ovj/hYQf42zfdtQ==`, "uC8xrnopGiHebGuwgq607WRQyxQ="}, {50, `4SVmjtXIL8BB8SfkbR5Cpaljm2jpyUfAhIBf65XmKxHlz9dy5XixgiE/q1lv+esZW/E=`, "wxZ0rxkMQEnRNAp8ZgEZLT4RdLM="}, {51, `pMljctlXeFUqbG3BppyiNbojQO3ygg6nZPeUZaQcVyJ+Clgiw3Q8ntLe8+02ZSfyCc39`, "aZEPmNvOXnTt7z7wt+ewV7QGMlg="}, {52, `C16uQlxsHxMWnV2gJhFPuJ2/guZ4N1YgmNvAwL1yrouGQtwieGx8WvZsmYRnX72JnbVtTw==`, "QtlSNqXhVij64MMhKJ3EsDFB/z8="}, {53, `7ZVDOywvrl3L0GyKjjcNg2CcTI81n2CeUbzdYWcZOSCEnA/xrNHpiK01HOcGh3BbxuS4S6g=`, "4NznNJc4nmXeApfiCFTq/H5LbHw="}, {54, `JXm2tTVqpYuuz2Cc+ZnPusUb8vccPGrzWK2oVwLLl/FjpFoxO9FxGlhnB08iu8Q/XQSdzHn+`, "IwE5+2pKNcK366I2k2BzZYPibSI="}, {55, `TiiU1mxzYBSGZuE+TX0l9USWBilQ7dEml5lLrzNPh75xmhjIK8SGqVAkvIMgAmcMB+raXdMPZg==`, "yECGHtgR128ScP4XlvF96eLbIBE="}, {56, `zz+Q4zi6wh0fCJUFU9yUOqEVxlIA93gybXHOtXIPwQQ44pW4fyh6BRgc1bOneRuSWp85hwlTJl8=`, "+3Ef4D6yuoC8J+rbFqU1cegverE="}, {57, `sa6SHK9z/G505bysK5KgRO2z2cTksDkLoFc7sv0tWBmf2G2mCiozf2Ce6EIO+W1fRsrrtn/eeOAV`, "xZg1CwMNAjN0AIXw2yh4+1N3oos="}, {58, `0qx0xdyTHhnKJ22IeTlAjRpWw6y2sOOWFP75XJ7cleGJQiV2kyrmQOST4DGHIL0qqA7sMOdzKyTV iw==`, "bS0tRYPkP1Gfc+ZsBm9PMzPunG8="}, {59, `QuzaF0+5ooig6OLEWeibZUENl8EaiXAQvK9UjBEauMeuFFDCtNcGs25BDtJGGbX90gH4VZvCCDNC q4s=`, "rggokuJq1OGNOfB6aDp2g4rdPgw="}, {60, `+wg2x23GZQmMLkdv9MeAdettIWDmyK6Wr+ba23XD+Pvvq1lIMn9QIQT4Z7QHJE3iC/ZMFgaId9VA yY3d`, "ahQbTmOdiKUNdhYRHgv5/Ky+Y6k="}, {61, `y0ydRgreRQwP95vpNP92ioI+7wFiyldHRbr1SfoPNdbKGFA0lBREaBEGNhf9yixmfE+Azo2AuROx b7Yc7g==`, "cJKFc0dXfiN4hMg1lcMf5E4gqvo="}, {62, `LxlVvGXSQlSubK8r0pGf9zf7s/3RHe75a2WlSXQf3gZFR/BtRnR7fCIcaG//CbGfodBFp06DBx/S 9hUV8Bk=`, "NwuwhhRWX8QZ/vhWKWgQ1+rNomI="}, {63, `L+LSB8kmGMnHaWVA5P/+qFnfQliXvgJW7d2JGAgT6+koi5NQujFW1bwQVoXrBVyob/gBxGizUoJM gid5gGNo`, "ndX/KZBtFoeO3xKeo1ajO/Jy+rY="}, {64, `Mb7EGva2rEE5fENDL85P+BsapHEEjv2/siVhKjvAQe02feExVOQSkfmuYzU/kTF1MaKjPmKF/w+c bvwfdWL8aQ==`, "n1anP5NfvD4XDYWIeRPW3ZkPv1Y="}, {111, `jyibxJSzO6ZiZ0O1qe3tG/bvIAYssvukh9suIT5wEy1JBINVgPiqdsTW0cOpP0aUfP7mgqLfADkz 
I/m/GgCuVhr8oFLrOCoTx1/psBOWwhltCbhUx51Icm9aH8tY4Z3ccU+6BKpYQkLCy0B/A9Zc`, "hZfLIilSITC6N3e3tQ/iSgEzkto="}, {128, `ikwCorI7PKWz17EI50jZCGbV9JU2E8bXVfxNMg5zdmqSZ2NlsQPp0kqYIPjzwTg1MBtfWPg53k0h 0P2naJNEVgrqpoHTfV2b3pJ4m0zYPTJmUX4Bg/lOxcnCxAYKU29Y5F0U8Quz7ZXFBEweftXxJ7RS 4r6N7BzJrPsLhY7hgck=`, "imAoFvCWlDn4yVw3/oq1PDbbm6U="}, {222, `PfxMcUd0vIW6VbHG/uj/Y0W6qEoKmyBD0nYebEKazKaKG+UaDqBEcmQjbfQeVnVLuodMoPp7P7TR 1htX5n2VnkHh22xDyoJ8C/ZQKiSNqQfXvh83judf4RVr9exJCud8Uvgip6aVZTaPrJHVjQhMCp/d EnGvqg0oN5OVkM2qqAXvA0teKUDhgNM71sDBVBCGXxNOR2bpbD1iM4dnuT0ey4L+loXEHTL0fqMe UcEi2asgImnlNakwenDzz0x57aBwyq3AspCFGB1ncX4yYCr/OaCcS5OKi/00WH+wNQU3`, "QX/YEpG0gDsmhEpCdWhsxDzsfVE="}, {256, `qwGf2ESubE5jOUHHyc94ORczFYYbc2OmEzo+hBIyzJiNwAzC8PvJqtTzwkWkSslgHFGWQZR2BV5+ uYTrYT7HVwRM40vqfj0dBgeDENyTenIOL1LHkjtDKoXEnQ0mXAHoJ8PjbNC93zi5TovVRXTNzfGE s5dpWVqxUzb5lc7dwkyvOluBw482mQ4xrzYyIY1t+//OrNi1ObGXuUw2jBQOFfJVj2Y6BOyYmfB1 y36eBxi3zxeG5d5NYjm2GSh6e08QMAwu3zrINcqIzLOuNIiGXBtl7DjKt7b5wqi4oFiRpZsCyx2s mhSrdrtK/CkdU6nDN+34vSR/M8rZpWQdBE7a8g==`, "WYT9JY3JIo/pEBp+tIM6Gt2nyTM="}, {333, `w0LGhqU1WXFbdavqDE4kAjEzWLGGzmTNikzqnsiXHx2KRReKVTxkv27u3UcEz9+lbMvYl4xFf2Z4 aE1xRBBNd1Ke5C0zToSaYw5o4B/7X99nKK2/XaUX1byLow2aju2XJl2OpKpJg+tSJ2fmjIJTkfuY Uz574dFX6/VXxSxwGH/xQEAKS5TCsBK3CwnuG1p5SAsQq3gGVozDWyjEBcWDMdy8/AIFrj/y03Lf c/RNRCQTAfZbnf2QwV7sluw4fH3XJr07UoD0YqN+7XZzidtrwqMY26fpLZnyZjnBEt1FAZWO7RnK G5asg8xRk9YaDdedXdQSJAOy6bWEWlABj+tVAigBxavaluUH8LOj+yfCFldJjNLdi90fVHkUD/m4 Mr5OtmupNMXPwuG3EQlqWUVpQoYpUYKLsk7a5Mvg6UFkiH596y5IbJEVCI1Kb3D1`, "e3+wo77iKcILiZegnzyUNcjCdoQ="}, } func TestQuickXorHash(t *testing.T) { for _, test := range testVectors { what := fmt.Sprintf("test size %d", test.size) in, err := base64.StdEncoding.DecodeString(test.in) require.NoError(t, err, what) got := Sum(in) want, err := base64.StdEncoding.DecodeString(test.out) require.NoError(t, err, what) assert.Equal(t, want, got[:], what) } } func TestQuickXorHashByBlock(t *testing.T) { for _, blockSize := range []int{1, 2, 4, 7, 8, 16, 32, 64, 
128, 256, 512} { for _, test := range testVectors { what := fmt.Sprintf("test size %d blockSize %d", test.size, blockSize) in, err := base64.StdEncoding.DecodeString(test.in) require.NoError(t, err, what) h := New() for i := 0; i < len(in); i += blockSize { end := min(i+blockSize, len(in)) n, err := h.Write(in[i:end]) require.Equal(t, end-i, n, what) require.NoError(t, err, what) } got := h.Sum(nil) want, err := base64.StdEncoding.DecodeString(test.out) require.NoError(t, err, what) assert.Equal(t, want, got, test.size, what) } } } func TestSize(t *testing.T) { d := New() assert.Equal(t, 20, d.Size()) } func TestBlockSize(t *testing.T) { d := New() assert.Equal(t, 64, d.BlockSize()) } func TestReset(t *testing.T) { d := New() zeroHash := d.Sum(nil) _, _ = d.Write([]byte{1}) assert.NotEqual(t, zeroHash, d.Sum(nil)) d.Reset() assert.Equal(t, zeroHash, d.Sum(nil)) } // check interface var _ hash.Hash = (*quickXorHash)(nil) func BenchmarkQuickXorHash(b *testing.B) { b.SetBytes(1 << 20) buf := make([]byte, 1<<20) n, err := rand.Read(buf) require.NoError(b, err) require.Equal(b, len(buf), n) h := New() for b.Loop() { h.Reset() h.Write(buf) h.Sum(nil) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/onedrive/quickxorhash/quickxorhash.go
backend/onedrive/quickxorhash/quickxorhash.go
// Package quickxorhash provides the quickXorHash algorithm which is a // quick, simple non-cryptographic hash algorithm that works by XORing // the bytes in a circular-shifting fashion. // // It is used by Microsoft Onedrive for Business to hash data. // // See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash package quickxorhash // This code was ported from a fast C-implementation from // https://github.com/namazso/QuickXorHash // which has licenced as BSD Zero Clause License // // BSD Zero Clause License // // Copyright (c) 2022 namazso <admin@namazso.eu> // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH // REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY // AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, // INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM // LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR // OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR // PERFORMANCE OF THIS SOFTWARE. import ( "crypto/subtle" "hash" ) const ( // BlockSize is the preferred size for hashing BlockSize = 64 // Size of the output checksum Size = 20 shift = 11 widthInBits = 8 * Size dataSize = shift * widthInBits ) type quickXorHash struct { data [dataSize]byte size uint64 } // New returns a new hash.Hash computing the quickXorHash checksum. func New() hash.Hash { return &quickXorHash{} } // xor dst with src func xorBytes(dst, src []byte) int { return subtle.XORBytes(dst, src, dst) } // Write (via the embedded io.Writer interface) adds more data to the running hash. // It never returns an error. // // Write writes len(p) bytes from p to the underlying data stream. 
It returns // the number of bytes written from p (0 <= n <= len(p)) and any error // encountered that caused the write to stop early. Write must return a non-nil // error if it returns n < len(p). Write must not modify the slice data, even // temporarily. // // Implementations must not retain p. func (q *quickXorHash) Write(p []byte) (n int, err error) { var i int // fill last remain lastRemain := q.size % dataSize if lastRemain != 0 { i += xorBytes(q.data[lastRemain:], p) } if i != len(p) { for len(p)-i >= dataSize { i += xorBytes(q.data[:], p[i:]) } xorBytes(q.data[:], p[i:]) } q.size += uint64(len(p)) return len(p), nil } // Calculate the current checksum func (q *quickXorHash) checkSum() (h [Size + 1]byte) { for i := range dataSize { shift := (i * 11) % 160 shiftBytes := shift / 8 shiftBits := shift % 8 shifted := int(q.data[i]) << shiftBits h[shiftBytes] ^= byte(shifted) h[shiftBytes+1] ^= byte(shifted >> 8) } h[0] ^= h[20] // XOR the file length with the least significant bits in little endian format d := q.size h[Size-8] ^= byte(d >> (8 * 0)) h[Size-7] ^= byte(d >> (8 * 1)) h[Size-6] ^= byte(d >> (8 * 2)) h[Size-5] ^= byte(d >> (8 * 3)) h[Size-4] ^= byte(d >> (8 * 4)) h[Size-3] ^= byte(d >> (8 * 5)) h[Size-2] ^= byte(d >> (8 * 6)) h[Size-1] ^= byte(d >> (8 * 7)) return h } // Sum appends the current hash to b and returns the resulting slice. // It does not change the underlying hash state. func (q *quickXorHash) Sum(b []byte) []byte { hash := q.checkSum() return append(b, hash[:Size]...) } // Reset resets the Hash to its initial state. func (q *quickXorHash) Reset() { *q = quickXorHash{} } // Size returns the number of bytes Sum will return. func (q *quickXorHash) Size() int { return Size } // BlockSize returns the hash's underlying block size. // The Write method must be able to accept any amount // of data, but it may operate more efficiently if all writes // are a multiple of the block size. 
func (q *quickXorHash) BlockSize() int { return BlockSize } // Sum returns the quickXorHash checksum of the data. func Sum(data []byte) (h [Size]byte) { var d quickXorHash _, _ = d.Write(data) s := d.checkSum() copy(h[:], s[:]) return h }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/onedrive/api/types.go
backend/onedrive/api/types.go
// Package api provides types used by the OneDrive API. package api import ( "strings" "time" ) const ( timeFormat = `"` + "2006-01-02T15:04:05.999Z" + `"` // PackageTypeOneNote is the package type value for OneNote files PackageTypeOneNote = "oneNote" ) // Error is returned from OneDrive when things go wrong type Error struct { ErrorInfo struct { Code string `json:"code"` Message string `json:"message"` InnerError struct { Code string `json:"code"` } `json:"innererror"` } `json:"error"` } // Error returns a string for the error and satisfies the error interface func (e *Error) Error() string { out := e.ErrorInfo.Code if e.ErrorInfo.InnerError.Code != "" { out += ": " + e.ErrorInfo.InnerError.Code } out += ": " + e.ErrorInfo.Message return out } // Check Error satisfies the error interface var _ error = (*Error)(nil) // Identity represents an identity of an actor. For example, and actor // can be a user, device, or application. type Identity struct { DisplayName string `json:"displayName,omitempty"` ID string `json:"id,omitempty"` Email string `json:"email,omitempty"` // not officially documented, but seems to sometimes exist LoginName string `json:"loginName,omitempty"` // SharePoint only } // IdentitySet is a keyed collection of Identity objects. It is used // to represent a set of identities associated with various events for // an item, such as created by or last modified by. type IdentitySet struct { User Identity `json:"user,omitempty"` Application Identity `json:"application,omitempty"` Device Identity `json:"device,omitempty"` Group Identity `json:"group,omitempty"` SiteGroup Identity `json:"siteGroup,omitempty"` // The SharePoint group associated with this action. Optional. SiteUser Identity `json:"siteUser,omitempty"` // The SharePoint user associated with this action. Optional. } // Quota groups storage space quota-related information on OneDrive into a single structure. 
type Quota struct { Total int64 `json:"total"` Used int64 `json:"used"` Remaining int64 `json:"remaining"` Deleted int64 `json:"deleted"` State string `json:"state"` // normal | nearing | critical | exceeded } // Drive is a representation of a drive resource type Drive struct { ID string `json:"id"` DriveType string `json:"driveType"` Owner IdentitySet `json:"owner"` Quota Quota `json:"quota"` } // Timestamp represents date and time information for the // OneDrive API, by using ISO 8601 and is always in UTC time. type Timestamp time.Time // MarshalJSON turns a Timestamp into JSON (in UTC) func (t *Timestamp) MarshalJSON() (out []byte, err error) { timeString := (*time.Time)(t).UTC().Format(timeFormat) return []byte(timeString), nil } // UnmarshalJSON turns JSON into a Timestamp func (t *Timestamp) UnmarshalJSON(data []byte) error { newT, err := time.Parse(timeFormat, string(data)) if err != nil { return err } *t = Timestamp(newT) return nil } // ItemReference groups data needed to reference a OneDrive item // across the service into a single structure. type ItemReference struct { DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only. ID string `json:"id"` // Unique identifier for the item. Read/Write. Path string `json:"path"` // Path that used to navigate to the item. Read/Write. DriveType string `json:"driveType"` // Type of the drive, Read-Only } // GetID returns a normalized ID of the item // If DriveID is known it will be prefixed to the ID with # separator // Can be parsed using onedrive.parseNormalizedID(normalizedID) func (i *ItemReference) GetID() string { if !strings.Contains(i.ID, "#") { return i.DriveID + "#" + i.ID } return i.ID } // RemoteItemFacet groups data needed to reference a OneDrive remote item type RemoteItemFacet struct { ID string `json:"id"` // The unique identifier of the item within the remote Drive. Read-only. Name string `json:"name"` // The name of the item (filename and extension). 
Read-write. CreatedBy IdentitySet `json:"createdBy"` // Identity of the user, device, and application which created the item. Read-only. LastModifiedBy IdentitySet `json:"lastModifiedBy"` // Identity of the user, device, and application which last modified the item. Read-only. CreatedDateTime Timestamp `json:"createdDateTime"` // Date and time of item creation. Read-only. LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only. Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only. File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only. Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only. FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write. ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write. Size int64 `json:"size"` // Size of the item in bytes. Read-only. WebURL string `json:"webUrl"` // URL that displays the resource in the browser. Read-only. } // FolderFacet groups folder-related data on OneDrive into a single structure type FolderFacet struct { ChildCount int64 `json:"childCount"` // Number of children contained immediately within this container. } // HashesType groups different types of hashes into a single structure, for an item on OneDrive. 
type HashesType struct { Sha1Hash string `json:"sha1Hash"` // hex encoded SHA1 hash for the contents of the file (if available) Crc32Hash string `json:"crc32Hash"` // hex encoded CRC32 value of the file (if available) QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available) Sha256Hash string `json:"sha256Hash"` // hex encoded SHA256 value of the file (if available) } // FileFacet groups file-related data on OneDrive into a single structure. type FileFacet struct { MimeType string `json:"mimeType"` // The MIME type for the file. This is determined by logic on the server and might not be the value provided when the file was uploaded. Hashes HashesType `json:"hashes"` // Hashes of the file's binary content, if available. } // FileSystemInfoFacet contains properties that are reported by the // device's local file system for the local version of an item. This // facet can be used to specify the last modified date or created date // of the item as it was on the local device. type FileSystemInfoFacet struct { CreatedDateTime Timestamp `json:"createdDateTime,omitempty"` // The UTC date and time the file was created on a client. LastModifiedDateTime Timestamp `json:"lastModifiedDateTime,omitempty"` // The UTC date and time the file was last modified on a client. } // DeletedFacet indicates that the item on OneDrive has been // deleted. In this version of the API, the presence (non-null) of the // facet value indicates that the file was deleted. A null (or // missing) value indicates that the file is not deleted. type DeletedFacet struct{} // PackageFacet indicates that a DriveItem is the top level item // in a "package" or a collection of items that should be treated as a collection instead of individual items. // `oneNote` is the only currently defined value. type PackageFacet struct { Type string `json:"type"` } // SharedType indicates a DriveItem has been shared with others. 
The resource includes information about how the item is shared. // If a Driveitem has a non-null shared facet, the item has been shared. type SharedType struct { Owner IdentitySet `json:"owner,omitempty"` // The identity of the owner of the shared item. Read-only. Scope string `json:"scope,omitempty"` // Indicates the scope of how the item is shared: anonymous, organization, or users. Read-only. SharedBy IdentitySet `json:"sharedBy,omitempty"` // The identity of the user who shared the item. Read-only. SharedDateTime Timestamp `json:"sharedDateTime,omitempty"` // The UTC date and time when the item was shared. Read-only. } // SharingInvitationType groups invitation-related data items into a single structure. type SharingInvitationType struct { Email string `json:"email,omitempty"` // The email address provided for the recipient of the sharing invitation. Read-only. InvitedBy *IdentitySet `json:"invitedBy,omitempty"` // Provides information about who sent the invitation that created this permission, if that information is available. Read-only. SignInRequired bool `json:"signInRequired,omitempty"` // If true the recipient of the invitation needs to sign in in order to access the shared item. Read-only. } // SharingLinkType groups link-related data items into a single structure. // If a Permission resource has a non-null sharingLink facet, the permission represents a sharing link (as opposed to permissions granted to a person or group). type SharingLinkType struct { Application *Identity `json:"application,omitempty"` // The app the link is associated with. Type LinkType `json:"type,omitempty"` // The type of the link created. Scope LinkScope `json:"scope,omitempty"` // The scope of the link represented by this permission. Value anonymous indicates the link is usable by anyone, organization indicates the link is only usable for users signed into the same tenant. 
WebHTML string `json:"webHtml,omitempty"` // For embed links, this property contains the HTML code for an <iframe> element that will embed the item in a webpage. WebURL string `json:"webUrl,omitempty"` // A URL that opens the item in the browser on the OneDrive website. } // LinkType represents the type of SharingLinkType created. type LinkType string const ( // ViewLinkType (role: read) A view-only sharing link, allowing read-only access. ViewLinkType LinkType = "view" // EditLinkType (role: write) An edit sharing link, allowing read-write access. EditLinkType LinkType = "edit" // EmbedLinkType (role: read) A view-only sharing link that can be used to embed // content into a host webpage. Embed links are not available for OneDrive for // Business or SharePoint. EmbedLinkType LinkType = "embed" ) // LinkScope represents the scope of the link represented by this permission. // Value anonymous indicates the link is usable by anyone, organization indicates the link is only usable for users signed into the same tenant. type LinkScope string const ( // AnonymousScope = Anyone with the link has access, without needing to sign in. // This may include people outside of your organization. AnonymousScope LinkScope = "anonymous" // OrganizationScope = Anyone signed into your organization (tenant) can use the // link to get access. Only available in OneDrive for Business and SharePoint. OrganizationScope LinkScope = "organization" ) // PermissionsType provides information about a sharing permission granted for a DriveItem resource. // Sharing permissions have a number of different forms. The Permission resource represents these different forms through facets on the resource. type PermissionsType struct { ID string `json:"id"` // The unique identifier of the permission among all permissions on the item. Read-only. GrantedTo *IdentitySet `json:"grantedTo,omitempty"` // For user type permissions, the details of the users & applications for this permission. Read-only. 
Deprecated on OneDrive Business only. GrantedToIdentities []*IdentitySet `json:"grantedToIdentities,omitempty"` // For link type permissions, the details of the users to whom permission was granted. Read-only. Deprecated on OneDrive Business only. GrantedToV2 *IdentitySet `json:"grantedToV2,omitempty"` // For user type permissions, the details of the users & applications for this permission. Read-only. Not available for OneDrive Personal. GrantedToIdentitiesV2 []*IdentitySet `json:"grantedToIdentitiesV2,omitempty"` // For link type permissions, the details of the users to whom permission was granted. Read-only. Not available for OneDrive Personal. Invitation *SharingInvitationType `json:"invitation,omitempty"` // Details of any associated sharing invitation for this permission. Read-only. InheritedFrom *ItemReference `json:"inheritedFrom,omitempty"` // Provides a reference to the ancestor of the current permission, if it is inherited from an ancestor. Read-only. Link *SharingLinkType `json:"link,omitempty"` // Provides the link details of the current permission, if it is a link type permissions. Read-only. Roles []Role `json:"roles,omitempty"` // The type of permission (read, write, owner, member). Read-only. ShareID string `json:"shareId,omitempty"` // A unique token that can be used to access this shared item via the shares API. Read-only. } // Role represents the type of permission (read, write, owner, member) type Role string const ( // ReadRole provides the ability to read the metadata and contents of the item. ReadRole Role = "read" // WriteRole provides the ability to read and modify the metadata and contents of the item. WriteRole Role = "write" // OwnerRole represents the owner role for SharePoint and OneDrive for Business. OwnerRole Role = "owner" // MemberRole represents the member role for SharePoint and OneDrive for Business. 
MemberRole Role = "member" ) // PermissionsResponse is the response to the list permissions method type PermissionsResponse struct { Value []*PermissionsType `json:"value"` // An array of Item objects } // AddPermissionsRequest is the request for the add permissions method type AddPermissionsRequest struct { Recipients []DriveRecipient `json:"recipients,omitempty"` // A collection of recipients who will receive access and the sharing invitation. Message string `json:"message,omitempty"` // A plain text formatted message that is included in the sharing invitation. Maximum length 2000 characters. RequireSignIn bool `json:"requireSignIn,omitempty"` // Specifies whether the recipient of the invitation is required to sign-in to view the shared item. SendInvitation bool `json:"sendInvitation,omitempty"` // If true, a sharing link is sent to the recipient. Otherwise, a permission is granted directly without sending a notification. Roles []Role `json:"roles,omitempty"` // Specify the roles that are to be granted to the recipients of the sharing invitation. RetainInheritedPermissions bool `json:"retainInheritedPermissions,omitempty"` // Optional. If true (default), any existing inherited permissions are retained on the shared item when sharing this item for the first time. If false, all existing permissions are removed when sharing for the first time. OneDrive Business Only. } // UpdatePermissionsRequest is the request for the update permissions method type UpdatePermissionsRequest struct { Roles []Role `json:"roles,omitempty"` // Specify the roles that are to be granted to the recipients of the sharing invitation. } // DriveRecipient represents a person, group, or other recipient to share with using the invite action. type DriveRecipient struct { Email string `json:"email,omitempty"` // The email address for the recipient, if the recipient has an associated email address. 
Alias string `json:"alias,omitempty"` // The alias of the domain object, for cases where an email address is unavailable (e.g. security groups). ObjectID string `json:"objectId,omitempty"` // The unique identifier for the recipient in the directory. } // Item represents metadata for an item in OneDrive type Item struct { ID string `json:"id"` // The unique identifier of the item within the Drive. Read-only. Name string `json:"name"` // The name of the item (filename and extension). Read-write. ETag string `json:"eTag"` // eTag for the entire item (metadata + content). Read-only. CTag string `json:"cTag"` // An eTag for the content of the item. This eTag is not changed if only the metadata is changed. Read-only. CreatedBy IdentitySet `json:"createdBy"` // Identity of the user, device, and application which created the item. Read-only. LastModifiedBy IdentitySet `json:"lastModifiedBy"` // Identity of the user, device, and application which last modified the item. Read-only. CreatedDateTime Timestamp `json:"createdDateTime"` // Date and time of item creation. Read-only. LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only. Size int64 `json:"size"` // Size of the item in bytes. Read-only. ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write. WebURL string `json:"webUrl"` // URL that displays the resource in the browser. Read-only. Description string `json:"description,omitempty"` // Provides a user-visible description of the item. Read-write. Only on OneDrive Personal. Undocumented limit of 1024 characters. Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only. File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only. RemoteItem *RemoteItemFacet `json:"remoteItem"` // Remote Item metadata, if the item is a remote shared item. Read-only. 
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write. // Image *ImageFacet `json:"image"` // Image metadata, if the item is an image. Read-only. // Photo *PhotoFacet `json:"photo"` // Photo metadata, if the item is a photo. Read-only. // Audio *AudioFacet `json:"audio"` // Audio metadata, if the item is an audio file. Read-only. // Video *VideoFacet `json:"video"` // Video metadata, if the item is a video. Read-only. // Location *LocationFacet `json:"location"` // Location metadata, if the item has location data. Read-only. Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only. Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only. Malware *struct{} `json:"malware,omitempty"` // Malware metadata, if the item was detected to contain malware. Read-only. (Currently has no properties.) Shared *SharedType `json:"shared,omitempty"` // Indicates that the item has been shared with others and provides information about the shared state of the item. Read-only. } // Metadata represents a request to update Metadata. // It includes only the writeable properties. // omitempty is intentionally included for all, per https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_update?view=odsp-graph-online#request-body type Metadata struct { Description string `json:"description,omitempty"` // Provides a user-visible description of the item. Read-write. Only on OneDrive Personal. Undocumented limit of 1024 characters. FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo,omitempty"` // File system information on client. Read-write. 
} // IsEmpty returns true if the metadata is empty (there is nothing to set) func (m Metadata) IsEmpty() bool { return m.Description == "" && m.FileSystemInfo == &FileSystemInfoFacet{} } // DeltaResponse is the response to the view delta method type DeltaResponse struct { Value []Item `json:"value"` // An array of Item objects which have been created, modified, or deleted. NextLink string `json:"@odata.nextLink"` // A URL to retrieve the next available page of changes. DeltaLink string `json:"@odata.deltaLink"` // A URL returned instead of @odata.nextLink after all current changes have been returned. Used to read the next set of changes in the future. DeltaToken string `json:"@delta.token"` // A token value that can be used in the query string on manually-crafted calls to view.delta. Not needed if you're using nextLink and deltaLink. } // ListChildrenResponse is the response to the list children method type ListChildrenResponse struct { Value []Item `json:"value"` // An array of Item objects NextLink string `json:"@odata.nextLink"` // A URL to retrieve the next available page of items. } // CreateItemRequest is the request to create an item object type CreateItemRequest struct { Name string `json:"name"` // Name of the folder to be created. Folder FolderFacet `json:"folder"` // Empty Folder facet to indicate that folder is the type of resource to be created. ConflictBehavior string `json:"@name.conflictBehavior"` // Determines what to do if an item with a matching name already exists in this folder. Accepted values are: rename, replace, and fail (the default). } // CreateItemWithMetadataRequest is like CreateItemRequest but also allows setting Metadata type CreateItemWithMetadataRequest struct { CreateItemRequest Metadata } // SetFileSystemInfo is used to Update an object's FileSystemInfo. type SetFileSystemInfo struct { FileSystemInfo FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write. 
} // CreateUploadRequest is used by CreateUploadSession to set the dates correctly type CreateUploadRequest struct { Item Metadata `json:"item"` } // CreateUploadResponse is the response from creating an upload session type CreateUploadResponse struct { UploadURL string `json:"uploadUrl"` // "https://sn3302.up.1drv.com/up/fe6987415ace7X4e1eF866337", ExpirationDateTime Timestamp `json:"expirationDateTime"` // "2015-01-29T09:21:55.523Z", NextExpectedRanges []string `json:"nextExpectedRanges"` // ["0-"] } // UploadFragmentResponse is the response from uploading a fragment type UploadFragmentResponse struct { ExpirationDateTime Timestamp `json:"expirationDateTime"` // "2015-01-29T09:21:55.523Z", NextExpectedRanges []string `json:"nextExpectedRanges"` // ["0-"] } // CopyItemRequest is the request to copy an item object // // Note: The parentReference should include either an id or path but // not both. If both are included, they need to reference the same // item or an error will occur. type CopyItemRequest struct { ParentReference ItemReference `json:"parentReference"` // Reference to the parent item the copy will be created in. Name *string `json:"name"` // Optional The new name for the copy. If this isn't provided, the same name will be used as the original. } // MoveItemRequest is the request to copy an item object // // Note: The parentReference should include either an id or path but // not both. If both are included, they need to reference the same // item or an error will occur. type MoveItemRequest struct { ParentReference *ItemReference `json:"parentReference,omitempty"` // Reference to the destination parent directory Name string `json:"name,omitempty"` // Optional The new name for the file. If this isn't provided, the same name will be used as the original. FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo,omitempty"` // File system information on client. Read-write. 
} // CreateShareLinkRequest is the request to create a sharing link // Always Type:view and Scope:anonymous for public sharing type CreateShareLinkRequest struct { Type string `json:"type"` // Link type in View, Edit or Embed Scope string `json:"scope,omitempty"` // Scope in anonymous, organization Password string `json:"password,omitempty"` // The password of the sharing link that is set by the creator. Optional and OneDrive Personal only. Expiry *time.Time `json:"expirationDateTime,omitempty"` // A String with format of yyyy-MM-ddTHH:mm:ssZ of DateTime indicates the expiration time of the permission. } // CreateShareLinkResponse is the response from CreateShareLinkRequest type CreateShareLinkResponse struct { ID string `json:"id"` Roles []string `json:"roles"` Link struct { Type string `json:"type"` Scope string `json:"scope"` WebURL string `json:"webUrl"` Application struct { ID string `json:"id"` DisplayName string `json:"displayName"` } `json:"application"` } `json:"link"` } // AsyncOperationStatus provides information on the status of an asynchronous job progress. // // The following API calls return AsyncOperationStatus resources: // // Copy Item // Upload From URL type AsyncOperationStatus struct { PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete. Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. 
"notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting" ErrorCode string `json:"errorCode"` // Not officially documented :( } // GetID returns a normalized ID of the item // If DriveID is known it will be prefixed to the ID with # separator // Can be parsed using onedrive.parseNormalizedID(normalizedID) func (i *Item) GetID() string { if i.IsRemote() && i.RemoteItem.ID != "" { return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID } else if i.ParentReference != nil && !strings.Contains(i.ID, "#") { return i.ParentReference.DriveID + "#" + i.ID } return i.ID } // GetDriveID returns a normalized ParentReference of the item func (i *Item) GetDriveID() string { return i.GetParentReference().DriveID } // GetName returns a normalized Name of the item func (i *Item) GetName() string { if i.IsRemote() && i.RemoteItem.Name != "" { return i.RemoteItem.Name } return i.Name } // GetFolder returns a normalized Folder of the item func (i *Item) GetFolder() *FolderFacet { if i.IsRemote() && i.RemoteItem.Folder != nil { return i.RemoteItem.Folder } return i.Folder } // GetPackage returns a normalized Package of the item func (i *Item) GetPackage() *PackageFacet { if i.IsRemote() && i.RemoteItem.Package != nil { return i.RemoteItem.Package } return i.Package } // GetPackageType returns the package type of the item if available, // otherwise "" func (i *Item) GetPackageType() string { pack := i.GetPackage() if pack == nil { return "" } return pack.Type } // GetFile returns a normalized File of the item func (i *Item) GetFile() *FileFacet { if i.IsRemote() && i.RemoteItem.File != nil { return i.RemoteItem.File } return i.File } // GetFileSystemInfo returns a normalized FileSystemInfo of the item func (i *Item) GetFileSystemInfo() *FileSystemInfoFacet { if i.IsRemote() && i.RemoteItem.FileSystemInfo != nil { return i.RemoteItem.FileSystemInfo } return i.FileSystemInfo } // GetSize returns a normalized Size of the item func (i 
*Item) GetSize() int64 { if i.IsRemote() && i.RemoteItem.Size != 0 { return i.RemoteItem.Size } return i.Size } // GetWebURL returns a normalized WebURL of the item func (i *Item) GetWebURL() string { if i.IsRemote() && i.RemoteItem.WebURL != "" { return i.RemoteItem.WebURL } return i.WebURL } // GetCreatedBy returns a normalized CreatedBy of the item func (i *Item) GetCreatedBy() IdentitySet { if i.IsRemote() && i.RemoteItem.CreatedBy != (IdentitySet{}) { return i.RemoteItem.CreatedBy } return i.CreatedBy } // GetLastModifiedBy returns a normalized LastModifiedBy of the item func (i *Item) GetLastModifiedBy() IdentitySet { if i.IsRemote() && i.RemoteItem.LastModifiedBy != (IdentitySet{}) { return i.RemoteItem.LastModifiedBy } return i.LastModifiedBy } // GetCreatedDateTime returns a normalized CreatedDateTime of the item func (i *Item) GetCreatedDateTime() Timestamp { if i.IsRemote() && i.RemoteItem.CreatedDateTime != (Timestamp{}) { return i.RemoteItem.CreatedDateTime } return i.CreatedDateTime } // GetLastModifiedDateTime returns a normalized LastModifiedDateTime of the item func (i *Item) GetLastModifiedDateTime() Timestamp { if i.IsRemote() && i.RemoteItem.LastModifiedDateTime != (Timestamp{}) { return i.RemoteItem.LastModifiedDateTime } return i.LastModifiedDateTime } // GetParentReference returns a normalized ParentReference of the item func (i *Item) GetParentReference() *ItemReference { if i.IsRemote() && i.ParentReference == nil { return i.RemoteItem.ParentReference } return i.ParentReference } // MalwareDetected returns true if OneDrive has detected that this item contains malware. 
func (i *Item) MalwareDetected() bool { return i.Malware != nil } // IsRemote checks if item is a remote item func (i *Item) IsRemote() bool { return i.RemoteItem != nil } // User details for each version type User struct { Email string `json:"email"` ID string `json:"id"` DisplayName string `json:"displayName"` } // LastModifiedBy for each version type LastModifiedBy struct { User User `json:"user"` } // Version info type Version struct { ID string `json:"id"` LastModifiedDateTime time.Time `json:"lastModifiedDateTime"` Size int `json:"size"` LastModifiedBy LastModifiedBy `json:"lastModifiedBy"` } // VersionsResponse is returned from /versions type VersionsResponse struct { Versions []Version `json:"value"` } // DriveResource is returned from /me/drive type DriveResource struct { DriveID string `json:"id"` DriveName string `json:"name"` DriveType string `json:"driveType"` } // DrivesResponse is returned from /sites/{siteID}/drives", type DrivesResponse struct { Drives []DriveResource `json:"value"` } // SiteResource is part of the response from "/sites/root:" type SiteResource struct { SiteID string `json:"id"` SiteName string `json:"displayName"` SiteURL string `json:"webUrl"` } // SiteResponse is returned from "/sites/root:" type SiteResponse struct { Sites []SiteResource `json:"value"` } // GetGrantedTo returns the GrantedTo property. // This is to get around the odd problem of // GrantedTo being deprecated on OneDrive Business, while // GrantedToV2 is unavailable on OneDrive Personal. func (p *PermissionsType) GetGrantedTo(driveType string) *IdentitySet { if driveType == "personal" { return p.GrantedTo } return p.GrantedToV2 } // GetGrantedToIdentities returns the GrantedToIdentities property. // This is to get around the odd problem of // GrantedToIdentities being deprecated on OneDrive Business, while // GrantedToIdentitiesV2 is unavailable on OneDrive Personal. 
func (p *PermissionsType) GetGrantedToIdentities(driveType string) []*IdentitySet { if driveType == "personal" { return p.GrantedToIdentities } return p.GrantedToIdentitiesV2 }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cache/cache_upload_test.go
backend/cache/cache_upload_test.go
//go:build !plan9 && !js && !race package cache_test import ( "context" "fmt" "math/rand" "os" "path" "strconv" "testing" "time" "github.com/rclone/rclone/backend/cache" _ "github.com/rclone/rclone/backend/drive" "github.com/rclone/rclone/fs" "github.com/stretchr/testify/require" ) func TestInternalUploadTempDirCreated(t *testing.T) { id := fmt.Sprintf("tiutdc%v", time.Now().Unix()) runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)}) _, err := os.Stat(path.Join(runInstance.tmpUploadDir, id)) require.NoError(t, err) } func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) { // create some rand test data testSize := int64(524288000) testReader := runInstance.randomReader(t, testSize) bu := runInstance.listenForBackgroundUpload(t, rootFs, "one") runInstance.writeRemoteReader(t, rootFs, "one", testReader) // validate that it exists in temp fs ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one"))) require.NoError(t, err) if runInstance.rootIsCrypt { require.Equal(t, int64(524416032), ti.Size()) } else { require.Equal(t, testSize, ti.Size()) } de1, err := runInstance.list(t, rootFs, "") require.NoError(t, err) require.Len(t, de1, 1) runInstance.completeBackgroundUpload(t, "one", bu) // check if it was removed from temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one"))) require.True(t, os.IsNotExist(err)) // check if it can be read data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false) require.NoError(t, err) require.Len(t, data2, 1024) } func TestInternalUploadQueueOneFileNoRest(t *testing.T) { id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix()) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": 
"0s"}) testInternalUploadQueueOneFile(t, id, rootFs, boltDb) } func TestInternalUploadQueueOneFileWithRest(t *testing.T) { id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix()) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"}) testInternalUploadQueueOneFile(t, id, rootFs, boltDb) } func TestInternalUploadMoveExistingFile(t *testing.T) { id := fmt.Sprintf("tiumef%v", time.Now().Unix()) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"}) err := rootFs.Mkdir(context.Background(), "one") require.NoError(t, err) err = rootFs.Mkdir(context.Background(), "one/test") require.NoError(t, err) err = rootFs.Mkdir(context.Background(), "second") require.NoError(t, err) // create some rand test data testSize := int64(10485760) testReader := runInstance.randomReader(t, testSize) runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader) runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin") de1, err := runInstance.list(t, rootFs, "one/test") require.NoError(t, err) require.Len(t, de1, 1) time.Sleep(time.Second * 5) //_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test"))) //require.NoError(t, err) err = runInstance.dirMove(t, rootFs, "one/test", "second/test") require.NoError(t, err) // check if it can be read de1, err = runInstance.list(t, rootFs, "second/test") require.NoError(t, err) require.Len(t, de1, 1) } func TestInternalUploadTempPathCleaned(t *testing.T) { id := fmt.Sprintf("tiutpc%v", time.Now().Unix()) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"}) err := rootFs.Mkdir(context.Background(), "one") require.NoError(t, err) err 
= rootFs.Mkdir(context.Background(), "one/test") require.NoError(t, err) err = rootFs.Mkdir(context.Background(), "second") require.NoError(t, err) // create some rand test data testSize := int64(1048576) testReader := runInstance.randomReader(t, testSize) testReader2 := runInstance.randomReader(t, testSize) runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader) runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2) runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin") _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test"))) require.True(t, os.IsNotExist(err)) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one"))) require.True(t, os.IsNotExist(err)) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second"))) require.False(t, os.IsNotExist(err)) runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin") _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin"))) require.True(t, os.IsNotExist(err)) de1, err := runInstance.list(t, rootFs, "one/test") require.NoError(t, err) require.Len(t, de1, 1) // check if it can be read de1, err = runInstance.list(t, rootFs, "second") require.NoError(t, err) require.Len(t, de1, 1) } func TestInternalUploadQueueMoreFiles(t *testing.T) { id := fmt.Sprintf("tiuqmf%v", time.Now().Unix()) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"}) err := rootFs.Mkdir(context.Background(), "test") require.NoError(t, err) minSize := 5242880 maxSize := 10485760 totalFiles := 10 randInstance := rand.New(rand.NewSource(time.Now().Unix())) lastFile := "" for i := range totalFiles { size := int64(randInstance.Intn(maxSize-minSize) + minSize) testReader := 
runInstance.randomReader(t, size) remote := "test/" + strconv.Itoa(i) + ".bin" runInstance.writeRemoteReader(t, rootFs, remote, testReader) // validate that it exists in temp fs ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote))) require.NoError(t, err) require.Equal(t, size, runInstance.cleanSize(t, ti.Size())) if runInstance.wrappedIsExternal && i < totalFiles-1 { time.Sleep(time.Second * 3) } lastFile = remote } // check if cache lists all files, likely temp upload didn't finish yet de1, err := runInstance.list(t, rootFs, "test") require.NoError(t, err) require.Len(t, de1, totalFiles) // wait for background uploader to do its thing runInstance.completeAllBackgroundUploads(t, rootFs, lastFile) // retry until we have no more temp files and fail if they don't go down to 0 _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test"))) require.True(t, os.IsNotExist(err)) // check if cache lists all files de1, err = runInstance.list(t, rootFs, "test") require.NoError(t, err) require.Len(t, de1, totalFiles) } func TestInternalUploadTempFileOperations(t *testing.T) { id := "tiutfo" rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"}) boltDb.PurgeTempUploads() // create some rand test data runInstance.mkdir(t, rootFs, "test") runInstance.writeRemoteString(t, rootFs, "test/one", "one content") // check if it can be read data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false) require.NoError(t, err) require.Equal(t, []byte("one content"), data1) // validate that it exists in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) require.NoError(t, err) // test DirMove - allowed err = runInstance.dirMove(t, rootFs, "test", "second") if err != 
errNotSupported { require.NoError(t, err) _, err = rootFs.NewObject(context.Background(), "test/one") require.Error(t, err) _, err = rootFs.NewObject(context.Background(), "second/one") require.NoError(t, err) // validate that it exists in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) require.Error(t, err) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one"))) require.NoError(t, err) _, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one"))) require.Error(t, err) var started bool started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one"))) require.NoError(t, err) require.False(t, started) runInstance.mkdir(t, rootFs, "test") runInstance.writeRemoteString(t, rootFs, "test/one", "one content") } // test Rmdir - allowed err = runInstance.rm(t, rootFs, "test") require.Error(t, err) require.Contains(t, err.Error(), "directory not empty") _, err = rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) // validate that it exists in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) require.NoError(t, err) started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one"))) require.False(t, started) require.NoError(t, err) // test Move/Rename -- allowed err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second")) if err != errNotSupported { require.NoError(t, err) // try to read from it _, err = rootFs.NewObject(context.Background(), "test/one") require.Error(t, err) _, err = rootFs.NewObject(context.Background(), "test/second") require.NoError(t, err) data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false) require.NoError(t, err) require.Equal(t, 
[]byte("one content"), data2) // validate that it exists in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) require.Error(t, err) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second"))) require.NoError(t, err) runInstance.writeRemoteString(t, rootFs, "test/one", "one content") } // test Copy -- allowed err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third")) if err != errNotSupported { require.NoError(t, err) _, err = rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) _, err = rootFs.NewObject(context.Background(), "test/third") require.NoError(t, err) data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false) require.NoError(t, err) require.Equal(t, []byte("one content"), data2) // validate that it exists in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) require.NoError(t, err) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third"))) require.NoError(t, err) } // test Remove -- allowed err = runInstance.rm(t, rootFs, "test/one") require.NoError(t, err) _, err = rootFs.NewObject(context.Background(), "test/one") require.Error(t, err) // validate that it doesn't exist in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) require.Error(t, err) runInstance.writeRemoteString(t, rootFs, "test/one", "one content") // test Update -- allowed firstModTime, err := runInstance.modTime(t, rootFs, "test/one") require.NoError(t, err) err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated") require.NoError(t, err) obj2, err := rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) data2 := runInstance.readDataFromObj(t, obj2, 
0, int64(len("one content updated")), false) require.Equal(t, "one content updated", string(data2)) tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) require.NoError(t, err) if runInstance.rootIsCrypt { require.Equal(t, int64(67), tmpInfo.Size()) } else { require.Equal(t, int64(len(data2)), tmpInfo.Size()) } // test SetModTime -- allowed secondModTime, err := runInstance.modTime(t, rootFs, "test/one") require.NoError(t, err) require.NotEqual(t, secondModTime, firstModTime) require.NotEqual(t, time.Time{}, firstModTime) require.NotEqual(t, time.Time{}, secondModTime) } func TestInternalUploadUploadingFileOperations(t *testing.T) { id := "tiuufo" rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"}) boltDb.PurgeTempUploads() // create some rand test data runInstance.mkdir(t, rootFs, "test") runInstance.writeRemoteString(t, rootFs, "test/one", "one content") // check if it can be read data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false) require.NoError(t, err) require.Equal(t, []byte("one content"), data1) // validate that it exists in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) require.NoError(t, err) err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one"))) require.NoError(t, err) // test DirMove err = runInstance.dirMove(t, rootFs, "test", "second") if err != errNotSupported { require.Error(t, err) _, err = rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) // validate that it exists in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) require.NoError(t, err) _, err = 
os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one"))) require.Error(t, err) } // test Rmdir err = runInstance.rm(t, rootFs, "test") require.Error(t, err) _, err = rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) // validate that it doesn't exist in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) require.NoError(t, err) // test Move/Rename err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second")) if err != errNotSupported { require.Error(t, err) // try to read from it _, err = rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) _, err = rootFs.NewObject(context.Background(), "test/second") require.Error(t, err) // validate that it exists in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) require.NoError(t, err) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second"))) require.Error(t, err) } // test Copy -- allowed err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third")) if err != errNotSupported { require.NoError(t, err) _, err = rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) _, err = rootFs.NewObject(context.Background(), "test/third") require.NoError(t, err) data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false) require.NoError(t, err) require.Equal(t, []byte("one content"), data2) // validate that it exists in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) require.NoError(t, err) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third"))) require.NoError(t, err) } // test Remove err = runInstance.rm(t, rootFs, 
"test/one") require.Error(t, err) _, err = rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) // validate that it doesn't exist in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) require.NoError(t, err) runInstance.writeRemoteString(t, rootFs, "test/one", "one content") // test Update - this seems to work. Why? FIXME //firstModTime, err := runInstance.modTime(t, rootFs, "test/one") //require.NoError(t, err) //err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() { // data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true) // require.Equal(t, "one content", string(data2)) // // tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) // require.NoError(t, err) // if runInstance.rootIsCrypt { // require.Equal(t, int64(67), tmpInfo.Size()) // } else { // require.Equal(t, int64(len(data2)), tmpInfo.Size()) // } //}) //require.Error(t, err) // test SetModTime -- seems to work cause of previous //secondModTime, err := runInstance.modTime(t, rootFs, "test/one") //require.NoError(t, err) //require.Equal(t, secondModTime, firstModTime) //require.NotEqual(t, time.Time{}, firstModTime) //require.NotEqual(t, time.Time{}, secondModTime) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cache/storage_persistent.go
backend/cache/storage_persistent.go
//go:build !plan9 && !js package cache import ( "bytes" "context" "encoding/binary" "encoding/json" "fmt" "os" "path" "strconv" "strings" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/walk" bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/errors" ) // Constants const ( RootBucket = "root" RootTsBucket = "rootTs" DataTsBucket = "dataTs" tempBucket = "pending" ) // Features flags for this storage type type Features struct { PurgeDb bool // purge the db before starting DbWaitTime time.Duration // time to wait for DB to be available } var boltMap = make(map[string]*Persistent) var boltMapMx sync.Mutex // GetPersistent returns a single instance for the specific store func GetPersistent(dbPath, chunkPath string, f *Features) (*Persistent, error) { // write lock to create one boltMapMx.Lock() defer boltMapMx.Unlock() if b, ok := boltMap[dbPath]; ok { if !b.open { err := b.connect() if err != nil { return nil, err } } return b, nil } bb, err := newPersistent(dbPath, chunkPath, f) if err != nil { return nil, err } boltMap[dbPath] = bb return boltMap[dbPath], nil } type chunkInfo struct { Path string Offset int64 Size int64 } type tempUploadInfo struct { DestPath string AddedOn time.Time Started bool } // String representation of a tempUploadInfo func (t *tempUploadInfo) String() string { return fmt.Sprintf("%v - %v (%v)", t.DestPath, t.Started, t.AddedOn) } // Persistent is a wrapper of persistent storage for a bolt.DB file type Persistent struct { dbPath string dataPath string open bool db *bolt.DB cleanupMux sync.Mutex tempQueueMux sync.Mutex features *Features } // newPersistent builds a new wrapper and connects to the bolt.DB file func newPersistent(dbPath, chunkPath string, f *Features) (*Persistent, error) { b := &Persistent{ dbPath: dbPath, dataPath: chunkPath, features: f, } err := b.connect() if err != nil { fs.Errorf(dbPath, "Error opening storage cache. Is there another rclone running on the same remote? 
%v", err) return nil, err } return b, nil } // String will return a human friendly string for this DB (currently the dbPath) func (b *Persistent) String() string { return "<Cache DB> " + b.dbPath } // connect creates a connection to the configured file // refreshDb will delete the file before to create an empty DB if it's set to true func (b *Persistent) connect() error { var err error err = os.MkdirAll(b.dataPath, os.ModePerm) if err != nil { return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err) } b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime}) if err != nil { return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err) } if b.features.PurgeDb { b.Purge() } _ = b.db.Update(func(tx *bolt.Tx) error { _, _ = tx.CreateBucketIfNotExists([]byte(RootBucket)) _, _ = tx.CreateBucketIfNotExists([]byte(RootTsBucket)) _, _ = tx.CreateBucketIfNotExists([]byte(DataTsBucket)) _, _ = tx.CreateBucketIfNotExists([]byte(tempBucket)) return nil }) b.open = true return nil } // getBucket prepares and cleans a specific path of the form: /var/tmp and will iterate through each path component // to get to the nested bucket of the final part (in this example: tmp) func (b *Persistent) getBucket(dir string, createIfMissing bool, tx *bolt.Tx) *bolt.Bucket { cleanPath(dir) entries := strings.FieldsFunc(dir, func(c rune) bool { // cover Windows where rclone still uses '/' as path separator // this should be safe as '/' is not a valid Windows character return (os.PathSeparator == c || c == rune('/')) }) bucket := tx.Bucket([]byte(RootBucket)) for _, entry := range entries { if createIfMissing { bucket, _ = bucket.CreateBucketIfNotExists([]byte(entry)) } else { bucket = bucket.Bucket([]byte(entry)) } if bucket == nil { return nil } } return bucket } // GetDir will retrieve data of a cached directory func (b *Persistent) GetDir(remote string) (*Directory, error) { cd := &Directory{} err := b.db.View(func(tx *bolt.Tx) 
error { bucket := b.getBucket(remote, false, tx) if bucket == nil { return fmt.Errorf("couldn't open bucket (%v)", remote) } data := bucket.Get([]byte(".")) if data != nil { return json.Unmarshal(data, cd) } return fmt.Errorf("%v not found", remote) }) return cd, err } // AddDir will update a CachedDirectory metadata and all its entries func (b *Persistent) AddDir(cachedDir *Directory) error { return b.AddBatchDir([]*Directory{cachedDir}) } // AddBatchDir will update a list of CachedDirectory metadata and all their entries func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error { if len(cachedDirs) == 0 { return nil } return b.db.Update(func(tx *bolt.Tx) error { var bucket *bolt.Bucket if cachedDirs[0].Dir == "" { bucket = tx.Bucket([]byte(RootBucket)) } else { bucket = b.getBucket(cachedDirs[0].Dir, true, tx) } if bucket == nil { return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir) } for _, cachedDir := range cachedDirs { var b *bolt.Bucket var err error if cachedDir.Name == "" { b = bucket } else { b, err = bucket.CreateBucketIfNotExists([]byte(cachedDir.Name)) } if err != nil { return err } encoded, err := json.Marshal(cachedDir) if err != nil { return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err) } err = b.Put([]byte("."), encoded) if err != nil { return err } } return nil }) } // GetDirEntries will return a CachedDirectory, its list of dir entries and/or an error if it encountered issues func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error) { var dirEntries fs.DirEntries err := b.db.View(func(tx *bolt.Tx) error { bucket := b.getBucket(cachedDir.abs(), false, tx) if bucket == nil { return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs()) } val := bucket.Get([]byte(".")) if val != nil { err := json.Unmarshal(val, cachedDir) if err != nil { return fmt.Errorf("error during unmarshalling obj: %w", err) } } else { return fmt.Errorf("missing cached dir: %v", cachedDir) } c := bucket.Cursor() for 
k, v := c.First(); k != nil; k, v = c.Next() { // ignore metadata key: . if bytes.Equal(k, []byte(".")) { continue } entryPath := path.Join(cachedDir.Remote(), string(k)) if v == nil { // directory // we try to find a cached meta for the dir currentBucket := c.Bucket().Bucket(k) if currentBucket == nil { return fmt.Errorf("couldn't open bucket (%v)", string(k)) } metaKey := currentBucket.Get([]byte(".")) d := NewDirectory(cachedDir.CacheFs, entryPath) if metaKey != nil { //if we don't find it, we create an empty dir err := json.Unmarshal(metaKey, d) if err != nil { // if even this fails, we fallback to an empty dir fs.Debugf(string(k), "error during unmarshalling obj: %v", err) } } dirEntries = append(dirEntries, d) } else { // object o := NewObject(cachedDir.CacheFs, entryPath) err := json.Unmarshal(v, o) if err != nil { fs.Debugf(string(k), "error during unmarshalling obj: %v", err) continue } dirEntries = append(dirEntries, o) } } return nil }) return dirEntries, err } // RemoveDir will delete a CachedDirectory, all its objects and all the chunks stored for it func (b *Persistent) RemoveDir(fp string) error { var err error parentDir, dirName := path.Split(fp) if fp == "" { err = b.db.Update(func(tx *bolt.Tx) error { err := tx.DeleteBucket([]byte(RootBucket)) if err != nil { fs.Debugf(fp, "couldn't delete from cache: %v", err) return err } _, _ = tx.CreateBucketIfNotExists([]byte(RootBucket)) return nil }) } else { err = b.db.Update(func(tx *bolt.Tx) error { bucket := b.getBucket(cleanPath(parentDir), false, tx) if bucket == nil { return fmt.Errorf("couldn't open bucket (%v)", fp) } // delete the cached dir err := bucket.DeleteBucket([]byte(cleanPath(dirName))) if err != nil { fs.Debugf(fp, "couldn't delete from cache: %v", err) } return nil }) } // delete chunks on disk // safe to ignore as the files might not have been open if err == nil { _ = os.RemoveAll(path.Join(b.dataPath, fp)) _ = os.MkdirAll(b.dataPath, os.ModePerm) } return err } // ExpireDir will flush 
a CachedDirectory and all its objects from the objects // chunks will remain as they are func (b *Persistent) ExpireDir(cd *Directory) error { t := time.Now().Add(time.Duration(-cd.CacheFs.opt.InfoAge)) cd.CacheTs = &t // expire all parents return b.db.Update(func(tx *bolt.Tx) error { // expire all the parents currentDir := cd.abs() for { // until we get to the root bucket := b.getBucket(currentDir, false, tx) if bucket != nil { val := bucket.Get([]byte(".")) if val != nil { cd2 := &Directory{CacheFs: cd.CacheFs} err := json.Unmarshal(val, cd2) if err == nil { fs.Debugf(cd, "cache: expired %v", currentDir) cd2.CacheTs = &t enc2, _ := json.Marshal(cd2) _ = bucket.Put([]byte("."), enc2) } } } if currentDir == "" { break } currentDir = cleanPath(path.Dir(currentDir)) } return nil }) } // GetObject will return a CachedObject from its parent directory or an error if it doesn't find it func (b *Persistent) GetObject(cachedObject *Object) (err error) { return b.db.View(func(tx *bolt.Tx) error { bucket := b.getBucket(cachedObject.Dir, false, tx) if bucket == nil { return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir) } val := bucket.Get([]byte(cachedObject.Name)) if val != nil { return json.Unmarshal(val, cachedObject) } return fmt.Errorf("couldn't find object (%v)", cachedObject.Name) }) } // AddObject will create a cached object in its parent directory func (b *Persistent) AddObject(cachedObject *Object) error { return b.db.Update(func(tx *bolt.Tx) error { bucket := b.getBucket(cachedObject.Dir, true, tx) if bucket == nil { return fmt.Errorf("couldn't open parent bucket for %v", cachedObject) } // cache Object Info encoded, err := json.Marshal(cachedObject) if err != nil { return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err) } err = bucket.Put([]byte(cachedObject.Name), encoded) if err != nil { return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err) } return nil }) } // RemoveObject will delete a single 
cached object and all the chunks which belong to it func (b *Persistent) RemoveObject(fp string) error { parentDir, objName := path.Split(fp) return b.db.Update(func(tx *bolt.Tx) error { bucket := b.getBucket(cleanPath(parentDir), false, tx) if bucket == nil { return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir)) } err := bucket.Delete([]byte(cleanPath(objName))) if err != nil { fs.Debugf(fp, "couldn't delete obj from storage: %v", err) } // delete chunks on disk // safe to ignore as the file might not have been open _ = os.RemoveAll(path.Join(b.dataPath, fp)) return nil }) } // ExpireObject will flush an Object and all its data if desired func (b *Persistent) ExpireObject(co *Object, withData bool) error { co.CacheTs = time.Now().Add(time.Duration(-co.CacheFs.opt.InfoAge)) err := b.AddObject(co) if withData { _ = os.RemoveAll(path.Join(b.dataPath, co.abs())) } return err } // HasEntry confirms the existence of a single entry (dir or object) func (b *Persistent) HasEntry(remote string) bool { dir, name := path.Split(remote) dir = cleanPath(dir) name = cleanPath(name) err := b.db.View(func(tx *bolt.Tx) error { bucket := b.getBucket(dir, false, tx) if bucket == nil { return fmt.Errorf("couldn't open parent bucket for %v", remote) } if f := bucket.Bucket([]byte(name)); f != nil { return nil } if f := bucket.Get([]byte(name)); f != nil { return nil } return fmt.Errorf("couldn't find object (%v)", remote) }) return err == nil } // HasChunk confirms the existence of a single chunk of an object func (b *Persistent) HasChunk(cachedObject *Object, offset int64) bool { fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10)) if _, err := os.Stat(fp); !os.IsNotExist(err) { return true } return false } // GetChunk will retrieve a single chunk which belongs to a cached object or an error if it doesn't find it func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error) { var data []byte fp := 
path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10)) data, err := os.ReadFile(fp) if err != nil { return nil, err } return data, err } // AddChunk adds a new chunk of a cached object func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error { _ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm) filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10)) err := os.WriteFile(filePath, data, os.ModePerm) if err != nil { return err } return b.db.Update(func(tx *bolt.Tx) error { tsBucket := tx.Bucket([]byte(DataTsBucket)) ts := time.Now() found := false // delete (older) timestamps for the same object c := tsBucket.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { var ci chunkInfo err = json.Unmarshal(v, &ci) if err != nil { continue } if ci.Path == fp && ci.Offset == offset { if tsInCache := time.Unix(0, btoi(k)); tsInCache.After(ts) && !found { found = true continue } err := c.Delete() if err != nil { fs.Debugf(fp, "failed to clean chunk: %v", err) } } } // don't overwrite if a newer one is already there if found { return nil } enc, err := json.Marshal(chunkInfo{Path: fp, Offset: offset, Size: int64(len(data))}) if err != nil { fs.Debugf(fp, "failed to timestamp chunk: %v", err) } err = tsBucket.Put(itob(ts.UnixNano()), enc) if err != nil { fs.Debugf(fp, "failed to timestamp chunk: %v", err) } return nil }) } // CleanChunksByAge will cleanup on a cron basis func (b *Persistent) CleanChunksByAge(chunkAge time.Duration) { // NOOP } // CleanChunksByNeed is a noop for this implementation func (b *Persistent) CleanChunksByNeed(offset int64) { // noop: we want to clean a Bolt DB by time only } // CleanChunksBySize will cleanup chunks after the total size passes a certain point func (b *Persistent) CleanChunksBySize(maxSize int64) { b.cleanupMux.Lock() defer b.cleanupMux.Unlock() var cntChunks int var roughlyCleaned fs.SizeSuffix err := b.db.Update(func(tx *bolt.Tx) error { dataTsBucket := 
tx.Bucket([]byte(DataTsBucket)) if dataTsBucket == nil { return fmt.Errorf("couldn't open (%v) bucket", DataTsBucket) } // iterate through ts c := dataTsBucket.Cursor() totalSize := int64(0) for k, v := c.First(); k != nil; k, v = c.Next() { var ci chunkInfo err := json.Unmarshal(v, &ci) if err != nil { continue } totalSize += ci.Size } if totalSize > maxSize { needToClean := totalSize - maxSize roughlyCleaned = fs.SizeSuffix(needToClean) for k, v := c.First(); k != nil; k, v = c.Next() { var ci chunkInfo err := json.Unmarshal(v, &ci) if err != nil { continue } // delete this ts entry err = c.Delete() if err != nil { fs.Errorf(ci.Path, "failed deleting chunk ts during cleanup (%v): %v", ci.Offset, err) continue } err = os.Remove(path.Join(b.dataPath, ci.Path, strconv.FormatInt(ci.Offset, 10))) if err == nil { cntChunks++ needToClean -= ci.Size if needToClean <= 0 { break } } } } if cntChunks > 0 { fs.Infof("cache-cleanup", "chunks %v, est. size: %v", cntChunks, roughlyCleaned.String()) } return nil }) if err != nil { if err == errors.ErrDatabaseNotOpen { // we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore return } fs.Errorf("cache", "cleanup failed: %v", err) } } // Stats returns a go map with the stats key values func (b *Persistent) Stats() (map[string]map[string]any, error) { r := make(map[string]map[string]any) r["data"] = make(map[string]any) r["data"]["oldest-ts"] = time.Now() r["data"]["oldest-file"] = "" r["data"]["newest-ts"] = time.Now() r["data"]["newest-file"] = "" r["data"]["total-chunks"] = 0 r["data"]["total-size"] = int64(0) r["files"] = make(map[string]any) r["files"]["oldest-ts"] = time.Now() r["files"]["oldest-name"] = "" r["files"]["newest-ts"] = time.Now() r["files"]["newest-name"] = "" r["files"]["total-files"] = 0 _ = b.db.View(func(tx *bolt.Tx) error { dataTsBucket := tx.Bucket([]byte(DataTsBucket)) rootTsBucket := tx.Bucket([]byte(RootTsBucket)) var totalDirs int var totalFiles int _ = 
b.iterateBuckets(tx.Bucket([]byte(RootBucket)), func(name string) { totalDirs++ }, func(key string, val []byte) { totalFiles++ }) r["files"]["total-dir"] = totalDirs r["files"]["total-files"] = totalFiles c := dataTsBucket.Cursor() totalChunks := 0 totalSize := int64(0) for k, v := c.First(); k != nil; k, v = c.Next() { var ci chunkInfo err := json.Unmarshal(v, &ci) if err != nil { continue } totalChunks++ totalSize += ci.Size } r["data"]["total-chunks"] = totalChunks r["data"]["total-size"] = totalSize if k, v := c.First(); k != nil { var ci chunkInfo _ = json.Unmarshal(v, &ci) r["data"]["oldest-ts"] = time.Unix(0, btoi(k)) r["data"]["oldest-file"] = ci.Path } if k, v := c.Last(); k != nil { var ci chunkInfo _ = json.Unmarshal(v, &ci) r["data"]["newest-ts"] = time.Unix(0, btoi(k)) r["data"]["newest-file"] = ci.Path } c = rootTsBucket.Cursor() if k, v := c.First(); k != nil { // split to get (abs path - offset) r["files"]["oldest-ts"] = time.Unix(0, btoi(k)) r["files"]["oldest-name"] = string(v) } if k, v := c.Last(); k != nil { r["files"]["newest-ts"] = time.Unix(0, btoi(k)) r["files"]["newest-name"] = string(v) } return nil }) return r, nil } // Purge will flush the entire cache func (b *Persistent) Purge() { b.cleanupMux.Lock() defer b.cleanupMux.Unlock() _ = b.db.Update(func(tx *bolt.Tx) error { _ = tx.DeleteBucket([]byte(RootBucket)) _ = tx.DeleteBucket([]byte(RootTsBucket)) _ = tx.DeleteBucket([]byte(DataTsBucket)) _, _ = tx.CreateBucketIfNotExists([]byte(RootBucket)) _, _ = tx.CreateBucketIfNotExists([]byte(RootTsBucket)) _, _ = tx.CreateBucketIfNotExists([]byte(DataTsBucket)) return nil }) err := os.RemoveAll(b.dataPath) if err != nil { fs.Errorf(b, "issue removing data folder: %v", err) } err = os.MkdirAll(b.dataPath, os.ModePerm) if err != nil { fs.Errorf(b, "issue removing data folder: %v", err) } } // GetChunkTs retrieves the current timestamp of this chunk func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) { var t time.Time 
err := b.db.View(func(tx *bolt.Tx) error { tsBucket := tx.Bucket([]byte(DataTsBucket)) c := tsBucket.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { var ci chunkInfo err := json.Unmarshal(v, &ci) if err != nil { continue } if ci.Path == path && ci.Offset == offset { t = time.Unix(0, btoi(k)) return nil } } return fmt.Errorf("not found %v-%v", path, offset) }) return t, err } func (b *Persistent) iterateBuckets(buk *bolt.Bucket, bucketFn func(name string), kvFn func(key string, val []byte)) error { err := b.db.View(func(tx *bolt.Tx) error { var c *bolt.Cursor if buk == nil { c = tx.Cursor() } else { c = buk.Cursor() } for k, v := c.First(); k != nil; k, v = c.Next() { if v == nil { var buk2 *bolt.Bucket if buk == nil { buk2 = tx.Bucket(k) } else { buk2 = buk.Bucket(k) } bucketFn(string(k)) _ = b.iterateBuckets(buk2, bucketFn, kvFn) } else { kvFn(string(k), v) } } return nil }) return err } // addPendingUpload adds a new file to the pending queue of uploads func (b *Persistent) addPendingUpload(destPath string, started bool) error { return b.db.Update(func(tx *bolt.Tx) error { bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) if err != nil { return fmt.Errorf("couldn't bucket for %v", tempBucket) } tempObj := &tempUploadInfo{ DestPath: destPath, AddedOn: time.Now(), Started: started, } // cache Object Info encoded, err := json.Marshal(tempObj) if err != nil { return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err) } err = bucket.Put([]byte(destPath), encoded) if err != nil { return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err) } return nil }) } // getPendingUpload returns the next file from the pending queue of uploads func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (destPath string, err error) { b.tempQueueMux.Lock() defer b.tempQueueMux.Unlock() err = b.db.Update(func(tx *bolt.Tx) error { bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) if err != nil { return 
fmt.Errorf("couldn't bucket for %v", tempBucket) } c := bucket.Cursor() for k, v := c.Seek([]byte(inRoot)); k != nil && bytes.HasPrefix(k, []byte(inRoot)); k, v = c.Next() { //for k, v := c.First(); k != nil; k, v = c.Next() { var tempObj = &tempUploadInfo{} err = json.Unmarshal(v, tempObj) if err != nil { fs.Errorf(b, "failed to read pending upload: %v", err) continue } // skip over started uploads if tempObj.Started || time.Now().Before(tempObj.AddedOn.Add(waitTime)) { continue } tempObj.Started = true v2, err := json.Marshal(tempObj) if err != nil { fs.Errorf(b, "failed to update pending upload: %v", err) continue } err = bucket.Put(k, v2) if err != nil { fs.Errorf(b, "failed to update pending upload: %v", err) continue } destPath = tempObj.DestPath return nil } return fmt.Errorf("no pending upload found") }) return destPath, err } // SearchPendingUpload returns the file info from the pending queue of uploads func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error) { err = b.db.View(func(tx *bolt.Tx) error { bucket := tx.Bucket([]byte(tempBucket)) if bucket == nil { return fmt.Errorf("couldn't bucket for %v", tempBucket) } var tempObj = &tempUploadInfo{} v := bucket.Get([]byte(remote)) err = json.Unmarshal(v, tempObj) if err != nil { return fmt.Errorf("pending upload (%v) not found %v", remote, err) } started = tempObj.Started return nil }) return started, err } // searchPendingUploadFromDir files currently pending upload from a single dir func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, err error) { err = b.db.View(func(tx *bolt.Tx) error { bucket := tx.Bucket([]byte(tempBucket)) if bucket == nil { return fmt.Errorf("couldn't bucket for %v", tempBucket) } c := bucket.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { var tempObj = &tempUploadInfo{} err = json.Unmarshal(v, tempObj) if err != nil { fs.Errorf(b, "failed to read pending upload: %v", err) continue } parentDir := 
cleanPath(path.Dir(tempObj.DestPath)) if dir == parentDir { remotes = append(remotes, tempObj.DestPath) } } return nil }) return remotes, err } func (b *Persistent) rollbackPendingUpload(remote string) error { b.tempQueueMux.Lock() defer b.tempQueueMux.Unlock() return b.db.Update(func(tx *bolt.Tx) error { bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) if err != nil { return fmt.Errorf("couldn't bucket for %v", tempBucket) } var tempObj = &tempUploadInfo{} v := bucket.Get([]byte(remote)) err = json.Unmarshal(v, tempObj) if err != nil { return fmt.Errorf("pending upload (%v) not found: %w", remote, err) } tempObj.Started = false v2, err := json.Marshal(tempObj) if err != nil { return fmt.Errorf("pending upload not updated: %w", err) } err = bucket.Put([]byte(tempObj.DestPath), v2) if err != nil { return fmt.Errorf("pending upload not updated: %w", err) } return nil }) } func (b *Persistent) removePendingUpload(remote string) error { b.tempQueueMux.Lock() defer b.tempQueueMux.Unlock() return b.db.Update(func(tx *bolt.Tx) error { bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) if err != nil { return fmt.Errorf("couldn't bucket for %v", tempBucket) } return bucket.Delete([]byte(remote)) }) } // updatePendingUpload allows to update an existing item in the queue while checking if it's not started in the same // transaction. 
If it is started, it will not allow the update func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUploadInfo) error) error { b.tempQueueMux.Lock() defer b.tempQueueMux.Unlock() return b.db.Update(func(tx *bolt.Tx) error { bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) if err != nil { return fmt.Errorf("couldn't bucket for %v", tempBucket) } var tempObj = &tempUploadInfo{} v := bucket.Get([]byte(remote)) err = json.Unmarshal(v, tempObj) if err != nil { return fmt.Errorf("pending upload (%v) not found %v", remote, err) } if tempObj.Started { return fmt.Errorf("pending upload already started %v", remote) } err = fn(tempObj) if err != nil { return err } if remote != tempObj.DestPath { err := bucket.Delete([]byte(remote)) if err != nil { return err } // if this is removed then the entry can be removed too if tempObj.DestPath == "" { return nil } } v2, err := json.Marshal(tempObj) if err != nil { return fmt.Errorf("pending upload not updated: %w", err) } err = bucket.Put([]byte(tempObj.DestPath), v2) if err != nil { return fmt.Errorf("pending upload not updated: %w", err) } return nil }) } // ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error { return b.db.Update(func(tx *bolt.Tx) error { _ = tx.DeleteBucket([]byte(tempBucket)) bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) if err != nil { return err } var queuedEntries []fs.Object err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error { for _, o := range entries { if oo, ok := o.(fs.Object); ok { queuedEntries = append(queuedEntries, oo) } } return nil }) if err != nil { return err } fs.Debugf(cacheFs, "reconciling temporary uploads") for _, queuedEntry := range queuedEntries { destPath := path.Join(cacheFs.Root(), queuedEntry.Remote()) tempObj := &tempUploadInfo{ DestPath: 
destPath, AddedOn: time.Now(), Started: false, } // cache Object Info encoded, err := json.Marshal(tempObj) if err != nil { return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err) } err = bucket.Put([]byte(destPath), encoded) if err != nil { return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err) } fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath) } return nil }) } // Close should be called when the program ends gracefully func (b *Persistent) Close() { b.cleanupMux.Lock() defer b.cleanupMux.Unlock() err := b.db.Close() if err != nil { fs.Errorf(b, "closing handle: %v", err) } b.open = false } // itob returns an 8-byte big endian representation of v. func itob(v int64) []byte { b := make([]byte, 8) binary.BigEndian.PutUint64(b, uint64(v)) return b } func btoi(d []byte) int64 { return int64(binary.BigEndian.Uint64(d)) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cache/cache.go
backend/cache/cache.go
//go:build !plan9 && !js // Package cache implements a virtual provider to cache existing remotes. package cache import ( "context" "errors" "fmt" "io" "math" "os" "os/signal" "path" "path/filepath" "sort" "strconv" "strings" "sync" "syscall" "time" "github.com/rclone/rclone/backend/crypt" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/lib/atexit" "golang.org/x/time/rate" ) const ( // DefCacheChunkSize is the default value for chunk size DefCacheChunkSize = fs.SizeSuffix(5 * 1024 * 1024) // DefCacheTotalChunkSize is the default value for the maximum size of stored chunks DefCacheTotalChunkSize = fs.SizeSuffix(10 * 1024 * 1024 * 1024) // DefCacheChunkCleanInterval is the interval at which chunks are cleaned DefCacheChunkCleanInterval = fs.Duration(time.Minute) // DefCacheInfoAge is the default value for object info age DefCacheInfoAge = fs.Duration(6 * time.Hour) // DefCacheReadRetries is the default value for read retries DefCacheReadRetries = 10 // DefCacheTotalWorkers is how many workers run in parallel to download chunks DefCacheTotalWorkers = 4 // DefCacheChunkNoMemory will enable or disable in-memory storage for chunks DefCacheChunkNoMemory = false // DefCacheRps limits the number of requests per second to the source FS DefCacheRps = -1 // DefCacheWrites will cache file data on writes through the cache DefCacheWrites = false // DefCacheTmpWaitTime says how long should files be stored in local cache before being uploaded DefCacheTmpWaitTime = fs.Duration(15 * time.Second) // DefCacheDbWaitTime defines how long the cache backend should wait for the DB to be available 
DefCacheDbWaitTime = fs.Duration(1 * time.Second) ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "cache", Description: "Cache a remote", NewFs: NewFs, CommandHelp: commandHelp, Options: []fs.Option{{ Name: "remote", Help: "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).", Required: true, }, { Name: "plex_url", Help: "The URL of the Plex server.", }, { Name: "plex_username", Help: "The username of the Plex user.", Sensitive: true, }, { Name: "plex_password", Help: "The password of the Plex user.", IsPassword: true, }, { Name: "plex_token", Help: "The plex token for authentication - auto set normally.", Hide: fs.OptionHideBoth, Advanced: true, Sensitive: true, }, { Name: "plex_insecure", Help: "Skip all certificate verification when connecting to the Plex server.", Advanced: true, }, { Name: "chunk_size", Help: `The size of a chunk (partial file data). Use lower numbers for slower connections. If the chunk size is changed, any downloaded chunks will be invalid and cache-chunk-path will need to be cleared or unexpected EOF errors will occur.`, Default: DefCacheChunkSize, Examples: []fs.OptionExample{{ Value: "1M", Help: "1 MiB", }, { Value: "5M", Help: "5 MiB", }, { Value: "10M", Help: "10 MiB", }}, }, { Name: "info_age", Help: `How long to cache file structure information (directory listings, file size, times, etc.). If all write operations are done through the cache then you can safely make this value very large as the cache store will also be updated in real time.`, Default: DefCacheInfoAge, Examples: []fs.OptionExample{{ Value: "1h", Help: "1 hour", }, { Value: "24h", Help: "24 hours", }, { Value: "48h", Help: "48 hours", }}, }, { Name: "chunk_total_size", Help: `The total size that the chunks can take up on the local disk. 
If the cache exceeds this value then it will start to delete the oldest chunks until it goes under this value.`, Default: DefCacheTotalChunkSize, Examples: []fs.OptionExample{{ Value: "500M", Help: "500 MiB", }, { Value: "1G", Help: "1 GiB", }, { Value: "10G", Help: "10 GiB", }}, }, { Name: "db_path", Default: filepath.Join(config.GetCacheDir(), "cache-backend"), Help: "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.", Advanced: true, }, { Name: "chunk_path", Default: filepath.Join(config.GetCacheDir(), "cache-backend"), Help: `Directory to cache chunk files. Path to where partial file data (chunks) are stored locally. The remote name is appended to the final path. This config follows the "--cache-db-path". If you specify a custom location for "--cache-db-path" and don't specify one for "--cache-chunk-path" then "--cache-chunk-path" will use the same path as "--cache-db-path".`, Advanced: true, }, { Name: "db_purge", Default: false, Help: "Clear all the cached data for this remote on start.", Hide: fs.OptionHideConfigurator, Advanced: true, }, { Name: "chunk_clean_interval", Default: DefCacheChunkCleanInterval, Help: `How often should the cache perform cleanups of the chunk storage. The default value should be ok for most people. If you find that the cache goes over "cache-chunk-total-size" too often then try to lower this value to force it to perform cleanups more often.`, Advanced: true, }, { Name: "read_retries", Default: DefCacheReadRetries, Help: `How many times to retry a read from a cache storage. Since reading from a cache stream is independent from downloading file data, readers can get to a point where there's no more data in the cache. Most of the times this can indicate a connectivity issue if cache isn't able to provide file data anymore. 
For really slow connections, increase this to a point where the stream is able to provide data but your experience will be very stuttering.`, Advanced: true, }, { Name: "workers", Default: DefCacheTotalWorkers, Help: `How many workers should run in parallel to download chunks. Higher values will mean more parallel processing (better CPU needed) and more concurrent requests on the cloud provider. This impacts several aspects like the cloud provider API limits, more stress on the hardware that rclone runs on but it also means that streams will be more fluid and data will be available much more faster to readers. **Note**: If the optional Plex integration is enabled then this setting will adapt to the type of reading performed and the value specified here will be used as a maximum number of workers to use.`, Advanced: true, }, { Name: "chunk_no_memory", Default: DefCacheChunkNoMemory, Help: `Disable the in-memory cache for storing chunks during streaming. By default, cache will keep file data during streaming in RAM as well to provide it to readers as fast as possible. This transient data is evicted as soon as it is read and the number of chunks stored doesn't exceed the number of workers. However, depending on other settings like "cache-chunk-size" and "cache-workers" this footprint can increase if there are parallel streams too (multiple files being read at the same time). If the hardware permits it, use this feature to provide an overall better performance during streaming but it can also be disabled if RAM is not available on the local machine.`, Advanced: true, }, { Name: "rps", Default: int(DefCacheRps), Help: `Limits the number of requests per second to the source FS (-1 to disable). This setting places a hard limit on the number of requests per second that cache will be doing to the cloud provider remote and try to respect that value by setting waits between reads. 
If you find that you're getting banned or limited on the cloud provider through cache and know that a smaller number of requests per second will allow you to work with it then you can use this setting for that. A good balance of all the other settings should make this setting useless but it is available to set for more special cases. **NOTE**: This will limit the number of requests during streams but other API calls to the cloud provider like directory listings will still pass.`, Advanced: true, }, { Name: "writes", Default: DefCacheWrites, Help: `Cache file data on writes through the FS. If you need to read files immediately after you upload them through cache you can enable this flag to have their data stored in the cache store at the same time during upload.`, Advanced: true, }, { Name: "tmp_upload_path", Default: "", Help: `Directory to keep temporary files until they are uploaded. This is the path where cache will use as a temporary storage for new files that need to be uploaded to the cloud provider. Specifying a value will enable this feature. Without it, it is completely disabled and files will be uploaded directly to the cloud provider`, Advanced: true, }, { Name: "tmp_wait_time", Default: DefCacheTmpWaitTime, Help: `How long should files be stored in local cache before being uploaded. This is the duration that a file must wait in the temporary location _cache-tmp-upload-path_ before it is selected for upload. Note that only one file is uploaded at a time and it can take longer to start the upload if a queue formed for this purpose.`, Advanced: true, }, { Name: "db_wait_time", Default: DefCacheDbWaitTime, Help: `How long to wait for the DB to be available - 0 is unlimited. Only one process can have the DB open at any one time, so rclone waits for this duration for the DB to become available before it gives an error. 
If you set it to 0 then it will wait forever.`, Advanced: true, }}, }) } // Options defines the configuration for this backend type Options struct { Remote string `config:"remote"` PlexURL string `config:"plex_url"` PlexUsername string `config:"plex_username"` PlexPassword string `config:"plex_password"` PlexToken string `config:"plex_token"` PlexInsecure bool `config:"plex_insecure"` ChunkSize fs.SizeSuffix `config:"chunk_size"` InfoAge fs.Duration `config:"info_age"` ChunkTotalSize fs.SizeSuffix `config:"chunk_total_size"` DbPath string `config:"db_path"` ChunkPath string `config:"chunk_path"` DbPurge bool `config:"db_purge"` ChunkCleanInterval fs.Duration `config:"chunk_clean_interval"` ReadRetries int `config:"read_retries"` TotalWorkers int `config:"workers"` ChunkNoMemory bool `config:"chunk_no_memory"` Rps int `config:"rps"` StoreWrites bool `config:"writes"` TempWritePath string `config:"tmp_upload_path"` TempWaitTime fs.Duration `config:"tmp_wait_time"` DbWaitTime fs.Duration `config:"db_wait_time"` } // Fs represents a wrapped fs.Fs type Fs struct { fs.Fs wrapper fs.Fs name string root string opt Options // parsed options features *fs.Features // optional features cache *Persistent tempFs fs.Fs lastChunkCleanup time.Time cleanupMu sync.Mutex rateLimiter *rate.Limiter plexConnector *plexConnector backgroundRunner *backgroundWriter cleanupChan chan bool parentsForgetFn []func(string, fs.EntryType) notifiedRemotes map[string]bool notifiedMu sync.Mutex parentsForgetMu sync.Mutex } // parseRootPath returns a cleaned root path and a nil error or "" and an error when the path is invalid func parseRootPath(path string) (string, error) { return strings.Trim(path, "/"), nil } var warnDeprecated sync.Once // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) { warnDeprecated.Do(func() { fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. 
Please use VFS instead.") }) // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) { return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)", opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers) } if strings.HasPrefix(opt.Remote, name+":") { return nil, errors.New("can't point cache remote at itself - check the value of the remote setting") } rpath, err := parseRootPath(rootPath) if err != nil { return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err) } remotePath := fspath.JoinRootPath(opt.Remote, rootPath) wrappedFs, wrapErr := cache.Get(ctx, remotePath) if wrapErr != nil && wrapErr != fs.ErrorIsFile { return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr) } var fsErr error fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath) if wrapErr == fs.ErrorIsFile { fsErr = fs.ErrorIsFile rpath = cleanPath(path.Dir(rpath)) } // configure cache backend if opt.DbPurge { fs.Debugf(name, "Purging the DB") } f := &Fs{ Fs: wrappedFs, name: name, root: rpath, opt: *opt, lastChunkCleanup: time.Now().Truncate(time.Hour * 24 * 30), cleanupChan: make(chan bool, 1), notifiedRemotes: make(map[string]bool), } cache.PinUntilFinalized(f.Fs, f) rps := rate.Inf if opt.Rps > 0 { rps = rate.Limit(float64(opt.Rps)) } f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers) f.plexConnector = &plexConnector{} if opt.PlexURL != "" { if opt.PlexToken != "" { f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure) if err != nil { return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err) } } else if opt.PlexPassword != "" && opt.PlexUsername != "" { decPass, err := obscure.Reveal(opt.PlexPassword) if err != nil { decPass = opt.PlexPassword } f.plexConnector, err = 
newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) { m.Set("plex_token", token) }) if err != nil { return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err) } } } dbPath := f.opt.DbPath chunkPath := f.opt.ChunkPath // if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath if dbPath != filepath.Join(config.GetCacheDir(), "cache-backend") && chunkPath == filepath.Join(config.GetCacheDir(), "cache-backend") { chunkPath = dbPath } if filepath.Ext(dbPath) != "" { dbPath = filepath.Dir(dbPath) } if filepath.Ext(chunkPath) != "" { chunkPath = filepath.Dir(chunkPath) } err = os.MkdirAll(dbPath, os.ModePerm) if err != nil { return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err) } err = os.MkdirAll(chunkPath, os.ModePerm) if err != nil { return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err) } dbPath = filepath.Join(dbPath, name+".db") chunkPath = filepath.Join(chunkPath, name) fs.Infof(name, "Cache DB path: %v", dbPath) fs.Infof(name, "Cache chunk path: %v", chunkPath) f.cache, err = GetPersistent(dbPath, chunkPath, &Features{ PurgeDb: opt.DbPurge, DbWaitTime: time.Duration(opt.DbWaitTime), }) if err != nil { return nil, fmt.Errorf("failed to start cache db: %w", err) } // Trap SIGINT and SIGTERM to close the DB handle gracefully c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGHUP) atexit.Register(func() { if opt.PlexURL != "" { f.plexConnector.closeWebsocket() } f.StopBackgroundRunners() }) go func() { for { s := <-c if s == syscall.SIGHUP { fs.Infof(f, "Clearing cache from signal") f.DirCacheFlush() } } }() fs.Infof(name, "Chunk Memory: %v", !f.opt.ChunkNoMemory) fs.Infof(name, "Chunk Size: %v", f.opt.ChunkSize) fs.Infof(name, "Chunk Total Size: %v", f.opt.ChunkTotalSize) fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval) fs.Infof(name, "Workers: %v", 
f.opt.TotalWorkers) fs.Infof(name, "File Age: %v", f.opt.InfoAge) if f.opt.StoreWrites { fs.Infof(name, "Cache Writes: enabled") } if f.opt.TempWritePath != "" { err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm) if err != nil { return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err) } f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath) f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath) if err != nil { return nil, fmt.Errorf("failed to create temp fs: %w", err) } fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime) fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath) f.backgroundRunner, _ = initBackgroundUploader(f) go f.backgroundRunner.run() } go func() { for { time.Sleep(time.Duration(f.opt.ChunkCleanInterval)) select { case <-f.cleanupChan: fs.Infof(f, "stopping cleanup") return default: fs.Debugf(f, "starting cleanup") f.CleanUpCache(false) } } }() if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil { pollInterval := make(chan time.Duration, 1) pollInterval <- time.Duration(f.opt.ChunkCleanInterval) doChangeNotify(ctx, f.receiveChangeNotify, pollInterval) } f.features = (&fs.Features{ CanHaveEmptyDirectories: true, DuplicateFiles: false, // storage doesn't permit this }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) // override only those features that use a temp fs and it doesn't support them //f.features.ChangeNotify = f.ChangeNotify if f.opt.TempWritePath != "" { if f.tempFs.Features().Move == nil { f.features.Move = nil } if f.tempFs.Features().Move == nil { f.features.Move = nil } if f.tempFs.Features().DirMove == nil { f.features.DirMove = nil } if f.tempFs.Features().MergeDirs == nil { f.features.MergeDirs = nil } } // even if the wrapped fs doesn't support it, we still want it f.features.DirCacheFlush = f.DirCacheFlush rc.Add(rc.Call{ Path: "cache/expire", Fn: f.httpExpireRemote, Title: "Purge a remote from cache", Help: ` Purge a remote from the cache 
backend. Supports either a directory or a file. Params: - remote = path to remote (required) - withData = true/false to delete cached data (chunks) as well (optional) Eg rclone rc cache/expire remote=path/to/sub/folder/ rclone rc cache/expire remote=/ withData=true `, }) rc.Add(rc.Call{ Path: "cache/stats", Fn: f.httpStats, Title: "Get cache stats", Help: ` Show statistics for the cache remote. `, }) rc.Add(rc.Call{ Path: "cache/fetch", Fn: f.rcFetch, Title: "Fetch file chunks", Help: ` Ensure the specified file chunks are cached on disk. The chunks= parameter specifies the file chunks to check. It takes a comma separated list of array slice indices. The slice indices are similar to Python slices: start[:end] start is the 0 based chunk number from the beginning of the file to fetch inclusive. end is 0 based chunk number from the beginning of the file to fetch exclusive. Both values can be negative, in which case they count from the back of the file. The value "-5:" represents the last 5 chunks of a file. Some valid examples are: ":5,-5:" -> the first and last five chunks "0,-2" -> the first and the second last chunk "0:10" -> the first ten chunks Any parameter with a key that starts with "file" can be used to specify files to fetch, e.g. rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye File names will automatically be encrypted when the a crypt remote is used on top of the cache. 
`, }) return f, fsErr } func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) { out = make(rc.Params) m, err := f.Stats() if err != nil { return out, fmt.Errorf("error while getting cache stats") } out["status"] = "ok" out["stats"] = m return out, nil } func (f *Fs) unwrapRemote(remote string) string { remote = cleanPath(remote) if remote != "" { // if it's wrapped by crypt we need to check what format we got if cryptFs, yes := f.isWrappedByCrypt(); yes { _, err := cryptFs.DecryptFileName(remote) // if it failed to decrypt then it is a decrypted format and we need to encrypt it if err != nil { return cryptFs.EncryptFileName(remote) } // else it's an encrypted format and we can use it as it is } } return remote } func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) { out = make(rc.Params) remoteInt, ok := in["remote"] if !ok { return out, fmt.Errorf("remote is needed") } remote := remoteInt.(string) withData := false _, ok = in["withData"] if ok { withData = true } remote = f.unwrapRemote(remote) if !f.cache.HasEntry(path.Join(f.Root(), remote)) { return out, fmt.Errorf("%s doesn't exist in cache", remote) } co := NewObject(f, remote) err = f.cache.GetObject(co) if err != nil { // it could be a dir cd := NewDirectory(f, remote) err := f.cache.ExpireDir(cd) if err != nil { return out, fmt.Errorf("error expiring directory: %w", err) } // notify vfs too f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory) out["status"] = "ok" out["message"] = fmt.Sprintf("cached directory cleared: %v", remote) return out, nil } // expire the entry err = f.cache.ExpireObject(co, withData) if err != nil { return out, fmt.Errorf("error expiring file: %w", err) } // notify vfs too f.notifyChangeUpstream(co.Remote(), fs.EntryObject) out["status"] = "ok" out["message"] = fmt.Sprintf("cached file cleared: %v", remote) return out, nil } func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) { type 
chunkRange struct { start, end int64 } parseChunks := func(ranges string) (crs []chunkRange, err error) { for part := range strings.SplitSeq(ranges, ",") { var start, end int64 = 0, math.MaxInt64 switch ints := strings.Split(part, ":"); len(ints) { case 1: start, err = strconv.ParseInt(ints[0], 10, 64) if err != nil { return nil, fmt.Errorf("invalid range: %q", part) } end = start + 1 case 2: if ints[0] != "" { start, err = strconv.ParseInt(ints[0], 10, 64) if err != nil { return nil, fmt.Errorf("invalid range: %q", part) } } if ints[1] != "" { end, err = strconv.ParseInt(ints[1], 10, 64) if err != nil { return nil, fmt.Errorf("invalid range: %q", part) } } default: return nil, fmt.Errorf("invalid range: %q", part) } crs = append(crs, chunkRange{start: start, end: end}) } return } walkChunkRange := func(cr chunkRange, size int64, cb func(chunk int64)) { if size <= 0 { return } chunks := (size-1)/f.ChunkSize() + 1 start, end := cr.start, cr.end if start < 0 { start += chunks } if end <= 0 { end += chunks } if end <= start { return } switch { case start < 0: start = 0 case start >= chunks: return } switch { case end <= start: end = start + 1 case end >= chunks: end = chunks } for i := start; i < end; i++ { cb(i) } } walkChunkRanges := func(crs []chunkRange, size int64, cb func(chunk int64)) { for _, cr := range crs { walkChunkRange(cr, size, cb) } } v, ok := in["chunks"] if !ok { return nil, errors.New("missing chunks parameter") } s, ok := v.(string) if !ok { return nil, errors.New("invalid chunks parameter") } delete(in, "chunks") crs, err := parseChunks(s) if err != nil { return nil, fmt.Errorf("invalid chunks parameter: %w", err) } var files [][2]string for k, v := range in { if !strings.HasPrefix(k, "file") { return nil, fmt.Errorf("invalid parameter %s=%s", k, v) } switch v := v.(type) { case string: files = append(files, [2]string{v, f.unwrapRemote(v)}) default: return nil, fmt.Errorf("invalid parameter %s=%s", k, v) } } type fileStatus struct { Error string 
FetchedChunks int } fetchedChunks := make(map[string]fileStatus, len(files)) for _, pair := range files { file, remote := pair[0], pair[1] var status fileStatus o, err := f.NewObject(ctx, remote) if err != nil { fetchedChunks[file] = fileStatus{Error: err.Error()} continue } co := o.(*Object) err = co.refreshFromSource(ctx, true) if err != nil { fetchedChunks[file] = fileStatus{Error: err.Error()} continue } handle := NewObjectHandle(ctx, co, f) handle.UseMemory = false handle.scaleWorkers(1) walkChunkRanges(crs, co.Size(), func(chunk int64) { _, err := handle.getChunk(chunk * f.ChunkSize()) if err != nil { if status.Error == "" { status.Error = err.Error() } } else { status.FetchedChunks++ } }) fetchedChunks[file] = status } return rc.Params{"status": fetchedChunks}, nil } // receiveChangeNotify is a wrapper to notifications sent from the wrapped FS about changed files func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) { if crypt, yes := f.isWrappedByCrypt(); yes { decryptedPath, err := crypt.DecryptFileName(forgetPath) if err == nil { fs.Infof(decryptedPath, "received cache expiry notification") } else { fs.Infof(forgetPath, "received cache expiry notification") } } else { fs.Infof(forgetPath, "received cache expiry notification") } // notify upstreams too (vfs) f.notifyChangeUpstream(forgetPath, entryType) var cd *Directory if entryType == fs.EntryObject { co := NewObject(f, forgetPath) err := f.cache.GetObject(co) if err != nil { fs.Debugf(f, "got change notification for non cached entry %v", co) } err = f.cache.ExpireObject(co, true) if err != nil { fs.Debugf(forgetPath, "notify: error expiring '%v': %v", co, err) } cd = NewDirectory(f, cleanPath(path.Dir(co.Remote()))) } else { cd = NewDirectory(f, forgetPath) } // we expire the dir err := f.cache.ExpireDir(cd) if err != nil { fs.Debugf(forgetPath, "notify: error expiring '%v': %v", cd, err) } else { fs.Debugf(forgetPath, "notify: expired '%v'", cd) } f.notifiedMu.Lock() defer 
f.notifiedMu.Unlock() f.notifiedRemotes[forgetPath] = true f.notifiedRemotes[cd.Remote()] = true } // notifyChangeUpstreamIfNeeded will check if the wrapped remote doesn't notify on changes // or if we use a temp fs func (f *Fs) notifyChangeUpstreamIfNeeded(remote string, entryType fs.EntryType) { if f.Fs.Features().ChangeNotify == nil || f.opt.TempWritePath != "" { f.notifyChangeUpstream(remote, entryType) } } // notifyChangeUpstream will loop through all the upstreams and notify // of the provided remote (should be only a dir) func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) { f.parentsForgetMu.Lock() defer f.parentsForgetMu.Unlock() if len(f.parentsForgetFn) > 0 { for _, fn := range f.parentsForgetFn { fn(remote, entryType) } } } // ChangeNotify can subscribe multiple callers // this is coupled with the wrapped fs ChangeNotify (if it supports it) // and also notifies other caches (i.e VFS) to clear out whenever something changes func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) { f.parentsForgetMu.Lock() defer f.parentsForgetMu.Unlock() fs.Debugf(f, "subscribing to ChangeNotify") f.parentsForgetFn = append(f.parentsForgetFn, notifyFunc) go func() { for range pollInterval { } }() } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // String returns a description of the FS func (f *Fs) String() string { return fmt.Sprintf("Cache remote %s:%s", f.name, f.root) } // ChunkSize returns the configured chunk size func (f *Fs) ChunkSize() int64 { return int64(f.opt.ChunkSize) } // InfoAge returns the configured file age func (f *Fs) InfoAge() time.Duration { return time.Duration(f.opt.InfoAge) } // TempUploadWaitTime returns the configured 
temp file upload wait time func (f *Fs) TempUploadWaitTime() time.Duration { return time.Duration(f.opt.TempWaitTime) } // NewObject finds the Object at remote. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { var err error fs.Debugf(f, "new object '%s'", remote) co := NewObject(f, remote) // search for entry in cache and validate it err = f.cache.GetObject(co) if err != nil { fs.Debugf(remote, "find: error: %v", err) } else if time.Now().After(co.CacheTs.Add(time.Duration(f.opt.InfoAge))) { fs.Debugf(co, "find: cold object: %+v", co) } else { fs.Debugf(co, "find: warm object: %v, expiring on: %v", co, co.CacheTs.Add(time.Duration(f.opt.InfoAge))) return co, nil } // search for entry in source or temp fs var obj fs.Object if f.opt.TempWritePath != "" { obj, err = f.tempFs.NewObject(ctx, remote) // not found in temp fs if err != nil { fs.Debugf(remote, "find: not found in local cache fs") obj, err = f.Fs.NewObject(ctx, remote) } else { fs.Debugf(obj, "find: found in local cache fs") } } else { obj, err = f.Fs.NewObject(ctx, remote) } // not found in either fs if err != nil { fs.Debugf(obj, "find failed: not found in either local or remote fs") return nil, err } // cache the new entry co = ObjectFromOriginal(ctx, f, obj).persist() fs.Debugf(co, "find: cached object") return co, nil } // List the objects and directories in dir into entries func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { fs.Debugf(f, "list '%s'", dir) cd := ShallowDirectory(f, dir) // search for cached dir entries and validate them entries, err = f.cache.GetDirEntries(cd) if err != nil { fs.Debugf(dir, "list: error: %v", err) } else if time.Now().After(cd.CacheTs.Add(time.Duration(f.opt.InfoAge))) { fs.Debugf(dir, "list: cold listing: %v", cd.CacheTs) } else if len(entries) == 0 { // TODO: read empty dirs from source? 
fs.Debugf(dir, "list: empty listing") } else { fs.Debugf(dir, "list: warm %v from cache for: %v, expiring on: %v", len(entries), cd.abs(), cd.CacheTs.Add(time.Duration(f.opt.InfoAge))) fs.Debugf(dir, "list: cached entries: %v", entries) return entries, nil } // we first search any temporary files stored locally var cachedEntries fs.DirEntries if f.opt.TempWritePath != "" { queuedEntries, err := f.cache.searchPendingUploadFromDir(cd.abs()) if err != nil { fs.Errorf(dir, "list: error getting pending uploads: %v", err) } else { fs.Debugf(dir, "list: read %v from temp fs", len(queuedEntries)) fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries) for _, queuedRemote := range queuedEntries { queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote)) if err != nil { fs.Debugf(dir, "list: temp file not found in local fs: %v", err) continue } co := ObjectFromOriginal(ctx, f, queuedEntry).persist() fs.Debugf(co, "list: cached temp object") cachedEntries = append(cachedEntries, co) } } } // search from the source sourceEntries, err := f.Fs.List(ctx, dir) if err != nil { return nil, err } fs.Debugf(dir, "list: read %v from source", len(sourceEntries)) fs.Debugf(dir, "list: source entries: %v", sourceEntries) sort.Sort(sourceEntries) for _, entry := range entries { entryRemote := entry.Remote() i := sort.Search(len(sourceEntries), func(i int) bool { return sourceEntries[i].Remote() >= entryRemote }) if i < len(sourceEntries) && sourceEntries[i].Remote() == entryRemote { continue } fp := path.Join(f.Root(), entryRemote) switch entry.(type) { case fs.Object: _ = f.cache.RemoveObject(fp) case fs.Directory: _ = f.cache.RemoveDir(fp) } fs.Debugf(dir, "list: remove entry: %v", entryRemote) } entries = nil //nolint:ineffassign
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cache/cache_test.go
backend/cache/cache_test.go
// Test Cache filesystem interface //go:build !plan9 && !js && !race package cache_test import ( "testing" "github.com/rclone/rclone/backend/cache" _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestCache:", NilObject: (*cache.Object)(nil), UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"}, UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"}, UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"}, SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cache/directory.go
backend/cache/directory.go
//go:build !plan9 && !js package cache import ( "context" "path" "time" "github.com/rclone/rclone/fs" ) // Directory is a generic dir that stores basic information about it type Directory struct { Directory fs.Directory `json:"-"` // can be nil CacheFs *Fs `json:"-"` // cache fs Name string `json:"name"` // name of the directory Dir string `json:"dir"` // abs path of the directory CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown CacheItems int64 `json:"items"` // number of objects or -1 for unknown CacheType string `json:"cacheType"` // object type CacheTs *time.Time `json:",omitempty"` } // NewDirectory builds an empty dir which will be used to unmarshal data in it func NewDirectory(f *Fs, remote string) *Directory { cd := ShallowDirectory(f, remote) t := time.Now() cd.CacheTs = &t return cd } // ShallowDirectory builds an empty dir which will be used to unmarshal data in it func ShallowDirectory(f *Fs, remote string) *Directory { var cd *Directory fullRemote := cleanPath(path.Join(f.Root(), remote)) // build a new one dir := cleanPath(path.Dir(fullRemote)) name := cleanPath(path.Base(fullRemote)) cd = &Directory{ CacheFs: f, Name: name, Dir: dir, CacheModTime: time.Now().UnixNano(), CacheSize: 0, CacheItems: 0, CacheType: "Directory", } return cd } // DirectoryFromOriginal builds one from a generic fs.Directory func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory { var cd *Directory fullRemote := path.Join(f.Root(), d.Remote()) dir := cleanPath(path.Dir(fullRemote)) name := cleanPath(path.Base(fullRemote)) t := time.Now() cd = &Directory{ Directory: d, CacheFs: f, Name: name, Dir: dir, CacheModTime: d.ModTime(ctx).UnixNano(), CacheSize: d.Size(), CacheItems: d.Items(), CacheType: "Directory", CacheTs: &t, } return cd } // Fs returns its FS info func (d *Directory) Fs() fs.Info { return d.CacheFs } // String returns 
a human friendly name for this object func (d *Directory) String() string { if d == nil { return "<nil>" } return d.Remote() } // Remote returns the remote path func (d *Directory) Remote() string { return d.CacheFs.cleanRootFromPath(d.abs()) } // abs returns the absolute path to the dir func (d *Directory) abs() string { return cleanPath(path.Join(d.Dir, d.Name)) } // ModTime returns the cached ModTime func (d *Directory) ModTime(ctx context.Context) time.Time { return time.Unix(0, d.CacheModTime) } // Size returns the cached Size func (d *Directory) Size() int64 { return d.CacheSize } // Items returns the cached Items func (d *Directory) Items() int64 { return d.CacheItems } // ID returns the ID of the cached directory if known func (d *Directory) ID() string { if d.Directory == nil { return "" } return d.Directory.ID() } var ( _ fs.Directory = (*Directory)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cache/storage_memory.go
backend/cache/storage_memory.go
//go:build !plan9 && !js package cache import ( "fmt" "strconv" "strings" "time" cache "github.com/patrickmn/go-cache" "github.com/rclone/rclone/fs" ) // Memory is a wrapper of transient storage for a go-cache store type Memory struct { db *cache.Cache } // NewMemory builds this cache storage // defaultExpiration will set the expiry time of chunks in this storage func NewMemory(defaultExpiration time.Duration) *Memory { mem := &Memory{} err := mem.Connect(defaultExpiration) if err != nil { fs.Errorf("cache", "can't open ram connection: %v", err) } return mem } // Connect will create a connection for the storage func (m *Memory) Connect(defaultExpiration time.Duration) error { m.db = cache.New(defaultExpiration, -1) return nil } // HasChunk confirms the existence of a single chunk of an object func (m *Memory) HasChunk(cachedObject *Object, offset int64) bool { key := cachedObject.abs() + "-" + strconv.FormatInt(offset, 10) _, found := m.db.Get(key) return found } // GetChunk will retrieve a single chunk which belongs to a cached object or an error if it doesn't find it func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) { key := cachedObject.abs() + "-" + strconv.FormatInt(offset, 10) var data []byte if x, found := m.db.Get(key); found { data = x.([]byte) return data, nil } return nil, fmt.Errorf("couldn't get cached object data at offset %v", offset) } // AddChunk adds a new chunk of a cached object func (m *Memory) AddChunk(fp string, data []byte, offset int64) error { return m.AddChunkAhead(fp, data, offset, time.Second) } // AddChunkAhead adds a new chunk of a cached object func (m *Memory) AddChunkAhead(fp string, data []byte, offset int64, t time.Duration) error { key := fp + "-" + strconv.FormatInt(offset, 10) m.db.Set(key, data, cache.DefaultExpiration) return nil } // CleanChunksByAge will cleanup on a cron basis func (m *Memory) CleanChunksByAge(chunkAge time.Duration) { m.db.DeleteExpired() } // CleanChunksByNeed will cleanup 
chunks after the FS passes a specific chunk func (m *Memory) CleanChunksByNeed(offset int64) { for key := range m.db.Items() { sepIdx := strings.LastIndex(key, "-") keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64) if err != nil { fs.Errorf("cache", "couldn't parse offset entry %v", key) continue } if keyOffset < offset { m.db.Delete(key) } } } // CleanChunksBySize will cleanup chunks after the total size passes a certain point func (m *Memory) CleanChunksBySize(maxSize int64) { // NOOP }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cache/plex.go
backend/cache/plex.go
//go:build !plan9 && !js package cache import ( "bytes" "crypto/tls" "encoding/json" "fmt" "io" "net/http" "net/url" "strings" "sync" "time" cache "github.com/patrickmn/go-cache" "github.com/rclone/rclone/fs" "golang.org/x/net/websocket" ) const ( // defPlexLoginURL is the default URL for Plex login defPlexLoginURL = "https://plex.tv/users/sign_in.json" defPlexNotificationURL = "%s/:/websockets/notifications?X-Plex-Token=%s" ) // PlaySessionStateNotification is part of the API response of Plex type PlaySessionStateNotification struct { SessionKey string `json:"sessionKey"` GUID string `json:"guid"` Key string `json:"key"` ViewOffset int64 `json:"viewOffset"` State string `json:"state"` TranscodeSession string `json:"transcodeSession"` } // NotificationContainer is part of the API response of Plex type NotificationContainer struct { Type string `json:"type"` Size int `json:"size"` PlaySessionState []PlaySessionStateNotification `json:"PlaySessionStateNotification"` } // PlexNotification is part of the API response of Plex type PlexNotification struct { Container NotificationContainer `json:"NotificationContainer"` } // plexConnector is managing the cache integration with Plex type plexConnector struct { url *url.URL username string password string token string insecure bool f *Fs mu sync.Mutex running bool runningMu sync.Mutex stateCache *cache.Cache saveToken func(string) } // newPlexConnector connects to a Plex server and generates a token func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool, saveToken func(string)) (*plexConnector, error) { u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/")) if err != nil { return nil, err } pc := &plexConnector{ f: f, url: u, username: username, password: password, token: "", insecure: insecure, stateCache: cache.New(time.Hour, time.Minute), saveToken: saveToken, } return pc, nil } // newPlexConnector connects to a Plex server and generates a token func newPlexConnectorWithToken(f *Fs, 
plexURL, token string, insecure bool) (*plexConnector, error) { u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/")) if err != nil { return nil, err } pc := &plexConnector{ f: f, url: u, token: token, insecure: insecure, stateCache: cache.New(time.Hour, time.Minute), } pc.listenWebsocket() return pc, nil } func (p *plexConnector) closeWebsocket() { p.runningMu.Lock() defer p.runningMu.Unlock() fs.Infof("plex", "stopped Plex watcher") p.running = false } func (p *plexConnector) websocketDial() (*websocket.Conn, error) { u := strings.TrimRight(strings.Replace(strings.Replace( p.url.String(), "http://", "ws://", 1), "https://", "wss://", 1), "/") url := fmt.Sprintf(defPlexNotificationURL, u, p.token) config, err := websocket.NewConfig(url, "http://localhost") if err != nil { return nil, err } if p.insecure { config.TlsConfig = &tls.Config{InsecureSkipVerify: true} } return websocket.DialConfig(config) } func (p *plexConnector) listenWebsocket() { p.runningMu.Lock() defer p.runningMu.Unlock() conn, err := p.websocketDial() if err != nil { fs.Errorf("plex", "%v", err) return } p.running = true go func() { for { if !p.isConnected() { break } notif := &PlexNotification{} err := websocket.JSON.Receive(conn, notif) if err != nil { fs.Debugf("plex", "%v", err) p.closeWebsocket() break } // we're only interested in play events if notif.Container.Type == "playing" { // we loop through each of them for _, v := range notif.Container.PlaySessionState { // event type of playing if v.State == "playing" { // if it's not cached get the details and cache them if _, found := p.stateCache.Get(v.Key); !found { req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", p.url.String(), v.Key), nil) if err != nil { continue } p.fillDefaultHeaders(req) resp, err := http.DefaultClient.Do(req) if err != nil { continue } var data []byte data, err = io.ReadAll(resp.Body) if err != nil { continue } p.stateCache.Set(v.Key, data, cache.DefaultExpiration) } } else if v.State == "stopped" { 
p.stateCache.Delete(v.Key) } } } } }() } // fillDefaultHeaders will add common headers to requests func (p *plexConnector) fillDefaultHeaders(req *http.Request) { req.Header.Add("X-Plex-Client-Identifier", fmt.Sprintf("rclone (%v)", p.f.String())) req.Header.Add("X-Plex-Product", fmt.Sprintf("rclone (%v)", p.f.Name())) req.Header.Add("X-Plex-Version", fs.Version) req.Header.Add("Accept", "application/json") if p.token != "" { req.Header.Add("X-Plex-Token", p.token) } } // authenticate will generate a token based on a username/password func (p *plexConnector) authenticate() error { p.mu.Lock() defer p.mu.Unlock() form := url.Values{} form.Set("user[login]", p.username) form.Add("user[password]", p.password) req, err := http.NewRequest("POST", defPlexLoginURL, strings.NewReader(form.Encode())) if err != nil { return err } p.fillDefaultHeaders(req) resp, err := http.DefaultClient.Do(req) if err != nil { return err } var data map[string]any err = json.NewDecoder(resp.Body).Decode(&data) if err != nil { return fmt.Errorf("failed to obtain token: %w", err) } tokenGen, ok := get(data, "user", "authToken") if !ok { return fmt.Errorf("failed to obtain token: %v", data) } token, ok := tokenGen.(string) if !ok { return fmt.Errorf("failed to obtain token: %v", data) } p.token = token if p.token != "" { if p.saveToken != nil { p.saveToken(p.token) } fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String()) } p.listenWebsocket() return nil } // isConnected checks if this rclone is authenticated to Plex func (p *plexConnector) isConnected() bool { p.runningMu.Lock() defer p.runningMu.Unlock() return p.running } // isConfigured checks if this rclone is configured to use a Plex server func (p *plexConnector) isConfigured() bool { return p.url != nil } func (p *plexConnector) isPlaying(co *Object) bool { var err error if !p.isConnected() { p.listenWebsocket() } remote := co.Remote() if cr, yes := p.f.isWrappedByCrypt(); yes { remote, err = cr.DecryptFileName(co.Remote()) 
if err != nil { fs.Debugf("plex", "can not decrypt wrapped file: %v", err) return false } } isPlaying := false for _, v := range p.stateCache.Items() { if bytes.Contains(v.Object.([]byte), []byte(remote)) { isPlaying = true break } } return isPlaying } // adapted from: https://stackoverflow.com/a/28878037 (credit) func get(m any, path ...any) (any, bool) { for _, p := range path { switch idx := p.(type) { case string: if mm, ok := m.(map[string]any); ok { if val, found := mm[idx]; found { m = val continue } } return nil, false case int: if mm, ok := m.([]any); ok { if len(mm) > idx { m = mm[idx] continue } } return nil, false } } return m, true }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cache/handle.go
backend/cache/handle.go
//go:build !plan9 && !js package cache import ( "context" "errors" "fmt" "io" "path" "runtime" "strings" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/operations" ) var uploaderMap = make(map[string]*backgroundWriter) var uploaderMapMx sync.Mutex // initBackgroundUploader returns a single instance func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) { // write lock to create one uploaderMapMx.Lock() defer uploaderMapMx.Unlock() if b, ok := uploaderMap[fs.String()]; ok { // if it was already started we close it so that it can be started again if b.running { b.close() } else { return b, nil } } bb := newBackgroundWriter(fs) uploaderMap[fs.String()] = bb return uploaderMap[fs.String()], nil } // Handle is managing the read/write/seek operations on an open handle type Handle struct { ctx context.Context cachedObject *Object cfs *Fs memory *Memory preloadQueue chan int64 preloadOffset int64 offset int64 seenOffsets map[int64]bool mu sync.Mutex workersWg sync.WaitGroup confirmReading chan bool workers int maxWorkerID int UseMemory bool closed bool reading bool } // NewObjectHandle returns a new Handle for an existing Object func NewObjectHandle(ctx context.Context, o *Object, cfs *Fs) *Handle { r := &Handle{ ctx: ctx, cachedObject: o, cfs: cfs, offset: 0, preloadOffset: -1, // -1 to trigger the first preload UseMemory: !cfs.opt.ChunkNoMemory, reading: false, } r.seenOffsets = make(map[int64]bool) r.memory = NewMemory(-1) // create a larger buffer to queue up requests r.preloadQueue = make(chan int64, r.cfs.opt.TotalWorkers*10) r.confirmReading = make(chan bool) r.startReadWorkers() return r } // cacheFs is a convenience method to get the parent cache FS of the object's manager func (r *Handle) cacheFs() *Fs { return r.cfs } // storage is a convenience method to get the persistent storage of the object's manager func (r *Handle) storage() *Persistent { return r.cacheFs().cache } // String representation of this reader func (r *Handle) 
String() string { return r.cachedObject.abs() } // startReadWorkers will start the worker pool func (r *Handle) startReadWorkers() { if r.workers > 0 { return } totalWorkers := r.cacheFs().opt.TotalWorkers if r.cacheFs().plexConnector.isConfigured() { if !r.cacheFs().plexConnector.isConnected() { err := r.cacheFs().plexConnector.authenticate() if err != nil { fs.Errorf(r, "failed to authenticate to Plex: %v", err) } } if r.cacheFs().plexConnector.isConnected() { totalWorkers = 1 } } r.scaleWorkers(totalWorkers) } // scaleWorkers will increase the worker pool count by the provided amount func (r *Handle) scaleWorkers(desired int) { current := r.workers if current == desired { return } if current > desired { // scale in gracefully for r.workers > desired { r.preloadQueue <- -1 r.workers-- } } else { // scale out for r.workers < desired { w := &worker{ r: r, id: r.maxWorkerID, } r.workersWg.Add(1) r.workers++ r.maxWorkerID++ go w.run() } } // ignore first scale out from 0 if current != 0 { fs.Debugf(r, "scale workers to %v", desired) } } func (r *Handle) confirmExternalReading() { // if we have a max value of workers // then we skip this step if r.workers > 1 || !r.cacheFs().plexConnector.isConfigured() { return } if !r.cacheFs().plexConnector.isPlaying(r.cachedObject) { return } fs.Infof(r, "confirmed reading by external reader") r.scaleWorkers(r.cacheFs().opt.TotalWorkers) } // queueOffset will send an offset to the workers if it's different from the last one func (r *Handle) queueOffset(offset int64) { if offset != r.preloadOffset { // clean past in-memory chunks if r.UseMemory { go r.memory.CleanChunksByNeed(offset) } r.confirmExternalReading() r.preloadOffset = offset // clear the past seen chunks // they will remain in our persistent storage but will be removed from transient // so they need to be picked up by a worker for k := range r.seenOffsets { if k < offset { r.seenOffsets[k] = false } } for i := range r.workers { o := r.preloadOffset + 
int64(r.cacheFs().opt.ChunkSize)*int64(i) if o < 0 || o >= r.cachedObject.Size() { continue } if v, ok := r.seenOffsets[o]; ok && v { continue } r.seenOffsets[o] = true r.preloadQueue <- o } } } // getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it // it can be from transient or persistent cache // it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer func (r *Handle) getChunk(chunkStart int64) ([]byte, error) { var data []byte var err error // we calculate the modulus of the requested offset with the size of a chunk offset := chunkStart % int64(r.cacheFs().opt.ChunkSize) // we align the start offset of the first chunk to a likely chunk in the storage chunkStart -= offset r.queueOffset(chunkStart) found := false if r.UseMemory { data, err = r.memory.GetChunk(r.cachedObject, chunkStart) if err == nil { found = true } } if !found { // we're gonna give the workers a chance to pickup the chunk // and retry a couple of times for i := range r.cacheFs().opt.ReadRetries * 8 { data, err = r.storage().GetChunk(r.cachedObject, chunkStart) if err == nil { found = true break } fs.Debugf(r, "%v: chunk retry storage: %v", chunkStart, i) time.Sleep(time.Millisecond * 500) } } // not found in ram or // the worker didn't managed to download the chunk in time so we abort and close the stream if err != nil || len(data) == 0 || !found { if r.workers == 0 { fs.Errorf(r, "out of workers") return nil, io.ErrUnexpectedEOF } return nil, fmt.Errorf("chunk not found %v", chunkStart) } // first chunk will be aligned with the start if offset > 0 { if offset > int64(len(data)) { fs.Errorf(r, "unexpected conditions during reading. 
current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v", r.offset, chunkStart, len(data), offset, r.cacheFs().opt.ChunkSize, r.cachedObject.Size()) return nil, io.ErrUnexpectedEOF } data = data[int(offset):] } return data, nil } // Read a chunk from storage or len(p) func (r *Handle) Read(p []byte) (n int, err error) { r.mu.Lock() defer r.mu.Unlock() var buf []byte // first reading if !r.reading { r.reading = true } // reached EOF if r.offset >= r.cachedObject.Size() { return 0, io.EOF } currentOffset := r.offset buf, err = r.getChunk(currentOffset) if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { fs.Errorf(r, "(%v/%v) error (%v) response", currentOffset, r.cachedObject.Size(), err) } if len(buf) == 0 && err != io.ErrUnexpectedEOF { return 0, io.EOF } readSize := copy(p, buf) newOffset := currentOffset + int64(readSize) r.offset = newOffset return readSize, err } // Close will tell the workers to stop func (r *Handle) Close() error { r.mu.Lock() defer r.mu.Unlock() if r.closed { return errors.New("file already closed") } close(r.preloadQueue) r.closed = true // wait for workers to complete their jobs before returning r.workersWg.Wait() r.memory.db.Flush() fs.Debugf(r, "cache reader closed %v", r.offset) return nil } // Seek will move the current offset based on whence and instruct the workers to move there too func (r *Handle) Seek(offset int64, whence int) (int64, error) { r.mu.Lock() defer r.mu.Unlock() var err error switch whence { case io.SeekStart: fs.Debugf(r, "moving offset set from %v to %v", r.offset, offset) r.offset = offset case io.SeekCurrent: fs.Debugf(r, "moving offset cur from %v to %v", r.offset, r.offset+offset) r.offset += offset case io.SeekEnd: fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset) r.offset = r.cachedObject.Size() + offset default: err = fmt.Errorf("cache: unimplemented seek whence %v", whence) 
} chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize)) if chunkStart >= int64(r.cacheFs().opt.ChunkSize) { chunkStart -= int64(r.cacheFs().opt.ChunkSize) } r.queueOffset(chunkStart) return r.offset, err } type worker struct { r *Handle rc io.ReadCloser id int } // String is a representation of this worker func (w *worker) String() string { return fmt.Sprintf("worker-%v <%v>", w.id, w.r.cachedObject.Name) } // reader will return a reader depending on the capabilities of the source reader: // - if it supports seeking it will seek to the desired offset and return the same reader // - if it doesn't support seeking it will close a possible existing one and open at the desired offset // - if there's no reader associated with this worker, it will create one func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error) { var err error r := w.rc if w.rc == nil { r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) { return w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1}) }) if err != nil { return nil, err } return r, nil } if !closeOpen { if do, ok := r.(fs.RangeSeeker); ok { _, err = do.RangeSeek(w.r.ctx, offset, io.SeekStart, end-offset) return r, err } else if do, ok := r.(io.Seeker); ok { _, err = do.Seek(offset, io.SeekStart) return r, err } } _ = w.rc.Close() return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) { r, err = w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1}) if err != nil { return nil, err } return r, nil }) } // run is the main loop for the worker which receives offsets to preload func (w *worker) run() { var err error var data []byte defer func() { if w.rc != nil { _ = w.rc.Close() } w.r.workersWg.Done() }() for { chunkStart, open := <-w.r.preloadQueue if chunkStart < 0 || !open { break } // skip if it exists if w.r.UseMemory { if w.r.memory.HasChunk(w.r.cachedObject, chunkStart) { continue } // add it in ram if it's 
in the persistent storage data, err = w.r.storage().GetChunk(w.r.cachedObject, chunkStart) if err == nil { err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart) if err != nil { fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err) } else { continue } } } else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) { continue } chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize) // TODO: Remove this comment if it proves to be reliable for #1896 //if chunkEnd > w.r.cachedObject.Size() { // chunkEnd = w.r.cachedObject.Size() //} w.download(chunkStart, chunkEnd, 0) } } func (w *worker) download(chunkStart, chunkEnd int64, retry int) { var err error var data []byte // stop retries if retry >= w.r.cacheFs().opt.ReadRetries { return } // back-off between retries if retry > 0 { time.Sleep(time.Second * time.Duration(retry)) } closeOpen := false if retry > 0 { closeOpen = true } w.rc, err = w.reader(chunkStart, chunkEnd, closeOpen) // we seem to be getting only errors so we abort if err != nil { fs.Errorf(w, "object open failed %v: %v", chunkStart, err) err = w.r.cachedObject.refreshFromSource(w.r.ctx, true) if err != nil { fs.Errorf(w, "%v", err) } w.download(chunkStart, chunkEnd, retry+1) return } data = make([]byte, chunkEnd-chunkStart) var sourceRead int sourceRead, err = io.ReadFull(w.rc, data) if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err) err = w.r.cachedObject.refreshFromSource(w.r.ctx, true) if err != nil { fs.Errorf(w, "%v", err) } w.download(chunkStart, chunkEnd, retry+1) return } data = data[:sourceRead] // reslice to remove extra garbage if err == io.ErrUnexpectedEOF { fs.Debugf(w, "partial downloaded chunk %v", fs.SizeSuffix(chunkStart)) } else { fs.Debugf(w, "downloaded chunk %v", chunkStart) } if w.r.UseMemory { err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart) if err != nil { fs.Errorf(w, "failed caching chunk in ram %v: %v", 
chunkStart, err) } } err = w.r.storage().AddChunk(w.r.cachedObject.abs(), data, chunkStart) if err != nil { fs.Errorf(w, "failed caching chunk in storage %v: %v", chunkStart, err) } } const ( // BackgroundUploadStarted is a state for a temp file that has started upload BackgroundUploadStarted = iota // BackgroundUploadCompleted is a state for a temp file that has completed upload BackgroundUploadCompleted // BackgroundUploadError is a state for a temp file that has an error upload BackgroundUploadError ) // BackgroundUploadState is an entity that maps to an existing file which is stored on the temp fs type BackgroundUploadState struct { Remote string Status int Error error } type backgroundWriter struct { fs *Fs stateCh chan int running bool notifyCh chan BackgroundUploadState mu sync.Mutex } func newBackgroundWriter(f *Fs) *backgroundWriter { b := &backgroundWriter{ fs: f, stateCh: make(chan int), notifyCh: make(chan BackgroundUploadState), } return b } func (b *backgroundWriter) close() { b.stateCh <- 2 b.mu.Lock() defer b.mu.Unlock() b.running = false } func (b *backgroundWriter) pause() { b.stateCh <- 1 } func (b *backgroundWriter) play() { b.stateCh <- 0 } func (b *backgroundWriter) isRunning() bool { b.mu.Lock() defer b.mu.Unlock() return b.running } func (b *backgroundWriter) notify(remote string, status int, err error) { state := BackgroundUploadState{ Remote: remote, Status: status, Error: err, } select { case b.notifyCh <- state: fs.Debugf(remote, "notified background upload state: %v", state.Status) default: } } func (b *backgroundWriter) run() { state := 0 for { b.mu.Lock() b.running = true b.mu.Unlock() select { case s := <-b.stateCh: state = s default: // } switch state { case 1: runtime.Gosched() time.Sleep(time.Millisecond * 500) continue case 2: return } absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), time.Duration(b.fs.opt.TempWaitTime)) if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) { time.Sleep(time.Second) continue } 
remote := b.fs.cleanRootFromPath(absPath) b.notify(remote, BackgroundUploadStarted, nil) fs.Infof(remote, "background upload: started upload") err = operations.MoveFile(context.TODO(), b.fs.UnWrap(), b.fs.tempFs, remote, remote) if err != nil { b.notify(remote, BackgroundUploadError, err) _ = b.fs.cache.rollbackPendingUpload(absPath) fs.Errorf(remote, "background upload: %v", err) continue } // clean empty dirs up to root thisDir := cleanPath(path.Dir(remote)) for thisDir != "" { thisList, err := b.fs.tempFs.List(context.TODO(), thisDir) if err != nil { break } if len(thisList) > 0 { break } err = b.fs.tempFs.Rmdir(context.TODO(), thisDir) fs.Debugf(thisDir, "cleaned from temp path") if err != nil { break } thisDir = cleanPath(path.Dir(thisDir)) } fs.Infof(remote, "background upload: uploaded entry") err = b.fs.cache.removePendingUpload(absPath) if err != nil && !strings.Contains(err.Error(), "pending upload not found") { fs.Errorf(remote, "background upload: %v", err) } parentCd := NewDirectory(b.fs, cleanPath(path.Dir(remote))) err = b.fs.cache.ExpireDir(parentCd) if err != nil { fs.Errorf(parentCd, "background upload: cache expire error: %v", err) } b.fs.notifyChangeUpstream(remote, fs.EntryObject) fs.Infof(remote, "finished background upload") b.notify(remote, BackgroundUploadCompleted, nil) } } // Check the interfaces are satisfied var ( _ io.ReadCloser = (*Handle)(nil) _ io.Seeker = (*Handle)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cache/cache_unsupported.go
backend/cache/cache_unsupported.go
// Build for cache for unsupported platforms to stop go complaining // about "no buildable Go source files " //go:build plan9 || js // Package cache implements a virtual provider to cache existing remotes. package cache
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cache/utils_test.go
backend/cache/utils_test.go
//go:build !plan9 && !js package cache import bolt "go.etcd.io/bbolt" // PurgeTempUploads will remove all the pending uploads from the queue func (b *Persistent) PurgeTempUploads() { b.tempQueueMux.Lock() defer b.tempQueueMux.Unlock() _ = b.db.Update(func(tx *bolt.Tx) error { _ = tx.DeleteBucket([]byte(tempBucket)) _, _ = tx.CreateBucketIfNotExists([]byte(tempBucket)) return nil }) } // SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already) func (b *Persistent) SetPendingUploadToStarted(remote string) error { return b.updatePendingUpload(remote, func(item *tempUploadInfo) error { item.Started = true return nil }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cache/object.go
backend/cache/object.go
//go:build !plan9 && !js package cache import ( "context" "fmt" "io" "path" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/readers" ) const ( objectInCache = "Object" objectPendingUpload = "TempObject" ) // Object is a generic file like object that stores basic information about it type Object struct { fs.Object `json:"-"` ParentFs fs.Fs `json:"-"` // parent fs CacheFs *Fs `json:"-"` // cache fs Name string `json:"name"` // name of the directory Dir string `json:"dir"` // abs path of the object CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown CacheStorable bool `json:"storable"` // says whether this object can be stored CacheType string `json:"cacheType"` CacheTs time.Time `json:"cacheTs"` cacheHashesMu sync.Mutex CacheHashes map[hash.Type]string // all supported hashes cached refreshMutex sync.Mutex } // NewObject builds one from a generic fs.Object func NewObject(f *Fs, remote string) *Object { fullRemote := path.Join(f.Root(), remote) dir, name := path.Split(fullRemote) cacheType := objectInCache parentFs := f.UnWrap() if f.opt.TempWritePath != "" { _, err := f.cache.SearchPendingUpload(fullRemote) if err == nil { // queued for upload cacheType = objectPendingUpload parentFs = f.tempFs fs.Debugf(fullRemote, "pending upload found") } } co := &Object{ ParentFs: parentFs, CacheFs: f, Name: cleanPath(name), Dir: cleanPath(dir), CacheModTime: time.Now().UnixNano(), CacheSize: 0, CacheStorable: false, CacheType: cacheType, CacheTs: time.Now(), } return co } // ObjectFromOriginal builds one from a generic fs.Object func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object { var co *Object fullRemote := cleanPath(path.Join(f.Root(), o.Remote())) dir, name := path.Split(fullRemote) cacheType := objectInCache parentFs := f.UnWrap() if f.opt.TempWritePath != "" { _, err := 
f.cache.SearchPendingUpload(fullRemote) if err == nil { // queued for upload cacheType = objectPendingUpload parentFs = f.tempFs fs.Debugf(fullRemote, "pending upload found") } } co = &Object{ ParentFs: parentFs, CacheFs: f, Name: cleanPath(name), Dir: cleanPath(dir), CacheType: cacheType, CacheTs: time.Now(), } co.updateData(ctx, o) return co } func (o *Object) updateData(ctx context.Context, source fs.Object) { o.Object = source o.CacheModTime = source.ModTime(ctx).UnixNano() o.CacheSize = source.Size() o.CacheStorable = source.Storable() o.CacheTs = time.Now() o.cacheHashesMu.Lock() o.CacheHashes = make(map[hash.Type]string) o.cacheHashesMu.Unlock() } // Fs returns its FS info func (o *Object) Fs() fs.Info { return o.CacheFs } // String returns a human friendly name for this object func (o *Object) String() string { if o == nil { return "<nil>" } return o.Remote() } // Remote returns the remote path func (o *Object) Remote() string { p := path.Join(o.Dir, o.Name) return o.CacheFs.cleanRootFromPath(p) } // abs returns the absolute path to the object func (o *Object) abs() string { return path.Join(o.Dir, o.Name) } // ModTime returns the cached ModTime func (o *Object) ModTime(ctx context.Context) time.Time { _ = o.refresh(ctx) return time.Unix(0, o.CacheModTime) } // Size returns the cached Size func (o *Object) Size() int64 { _ = o.refresh(context.TODO()) return o.CacheSize } // Storable returns the cached Storable func (o *Object) Storable() bool { _ = o.refresh(context.TODO()) return o.CacheStorable } // refresh will check if the object info is expired and request the info from source if it is // all these conditions must be true to ignore a refresh // 1. cache ts didn't expire yet // 2. 
is not pending a notification from the wrapped fs func (o *Object) refresh(ctx context.Context) error { isNotified := o.CacheFs.isNotifiedRemote(o.Remote()) isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge))) if !isExpired && !isNotified { return nil } return o.refreshFromSource(ctx, true) } // refreshFromSource requests the original FS for the object in case it comes from a cached entry func (o *Object) refreshFromSource(ctx context.Context, force bool) error { o.refreshMutex.Lock() defer o.refreshMutex.Unlock() var err error var liveObject fs.Object if o.Object != nil && !force { return nil } if o.isTempFile() { liveObject, err = o.ParentFs.NewObject(ctx, o.Remote()) if err != nil { err = fmt.Errorf("in parent fs %v: %w", o.ParentFs, err) } } else { liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote()) if err != nil { err = fmt.Errorf("in cache fs %v: %w", o.CacheFs.Fs, err) } } if err != nil { fs.Errorf(o, "error refreshing object in : %v", err) return err } o.updateData(ctx, liveObject) o.persist() return nil } // SetModTime sets the ModTime of this object func (o *Object) SetModTime(ctx context.Context, t time.Time) error { if err := o.refreshFromSource(ctx, false); err != nil { return err } err := o.Object.SetModTime(ctx, t) if err != nil { return err } o.CacheModTime = t.UnixNano() o.persist() fs.Debugf(o, "updated ModTime: %v", t) return nil } // Open is used to request a specific part of the file using fs.RangeOption func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { var err error if o.Object == nil { err = o.refreshFromSource(ctx, true) } else { err = o.refresh(ctx) } if err != nil { return nil, err } cacheReader := NewObjectHandle(ctx, o, o.CacheFs) var offset, limit int64 = 0, -1 for _, option := range options { switch x := option.(type) { case *fs.SeekOption: offset = x.Offset case *fs.RangeOption: offset, limit = x.Decode(o.Size()) } _, err = cacheReader.Seek(offset, 
io.SeekStart) if err != nil { return nil, err } } return readers.NewLimitedReadCloser(cacheReader, limit), nil } // Update will change the object data func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { if err := o.refreshFromSource(ctx, false); err != nil { return err } // pause background uploads if active if o.CacheFs.opt.TempWritePath != "" { o.CacheFs.backgroundRunner.pause() defer o.CacheFs.backgroundRunner.play() // don't allow started uploads if o.isTempFile() && o.tempFileStartedUpload() { return fmt.Errorf("%v is currently uploading, can't update", o) } } fs.Debugf(o, "updating object contents with size %v", src.Size()) // FIXME use reliable upload err := o.Object.Update(ctx, in, src, options...) if err != nil { fs.Errorf(o, "error updating source: %v", err) return err } // deleting cached chunks and info to be replaced with new ones _ = o.CacheFs.cache.RemoveObject(o.abs()) // advertise to ChangeNotify if wrapped doesn't do that o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject) o.CacheModTime = src.ModTime(ctx).UnixNano() o.CacheSize = src.Size() o.cacheHashesMu.Lock() o.CacheHashes = make(map[hash.Type]string) o.cacheHashesMu.Unlock() o.CacheTs = time.Now() o.persist() return nil } // Remove deletes the object from both the cache and the source func (o *Object) Remove(ctx context.Context) error { if err := o.refreshFromSource(ctx, false); err != nil { return err } // pause background uploads if active if o.CacheFs.opt.TempWritePath != "" { o.CacheFs.backgroundRunner.pause() defer o.CacheFs.backgroundRunner.play() // don't allow started uploads if o.isTempFile() && o.tempFileStartedUpload() { return fmt.Errorf("%v is currently uploading, can't delete", o) } } err := o.Object.Remove(ctx) if err != nil { return err } fs.Debugf(o, "removing object") _ = o.CacheFs.cache.RemoveObject(o.abs()) _ = o.CacheFs.cache.removePendingUpload(o.abs()) parentCd := NewDirectory(o.CacheFs, 
cleanPath(path.Dir(o.Remote()))) _ = o.CacheFs.cache.ExpireDir(parentCd) // advertise to ChangeNotify if wrapped doesn't do that o.CacheFs.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory) return nil } // Hash requests a hash of the object and stores in the cache // since it might or might not be called, this is lazy loaded func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { _ = o.refresh(ctx) o.cacheHashesMu.Lock() if o.CacheHashes == nil { o.CacheHashes = make(map[hash.Type]string) } cachedHash, found := o.CacheHashes[ht] o.cacheHashesMu.Unlock() if found { return cachedHash, nil } if err := o.refreshFromSource(ctx, false); err != nil { return "", err } liveHash, err := o.Object.Hash(ctx, ht) if err != nil { return "", err } o.cacheHashesMu.Lock() o.CacheHashes[ht] = liveHash o.cacheHashesMu.Unlock() o.persist() fs.Debugf(o, "object hash cached: %v", liveHash) return liveHash, nil } // persist adds this object to the persistent cache func (o *Object) persist() *Object { err := o.CacheFs.cache.AddObject(o) if err != nil { fs.Errorf(o, "failed to cache object: %v", err) } return o } func (o *Object) isTempFile() bool { _, err := o.CacheFs.cache.SearchPendingUpload(o.abs()) if err != nil { o.CacheType = objectInCache return false } o.CacheType = objectPendingUpload return true } func (o *Object) tempFileStartedUpload() bool { started, err := o.CacheFs.cache.SearchPendingUpload(o.abs()) if err != nil { return false } return started } // UnWrap returns the Object that this Object is wrapping or // nil if it isn't wrapping anything func (o *Object) UnWrap() fs.Object { return o.Object } var ( _ fs.Object = (*Object)(nil) _ fs.ObjectUnWrapper = (*Object)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cache/cache_internal_test.go
backend/cache/cache_internal_test.go
//go:build !plan9 && !js && !race package cache_test import ( "bytes" "context" "encoding/base64" "errors" goflag "flag" "fmt" "io" "math/rand" "os" "path" "path/filepath" "runtime" "runtime/debug" "strings" "testing" "time" "github.com/rclone/rclone/backend/cache" "github.com/rclone/rclone/backend/crypt" _ "github.com/rclone/rclone/backend/drive" "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/testy" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/vfs/vfscommon" "github.com/stretchr/testify/require" ) const ( // these 2 passwords are test random cryptPassword1 = "3XcvMMdsV3d-HGAReTMdNH-5FcX5q32_lUeA" // oGJdUbQc7s8 cryptPassword2 = "NlgTBEIe-qibA7v-FoMfuX6Cw8KlLai_aMvV" // mv4mZW572HM cryptedTextBase64 = "UkNMT05FAAC320i2xIee0BiNyknSPBn+Qcw3q9FhIFp3tvq6qlqvbsno3PnxmEFeJG3jDBnR/wku2gHWeQ==" // one content cryptedText2Base64 = "UkNMT05FAAATcQkVsgjBh8KafCKcr0wdTa1fMmV0U8hsCLGFoqcvxKVmvv7wx3Hf5EXxFcki2FFV4sdpmSrb9Q==" // updated content cryptedText3Base64 = "UkNMT05FAAB/f7YtYKbPfmk9+OX/ffN3qG3OEdWT+z74kxCX9V/YZwJ4X2DN3HOnUC3gKQ4Gcoud5UtNvQ==" // test content letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" ) var ( remoteName string uploadDir string runInstance *run errNotSupported = errors.New("not supported") decryptedToEncryptedRemotes = map[string]string{ "one": "lm4u7jjt3c85bf56vjqgeenuno", "second": "qvt1ochrkcfbptp5mu9ugb2l14", "test": "jn4tegjtpqro30t3o11thb4b5s", "test2": "qakvqnh8ttei89e0gc76crpql4", "data.bin": "0q2847tfko6mhj3dag3r809qbc", "ticw/data.bin": "5mv97b0ule6pht33srae5pice8/0q2847tfko6mhj3dag3r809qbc", "tiuufo/test/one": "vi6u1olqhirqv14cd8qlej1mgo/jn4tegjtpqro30t3o11thb4b5s/lm4u7jjt3c85bf56vjqgeenuno", "tiuufo/test/second": 
"vi6u1olqhirqv14cd8qlej1mgo/jn4tegjtpqro30t3o11thb4b5s/qvt1ochrkcfbptp5mu9ugb2l14", "tiutfo/test/one": "legd371aa8ol36tjfklt347qnc/jn4tegjtpqro30t3o11thb4b5s/lm4u7jjt3c85bf56vjqgeenuno", "tiutfo/second/one": "legd371aa8ol36tjfklt347qnc/qvt1ochrkcfbptp5mu9ugb2l14/lm4u7jjt3c85bf56vjqgeenuno", "second/one": "qvt1ochrkcfbptp5mu9ugb2l14/lm4u7jjt3c85bf56vjqgeenuno", "test/one": "jn4tegjtpqro30t3o11thb4b5s/lm4u7jjt3c85bf56vjqgeenuno", "test/second": "jn4tegjtpqro30t3o11thb4b5s/qvt1ochrkcfbptp5mu9ugb2l14", "one/test": "lm4u7jjt3c85bf56vjqgeenuno/jn4tegjtpqro30t3o11thb4b5s", "one/test/data.bin": "lm4u7jjt3c85bf56vjqgeenuno/jn4tegjtpqro30t3o11thb4b5s/0q2847tfko6mhj3dag3r809qbc", "second/test/data.bin": "qvt1ochrkcfbptp5mu9ugb2l14/jn4tegjtpqro30t3o11thb4b5s/0q2847tfko6mhj3dag3r809qbc", "test/third": "jn4tegjtpqro30t3o11thb4b5s/2nd7fjiop5h3ihfj1vl953aa5g", "test/0.bin": "jn4tegjtpqro30t3o11thb4b5s/e6frddt058b6kvbpmlstlndmtk", "test/1.bin": "jn4tegjtpqro30t3o11thb4b5s/kck472nt1k7qbmob0mt1p1crgc", "test/2.bin": "jn4tegjtpqro30t3o11thb4b5s/744oe9ven2rmak4u27if51qk24", "test/3.bin": "jn4tegjtpqro30t3o11thb4b5s/2bjd8kef0u5lmsu6qhqll34bcs", "test/4.bin": "jn4tegjtpqro30t3o11thb4b5s/cvjs73iv0a82v0c7r67avllh7s", "test/5.bin": "jn4tegjtpqro30t3o11thb4b5s/0plkdo790b6bnmt33qsdqmhv9c", "test/6.bin": "jn4tegjtpqro30t3o11thb4b5s/s5r633srnjtbh83893jovjt5d0", "test/7.bin": "jn4tegjtpqro30t3o11thb4b5s/6rq45tr9bjsammku622flmqsu4", "test/8.bin": "jn4tegjtpqro30t3o11thb4b5s/37bc6tcl3e31qb8cadvjb749vk", "test/9.bin": "jn4tegjtpqro30t3o11thb4b5s/t4pr35hnls32789o8fk0chk1ec", } ) func init() { goflag.StringVar(&remoteName, "remote-internal", "TestInternalCache", "Remote to test with, defaults to local filesystem") goflag.StringVar(&uploadDir, "upload-dir-internal", "", "") } // TestMain drives the tests func TestMain(m *testing.M) { goflag.Parse() var rc int fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName) runInstance = newRun() rc = m.Run() os.Exit(rc) } func 
TestInternalListRootAndInnerRemotes(t *testing.T) { id := fmt.Sprintf("tilrair%v", time.Now().Unix()) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) // Instantiate inner fs innerFolder := "inner" runInstance.mkdir(t, rootFs, innerFolder) rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil) runInstance.writeObjectString(t, rootFs2, "one", "content") listRoot, err := runInstance.list(t, rootFs, "") require.NoError(t, err) listRootInner, err := runInstance.list(t, rootFs, innerFolder) require.NoError(t, err) listInner, err := rootFs2.List(context.Background(), "") require.NoError(t, err) require.Len(t, listRoot, 1) require.Len(t, listRootInner, 1) require.Len(t, listInner, 1) } /* TODO: is this testing something? func TestInternalVfsCache(t *testing.T) { vfscommon.Opt.DirCacheTime = time.Second * 30 testSize := int64(524288000) vfscommon.Opt.CacheMode = vfs.CacheModeWrites id := "tiuufo" rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"}) defer runInstance.cleanupFs(t, rootFs, boltDb) err := rootFs.Mkdir(context.Background(), "test") require.NoError(t, err) runInstance.writeObjectString(t, rootFs, "test/second", "content") _, err = rootFs.List(context.Background(), "test") require.NoError(t, err) testReader := runInstance.randomReader(t, testSize) writeCh := make(chan interface{}) //write2Ch := make(chan interface{}) readCh := make(chan interface{}) cacheCh := make(chan interface{}) // write the main file go func() { defer func() { writeCh <- true }() log.Printf("========== started writing file 'test/one'") runInstance.writeRemoteReader(t, rootFs, "test/one", testReader) log.Printf("========== done writing file 'test/one'") }() // routine to check which cache has what, autostarts go func() { for { select { case <-cacheCh: log.Printf("========== finished checking caches") return default: } li2 := [2]string{path.Join("test", 
"one"), path.Join("test", "second")} for _, r := range li2 { var err error ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r)))) if err != nil || len(ci) == 0 { log.Printf("========== '%v' not in cache", r) } else { log.Printf("========== '%v' IN CACHE", r) } _, err = os.Stat(path.Join(runInstance.vfsCachePath, id, r)) if err != nil { log.Printf("========== '%v' not in vfs", r) } else { log.Printf("========== '%v' IN VFS", r) } } time.Sleep(time.Second * 10) } }() // routine to list, autostarts go func() { for { select { case <-readCh: log.Printf("========== finished checking listings and readings") return default: } li, err := runInstance.list(t, rootFs, "test") if err != nil { log.Printf("========== error listing 'test' folder: %v", err) } else { log.Printf("========== list 'test' folder count: %v", len(li)) } time.Sleep(time.Second * 10) } }() // wait for main file to be written <-writeCh log.Printf("========== waiting for VFS to expire") time.Sleep(time.Second * 120) // try a final read li2 := [2]string{"test/one", "test/second"} for _, r := range li2 { _, err := runInstance.readDataFromRemote(t, rootFs, r, int64(0), int64(2), false) if err != nil { log.Printf("========== error reading '%v': %v", r, err) } else { log.Printf("========== read '%v'", r) } } // close the cache and list checkers cacheCh <- true readCh <- true } */ func TestInternalObjWrapFsFound(t *testing.T) { id := fmt.Sprintf("tiowff%v", time.Now().Unix()) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) cfs, err := runInstance.getCacheFs(rootFs) require.NoError(t, err) wrappedFs := cfs.UnWrap() var testData []byte if runInstance.rootIsCrypt { testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64) require.NoError(t, err) } else { testData = []byte("test content") } runInstance.writeObjectBytes(t, wrappedFs, runInstance.encryptRemoteIfNeeded(t, "test"), testData) listRoot, err := runInstance.list(t, 
rootFs, "") require.NoError(t, err) require.Len(t, listRoot, 1) cachedData, err := runInstance.readDataFromRemote(t, rootFs, "test", 0, int64(len([]byte("test content"))), false) require.NoError(t, err) require.Equal(t, "test content", string(cachedData)) err = runInstance.rm(t, rootFs, "test") require.NoError(t, err) listRoot, err = runInstance.list(t, rootFs, "") require.NoError(t, err) require.Len(t, listRoot, 0) } func TestInternalObjNotFound(t *testing.T) { id := fmt.Sprintf("tionf%v", time.Now().Unix()) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) obj, err := rootFs.NewObject(context.Background(), "404") require.Error(t, err) require.Nil(t, obj) } func TestInternalCachedWrittenContentMatches(t *testing.T) { testy.SkipUnreliable(t) id := fmt.Sprintf("ticwcm%v", time.Now().Unix()) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) cfs, err := runInstance.getCacheFs(rootFs) require.NoError(t, err) chunkSize := cfs.ChunkSize() // create some rand test data testData := randStringBytes(int(chunkSize*4 + chunkSize/2)) // write the object runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData) // check sample of data from in-file sampleStart := chunkSize / 2 sampleEnd := chunkSize testSample := testData[sampleStart:sampleEnd] checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", sampleStart, sampleEnd, false) require.NoError(t, err) require.Equal(t, int64(len(checkSample)), sampleEnd-sampleStart) require.Equal(t, checkSample, testSample) } func TestInternalDoubleWrittenContentMatches(t *testing.T) { if runtime.GOOS == "windows" && runtime.GOARCH == "386" { t.Skip("Skip test on windows/386") } id := fmt.Sprintf("tidwcm%v", time.Now().Unix()) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) // write the object runInstance.writeRemoteString(t, rootFs, "one", "one content") err := runInstance.updateData(t, rootFs, "one", "one content", " updated") require.NoError(t, err) 
err = runInstance.updateData(t, rootFs, "one", "one content updated", " double") require.NoError(t, err) // check sample of data from in-file data, err := runInstance.readDataFromRemote(t, rootFs, "one", int64(0), int64(len("one content updated double")), true) require.NoError(t, err) require.Equal(t, "one content updated double", string(data)) } func TestInternalCachedUpdatedContentMatches(t *testing.T) { testy.SkipUnreliable(t) id := fmt.Sprintf("ticucm%v", time.Now().Unix()) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) var err error // create some rand test data var testData1 []byte var testData2 []byte if runInstance.rootIsCrypt { testData1, err = base64.StdEncoding.DecodeString(cryptedTextBase64) require.NoError(t, err) testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64) require.NoError(t, err) } else { testData1 = []byte(random.String(100)) testData2 = []byte(random.String(200)) } // write the object o := runInstance.updateObjectRemote(t, rootFs, "data.bin", testData1, testData2) require.Equal(t, o.Size(), int64(len(testData2))) // check data from in-file checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(len(testData2)), false) require.NoError(t, err) require.Equal(t, checkSample, testData2) } func TestInternalWrappedWrittenContentMatches(t *testing.T) { id := fmt.Sprintf("tiwwcm%v", time.Now().Unix()) vfscommon.Opt.DirCacheTime = fs.Duration(time.Second) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) if runInstance.rootIsCrypt { t.Skip("test skipped with crypt remote") } cfs, err := runInstance.getCacheFs(rootFs) require.NoError(t, err) chunkSize := cfs.ChunkSize() // create some rand test data testSize := chunkSize*4 + chunkSize/2 testData := randStringBytes(int(testSize)) // write the object o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData) require.Equal(t, o.Size(), testSize) time.Sleep(time.Second * 3) checkSample, err := 
runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false) require.NoError(t, err) require.Equal(t, int64(len(checkSample)), o.Size()) for i := range checkSample { require.Equal(t, testData[i], checkSample[i]) } } func TestInternalLargeWrittenContentMatches(t *testing.T) { id := fmt.Sprintf("tilwcm%v", time.Now().Unix()) vfscommon.Opt.DirCacheTime = fs.Duration(time.Second) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) if runInstance.rootIsCrypt { t.Skip("test skipped with crypt remote") } cfs, err := runInstance.getCacheFs(rootFs) require.NoError(t, err) chunkSize := cfs.ChunkSize() // create some rand test data testSize := chunkSize*10 + chunkSize/2 testData := randStringBytes(int(testSize)) // write the object runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData) time.Sleep(time.Second * 3) readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false) require.NoError(t, err) for i := range readData { require.Equalf(t, testData[i], readData[i], "at byte %v", i) } } func TestInternalWrappedFsChangeNotSeen(t *testing.T) { id := fmt.Sprintf("tiwfcns%v", time.Now().Unix()) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) cfs, err := runInstance.getCacheFs(rootFs) require.NoError(t, err) chunkSize := cfs.ChunkSize() // create some rand test data testData := randStringBytes(int(chunkSize*4 + chunkSize/2)) runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData) // update in the wrapped fs originalSize, err := runInstance.size(t, rootFs, "data.bin") require.NoError(t, err) fs.Logf(nil, "original size: %v", originalSize) o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin")) require.NoError(t, err) expectedSize := int64(len([]byte("test content"))) var data2 []byte if runInstance.rootIsCrypt { data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64) require.NoError(t, err) expectedSize++ // FIXME 
newline gets in, likely test data issue } else { data2 = []byte("test content") } objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap()) err = o.Update(context.Background(), bytes.NewReader(data2), objInfo) require.NoError(t, err) require.Equal(t, int64(len(data2)), o.Size()) fs.Logf(nil, "updated size: %v", len(data2)) // get a new instance from the cache if runInstance.wrappedIsExternal { err = runInstance.retryBlock(func() error { coSize, err := runInstance.size(t, rootFs, "data.bin") if err != nil { return err } if coSize != expectedSize { return fmt.Errorf("%v <> %v", coSize, expectedSize) } return nil }, 12, time.Second*10) require.NoError(t, err) } else { coSize, err := runInstance.size(t, rootFs, "data.bin") require.NoError(t, err) require.NotEqual(t, coSize, expectedSize) } } func TestInternalMoveWithNotify(t *testing.T) { id := fmt.Sprintf("timwn%v", time.Now().Unix()) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) if !runInstance.wrappedIsExternal { t.Skipf("Not external") } cfs, err := runInstance.getCacheFs(rootFs) require.NoError(t, err) srcName := runInstance.encryptRemoteIfNeeded(t, "test") + "/" + runInstance.encryptRemoteIfNeeded(t, "one") + "/" + runInstance.encryptRemoteIfNeeded(t, "data.bin") dstName := runInstance.encryptRemoteIfNeeded(t, "test") + "/" + runInstance.encryptRemoteIfNeeded(t, "second") + "/" + runInstance.encryptRemoteIfNeeded(t, "data.bin") // create some rand test data var testData []byte if runInstance.rootIsCrypt { testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64) require.NoError(t, err) } else { testData = []byte("test content") } _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test")) _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/one")) _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, 
"test/second")) srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData) // list in mount _, err = runInstance.list(t, rootFs, "test") require.NoError(t, err) _, err = runInstance.list(t, rootFs, "test/one") require.NoError(t, err) // move file _, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName) require.NoError(t, err) err = runInstance.retryBlock(func() error { li, err := runInstance.list(t, rootFs, "test") if err != nil { fs.Logf(nil, "err: %v", err) return err } if len(li) != 2 { fs.Logf(nil, "not expected listing /test: %v", li) return fmt.Errorf("not expected listing /test: %v", li) } li, err = runInstance.list(t, rootFs, "test/one") if err != nil { fs.Logf(nil, "err: %v", err) return err } if len(li) != 0 { fs.Logf(nil, "not expected listing /test/one: %v", li) return fmt.Errorf("not expected listing /test/one: %v", li) } li, err = runInstance.list(t, rootFs, "test/second") if err != nil { fs.Logf(nil, "err: %v", err) return err } if len(li) != 1 { fs.Logf(nil, "not expected listing /test/second: %v", li) return fmt.Errorf("not expected listing /test/second: %v", li) } if fi, ok := li[0].(os.FileInfo); ok { if fi.Name() != "data.bin" { fs.Logf(nil, "not expected name: %v", fi.Name()) return fmt.Errorf("not expected name: %v", fi.Name()) } } else if di, ok := li[0].(fs.DirEntry); ok { if di.Remote() != "test/second/data.bin" { fs.Logf(nil, "not expected remote: %v", di.Remote()) return fmt.Errorf("not expected remote: %v", di.Remote()) } } else { fs.Logf(nil, "unexpected listing: %v", li) return fmt.Errorf("unexpected listing: %v", li) } fs.Logf(nil, "complete listing: %v", li) return nil }, 12, time.Second*10) require.NoError(t, err) } func TestInternalNotifyCreatesEmptyParts(t *testing.T) { id := fmt.Sprintf("tincep%v", time.Now().Unix()) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil) if !runInstance.wrappedIsExternal { t.Skipf("Not external") } cfs, err := 
runInstance.getCacheFs(rootFs) require.NoError(t, err) srcName := runInstance.encryptRemoteIfNeeded(t, "test") + "/" + runInstance.encryptRemoteIfNeeded(t, "one") + "/" + runInstance.encryptRemoteIfNeeded(t, "test") dstName := runInstance.encryptRemoteIfNeeded(t, "test") + "/" + runInstance.encryptRemoteIfNeeded(t, "one") + "/" + runInstance.encryptRemoteIfNeeded(t, "test2") // create some rand test data var testData []byte if runInstance.rootIsCrypt { testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64) require.NoError(t, err) } else { testData = []byte("test content") } err = rootFs.Mkdir(context.Background(), "test") require.NoError(t, err) err = rootFs.Mkdir(context.Background(), "test/one") require.NoError(t, err) srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData) // list in mount _, err = runInstance.list(t, rootFs, "test") require.NoError(t, err) _, err = runInstance.list(t, rootFs, "test/one") require.NoError(t, err) found := boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"))) require.True(t, found) boltDb.Purge() found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"))) require.False(t, found) // move file _, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName) require.NoError(t, err) err = runInstance.retryBlock(func() error { found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"))) if !found { fs.Logf(nil, "not found /test") return fmt.Errorf("not found /test") } found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"))) if !found { fs.Logf(nil, "not found /test/one") return fmt.Errorf("not found /test/one") } found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2"))) if !found { fs.Logf(nil, 
"not found /test/one/test2") return fmt.Errorf("not found /test/one/test2") } li, err := runInstance.list(t, rootFs, "test/one") if err != nil { fs.Logf(nil, "err: %v", err) return err } if len(li) != 1 { fs.Logf(nil, "not expected listing /test/one: %v", li) return fmt.Errorf("not expected listing /test/one: %v", li) } if fi, ok := li[0].(os.FileInfo); ok { if fi.Name() != "test2" { fs.Logf(nil, "not expected name: %v", fi.Name()) return fmt.Errorf("not expected name: %v", fi.Name()) } } else if di, ok := li[0].(fs.DirEntry); ok { if di.Remote() != "test/one/test2" { fs.Logf(nil, "not expected remote: %v", di.Remote()) return fmt.Errorf("not expected remote: %v", di.Remote()) } } else { fs.Logf(nil, "unexpected listing: %v", li) return fmt.Errorf("unexpected listing: %v", li) } fs.Logf(nil, "complete listing /test/one/test2") return nil }, 12, time.Second*10) require.NoError(t, err) } func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) { id := fmt.Sprintf("ticsadcf%v", time.Now().Unix()) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) cfs, err := runInstance.getCacheFs(rootFs) require.NoError(t, err) chunkSize := cfs.ChunkSize() // create some rand test data testData := randStringBytes(int(chunkSize*4 + chunkSize/2)) runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData) // update in the wrapped fs o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin")) require.NoError(t, err) wrappedTime := time.Now().Add(-1 * time.Hour) err = o.SetModTime(context.Background(), wrappedTime) require.NoError(t, err) // get a new instance from the cache co, err := rootFs.NewObject(context.Background(), "data.bin") require.NoError(t, err) require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String()) cfs.DirCacheFlush() // flush the cache // get a new instance from the cache co, err = rootFs.NewObject(context.Background(), "data.bin") require.NoError(t, 
err) require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix()) } func TestInternalCacheWrites(t *testing.T) { id := "ticw" rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"}) cfs, err := runInstance.getCacheFs(rootFs) require.NoError(t, err) chunkSize := cfs.ChunkSize() // create some rand test data earliestTime := time.Now() testData := randStringBytes(int(chunkSize*4 + chunkSize/2)) runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData) expectedTs := time.Now() ts, err := boltDb.GetChunkTs(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "data.bin")), 0) require.NoError(t, err) require.WithinDuration(t, expectedTs, ts, expectedTs.Sub(earliestTime)) } func TestInternalMaxChunkSizeRespected(t *testing.T) { if runtime.GOOS == "windows" && runtime.GOARCH == "386" { t.Skip("Skip test on windows/386") } id := fmt.Sprintf("timcsr%v", time.Now().Unix()) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"}) cfs, err := runInstance.getCacheFs(rootFs) require.NoError(t, err) chunkSize := cfs.ChunkSize() totalChunks := 20 // create some rand test data testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2)) runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData) o, err := cfs.NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin")) require.NoError(t, err) co, ok := o.(*cache.Object) require.True(t, ok) for i := range 4 { // read first 4 _ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false) } cfs.CleanUpCache(true) // the last 2 **must** be in the cache require.True(t, boltDb.HasChunk(co, chunkSize*2)) require.True(t, boltDb.HasChunk(co, chunkSize*3)) for i := 4; i < 6; i++ { // read next 2 _ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false) } cfs.CleanUpCache(true) // the last 2 **must** be in the cache 
require.True(t, boltDb.HasChunk(co, chunkSize*4)) require.True(t, boltDb.HasChunk(co, chunkSize*5)) } func TestInternalExpiredEntriesRemoved(t *testing.T) { id := fmt.Sprintf("tieer%v", time.Now().Unix()) vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) cfs, err := runInstance.getCacheFs(rootFs) require.NoError(t, err) // create some rand test data runInstance.writeRemoteString(t, rootFs, "one", "one content") runInstance.mkdir(t, rootFs, "test") runInstance.writeRemoteString(t, rootFs, "test/second", "second content") l, err := runInstance.list(t, rootFs, "test") require.NoError(t, err) require.Len(t, l, 1) err = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/third")) require.NoError(t, err) l, err = runInstance.list(t, rootFs, "test") require.NoError(t, err) require.Len(t, l, 1) err = runInstance.retryBlock(func() error { l, err = runInstance.list(t, rootFs, "test") if err != nil { return err } if len(l) != 2 { return errors.New("list is not 2") } return nil }, 10, time.Second) require.NoError(t, err) } func TestInternalBug2117(t *testing.T) { vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10) id := fmt.Sprintf("tib2117%v", time.Now().Unix()) rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"}) if runInstance.rootIsCrypt { t.Skipf("skipping crypt") } cfs, err := runInstance.getCacheFs(rootFs) require.NoError(t, err) err = cfs.UnWrap().Mkdir(context.Background(), "test") require.NoError(t, err) for i := 1; i <= 4; i++ { err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d", i)) require.NoError(t, err) for j := 1; j <= 4; j++ { err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d/dir%d", i, j)) require.NoError(t, err) runInstance.writeObjectString(t, cfs.UnWrap(), 
fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test") } } di, err := runInstance.list(t, rootFs, "test/dir1/dir2") require.NoError(t, err) fs.Logf(nil, "len: %v", len(di)) require.Len(t, di, 1) time.Sleep(time.Second * 30) di, err = runInstance.list(t, rootFs, "test/dir1/dir2") require.NoError(t, err) fs.Logf(nil, "len: %v", len(di)) require.Len(t, di, 1) di, err = runInstance.list(t, rootFs, "test/dir1") require.NoError(t, err) fs.Logf(nil, "len: %v", len(di)) require.Len(t, di, 4) di, err = runInstance.list(t, rootFs, "test") require.NoError(t, err) fs.Logf(nil, "len: %v", len(di)) require.Len(t, di, 4) } // run holds the remotes for a test run type run struct { okDiff time.Duration runDefaultCfgMap configmap.Simple tmpUploadDir string rootIsCrypt bool wrappedIsExternal bool tempFiles []*os.File dbPath string chunkPath string vfsCachePath string } func newRun() *run { var err error r := &run{ okDiff: time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this } // Read in all the defaults for all the options fsInfo, err := fs.Find("cache") if err != nil { panic(fmt.Sprintf("Couldn't find cache remote: %v", err)) } r.runDefaultCfgMap = configmap.Simple{} for _, option := range fsInfo.Options { r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default)) } if uploadDir == "" { r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp") if err != nil { panic(fmt.Sprintf("Failed to create temp dir: %v", err)) } } else { r.tmpUploadDir = uploadDir } fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir) return r } func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string { if !runInstance.rootIsCrypt || len(decryptedToEncryptedRemotes) == 0 { return remote } enc, ok := decryptedToEncryptedRemotes[remote] if !ok { t.Fatalf("Failed to find decrypted -> encrypted mapping for '%v'", remote) return remote } return enc } func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags 
map[string]string) (fs.Fs, *cache.Persistent) { fstest.Initialise() remoteExists := false for _, s := range config.GetRemotes() { if s.Name == remote { remoteExists = true } } if !remoteExists && needRemote { t.Skipf("Need remote (%v) to exist", remote) return nil, nil } // Config to pass to NewFs m := configmap.Simple{} for k, v := range r.runDefaultCfgMap { m.Set(k, v) } for k, v := range flags { m.Set(k, v) } // if the remote doesn't exist, create a new one with a local one for it // identify which is the cache remote (it can be wrapped by a crypt too) rootIsCrypt := false cacheRemote := remote if !remoteExists { localRemote := remote + "-local" config.FileSetValue(localRemote, "type", "local") config.FileSetValue(localRemote, "nounc", "true") m.Set("type", "cache") m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote)) } else { remoteType := config.GetValue(remote, "type") if remoteType == "" { t.Skipf("skipped due to invalid remote type for %v", remote) return nil, nil } if remoteType != "cache" { if remoteType == "crypt" { rootIsCrypt = true m.Set("password", cryptPassword1) m.Set("password2", cryptPassword2) } remoteRemote := config.GetValue(remote, "remote") if remoteRemote == "" { t.Skipf("skipped due to invalid remote wrapper for %v", remote) return nil, nil } remoteRemoteParts := strings.Split(remoteRemote, ":") remoteWrapping := remoteRemoteParts[0] remoteType := config.GetValue(remoteWrapping, "type") if remoteType != "cache" { t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType) return nil, nil } cacheRemote = remoteWrapping } } runInstance.rootIsCrypt = rootIsCrypt runInstance.dbPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote+".db") runInstance.chunkPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote) runInstance.vfsCachePath = filepath.Join(config.GetCacheDir(), "vfs", remote) boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, 
&cache.Features{PurgeDb: true}) require.NoError(t, err) ci := fs.GetConfig(context.Background()) ci.LowLevelRetries = 1 // Instantiate root if purge { boltDb.PurgeTempUploads() _ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id)) } f, err := cache.NewFs(context.Background(), remote, id, m) require.NoError(t, err) cfs, err := r.getCacheFs(f) require.NoError(t, err) _, isCache := cfs.Features().UnWrap().(*cache.Fs) _, isCrypt := cfs.Features().UnWrap().(*crypt.Fs) _, isLocal := cfs.Features().UnWrap().(*local.Fs) if isCache || isCrypt || isLocal { r.wrappedIsExternal = false } else { r.wrappedIsExternal = true } if purge { _ = operations.Purge(context.Background(), f, "") } err = f.Mkdir(context.Background(), "") require.NoError(t, err) t.Cleanup(func() { runInstance.cleanupFs(t, f) }) return f, boltDb } func (r *run) cleanupFs(t *testing.T, f fs.Fs) { err := operations.Purge(context.Background(), f, "") require.NoError(t, err) cfs, err := r.getCacheFs(f) require.NoError(t, err) cfs.StopBackgroundRunners() err = os.RemoveAll(r.tmpUploadDir) require.NoError(t, err)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sharefile/generate_tzdata.go
backend/sharefile/generate_tzdata.go
//go:build ignore package main import ( "log" "net/http" "github.com/shurcooL/vfsgen" ) func main() { var AssetDir http.FileSystem = http.Dir("./tzdata") err := vfsgen.Generate(AssetDir, vfsgen.Options{ PackageName: "sharefile", BuildTags: "!dev", VariableName: "tzdata", }) if err != nil { log.Fatalln(err) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sharefile/sharefile.go
backend/sharefile/sharefile.go
// Package sharefile provides an interface to the Citrix Sharefile // object storage system. package sharefile //go:generate ./update-timezone.sh /* NOTES ## for docs Detail standard/chunked/streaming uploads? ## Bugs in API The times in updateItem are being parsed in EST/DST local time updateItem only sets times accurate to 1 second https://community.sharefilesupport.com/citrixsharefile/topics/bug-report-for-update-item-patch-items-id-setting-clientmodifieddate-ignores-timezone-and-milliseconds When doing a rename+move directory, the server appears to do the rename first in the local directory which can overwrite files of the same name in the local directory. https://community.sharefilesupport.com/citrixsharefile/topics/bug-report-for-update-item-patch-items-id-file-overwrite-under-certain-conditions The Copy command can't change the name at the same time which means we have to copy via a temporary directory. https://community.sharefilesupport.com/citrixsharefile/topics/copy-item-needs-to-be-able-to-set-a-new-name ## Allowed characters https://api.sharefile.com/rest/index/odata.aspx $select to limit returned fields https://www.odata.org/documentation/odata-version-3-0/odata-version-3-0-core-protocol/#theselectsystemqueryoption Also $filter to select only things we need https://support.citrix.com/article/CTX234774 The following characters should not be used in folder or file names. \ / . , : ; * ? " < > A filename ending with a period without an extension File names with leading or trailing whitespaces. 
// sharefile stringNeedsEscaping = []byte{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x2A, 0x2E, 0x2F, 0x3A, 0x3C, 0x3E, 0x3F, 0x7C, 0xEFBCBC } maxFileLength = 256 canWriteUnnormalized = true canReadUnnormalized = true canReadRenormalized = false canStream = true Which is control chars + [' ', '*', '.', '/', ':', '<', '>', '?', '|'] - also \ and " */ import ( "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "path" "strings" "time" "github.com/rclone/rclone/backend/sharefile/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/rest" ) const ( rcloneClientID = "djQUPlHTUM9EvayYBWuKC5IrVIoQde46" rcloneEncryptedClientSecret = "v7572bKhUindQL3yDnUAebmgP-QxiwT38JLxVPolcZBl6SSs329MtFzH73x7BeELmMVZtneUPvALSopUZ6VkhQ" minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential apiPath = "/sf/v3" // add to endpoint to get API path tokenPath = "/oauth/token" // add to endpoint to get Token path minChunkSize = 256 * fs.Kibi maxChunkSize = 2 * fs.Gibi defaultChunkSize = 64 * fs.Mebi defaultUploadCutoff = 128 * fs.Mebi ) // Generate a new oauth2 config which we will update when we know the TokenURL func newOauthConfig(tokenURL string) *oauthutil.Config { return &oauthutil.Config{ Scopes: nil, AuthURL: "https://secure.sharefile.com/oauth/authorize", TokenURL: tokenURL, ClientID: rcloneClientID, 
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), RedirectURL: oauthutil.RedirectPublicSecureURL, } } // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "sharefile", Description: "Citrix Sharefile", NewFs: NewFs, Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { oauthConfig := newOauthConfig("") checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error { if auth == nil || auth.Form == nil { return errors.New("endpoint not found in response") } subdomain := auth.Form.Get("subdomain") apicp := auth.Form.Get("apicp") if subdomain == "" || apicp == "" { return fmt.Errorf("subdomain or apicp not found in response: %+v", auth.Form) } endpoint := "https://" + subdomain + "." + apicp m.Set("endpoint", endpoint) oauthConfig.TokenURL = endpoint + tokenPath return nil } return oauthutil.ConfigOut("", &oauthutil.Options{ OAuth2Config: oauthConfig, CheckAuth: checkAuth, }) }, Options: append(oauthutil.SharedOptions, []fs.Option{{ Name: "upload_cutoff", Help: "Cutoff for switching to multipart upload.", Default: defaultUploadCutoff, Advanced: true, }, { Name: "root_folder_id", Help: `ID of the root folder. Leave blank to access "Personal Folders". You can use one of the standard values here or any folder ID (long hex number ID).`, Examples: []fs.OptionExample{{ Value: "", Help: `Access the Personal Folders (default).`, }, { Value: "favorites", Help: "Access the Favorites folder.", }, { Value: "allshared", Help: "Access all the shared folders.", }, { Value: "connectors", Help: "Access all the individual connectors.", }, { Value: "top", Help: "Access the home, favorites, and shared folders as well as the connectors.", }}, Sensitive: true, }, { Name: "chunk_size", Default: defaultChunkSize, Help: `Upload chunk size. Must a power of 2 >= 256k. Making this larger will improve performance, but note that each chunk is buffered in memory one per transfer. 
Reducing this will reduce memory usage but decrease performance.`, Advanced: true, }, { Name: "endpoint", Help: `Endpoint for API calls. This is usually auto discovered as part of the oauth process, but can be set manually to something like: https://XXX.sharefile.com `, Advanced: true, Default: "", }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.Base | encoder.EncodeWin | // :?"*<>| encoder.EncodeBackSlash | // \ encoder.EncodeCtl | encoder.EncodeRightSpace | encoder.EncodeRightPeriod | encoder.EncodeLeftSpace | encoder.EncodeLeftPeriod | encoder.EncodeInvalidUtf8), }}...), }) } // Options defines the configuration for this backend type Options struct { RootFolderID string `config:"root_folder_id"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` ChunkSize fs.SizeSuffix `config:"chunk_size"` Endpoint string `config:"endpoint"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote cloud storage system type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options ci *fs.ConfigInfo // global config features *fs.Features // optional features srv *rest.Client // the connection to the server dirCache *dircache.DirCache // Map of directory path to directory id pacer *fs.Pacer // pacer for API calls bufferTokens chan []byte // control concurrency of multipart uploads tokenRenewer *oauthutil.Renew // renew the token on expiry rootID string // ID of the users root folder location *time.Location // timezone of server for SetModTime workaround } // Object describes a file type Object struct { fs *Fs // what this object is part of remote string // The remote path hasMetaData bool // metadata is present and correct size int64 // size of the object modTime time.Time // modification time of the object id string // ID of the object md5 string // hash of the object } // ------------------------------------------------------------ // Name of the 
remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("sharefile root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // parsePath parses a sharefile 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // Reads the metadata for the id passed in. 
If id is "" then it returns the root // if path is not "" then the item read use id as the root and the path is relative func (f *Fs) readMetaDataForIDPath(ctx context.Context, id, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) { opts := rest.Opts{ Method: "GET", Path: "/Items", Parameters: url.Values{ "$select": {api.ListRequestSelect}, }, } if id != "" { opts.Path += "(" + id + ")" } if path != "" { opts.Path += "/ByPath" opts.Parameters.Set("path", "/"+f.opt.Enc.FromStandardPath(path)) } var item api.Item var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &item) return shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { if filesOnly { return nil, fs.ErrorObjectNotFound } return nil, fs.ErrorDirNotFound } return nil, fmt.Errorf("couldn't find item: %w", err) } if directoriesOnly && item.Type != api.ItemTypeFolder { return nil, fs.ErrorIsFile } if filesOnly { if item.Type == api.ItemTypeFolder { return nil, fs.ErrorIsDir } else if item.Type != api.ItemTypeFile { return nil, fs.ErrorNotAFile } } return &item, nil } // Reads the metadata for the id passed in. 
If id is "" then it returns the root func (f *Fs) readMetaDataForID(ctx context.Context, id string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) { return f.readMetaDataForIDPath(ctx, id, "", directoriesOnly, filesOnly) } // readMetaDataForPath reads the metadata from the path func (f *Fs) readMetaDataForPath(ctx context.Context, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) { leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound } return nil, err } return f.readMetaDataForIDPath(ctx, directoryID, leaf, directoriesOnly, filesOnly) } // errorHandler parses a non 2xx error response into an error func errorHandler(resp *http.Response) error { body, err := rest.ReadBody(resp) if err != nil { body = nil } var e = api.Error{ Code: fmt.Sprint(resp.StatusCode), Reason: resp.Status, } e.Message.Lang = "en" e.Message.Value = string(body) if body != nil { _ = json.Unmarshal(body, &e) } return &e } func checkUploadChunkSize(cs fs.SizeSuffix) error { if cs < minChunkSize { return fmt.Errorf("ChunkSize: %s is less than %s", cs, minChunkSize) } if cs > maxChunkSize { return fmt.Errorf("ChunkSize: %s is greater than %s", cs, maxChunkSize) } return nil } func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadChunkSize(cs) if err == nil { old, f.opt.ChunkSize = f.opt.ChunkSize, cs f.fillBufferTokens() // reset the buffer tokens } return } func checkUploadCutoff(cs fs.SizeSuffix) error { return nil } func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadCutoff(cs) if err == nil { old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs } return } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := 
configstruct.Set(m, opt) if err != nil { return nil, err } // Check parameters OK if opt.Endpoint == "" { return nil, errors.New("endpoint not set: rebuild the remote or set manually") } err = checkUploadChunkSize(opt.ChunkSize) if err != nil { return nil, err } err = checkUploadCutoff(opt.UploadCutoff) if err != nil { return nil, err } root = parsePath(root) oauthConfig := newOauthConfig(opt.Endpoint + tokenPath) var client *http.Client var ts *oauthutil.TokenSource client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig) if err != nil { return nil, fmt.Errorf("failed to configure sharefile: %w", err) } ci := fs.GetConfig(ctx) f := &Fs{ name: name, root: root, opt: *opt, ci: ci, srv: rest.NewClient(client).SetRoot(opt.Endpoint + apiPath), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } f.features = (&fs.Features{ CaseInsensitive: true, CanHaveEmptyDirectories: true, ReadMimeType: false, }).Fill(ctx, f) f.srv.SetErrorHandler(errorHandler) f.fillBufferTokens() // Renew the token in the background if ts != nil { f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { _, err := f.List(ctx, "") return err }) } // Load the server timezone from an internal file // Used to correct the time in SetModTime const serverTimezone = "America/New_York" timezone, err := tzdata.Open(serverTimezone) if err != nil { return nil, fmt.Errorf("failed to open timezone db: %w", err) } tzdata, err := io.ReadAll(timezone) if err != nil { return nil, fmt.Errorf("failed to read timezone: %w", err) } _ = timezone.Close() f.location, err = time.LoadLocationFromTZData(serverTimezone, tzdata) if err != nil { return nil, fmt.Errorf("failed to load location from timezone: %w", err) } // Find ID of user's root folder if opt.RootFolderID == "" { item, err := f.readMetaDataForID(ctx, opt.RootFolderID, true, false) if err != nil { return nil, fmt.Errorf("couldn't find root ID: %w", err) } f.rootID = 
item.ID } else { f.rootID = opt.RootFolderID } // Get rootID f.dirCache = dircache.New(root, f.rootID, f) // Find the current root err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) tempF := *f tempF.dirCache = dircache.New(newRoot, f.rootID, &tempF) tempF.root = newRoot // Make new Fs which is the parent err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } _, err := tempF.newObjectWithInfo(ctx, remote, nil) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f return f, nil } return nil, err } f.features.Fill(ctx, &tempF) // XXX: update the old f here instead of returning tempF, since // `features` were already filled with functions having *f as a receiver. // See https://github.com/rclone/rclone/issues/2182 f.dirCache = tempF.dirCache f.root = tempF.root // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // Fill up (or reset) the buffer tokens func (f *Fs) fillBufferTokens() { f.bufferTokens = make(chan []byte, f.ci.Transfers) for range f.ci.Transfers { f.bufferTokens <- nil } } // getUploadBlock gets a block from the pool of size chunkSize func (f *Fs) getUploadBlock() []byte { buf := <-f.bufferTokens if buf == nil { buf = make([]byte, f.opt.ChunkSize) } // fs.Debugf(f, "Getting upload block %p", buf) return buf } // putUploadBlock returns a block to the pool of size chunkSize func (f *Fs) putUploadBlock(buf []byte) { buf = buf[:cap(buf)] if len(buf) != int(f.opt.ChunkSize) { panic("bad blocksize returned to pool") } // fs.Debugf(f, "Returning upload block %p", buf) f.bufferTokens <- buf } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } var err error if info != nil { // Set info err = o.setMetaData(info) } else { err = o.readMetaData(ctx) // reads info and meta, returning an error } if err != nil { return nil, err } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(ctx, remote, nil) } // FindLeaf finds a directory of name leaf in the folder with ID pathID func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { if pathID == "top" { // Find the leaf in pathID found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool { if item.Name == leaf { pathIDOut = item.ID return true } return false }) return pathIDOut, found, err } info, err := f.readMetaDataForIDPath(ctx, pathID, leaf, true, false) if err == nil { found = true pathIDOut = info.ID } else if err == fs.ErrorDirNotFound { err = nil // don't return an error if not found } return pathIDOut, found, err } // CreateDir makes a directory with pathID as parent and name leaf func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { var resp *http.Response leaf = f.opt.Enc.FromStandardName(leaf) var req = api.Item{ Name: leaf, FileName: leaf, CreatedAt: time.Now(), } var info api.Item opts := rest.Opts{ Method: "POST", Path: "/Items(" + pathID + ")/Folder", Parameters: url.Values{ "$select": {api.ListRequestSelect}, "overwrite": {"false"}, "passthrough": {"false"}, }, } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &req, &info) return shouldRetry(ctx, resp, err) }) if err != nil { return "", fmt.Errorf("CreateDir: %w", err) } return info.ID, nil } // list the objects into the function supplied // // 
If directories is set it only sends directories // User function to process a File item from listAll // // Should return true to finish processing type listAllFn func(*api.Item) bool // Lists the directory required calling the user function on each item found // // If the user fn ever returns true then it early exits with found = true func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) { opts := rest.Opts{ Method: "GET", Path: "/Items(" + dirID + ")/Children", Parameters: url.Values{ "$select": {api.ListRequestSelect}, }, } var result api.ListResponse var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return found, fmt.Errorf("couldn't list files: %w", err) } for i := range result.Value { item := &result.Value[i] if item.Type == api.ItemTypeFolder { if filesOnly { continue } } else if item.Type == api.ItemTypeFile { if directoriesOnly { continue } } else { fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type) continue } item.Name = f.opt.Enc.ToStandardName(item.Name) if fn(item) { found = true break } } return } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. 
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } var iErr error _, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool { remote := path.Join(dir, info.Name) if info.Type == api.ItemTypeFolder { // cache the directory ID for later lookups f.dirCache.Put(remote, info.ID) d := fs.NewDir(remote, info.CreatedAt).SetID(info.ID).SetSize(info.Size).SetItems(int64(info.FileCount)) entries = append(entries, d) } else if info.Type == api.ItemTypeFile { o, err := f.newObjectWithInfo(ctx, remote, info) if err != nil { iErr = err return true } entries = append(entries, o) } return false }) if err != nil { return nil, err } if iErr != nil { return nil, iErr } return entries, nil } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Returns the object, leaf, directoryID and error. // // Used to create new objects func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { // Create the directory for the object if it doesn't exist leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true) if err != nil { return } // Temporary Object under construction o = &Object{ fs: f, remote: remote, } return o, leaf, directoryID, nil } // Put the object // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil) switch err { case nil: return existingObj, existingObj.Update(ctx, in, src, options...) 
case fs.ErrorObjectNotFound: // Not found so create it return f.PutUnchecked(ctx, in, src) default: return nil, err } } // FIXMEPutStream uploads to the remote path with the modTime given of indeterminate size // // PutStream no longer appears to work - the streamed uploads need the // size specified at the start otherwise we get this error: // // upload failed: file size does not match (-2) func (f *Fs) FIXMEPutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) } // PutUnchecked the object into the container // // This will produce an error if the object already exists. // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() modTime := src.ModTime(ctx) o, _, _, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } return o, o.Update(ctx, in, src, options...) 
} // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, err := f.dirCache.FindDir(ctx, dir, true) return err } // purgeCheck removes the directory, if check is set then it refuses // to do so if it has anything in func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } dc := f.dirCache rootID, err := dc.FindDir(ctx, dir, false) if err != nil { return err } // need to check if empty as it will delete recursively by default if check { found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool { return true }) if err != nil { return fmt.Errorf("purgeCheck: %w", err) } if found { return fs.ErrorDirectoryNotEmpty } } err = f.remove(ctx, rootID) f.dirCache.FlushDir(dir) if err != nil { return err } return nil } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { // sharefile returns times accurate to the millisecond, but // for some reason these seem only accurate 2ms. // updateItem seems to only set times accurate to 1 second though. 
return time.Second // this doesn't appear to be documented anywhere } // Purge deletes all the files and the container // // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, false) } // updateItem patches a file or folder // // if leaf = "" or directoryID = "" or modTime == nil then it will be // left alone // // Note that this seems to work by renaming first, then moving to a // new directory which means that it can overwrite existing objects // :-( func (f *Fs) updateItem(ctx context.Context, id, leaf, directoryID string, modTime *time.Time) (info *api.Item, err error) { // Move the object opts := rest.Opts{ Method: "PATCH", Path: "/Items(" + id + ")", Parameters: url.Values{ "$select": {api.ListRequestSelect}, "overwrite": {"false"}, }, } leaf = f.opt.Enc.FromStandardName(leaf) // FIXME this appears to be a bug in the API // // If you set the modified time via PATCH then the server // appears to parse it as a local time for America/New_York // // However if you set it when uploading the file then it is fine... 
// // Also it only sets the time to 1 second resolution where it // uses 1ms resolution elsewhere if modTime != nil && f.location != nil { newTime := modTime.In(f.location) isoTime := newTime.Format(time.RFC3339Nano) // Chop TZ -05:00 off the end and replace with Z isoTime = isoTime[:len(isoTime)-6] + "Z" // Parse it back into a time newModTime, err := time.Parse(time.RFC3339Nano, isoTime) if err != nil { return nil, fmt.Errorf("updateItem: time parse: %w", err) } modTime = &newModTime } update := api.UpdateItemRequest{ Name: leaf, FileName: leaf, ModifiedAt: modTime, } if directoryID != "" { update.Parent = &api.Parent{ ID: directoryID, } } var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &update, &info) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } return info, nil } // move a file or folder // // This is complicated by the fact that we can't use updateItem to move // to a different directory AND rename at the same time as it can // overwrite files in the source directory. func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDirectoryID, newDirectoryID string) (item *api.Item, err error) { // To demonstrate bug // item, err = f.updateItem(ctx, id, newLeaf, newDirectoryID, nil) // if err != nil { // return nil, fmt.Errorf("Move rename leaf: %w", err) // } // return item, nil doRenameLeaf := oldLeaf != newLeaf doMove := oldDirectoryID != newDirectoryID // Now rename the leaf to a temporary name if we are moving to // another directory to make sure we don't overwrite something // in the source directory by accident if doRenameLeaf && doMove { tmpLeaf := newLeaf + "." 
+ random.String(8) item, err = f.updateItem(ctx, id, tmpLeaf, "", nil) if err != nil { return nil, fmt.Errorf("Move rename leaf: %w", err) } } // Move the object to a new directory (with the existing name) // if required if doMove { item, err = f.updateItem(ctx, id, "", newDirectoryID, nil) if err != nil { return nil, fmt.Errorf("Move directory: %w", err) } } // Rename the leaf to its final name if required if doRenameLeaf { item, err = f.updateItem(ctx, id, newLeaf, "", nil) if err != nil { return nil, fmt.Errorf("Move rename leaf: %w", err) } } return item, nil } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } // Find ID of src parent, not creating subdirs srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false) if err != nil { return nil, err } // Create temporary object dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } // Do the move info, err := f.move(ctx, true, srcObj.id, srcLeaf, leaf, srcParentID, directoryID) if err != nil { return nil, err } err = dstObj.setMetaData(info) if err != nil { return nil, err } return dstObj, nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) if err != nil { return err } // Do the move _, err = f.move(ctx, false, srcID, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID) if err != nil { return err } srcFs.dirCache.FlushDir(srcRemote) return nil } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } err = srcObj.readMetaData(ctx) if err != nil { return nil, err } // Find ID of src parent, not creating subdirs srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false) if err != nil { return nil, err } srcLeaf = f.opt.Enc.FromStandardName(srcLeaf) _ = srcParentID // Create temporary object dstObj, dstLeaf, dstParentID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } dstLeaf = f.opt.Enc.FromStandardName(dstLeaf) sameName := strings.EqualFold(srcLeaf, dstLeaf) if sameName && srcParentID == dstParentID { return nil, fmt.Errorf("copy: can't copy to a file in the same directory whose name only differs in case: %q vs %q", srcLeaf, dstLeaf) } // Discover whether we can just 
copy directly or not directCopy := false if sameName { // if copying to same name can copy directly directCopy = true } else { // if (dstParentID, srcLeaf) does not exist then can // Copy then Rename without fear of overwriting // something _, err := f.readMetaDataForIDPath(ctx, dstParentID, srcLeaf, false, false) if err == fs.ErrorObjectNotFound || err == fs.ErrorDirNotFound { directCopy = true } else if err != nil { return nil, fmt.Errorf("copy: failed to examine destination dir: %w", err) //} else { // otherwise need to copy via a temporary directory } } // Copy direct to destination unless !directCopy in which case
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sharefile/tzdata_vfsdata.go
backend/sharefile/tzdata_vfsdata.go
// Code generated by vfsgen; DO NOT EDIT. //go:build !dev package sharefile import ( "bytes" "compress/gzip" "fmt" "io" "net/http" "os" pathpkg "path" "time" ) // tzdata statically implements the virtual filesystem provided to vfsgen. var tzdata = func() http.FileSystem { fs := vfsgen۰FS{ "/": &vfsgen۰DirInfo{ name: "/", modTime: time.Date(2019, 9, 12, 14, 55, 27, 600751842, time.UTC), }, "/America": &vfsgen۰DirInfo{ name: "America", modTime: time.Date(2019, 9, 12, 14, 55, 27, 600751842, time.UTC), }, "/America/New_York": &vfsgen۰CompressedFileInfo{ name: "New_York", modTime: time.Date(2019, 7, 2, 0, 44, 57, 0, time.UTC), uncompressedSize: 3536, compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xd6\x7f\x54\xd5\xf5\x1d\xc7\xf1\xaf\x4a\x6a\x28\xa1\x2b\x9a\xa6\xb1\xa6\xdb\x08\x13\xf0\x47\xe4\x2f\xb6\x68\xc9\x18\xda\x0d\xd3\x4b\x22\x39\xfc\xd4\x20\x0e\xea\xc6\x67\xfd\x20\xdc\x0c\xdb\xb4\x18\x9e\xd3\xdc\x4e\x1e\xd8\x56\xe7\x36\x53\x94\x5c\x44\x0a\x38\xc9\x76\xfd\xc5\xc8\xe3\x8e\xab\xb8\x66\xd8\x8f\xeb\x5b\xa7\x77\x65\x92\x1f\xa4\x8c\xc9\xba\x77\xe7\xf9\x9e\xff\xed\x9f\xf9\xdf\xfe\x48\x4f\x3e\xfc\x9c\x13\xf0\x8f\xf7\xf5\x7c\xfb\x8b\xca\x1f\x9c\xe6\xfd\xd7\xaf\xab\x2e\xff\xc7\xaf\x73\x97\xff\x7e\xdd\x13\x9e\xe7\x05\xb6\x26\xdb\xe7\x5f\xfd\xd8\xfc\xe1\x29\xcf\x6e\xfa\xfd\x11\xf3\x42\xe9\x29\xbb\x79\xed\x47\xb2\x65\xf9\xcb\xb6\x21\x73\x9b\xd9\xba\xe8\xb0\xdb\x96\x54\x6b\x1a\xa7\xbf\xe4\x1a\xa3\x0d\xb2\xfd\xda\x5f\xb9\xed\xe1\x1a\xf9\x63\x9f\x75\x2f\x05\xcb\xa5\x29\xb4\xd0\xbd\x1c\x98\x2f\xcd\x2d\xb7\xba\x57\xaa\xd3\x64\xc7\x73\xf7\xd8\x9d\x65\xf3\x4c\xcb\xea\xe9\xb6\x35\x77\xb2\x69\x5b\x9a\x64\x77\xa5\x5c\x63\xfe\x34\xe7\x73\xbb\x7b\xa8\x33\xed\xe3\x8e\xdb\xf6\x48\x97\xd9\x13\xf7\x99\xdb\xd3\xd9\x6a\x5e\x3b\xfd\x8e\xfb\x73\xf3\x9b\x12\xec\x68\x77\x7b\x37\xec\x94\x7d\x5b\x9e\x75\xfb\x2b\x36\xca\x81\x75\x8f\xbb\x83\xf9\x95\xd2\x51\xb2\xcc\xfd\x25\xa3\x50\x3a\x7d\xab\xed\xeb\x89\xb3\xe5\x50\x5a\xb1\x3d\xd4\xbf\xd8\x1c\x4e\xc8\xb6\x87\xbb\x67\x99\xbf\xfe\xd
2\xd9\xae\x89\x9f\xda\x2e\x33\x20\xa1\x47\x4f\xbb\xa3\xd9\x1f\xc8\xdb\x05\x9d\xee\xd8\x4d\x7b\xe5\x9d\xcc\x46\xd7\xed\x6d\x92\xe3\x49\xeb\xdd\x71\x59\x2b\xef\x46\xb7\xd9\xf7\xf6\x95\xca\xfb\xe1\x5a\xfb\xc1\x8b\xbf\x30\xe1\xe0\x0a\x7b\xa2\xb6\xc4\x48\xc0\x67\x4f\x96\x7f\xcf\x9c\xaa\xce\xb0\x7f\xcf\xbb\xd9\x9c\x2e\x1e\x6d\xcf\x2c\x4e\x97\x48\x6e\x9a\xfb\xc7\x8c\x51\xf2\x61\x4a\xa2\xfb\xe8\xfa\x0b\x72\x76\x68\xaf\x3b\x7b\xf1\xa8\x7c\x1c\x09\xb9\x73\xc7\x76\x49\x4f\x67\x9b\xfb\x64\x6f\xc8\x9c\x6f\xee\xb2\xee\xf9\x36\xd3\xbb\xa1\xd5\x5e\x58\x53\x6f\xfa\x2a\xea\xec\xa7\xcb\x56\x99\xcf\xf2\xab\xec\xc5\xdb\xef\x33\x9f\x67\x14\xd9\xfe\x9b\x1f\x93\x7f\x26\x66\xd9\x4b\xc3\x97\xc8\xa5\xfe\x42\x37\xf0\xe1\x1c\xf9\x57\xf7\x6c\xf7\xc5\xa1\x1b\x25\xba\x7b\xbc\x8b\x6d\x8b\x89\x57\x1f\x75\x83\x6a\x4e\xca\xe0\xc7\xc4\x0d\xb1\x51\x13\x67\xbe\xb0\x57\x2d\x10\x33\x34\xfb\x84\x1d\x36\xe5\x80\x19\xf6\xf4\x58\x7b\xf5\xa8\x2d\xe6\xea\xa6\x8d\x2e\xde\x3d\x65\x46\xc8\x93\x76\xe4\xf1\x17\x24\x61\x5f\x99\xbd\xa6\x7d\x9d\x24\xbe\xb8\xd6\x8d\xfa\xdd\x83\x32\xba\xb6\xd4\x7d\x65\xd5\xf7\xe5\xda\xf2\x5c\x77\xdd\x92\x49\x92\x94\x97\xea\xae\x9f\x35\x52\xbe\x9a\x3a\xc2\x8d\x99\x90\x6a\xc6\x0e\xef\x71\x37\x0c\x1e\x61\x6e\xb8\x10\x6f\xc7\x9d\xec\x31\xe3\xdf\x3a\x67\x6f\xdc\xff\x86\x49\xde\xf1\x37\xfb\xb5\x4d\x3b\xcc\x4d\x95\x9e\xfb\xfa\xcf\x9f\x31\x13\x0a\x4e\xb9\x89\xcb\x9b\xe5\x1b\x99\x1d\xee\x9b\xf3\x7e\x23\xdf\x4a\xda\xea\x52\x26\x3d\x2c\x29\xd1\x83\x36\x35\xbe\x40\x52\xc3\x0d\x76\xd2\xd9\x19\x72\x4b\xb0\xc6\x4e\x0e\xf9\x4d\x5a\xa0\xdc\xa6\xb7\xdc\x66\x32\xaa\xe7\xdb\x29\xcf\x8c\x31\x53\xcb\xca\xdc\xb4\x87\x2e\x99\xe9\xb9\x79\xee\xd6\x85\xef\x9b\xcc\x94\x5b\xdc\x6d\xd3\x82\x66\xc6\xb0\x04\x37\x73\xdc\xbb\x32\x33\x72\xde\xcd\x1a\x78\x4d\x66\x77\xbe\xe5\xe6\xbc\x17\x90\xac\xe6\x4f\xec\xb7\xf7\x3c\x21\xdf\xd9\xf0\xa6\xbd\xfd\xd9\x07\x24\xbb\x62\xa7\xbd\x63\xdd\x1a\xf3\xdd\xfc\x8d\xf6\xce\x92\xfb\xcd\xdc\x8c\x4a\x9b\x33\xf7\x4e\x93\xd3\xd7\xe8\x72\xd3\x96\x49\x6e\x68\xbd\xcb\x4b\xb8\x43\xf2\x5a\x56\xba\x7
9\x3d\x13\x65\xfe\x73\xb5\xf6\xae\x63\xd9\xc6\xb7\x7a\x85\xbd\x7b\xd7\x04\x93\xbf\xd4\x67\x17\xd4\xc5\x99\x7b\xb2\x32\xec\xc2\x47\x23\x66\xd1\xf8\xd1\xd6\x5f\x70\xc8\xf8\x07\xfa\xec\xbd\x99\xdb\xcd\xbd\x67\x12\x5d\x61\x72\xa7\x14\x76\xf4\xba\x25\xd1\x46\x29\xda\x12\x72\xf7\x85\xd7\xcb\xd2\x75\x6d\xee\x07\xc1\x95\x52\x5c\x52\xef\x96\x05\xee\x16\xe3\x6b\xb5\xf7\xd7\xac\x30\x0f\xa4\xd5\xd9\x1f\x96\xf9\x4c\x49\x42\x95\x2d\xcd\xcd\x30\xa5\x3d\x45\xb6\x2c\x65\xb4\x29\x3b\x92\x65\xcb\x87\xf6\x99\xf2\xa6\x64\xbb\x3c\xf2\xb6\x59\x51\x37\xdb\xad\x7c\xa3\x57\x7e\x54\x39\xde\xfd\xb8\x39\x24\x15\x05\x51\x67\x37\xb4\xc9\x4f\x32\xc5\x3d\x54\x51\x2f\x0f\x27\x1d\x70\x8f\xe4\xaf\x92\x47\xa2\x27\x6c\xe5\xcc\x3a\x53\x19\xde\x6f\xab\xc6\x54\x99\xaa\xe0\x66\xbb\xaa\xbf\xc8\xfc\x34\xf0\xa4\xfd\x59\x77\x96\x59\x5d\x5d\x66\x1f\xdf\x9d\x6c\xaa\x8b\xf3\xec\x9a\xdf\x7a\x66\xf0\xa0\x2b\xfc\x3d\x24\xee\x8a\xbf\xe4\xff\xe5\x77\x2c\xf6\x6a\xc0\xf3\x62\xb1\xd7\xf7\x0d\x8a\x8b\xc5\xda\x5f\xf1\x86\xeb\xdf\x47\xea\x9f\xa3\xee\xf2\xf9\xbd\x9c\xb9\x7e\x2f\x67\x91\xdf\xcb\x59\xec\xf7\x72\x16\xf8\x75\xda\x06\xe9\x1f\x57\xb2\x81\xb1\x58\x2c\x56\x3c\xc4\xfd\x1a\xd9\x42\x64\x0f\x91\x4d\x44\x76\x11\xd9\x46\x64\x1f\x91\x8d\x44\x76\x12\xd9\x4a\x64\x2f\x91\xcd\x54\xa3\x0d\xfa\xff\xb3\x9d\x6a\xb8\x46\xdf\x6c\x28\xb2\xa3\xc8\x96\x22\x7b\x8a\x6c\x2a\xb2\xab\xc8\xb6\x22\xfb\x8a\x6c\x2c\xb2\xb3\xc8\xd6\x22\x7b\x8b\x6c\x2e\xb2\xbb\xc8\xf6\xaa\x91\x2e\x7d\xb3\xc1\x6a\x67\xab\xbe\xd9\x62\x64\x8f\x91\x4d\x46\x76\x19\xd9\x66\x64\x9f\x91\x8d\x46\x76\x1a\xd9\x6a\x64\xaf\x91\xcd\x46\x76\x1b\xd9\x6e\xb5\x7f\xb1\xfe\x3c\x36\x5c\xed\x9e\xa5\x6f\xb6\x1c\xd9\x73\xd5\x0c\xe8\xd7\xb1\xeb\xc8\xb6\x23\xfb\x8e\x6c\x3c\xb2\xf3\xc8\xd6\x23\x7b\xaf\xca\x5a\x7d\xb3\xfb\xc8\xf6\x23\xfb\x8f\x34\x00\xe9\x00\xd2\x02\xa4\x07\x48\x13\x90\x2e\x20\x6d\x40\xfa\x80\x34\x02\xe9\x04\xd2\x0a\xa4\x17\x48\x33\x90\x6e\xa8\x17\x8f\xea\x9b\x7e\x20\x0d\x41\x3a\x82\xb4\x04\xe9\x09\xd2\x14\xa4\x2b\x48\x5b\x90\xbe\x20\x8d\x41\x3a\x83\xb4\x06\xe9\x0d\xd2\x1c\xa
4\x3b\x48\x7b\xd4\xfe\x42\xfd\x79\x34\x08\xe9\x10\xd2\x22\xd4\x1e\xe9\x3f\xe4\x98\xe8\xa7\xa5\x3e\xea\xf4\x83\x55\x73\x52\xdf\xf4\x09\x69\x14\xd2\x29\xfd\x80\x2d\x10\x7d\xd3\x2b\xa4\x59\xea\xd3\x63\xf5\x4d\xbb\xd4\xa6\x8d\xfa\xf5\x34\x0c\xe9\x18\xd2\x32\xa4\x67\x48\xd3\x90\xae\x21\x6d\x43\xfa\x86\x34\x0e\xe9\x1c\xd2\x3a\xa4\x77\x48\xf3\x90\xee\x21\xed\x43\xfa\x87\x34\x50\xbd\x10\xaf\x3f\x8f\x16\x22\x3d\x44\x9a\x88\x74\x11\x69\x23\xd2\x47\xa4\x91\x48\x27\x91\x56\x22\xbd\x44\x9a\x89\x74\x13\x69\xa7\x1a\x3d\xa8\xdf\x8f\x86\xaa\xe1\x06\x7d\xd3\x52\xa4\xa7\x48\x53\x91\xae\x22\x6d\x45\xfa\x8a\x34\x16\xe9\x2c\xd2\x5a\xa4\xb7\x48\x73\x91\xee\x22\xed\x45\xfa\x8b\x34\x58\x8d\x9c\xd7\x37\x2d\x46\x7a\x8c\x34\x19\xe9\x32\xd2\x66\xa4\xcf\x48\xa3\x91\x4e\x23\xad\x46\x7a\x8d\x34\x1b\xe9\x36\xd2\x6e\xb5\xaf\x51\xbf\x3f\x0d\x57\x43\xeb\xf5\x4d\xcb\xd5\x96\x95\xfa\xa6\xe9\x48\xd7\x91\xb6\x23\x7d\x47\x1a\x8f\x74\x1e\x69\x3d\xd2\x7b\xa4\xf9\x48\xf7\x91\xf6\xab\x03\x7d\xfa\xe6\x06\x50\xcf\x24\xea\xcf\xe3\x16\x50\x3b\x7a\xf5\xcd\x4d\x80\xdc\x05\xc8\x6d\x80\xdc\x07\xc8\x8d\x80\xdc\x09\xc8\xad\x80\xdc\x0b\xc8\xcd\x80\xdc\x0d\xc8\xed\x80\xdc\x0f\xc8\x0d\xa1\xf6\x14\xe9\x9b\x5b\x42\x3d\x92\xa5\x6f\x6e\x0a\xb5\x29\x59\xdf\xdc\x16\xc8\x7d\x81\xdc\x18\xc8\x9d\x81\xdc\x1a\xc8\xbd\x81\xdc\x1c\xc8\xdd\x81\xdc\x1e\xc8\xfd\x81\xdc\x20\x6a\xf4\x3f\x9f\x57\x6e\x11\x35\xbc\x5f\xdf\xdc\x24\x6a\x70\xb3\xbe\xb9\x4d\x90\xfb\x04\xb9\x51\x90\x3b\x05\xb9\x55\x90\x7b\x05\xbf\xbc\x59\xfe\xf7\x9b\x25\x3e\x67\x91\x3f\x33\x67\xae\x7f\xb2\x6f\x7a\xfa\xb4\xf4\x29\x93\x7d\x53\xa7\xa6\x4f\x4d\x9f\x12\xff\xef\x00\x00\x00\xff\xff\x96\x2d\xbf\x9f\xd0\x0d\x00\x00"), }, } fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ fs["/America"].(os.FileInfo), } fs["/America"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ fs["/America/New_York"].(os.FileInfo), } return fs }() type vfsgen۰FS map[string]interface{} func (fs vfsgen۰FS) Open(path string) (http.File, error) { path = pathpkg.Clean("/" + path) f, ok := fs[path] if !ok { 
return nil, &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist} } switch f := f.(type) { case *vfsgen۰CompressedFileInfo: gr, err := gzip.NewReader(bytes.NewReader(f.compressedContent)) if err != nil { // This should never happen because we generate the gzip bytes such that they are always valid. panic("unexpected error reading own gzip compressed bytes: " + err.Error()) } return &vfsgen۰CompressedFile{ vfsgen۰CompressedFileInfo: f, gr: gr, }, nil case *vfsgen۰DirInfo: return &vfsgen۰Dir{ vfsgen۰DirInfo: f, }, nil default: // This should never happen because we generate only the above types. panic(fmt.Sprintf("unexpected type %T", f)) } } // vfsgen۰CompressedFileInfo is a static definition of a gzip compressed file. type vfsgen۰CompressedFileInfo struct { name string modTime time.Time compressedContent []byte uncompressedSize int64 } func (f *vfsgen۰CompressedFileInfo) Readdir(count int) ([]os.FileInfo, error) { return nil, fmt.Errorf("cannot Readdir from file %s", f.name) } func (f *vfsgen۰CompressedFileInfo) Stat() (os.FileInfo, error) { return f, nil } func (f *vfsgen۰CompressedFileInfo) GzipBytes() []byte { return f.compressedContent } func (f *vfsgen۰CompressedFileInfo) Name() string { return f.name } func (f *vfsgen۰CompressedFileInfo) Size() int64 { return f.uncompressedSize } func (f *vfsgen۰CompressedFileInfo) Mode() os.FileMode { return 0444 } func (f *vfsgen۰CompressedFileInfo) ModTime() time.Time { return f.modTime } func (f *vfsgen۰CompressedFileInfo) IsDir() bool { return false } func (f *vfsgen۰CompressedFileInfo) Sys() interface{} { return nil } // vfsgen۰CompressedFile is an opened compressedFile instance. type vfsgen۰CompressedFile struct { *vfsgen۰CompressedFileInfo gr *gzip.Reader grPos int64 // Actual gr uncompressed position. seekPos int64 // Seek uncompressed position. } func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) { if f.grPos > f.seekPos { // Rewind to beginning. 
err = f.gr.Reset(bytes.NewReader(f.compressedContent)) if err != nil { return 0, err } f.grPos = 0 } if f.grPos < f.seekPos { // Fast-forward. _, err = io.CopyN(io.Discard, f.gr, f.seekPos-f.grPos) if err != nil { return 0, err } f.grPos = f.seekPos } n, err = f.gr.Read(p) f.grPos += int64(n) f.seekPos = f.grPos return n, err } func (f *vfsgen۰CompressedFile) Seek(offset int64, whence int) (int64, error) { switch whence { case io.SeekStart: f.seekPos = 0 + offset case io.SeekCurrent: f.seekPos += offset case io.SeekEnd: f.seekPos = f.uncompressedSize + offset default: panic(fmt.Errorf("invalid whence value: %v", whence)) } return f.seekPos, nil } func (f *vfsgen۰CompressedFile) Close() error { return f.gr.Close() } // vfsgen۰DirInfo is a static definition of a directory. type vfsgen۰DirInfo struct { name string modTime time.Time entries []os.FileInfo } func (d *vfsgen۰DirInfo) Read([]byte) (int, error) { return 0, fmt.Errorf("cannot Read from directory %s", d.name) } func (d *vfsgen۰DirInfo) Close() error { return nil } func (d *vfsgen۰DirInfo) Stat() (os.FileInfo, error) { return d, nil } func (d *vfsgen۰DirInfo) Name() string { return d.name } func (d *vfsgen۰DirInfo) Size() int64 { return 0 } func (d *vfsgen۰DirInfo) Mode() os.FileMode { return 0755 | os.ModeDir } func (d *vfsgen۰DirInfo) ModTime() time.Time { return d.modTime } func (d *vfsgen۰DirInfo) IsDir() bool { return true } func (d *vfsgen۰DirInfo) Sys() interface{} { return nil } // vfsgen۰Dir is an opened dir instance. type vfsgen۰Dir struct { *vfsgen۰DirInfo pos int // Position within entries for Seek and Readdir. 
} func (d *vfsgen۰Dir) Seek(offset int64, whence int) (int64, error) { if offset == 0 && whence == io.SeekStart { d.pos = 0 return 0, nil } return 0, fmt.Errorf("unsupported Seek in directory %s", d.name) } func (d *vfsgen۰Dir) Readdir(count int) ([]os.FileInfo, error) { if d.pos >= len(d.entries) && count > 0 { return nil, io.EOF } if count <= 0 || count > len(d.entries)-d.pos { count = len(d.entries) - d.pos } e := d.entries[d.pos : d.pos+count] d.pos += count return e, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sharefile/sharefile_test.go
backend/sharefile/sharefile_test.go
// Test filesystem interface package sharefile import ( "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestSharefile:", NilObject: (*Object)(nil), ChunkedUpload: fstests.ChunkedUploadConfig{ MinChunkSize: minChunkSize, CeilChunkSize: fstests.NextPowerOfTwo, }, }) } func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadChunkSize(cs) } func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadCutoff(cs) } var ( _ fstests.SetUploadChunkSizer = (*Fs)(nil) _ fstests.SetUploadCutoffer = (*Fs)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sharefile/upload.go
backend/sharefile/upload.go
// Upload large files for sharefile // // Docs - https://api.sharefile.com/rest/docs/resource.aspx?name=Items#Upload_File package sharefile import ( "bytes" "context" "crypto/md5" "encoding/hex" "encoding/json" "fmt" "io" "strings" "sync" "github.com/rclone/rclone/backend/sharefile/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/rest" ) // largeUpload is used to control the upload of large files which need chunking type largeUpload struct { ctx context.Context f *Fs // parent Fs o *Object // object being uploaded in io.Reader // read the data from here wrap accounting.WrapFn // account parts being transferred size int64 // total size parts int64 // calculated number of parts, if known info *api.UploadSpecification // where to post chunks, etc. threads int // number of threads to use in upload streamed bool // set if using streamed upload } // newLargeUpload starts an upload of object o from in with metadata in src func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, info *api.UploadSpecification) (up *largeUpload, err error) { size := src.Size() parts := int64(-1) if size >= 0 { parts = size / int64(o.fs.opt.ChunkSize) if size%int64(o.fs.opt.ChunkSize) != 0 { parts++ } } var streamed bool switch strings.ToLower(info.Method) { case "streamed": streamed = true case "threaded": streamed = false default: return nil, fmt.Errorf("can't use method %q with newLargeUpload", info.Method) } threads := min(f.ci.Transfers, info.MaxNumberOfThreads) // unwrap the accounting from the input, we use wrap to put it // back on after the buffering in, wrap := accounting.UnWrap(in) up = &largeUpload{ ctx: ctx, f: f, o: o, in: in, wrap: wrap, size: size, threads: threads, info: info, parts: parts, streamed: streamed, } return up, nil } // parse the api.UploadFinishResponse in respBody func (up *largeUpload) parseUploadFinishResponse(respBody []byte) 
(err error) { var finish api.UploadFinishResponse err = json.Unmarshal(respBody, &finish) if err != nil { // Sometimes the unmarshal fails in which case return the body return fmt.Errorf("upload: bad response: %q", bytes.TrimSpace(respBody)) } return up.o.checkUploadResponse(up.ctx, &finish) } // Transfer a chunk func (up *largeUpload) transferChunk(ctx context.Context, part int64, offset int64, body []byte, fileHash string) error { md5sumRaw := md5.Sum(body) md5sum := hex.EncodeToString(md5sumRaw[:]) size := int64(len(body)) // Add some more parameters to the ChunkURI u := up.info.ChunkURI u += fmt.Sprintf("&index=%d&byteOffset=%d&hash=%s&fmt=json", part, offset, md5sum, ) if fileHash != "" { u += fmt.Sprintf("&finish=true&fileSize=%d&fileHash=%s", offset+int64(len(body)), fileHash, ) } opts := rest.Opts{ Method: "POST", RootURL: u, ContentLength: &size, } var respBody []byte err := up.f.pacer.Call(func() (bool, error) { fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body)) opts.Body = up.wrap(bytes.NewReader(body)) resp, err := up.f.srv.Call(ctx, &opts) if err != nil { fs.Debugf(up.o, "Error sending chunk %d: %v", part, err) } else { respBody, err = rest.ReadBody(resp) } // retry all errors now that the multipart upload has started return err != nil, err }) if err != nil { fs.Debugf(up.o, "Error sending chunk %d: %v", part, err) return err } // If last chunk and using "streamed" transfer, get the response back now if up.streamed && fileHash != "" { return up.parseUploadFinishResponse(respBody) } fs.Debugf(up.o, "Done sending chunk %d", part) return nil } // finish closes off the large upload and reads the metadata func (up *largeUpload) finish(ctx context.Context) error { fs.Debugf(up.o, "Finishing large file upload") // For a streamed transfer we will already have read the info if up.streamed { return nil } opts := rest.Opts{ Method: "POST", RootURL: up.info.FinishURI, } var respBody []byte err := up.f.pacer.Call(func() (bool, error) { resp, err := 
up.f.srv.Call(ctx, &opts) if err != nil { return shouldRetry(ctx, resp, err) } respBody, err = rest.ReadBody(resp) // retry all errors now that the multipart upload has started return err != nil, err }) if err != nil { return err } return up.parseUploadFinishResponse(respBody) } // Upload uploads the chunks from the input func (up *largeUpload) Upload(ctx context.Context) error { if up.parts >= 0 { fs.Debugf(up.o, "Starting upload of large file in %d chunks", up.parts) } else { fs.Debugf(up.o, "Starting streaming upload of large file") } var ( offset int64 errs = make(chan error, 1) wg sync.WaitGroup err error wholeFileHash = md5.New() eof = false ) outer: for part := int64(0); !eof; part++ { // Check any errors select { case err = <-errs: break outer default: } // Get a block of memory buf := up.f.getUploadBlock() // Read the chunk var n int n, err = readers.ReadFill(up.in, buf) if err == io.EOF { eof = true buf = buf[:n] err = nil } else if err != nil { up.f.putUploadBlock(buf) break outer } // Hash it _, _ = io.Copy(wholeFileHash, bytes.NewBuffer(buf)) // Get file hash if was last chunk fileHash := "" if eof { fileHash = hex.EncodeToString(wholeFileHash.Sum(nil)) } // Transfer the chunk wg.Add(1) transferChunk := func(part, offset int64, buf []byte, fileHash string) { defer wg.Done() defer up.f.putUploadBlock(buf) err := up.transferChunk(ctx, part, offset, buf, fileHash) if err != nil { select { case errs <- err: default: } } } if up.streamed { transferChunk(part, offset, buf, fileHash) // streamed } else { go transferChunk(part, offset, buf, fileHash) // multithreaded } offset += int64(n) } wg.Wait() // check size read is correct if eof && err == nil && up.size >= 0 && up.size != offset { err = fmt.Errorf("upload: short read: read %d bytes expected %d", up.size, offset) } // read any errors if err == nil { select { case err = <-errs: default: } } // finish regardless of errors finishErr := up.finish(ctx) if err == nil { err = finishErr } return err }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sharefile/api/types.go
backend/sharefile/api/types.go
// Package api contains definitions for using the premiumize.me API package api import ( "errors" "fmt" "time" ) // ListRequestSelect should be used in $select for Items/Children const ListRequestSelect = "odata.count,FileCount,Name,FileName,CreationDate,IsHidden,FileSizeBytes,odata.type,Id,Hash,ClientModifiedDate" // ListResponse is returned from the Items/Children call type ListResponse struct { OdataCount int `json:"odata.count"` Value []Item `json:"value"` } // Item Types const ( ItemTypeFolder = "ShareFile.Api.Models.Folder" ItemTypeFile = "ShareFile.Api.Models.File" ) // Item refers to a file or folder type Item struct { FileCount int32 `json:"FileCount,omitempty"` Name string `json:"Name,omitempty"` FileName string `json:"FileName,omitempty"` CreatedAt time.Time `json:"CreationDate,omitempty"` ModifiedAt time.Time `json:"ClientModifiedDate,omitempty"` IsHidden bool `json:"IsHidden,omitempty"` Size int64 `json:"FileSizeBytes,omitempty"` Type string `json:"odata.type,omitempty"` ID string `json:"Id,omitempty"` Hash string `json:"Hash,omitempty"` } // Error is an odata error return type Error struct { Code string `json:"code"` Message struct { Lang string `json:"lang"` Value string `json:"value"` } `json:"message"` Reason string `json:"reason"` } // Satisfy error interface func (e *Error) Error() string { return fmt.Sprintf("%s: %s: %s", e.Message.Value, e.Code, e.Reason) } // Check Error satisfies error interface var _ error = &Error{} // DownloadSpecification is the response to /Items/Download type DownloadSpecification struct { Token string `json:"DownloadToken"` URL string `json:"DownloadUrl"` Metadata string `json:"odata.metadata"` Type string `json:"odata.type"` } // UploadRequest is set to /Items/Upload2 to receive an UploadSpecification type UploadRequest struct { Method string `json:"method"` // Upload method: one of: standard, streamed or threaded Raw bool `json:"raw"` // Raw post if true or MIME upload if false Filename string `json:"fileName"` // 
Uploaded item file name. Filesize *int64 `json:"fileSize,omitempty"` // Uploaded item file size. Overwrite bool `json:"overwrite"` // Indicates whether items with the same name will be overwritten or not. CreatedDate time.Time `json:"ClientCreatedDate"` // Created Date of this Item. ModifiedDate time.Time `json:"ClientModifiedDate"` // Modified Date of this Item. BatchID string `json:"batchId,omitempty"` // Indicates part of a batch. Batched uploads do not send notification until the whole batch is completed. BatchLast *bool `json:"batchLast,omitempty"` // Indicates is the last in a batch. Upload notifications for the whole batch are sent after this upload. CanResume *bool `json:"canResume,omitempty"` // Indicates uploader supports resume. StartOver *bool `json:"startOver,omitempty"` // Indicates uploader wants to restart the file - i.e., ignore previous failed upload attempts. Tool string `json:"tool,omitempty"` // Identifies the uploader tool. Title string `json:"title,omitempty"` // Item Title Details string `json:"details,omitempty"` // Item description IsSend *bool `json:"isSend,omitempty"` // Indicates that this upload is part of a Send operation SendGUID string `json:"sendGuid,omitempty"` // Used if IsSend is true. Specifies which Send operation this upload is part of. OpID string `json:"opid,omitempty"` // Used for Asynchronous copy/move operations - called by Zones to push files to other Zones ThreadCount *int `json:"threadCount,omitempty"` // Specifies the number of threads the threaded uploader will use. Only used is method is threaded, ignored otherwise Notify *bool `json:"notify,omitempty"` // Indicates whether users will be notified of this upload - based on folder preferences ExpirationDays *int `json:"expirationDays,omitempty"` // File expiration days BaseFileID string `json:"baseFileId,omitempty"` // Used to check conflict in file during File Upload. 
} // UploadSpecification is returned from /Items/Upload type UploadSpecification struct { Method string `json:"Method"` // The Upload method that must be used for this upload PrepareURI string `json:"PrepareUri"` // If provided, clients must issue a request to this Uri before uploading any data. ChunkURI string `json:"ChunkUri"` // Specifies the URI the client must send the file data to FinishURI string `json:"FinishUri"` // If provided, specifies the final call the client must perform to finish the upload process ProgressData string `json:"ProgressData"` // Allows the client to check progress of standard uploads IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supported. ResumeIndex int64 `json:"ResumeIndex"` // Specifies the initial index for resuming, if IsResume is true. ResumeOffset int64 `json:"ResumeOffset"` // Specifies the initial file offset by bytes, if IsResume is true ResumeFileHash string `json:"ResumeFileHash"` // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server MaxNumberOfThreads int `json:"MaxNumberOfThreads"` // Specifies the max number of chunks that can be sent simultaneously for threaded uploads } // UploadFinishResponse is returns from calling UploadSpecification.FinishURI type UploadFinishResponse struct { Error bool `json:"error"` ErrorMessage string `json:"errorMessage"` ErrorCode int `json:"errorCode"` Value []struct { UploadID string `json:"uploadid"` ParentID string `json:"parentid"` ID string `json:"id"` StreamID string `json:"streamid"` FileName string `json:"filename"` DisplayName string `json:"displayname"` Size int `json:"size"` Md5 string `json:"md5"` } `json:"value"` } // ID returns the ID of the first response if available func (finish *UploadFinishResponse) ID() (string, error) { if finish.Error { return "", fmt.Errorf("upload failed: %s (%d)", finish.ErrorMessage, finish.ErrorCode) } if len(finish.Value) == 0 { return "", errors.New("upload failed: no results 
returned") } return finish.Value[0].ID, nil } // Parent is the ID of the parent folder type Parent struct { ID string `json:"Id,omitempty"` } // Zone is where the data is stored type Zone struct { ID string `json:"Id,omitempty"` } // UpdateItemRequest is sent to PATCH /v3/Items(id) type UpdateItemRequest struct { Name string `json:"Name,omitempty"` FileName string `json:"FileName,omitempty"` Description string `json:"Description,omitempty"` ExpirationDate *time.Time `json:"ExpirationDate,omitempty"` Parent *Parent `json:"Parent,omitempty"` Zone *Zone `json:"Zone,omitempty"` ModifiedAt *time.Time `json:"ClientModifiedDate,omitempty"` }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/webdav/tus.go
backend/webdav/tus.go
package webdav

/*
Chunked upload based on the tus protocol for ownCloud Infinite Scale

See https://tus.io/protocols/resumable-upload
*/

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"path/filepath"
	"strconv"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

// updateViaTus uploads the object contents using the tus resumable
// upload protocol.
//
// It builds the tus metadata (filename, mtime, filetype) from src,
// creates the upload on the server and then transfers the content.
func (o *Object) updateViaTus(ctx context.Context, in io.Reader, contentType string, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	fn := filepath.Base(src.Remote())
	metadata := map[string]string{
		"filename": fn,
		"mtime":    strconv.FormatInt(src.ModTime(ctx).Unix(), 10),
		"filetype": contentType,
	}

	// Fingerprint is used to identify the upload when resuming. That is not yet implemented
	fingerprint := ""

	// create an upload from a file.
	upload := NewUpload(in, src.Size(), metadata, fingerprint)

	// create the uploader.
	uploader, err := o.CreateUploader(ctx, upload, options...)
	if err == nil {
		// start the uploading process.
		err = uploader.Upload(ctx, options...)
	}

	return err
}

// getTusLocationOrRetry interprets the response to the tus upload
// creation request.
//
// On 201 it returns the Location header of the newly created upload.
// 412 and 413 are mapped to ErrVersionMismatch and ErrLargeUpload
// respectively. Anything else is handed to shouldRetry to decide
// whether the call should be retried.
//
// resp may be nil when the HTTP request itself failed (transport
// error), so the status code is only inspected when a response was
// actually received - shouldRetry copes with a nil resp.
func (f *Fs) getTusLocationOrRetry(ctx context.Context, resp *http.Response, err error) (bool, string, error) {
	if resp != nil {
		switch resp.StatusCode {
		case 201:
			location := resp.Header.Get("Location")
			return false, location, nil
		case 412:
			return false, "", ErrVersionMismatch
		case 413:
			return false, "", ErrLargeUpload
		}
	}

	retry, err := f.shouldRetry(ctx, resp, err)
	return retry, "", err
}

// CreateUploader creates a new upload to the server.
func (o *Object) CreateUploader(ctx context.Context, u *Upload, options ...fs.OpenOption) (*Uploader, error) { if u == nil { return nil, ErrNilUpload } // if c.Config.Resume && len(u.Fingerprint) == 0 { // return nil, ErrFingerprintNotSet // } l := int64(0) p := o.filePath() // cut the filename off dir, _ := filepath.Split(p) if dir == "" { dir = "/" } opts := rest.Opts{ Method: "POST", Path: dir, NoResponse: true, RootURL: o.fs.endpointURL, ContentLength: &l, ExtraHeaders: o.extraHeaders(ctx, o), Options: options, } opts.ExtraHeaders["Upload-Length"] = strconv.FormatInt(u.size, 10) opts.ExtraHeaders["Upload-Metadata"] = u.EncodedMetadata() opts.ExtraHeaders["Tus-Resumable"] = "1.0.0" // opts.ExtraHeaders["mtime"] = strconv.FormatInt(src.ModTime(ctx).Unix(), 10) var tusLocation string // rclone http call err := o.fs.pacer.CallNoRetry(func() (bool, error) { var retry bool res, err := o.fs.srv.Call(ctx, &opts) retry, tusLocation, err = o.fs.getTusLocationOrRetry(ctx, res, err) return retry, err }) if err != nil { return nil, fmt.Errorf("making upload directory failed: %w", err) } uploader := NewUploader(o.fs, tusLocation, u, 0) return uploader, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/webdav/webdav_test.go
backend/webdav/webdav_test.go
// Test Webdav filesystem interface package webdav import ( "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestWebdavNextcloud:", NilObject: (*Object)(nil), ChunkedUpload: fstests.ChunkedUploadConfig{ MinChunkSize: 1 * fs.Mebi, }, }) } // TestIntegration runs integration tests against the remote func TestIntegration2(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("skipping as -remote is set") } fstests.Run(t, &fstests.Opt{ RemoteName: "TestWebdavOwncloud:", NilObject: (*Object)(nil), ChunkedUpload: fstests.ChunkedUploadConfig{ Skip: true, }, }) } // TestIntegration runs integration tests against the remote func TestIntegration3(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("skipping as -remote is set") } fstests.Run(t, &fstests.Opt{ RemoteName: "TestWebdavRclone:", NilObject: (*Object)(nil), ChunkedUpload: fstests.ChunkedUploadConfig{ Skip: true, }, }) } // TestIntegration runs integration tests against the remote func TestIntegration4(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("skipping as -remote is set") } fstests.Run(t, &fstests.Opt{ RemoteName: "TestWebdavNTLM:", NilObject: (*Object)(nil), }) } func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadChunkSize(cs) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/webdav/tus-upload.go
backend/webdav/tus-upload.go
package webdav

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"io"
	"sort"
	"strings"
)

// Metadata is a typedef for a string to string map to hold metadata
type Metadata map[string]string

// Upload is a struct containing the file status during upload
type Upload struct {
	stream io.ReadSeeker // source of the content to upload
	size   int64         // total number of bytes to send
	offset int64         // number of bytes sent so far

	Fingerprint string   // identifies the upload for resuming (not yet used - see updateViaTus)
	Metadata    Metadata // tus metadata sent when creating the upload
}

// updateProgress updates the Upload information based on offset.
func (u *Upload) updateProgress(offset int64) {
	u.offset = offset
}

// Finished returns whether this upload is finished or not.
func (u *Upload) Finished() bool {
	return u.offset >= u.size
}

// Progress returns the progress in a percentage.
//
// A zero length upload is reported as 100% (it is already finished)
// rather than dividing by zero.
func (u *Upload) Progress() int64 {
	if u.size == 0 {
		return 100
	}
	return (u.offset * 100) / u.size
}

// Offset returns the current upload offset.
func (u *Upload) Offset() int64 {
	return u.offset
}

// Size returns the size of the upload body.
func (u *Upload) Size() int64 {
	return u.size
}

// EncodedMetadata encodes the upload metadata as comma separated
// "key base64(value)" pairs.
//
// Keys are emitted in sorted order so the encoding is deterministic
// (map iteration order is random in Go).
func (u *Upload) EncodedMetadata() string {
	keys := make([]string, 0, len(u.Metadata))
	for k := range u.Metadata {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	encoded := make([]string, 0, len(keys))
	for _, k := range keys {
		encoded = append(encoded, fmt.Sprintf("%s %s", k, b64encode(u.Metadata[k])))
	}
	return strings.Join(encoded, ",")
}

// b64encode encodes s with standard base64 as required for metadata values.
func b64encode(s string) string {
	return base64.StdEncoding.EncodeToString([]byte(s))
}

// NewUpload creates a new upload from an io.Reader.
//
// If the reader is not seekable its whole content is buffered in
// memory first. If that buffering fails NewUpload returns nil -
// callers must check for this (CreateUploader maps a nil Upload to
// ErrNilUpload).
func NewUpload(reader io.Reader, size int64, metadata Metadata, fingerprint string) *Upload {
	stream, ok := reader.(io.ReadSeeker)
	if !ok {
		buf := new(bytes.Buffer)
		_, err := buf.ReadFrom(reader)
		if err != nil {
			return nil
		}
		stream = bytes.NewReader(buf.Bytes())
	}
	if metadata == nil {
		metadata = make(Metadata)
	}
	return &Upload{
		stream:      stream,
		size:        size,
		Fingerprint: fingerprint,
		Metadata:    metadata,
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/webdav/webdav_internal_test.go
backend/webdav/webdav_internal_test.go
package webdav_test

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"github.com/rclone/rclone/backend/webdav"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configfile"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var (
	// remoteName is the name the remote is registered under in the test config
	remoteName = "TestWebDAV"
	// headers is a flat key, value list passed via the "headers" option and
	// asserted on every request the stub server receives
	headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)

// prepareServer makes the test server and returns a configmap for building
// the remote plus a function to tidy the server up afterwards.
//
// With each request the handler asserts that the configured extra headers
// were sent, then replies with a canned multistatus body (quota info) so an
// About call succeeds.
func prepareServer(t *testing.T) (configmap.Simple, func()) {
	// check the headers are there then send a dummy response to About
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
		assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
		assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
		_, err := fmt.Fprintf(w, `<d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns"> <d:response> <d:href>/remote.php/webdav/</d:href> <d:propstat> <d:prop> <d:quota-available-bytes>-3</d:quota-available-bytes> <d:quota-used-bytes>376461895</d:quota-used-bytes> </d:prop> <d:status>HTTP/1.1 200 OK</d:status> </d:propstat> </d:response> </d:multistatus>`)
		require.NoError(t, err)
	})

	// Make the test server
	ts := httptest.NewServer(handler)

	// Configure the remote
	configfile.Install()
	m := configmap.Simple{
		"type": "webdav",
		"url":  ts.URL,
		// add headers to test the headers option
		"headers": strings.Join(headers, ","),
	}

	// return a function to tidy up
	return m, ts.Close
}

// prepare builds an fs.Fs backed by the stub server and returns it with a
// function to tidy the server up afterwards.
func prepare(t *testing.T) (fs.Fs, func()) {
	m, tidy := prepareServer(t)

	// Instantiate the WebDAV server
	f, err := webdav.NewFs(context.Background(), remoteName, "", m)
	require.NoError(t, err)

	return f, tidy
}

// TestHeaders checks the headers option is applied to requests - any request
// exercises it since the stub asserts the headers on every call.
func TestHeaders(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()

	// send an About request since that is all the dummy server can answer
	_, err := f.Features().About(context.Background())
	require.NoError(t, err)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/webdav/webdav.go
backend/webdav/webdav.go
// Package webdav provides an interface to the Webdav // object storage system. package webdav // SetModTime might be possible // https://stackoverflow.com/questions/3579608/webdav-can-a-client-modify-the-mtime-of-a-file // ...support for a PROPSET to lastmodified (mind the missing get) which does the utime() call might be an option. // For example the ownCloud WebDAV server does it that way. import ( "bytes" "context" "crypto/tls" "encoding/xml" "errors" "fmt" "io" "net/http" "net/url" "os/exec" "path" "regexp" "strconv" "strings" "sync" "time" "github.com/Azure/go-ntlmssp" "golang.org/x/sync/singleflight" "github.com/rclone/rclone/backend/webdav/api" "github.com/rclone/rclone/backend/webdav/odrvcookie" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" ) const ( minSleep = fs.Duration(10 * time.Millisecond) maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential defaultDepth = "1" // depth for PROPFIND ) const defaultEncodingSharepointNTLM = (encoder.EncodeWin | encoder.EncodeHashPercent | // required by IIS/8.5 in contrast with onedrive which doesn't need it (encoder.Display &^ encoder.EncodeDot) | // test with IIS/8.5 shows that EncodeDot is not needed encoder.EncodeBackSlash | encoder.EncodeLeftSpace | encoder.EncodeLeftTilde | encoder.EncodeRightPeriod | encoder.EncodeRightSpace | encoder.EncodeInvalidUtf8) // Register with Fs func init() { configEncodingHelp := fmt.Sprintf( "%s\n\nDefault encoding is %s for sharepoint-ntlm or identity otherwise.", config.ConfigEncodingHelp, defaultEncodingSharepointNTLM) 
fs.Register(&fs.RegInfo{ Name: "webdav", Description: "WebDAV", NewFs: NewFs, Options: []fs.Option{{ Name: "url", Help: "URL of http host to connect to.\n\nE.g. https://example.com.", Required: true, }, { Name: "vendor", Help: "Name of the WebDAV site/service/software you are using.", Examples: []fs.OptionExample{{ Value: "fastmail", Help: "Fastmail Files", }, { Value: "nextcloud", Help: "Nextcloud", }, { Value: "owncloud", Help: "Owncloud 10 PHP based WebDAV server", }, { Value: "infinitescale", Help: "ownCloud Infinite Scale", }, { Value: "sharepoint", Help: "Sharepoint Online, authenticated by Microsoft account", }, { Value: "sharepoint-ntlm", Help: "Sharepoint with NTLM authentication, usually self-hosted or on-premises", }, { Value: "rclone", Help: "rclone WebDAV server to serve a remote over HTTP via the WebDAV protocol", }, { Value: "other", Help: "Other site/service or software", }}, }, { Name: "user", Help: "User name.\n\nIn case NTLM authentication is used, the username should be in the format 'Domain\\User'.", Sensitive: true, }, { Name: "pass", Help: "Password.", IsPassword: true, }, { Name: "bearer_token", Help: "Bearer token instead of user/pass (e.g. a Macaroon).", Sensitive: true, }, { Name: "bearer_token_command", Help: "Command to run to get a bearer token.", Advanced: true, }, { Name: config.ConfigEncoding, Help: configEncodingHelp, Advanced: true, }, { Name: "headers", Help: `Set HTTP headers for all transactions. Use this to set additional HTTP headers for all transactions The input format is comma separated list of key,value pairs. Standard [CSV encoding](https://godoc.org/encoding/csv) may be used. For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'. You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'. 
`, Default: fs.CommaSepList{}, Advanced: true, }, { Name: "pacer_min_sleep", Help: "Minimum time to sleep between API calls.", Default: minSleep, Advanced: true, }, { Name: "nextcloud_chunk_size", Help: `Nextcloud upload chunk size. We recommend configuring your NextCloud instance to increase the max chunk size to 1 GB for better upload performances. See https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/big_file_upload_configuration.html#adjust-chunk-size-on-nextcloud-side Set to 0 to disable chunked uploading. `, Advanced: true, Default: 10 * fs.Mebi, // Default NextCloud `max_chunk_size` is `10 MiB`. See https://github.com/nextcloud/server/blob/0447b53bda9fe95ea0cbed765aa332584605d652/apps/files/lib/App.php#L57 }, { Name: "owncloud_exclude_shares", Help: "Exclude ownCloud shares", Advanced: true, Default: false, }, { Name: "owncloud_exclude_mounts", Help: "Exclude ownCloud mounted storages", Advanced: true, Default: false, }, fshttp.UnixSocketConfig, { Name: "auth_redirect", Help: `Preserve authentication on redirect. If the server redirects rclone to a new domain when it is trying to read a file then normally rclone will drop the Authorization: header from the request. This is standard security practice to avoid sending your credentials to an unknown webserver. However this is desirable in some circumstances. If you are getting an error like "401 Unauthorized" when rclone is attempting to read files from the webdav server then you can try this option. 
`, Advanced: true, Default: false, }}, }) } // Options defines the configuration for this backend type Options struct { URL string `config:"url"` Vendor string `config:"vendor"` User string `config:"user"` Pass string `config:"pass"` BearerToken string `config:"bearer_token"` BearerTokenCommand fs.SpaceSepList `config:"bearer_token_command"` Enc encoder.MultiEncoder `config:"encoding"` Headers fs.CommaSepList `config:"headers"` PacerMinSleep fs.Duration `config:"pacer_min_sleep"` ChunkSize fs.SizeSuffix `config:"nextcloud_chunk_size"` ExcludeShares bool `config:"owncloud_exclude_shares"` ExcludeMounts bool `config:"owncloud_exclude_mounts"` UnixSocket string `config:"unix_socket"` AuthRedirect bool `config:"auth_redirect"` } // Fs represents a remote webdav type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options features *fs.Features // optional features endpoint *url.URL // URL of the host endpointURL string // endpoint as a string srv *rest.Client // the connection to the server pacer *fs.Pacer // pacer for API calls precision time.Duration // mod time precision canStream bool // set if can stream canTus bool // supports the TUS upload protocol useOCMtime bool // set if can use X-OC-Mtime propsetMtime bool // set if can use propset retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default) checkBeforePurge bool // enables extra check that directory to purge really exists hasOCMD5 bool // set if can use owncloud style checksums for MD5 hasOCSHA1 bool // set if can use owncloud style checksums for SHA1 hasMESHA1 bool // set if can use fastmail style checksums for SHA1 ntlmAuthMu sync.Mutex // mutex to serialize NTLM auth roundtrips chunksUploadURL string // upload URL for nextcloud chunked canChunk bool // set if nextcloud and nextcloud_chunk_size is set authSingleflight *singleflight.Group } // Object describes a webdav object // // Will definitely have 
info but maybe not meta type Object struct { fs *Fs // what this object is part of remote string // The remote path hasMetaData bool // whether info below has been set size int64 // size of the object modTime time.Time // modification time of the object sha1 string // SHA-1 of the object content if known md5 string // MD5 of the object content if known } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("webdav root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 423, // Locked 425, // Too Early 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } // If we have a bearer token command and it has expired then refresh it if len(f.opt.BearerTokenCommand) != 0 && resp != nil && resp.StatusCode == 401 { fs.Debugf(f, "Bearer token expired: %v", err) authErr := f.fetchAndSetBearerToken() if authErr != nil { err = authErr } return true, err } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // safeRoundTripper is a wrapper for http.RoundTripper that serializes // http roundtrips. NTLM authentication sequence can involve up to four // rounds of negotiations and might fail due to concurrency. 
// This wrapper allows to use ntlmssp.Negotiator safely with goroutines. type safeRoundTripper struct { fs *Fs rt http.RoundTripper } // RoundTrip guards wrapped RoundTripper by a mutex. func (srt *safeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { srt.fs.ntlmAuthMu.Lock() defer srt.fs.ntlmAuthMu.Unlock() return srt.rt.RoundTrip(req) } // itemIsDir returns true if the item is a directory // // When a client sees a resourcetype it doesn't recognize it should // assume it is a regular non-collection resource. [WebDav book by // Lisa Dusseault ch 7.5.8 p170] func itemIsDir(item *api.Response) bool { if t := item.Props.Type; t != nil { if t.Space == "DAV:" && t.Local == "collection" { return true } fs.Debugf(nil, "Unknown resource type %q/%q on %q", t.Space, t.Local, item.Props.Name) } // the iscollection prop is a Microsoft extension, but if present it is a reliable indicator // if the above check failed - see #2716. This can be an integer or a boolean - see #2964 if t := item.Props.IsCollection; t != nil { switch x := strings.ToLower(*t); x { case "0", "false": return false case "1", "true": return true default: fs.Debugf(nil, "Unknown value %q for IsCollection", x) } } return false } // readMetaDataForPath reads the metadata from the path func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string) (info *api.Prop, err error) { // FIXME how do we read back additional properties? 
opts := rest.Opts{ Method: "PROPFIND", Path: f.filePath(path), ExtraHeaders: map[string]string{ "Depth": depth, }, NoRedirect: true, } if f.hasOCMD5 || f.hasOCSHA1 { opts.Body = bytes.NewBuffer(owncloudProps) } var result api.Multistatus var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallXML(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if apiErr, ok := err.(*api.Error); ok { // does not exist switch apiErr.StatusCode { case http.StatusNotFound: if f.retryWithZeroDepth && depth != "0" { return f.readMetaDataForPath(ctx, path, "0") } return nil, fs.ErrorObjectNotFound case http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther: // Some sort of redirect - go doesn't deal with these properly (it resets // the method to GET). However we can assume that if it was redirected the // object was not found. return nil, fs.ErrorObjectNotFound } } if err != nil { return nil, fmt.Errorf("read metadata failed: %w", err) } if len(result.Responses) < 1 { return nil, fs.ErrorObjectNotFound } item := result.Responses[0] // status code 425 is accepted here as well if !(item.Props.StatusOK() || item.Props.Code() == 425) { return nil, fs.ErrorObjectNotFound } if itemIsDir(&item) { return nil, fs.ErrorIsDir } return &item.Props, nil } // errorHandler parses a non 2xx error response into an error func errorHandler(resp *http.Response) error { body, err := rest.ReadBody(resp) if err != nil { return fmt.Errorf("error when trying to read error from body: %w", err) } // Decode error response errResponse := new(api.Error) err = xml.Unmarshal(body, &errResponse) if err != nil { // set the Message to be the body if can't parse the XML errResponse.Message = strings.TrimSpace(string(body)) } errResponse.Status = resp.Status errResponse.StatusCode = resp.StatusCode return errResponse } // addSlash makes sure s is terminated with a / if non empty func addSlash(s string) string { if s != "" && !strings.HasSuffix(s, "/") { s += "/" 
} return s } // filePath returns a file path (f.root, file) func (f *Fs) filePath(file string) string { subPath := path.Join(f.root, file) if f.opt.Enc != encoder.EncodeZero { subPath = f.opt.Enc.FromStandardPath(subPath) } return rest.URLPathEscape(subPath) } // dirPath returns a directory path (f.root, dir) func (f *Fs) dirPath(dir string) string { return addSlash(f.filePath(dir)) } // filePath returns a file path (f.root, remote) func (o *Object) filePath() string { return o.fs.filePath(o.remote) } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } if len(opt.Headers)%2 != 0 { return nil, errors.New("odd number of headers supplied") } fs.Debugf(nil, "found headers: %v", opt.Headers) rootIsDir := strings.HasSuffix(root, "/") root = strings.Trim(root, "/") if !strings.HasSuffix(opt.URL, "/") { opt.URL += "/" } if opt.Pass != "" { var err error opt.Pass, err = obscure.Reveal(opt.Pass) if err != nil { return nil, fmt.Errorf("couldn't decrypt password: %w", err) } } if opt.Vendor == "" { opt.Vendor = "other" } root = strings.Trim(root, "/") if opt.Enc == encoder.EncodeZero && opt.Vendor == "sharepoint-ntlm" { opt.Enc = defaultEncodingSharepointNTLM } // Parse the endpoint u, err := url.Parse(opt.URL) if err != nil { return nil, err } f := &Fs{ name: name, root: root, opt: *opt, endpoint: u, endpointURL: u.String(), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), precision: fs.ModTimeNotSupported, authSingleflight: new(singleflight.Group), } var client *http.Client if opt.UnixSocket == "" { client = fshttp.NewClient(ctx) } else { client = fshttp.NewClientWithUnixSocket(ctx, opt.UnixSocket) } if opt.Vendor == "sharepoint-ntlm" { // Disable transparent HTTP/2 support 
as per https://golang.org/pkg/net/http/ , // otherwise any connection to IIS 10.0 fails with 'stream error: stream ID 39; HTTP_1_1_REQUIRED' // https://docs.microsoft.com/en-us/iis/get-started/whats-new-in-iis-10/http2-on-iis says: // 'Windows authentication (NTLM/Kerberos/Negotiate) is not supported with HTTP/2.' t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) { t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{} }) // Add NTLM layer client.Transport = &safeRoundTripper{ fs: f, rt: ntlmssp.Negotiator{RoundTripper: t}, } } f.srv = rest.NewClient(client).SetRoot(u.String()) f.features = (&fs.Features{ CanHaveEmptyDirectories: true, }).Fill(ctx, f) if opt.User != "" || opt.Pass != "" { f.srv.SetUserPass(opt.User, opt.Pass) } else if opt.BearerToken != "" { f.setBearerToken(opt.BearerToken) } else if len(f.opt.BearerTokenCommand) != 0 { err = f.fetchAndSetBearerToken() if err != nil { return nil, err } } if opt.Headers != nil { f.addHeaders(opt.Headers) } f.srv.SetErrorHandler(errorHandler) err = f.setQuirks(ctx, opt.Vendor) if err != nil { return nil, err } if !f.findHeader(opt.Headers, "Referer") { f.srv.SetHeader("Referer", u.String()) } if root != "" && !rootIsDir { // Check to see if the root actually an existing file remote := path.Base(root) f.root = path.Dir(root) if f.root == "." 
{ f.root = "" } _, err := f.NewObject(ctx, remote) if err != nil { if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorIsDir) { // File doesn't exist so return old f f.root = root return f, nil } return nil, err } // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // sets the BearerToken up func (f *Fs) setBearerToken(token string) { f.opt.BearerToken = token f.srv.SetHeader("Authorization", "Bearer "+token) } // fetch the bearer token using the command func (f *Fs) fetchBearerToken(cmd fs.SpaceSepList) (string, error) { var ( stdout bytes.Buffer stderr bytes.Buffer c = exec.Command(cmd[0], cmd[1:]...) ) c.Stdout = &stdout c.Stderr = &stderr var ( err = c.Run() stdoutString = strings.TrimSpace(stdout.String()) stderrString = strings.TrimSpace(stderr.String()) ) if err != nil { if stderrString == "" { stderrString = stdoutString } return "", fmt.Errorf("failed to get bearer token using %q: %s: %w", f.opt.BearerTokenCommand, stderrString, err) } return stdoutString, nil } // Adds the configured headers to the request if any func (f *Fs) addHeaders(headers fs.CommaSepList) { for i := 0; i < len(headers); i += 2 { key := f.opt.Headers[i] value := f.opt.Headers[i+1] f.srv.SetHeader(key, value) } } // Returns true if the header was configured func (f *Fs) findHeader(headers fs.CommaSepList, find string) bool { for i := 0; i < len(headers); i += 2 { key := f.opt.Headers[i] if strings.EqualFold(key, find) { return true } } return false } // fetch the bearer token and set it if successful func (f *Fs) fetchAndSetBearerToken() error { _, err, _ := f.authSingleflight.Do("bearerToken", func() (interface{}, error) { if len(f.opt.BearerTokenCommand) == 0 { return nil, nil } token, err := f.fetchBearerToken(f.opt.BearerTokenCommand) if err != nil { return nil, err } f.setBearerToken(token) return nil, nil }) return err } // The WebDAV url can optionally be suffixed with a path. 
// This suffix needs to be ignored for determining the temporary upload directory of chunks.
var nextCloudURLRegex = regexp.MustCompile(`^(.*)/dav/files/([^/]+)`)

// setQuirks adjusts the Fs for the vendor passed in
//
// It toggles the capability flags the rest of the backend consults
// (streaming, modtime handling, checksum support, chunked/tus uploads)
// and performs any vendor specific setup such as sharepoint cookie
// authentication. An unknown vendor is logged but not an error.
func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
	switch vendor {
	case "fastmail":
		f.canStream = true
		f.precision = time.Second
		f.useOCMtime = true
		f.hasMESHA1 = true
	case "owncloud":
		f.canStream = true
		f.precision = time.Second
		f.useOCMtime = true
		f.propsetMtime = true
		f.hasOCMD5 = true
		f.hasOCSHA1 = true
	case "infinitescale":
		// Infinite Scale uses tus uploads rather than nextcloud-style chunking
		f.precision = time.Second
		f.useOCMtime = true
		f.propsetMtime = true
		f.hasOCMD5 = false
		f.hasOCSHA1 = true
		f.canChunk = false
		f.canTus = true
		f.opt.ChunkSize = 10 * fs.Mebi
	case "nextcloud":
		f.precision = time.Second
		f.useOCMtime = true
		f.propsetMtime = true
		f.hasOCSHA1 = true
		f.canChunk = true
		if f.opt.ChunkSize == 0 {
			fs.Logf(nil, "Chunked uploads are disabled because nextcloud_chunk_size is set to 0")
		} else {
			// Derive the temporary chunk upload directory from the endpoint URL
			chunksUploadURL, err := f.getChunksUploadURL()
			if err != nil {
				return err
			}

			f.chunksUploadURL = chunksUploadURL
			fs.Debugf(nil, "Chunks temporary upload directory: %s", f.chunksUploadURL)
		}
	case "sharepoint":
		// To mount sharepoint, two Cookies are required
		// They have to be set instead of BasicAuth
		f.srv.RemoveHeader("Authorization") // We don't need this Header if using cookies
		spCk := odrvcookie.New(f.opt.User, f.opt.Pass, f.endpointURL)
		spCookies, err := spCk.Cookies(ctx)
		if err != nil {
			return err
		}

		// Renew the cookies in the background every 12 hours for long-running mounts
		odrvcookie.NewRenew(12*time.Hour, func() {
			spCookies, err := spCk.Cookies(ctx)
			if err != nil {
				fs.Errorf(nil, "could not renew cookies: %s", err.Error())
				return
			}
			f.srv.SetCookie(&spCookies.FedAuth, &spCookies.RtFa)
			fs.Debugf(spCookies, "successfully renewed sharepoint cookies")
		})

		f.srv.SetCookie(&spCookies.FedAuth, &spCookies.RtFa)

		// sharepoint, unlike the other vendors, only lists files if the depth header is set to 0
		// however, rclone defaults to 1 since it provides recursive directory listing
		// to determine if we may have found a file, the request has to be resent
		// with the depth set to 0
		f.retryWithZeroDepth = true
	case "sharepoint-ntlm":
		// Sharepoint with NTLM authentication
		// See comment above
		f.retryWithZeroDepth = true

		// Sharepoint 2016 returns status 204 to the purge request
		// even if the directory to purge does not really exist
		// so we must perform an extra check to detect this
		// condition and return a proper error code.
		f.checkBeforePurge = true
	case "rclone":
		f.canStream = true
		f.precision = time.Second
		f.useOCMtime = true
	case "other":
		// No quirks for a generic webdav server
	default:
		fs.Debugf(f, "Unknown vendor %q", vendor)
	}

	// Remove PutStream from optional features
	if !f.canStream {
		f.features.PutStream = nil
	}

	return nil
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
//
// If info is non-nil it is used to set the metadata, otherwise the
// metadata is read from the remote (which may do a network round trip).
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Prop) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	var err error
	if info != nil {
		// Set info
		err = o.setMetaData(info)
	} else {
		err = o.readMetaData(ctx) // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// Read the normal props, plus the checksums
//
// <oc:checksums><oc:checksum>SHA1:f572d396fae9206628714fb2ce00f72e94f2258f MD5:b1946ac92492d2347c6235b4d2611184 ADLER32:084b021f</oc:checksum></oc:checksums>
var owncloudProps = []byte(`<?xml version="1.0"?>
<d:propfind xmlns:d="DAV:" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns">
 <d:prop>
  <d:displayname />
  <d:getlastmodified />
  <d:getcontentlength />
  <d:resourcetype />
  <d:getcontenttype />
  <oc:checksums />
  <oc:permissions />
 </d:prop>
</d:propfind>
`)

// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(string, bool, *api.Prop) bool

// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// depth is the WebDAV Depth header value ("0" or "1"); on a 404 with
// retryWithZeroDepth set (sharepoint quirk) the listing is retried once
// with depth "0" to distinguish a file from a missing directory.
func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, filesOnly bool, depth string, fn listAllFn) (found bool, err error) {
	opts := rest.Opts{
		Method: "PROPFIND",
		Path:   f.dirPath(dir), // FIXME Should not start with /
		ExtraHeaders: map[string]string{
			"Depth": depth,
		},
	}
	// Ask for checksums too if the server advertises owncloud checksum support
	if f.hasOCMD5 || f.hasOCSHA1 {
		opts.Body = bytes.NewBuffer(owncloudProps)
	}
	var result api.Multistatus
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		if apiErr, ok := err.(*api.Error); ok {
			// does not exist
			if apiErr.StatusCode == http.StatusNotFound {
				if f.retryWithZeroDepth && depth != "0" {
					return f.listAll(ctx, dir, directoriesOnly, filesOnly, "0", fn)
				}
				return found, fs.ErrorDirNotFound
			}
		}
		return found, fmt.Errorf("couldn't list files: %w", err)
	}
	// fmt.Printf("result = %#v", &result)
	baseURL, err := rest.URLJoin(f.endpoint, opts.Path)
	if err != nil {
		return false, fmt.Errorf("couldn't join URL: %w", err)
	}
	for i := range result.Responses {
		item := &result.Responses[i]
		isDir := itemIsDir(item)

		// Find name
		u, err := rest.URLJoin(baseURL, item.Href)
		if err != nil {
			fs.Errorf(nil, "URL Join failed for %q and %q: %v", baseURL, item.Href, err)
			continue
		}
		// Make sure directories end with a /
		if isDir {
			u.Path = addSlash(u.Path)
		}
		// Responses outside the requested directory are ignored
		if !strings.HasPrefix(u.Path, baseURL.Path) {
			fs.Debugf(nil, "Item with unknown path received: %q, %q", u.Path, baseURL.Path)
			continue
		}
		subPath := u.Path[len(baseURL.Path):]
		subPath = strings.TrimPrefix(subPath, "/") // ignore leading / here for davrods
		if f.opt.Enc != encoder.EncodeZero {
			subPath = f.opt.Enc.ToStandardPath(subPath)
		}
		remote := path.Join(dir, subPath)
		remote = strings.TrimSuffix(remote, "/")

		// the listing contains info about itself which we ignore
		if remote == dir {
			continue
		}

		// Check OK
		if !item.Props.StatusOK() {
			fs.Debugf(remote, "Ignoring item with bad status %q", item.Props.Status)
			continue
		}

		if isDir {
			if filesOnly {
				continue
			}
		} else {
			if directoriesOnly {
				continue
			}
		}
		if f.opt.ExcludeShares {
			// https://owncloud.dev/apis/http/webdav/#supported-webdav-properties
			// "S" permission marks a shared item
			if strings.Contains(item.Props.Permissions, "S") {
				continue
			}
		}
		if f.opt.ExcludeMounts {
			// https://owncloud.dev/apis/http/webdav/#supported-webdav-properties
			// "M" permission marks a mounted item
			if strings.Contains(item.Props.Permissions, "M") {
				continue
			}
		}
		// item.Name = restoreReservedChars(item.Name)
		if fn(remote, isDir, &item.Props) {
			found = true
			break
		}
	}
	return
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { return list.WithListP(ctx, dir, f) } // ListP lists the objects and directories of the Fs starting // from dir non recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error { list := list.NewHelper(callback) var iErr error _, err := f.listAll(ctx, dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool { if isDir { d := fs.NewDir(remote, time.Time(info.Modified)) // .SetID(info.ID) // FIXME more info from dir? can set size, items? err := list.Add(d) if err != nil { iErr = err return true } } else { o, err := f.newObjectWithInfo(ctx, remote, info) if err != nil { iErr = err return true } err = list.Add(o) if err != nil { iErr = err return true } } return false }) if err != nil { return err } if iErr != nil { return iErr } return list.Flush() } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Used to create new objects func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object) { // Temporary Object under construction o = &Object{ fs: f, remote: remote, size: size, modTime: modTime, } return o } // Put the object // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size()) return o, o.Update(ctx, in, src, options...) 
} // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) } // mkParentDir makes the parent of the native path dirPath if // necessary and any directories above that func (f *Fs) mkParentDir(ctx context.Context, dirPath string) (err error) { // defer log.Trace(dirPath, "")("err=%v", &err) // chop off trailing / if it exists parent := path.Dir(strings.TrimSuffix(dirPath, "/")) if parent == "." { parent = "" } return f.mkdir(ctx, parent) } // _dirExists - list dirPath to see if it exists // // dirPath should be a native path ending in a / func (f *Fs) _dirExists(ctx context.Context, dirPath string) (exists bool) { opts := rest.Opts{ Method: "PROPFIND", Path: dirPath, ExtraHeaders: map[string]string{ "Depth": "0", }, } var result api.Multistatus var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallXML(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) return err == nil } // low level mkdir, only makes the directory, doesn't attempt to create parents func (f *Fs) _mkdir(ctx context.Context, dirPath string) error { // We assume the root is already created if dirPath == "" { return nil } // Collections must end with / if !strings.HasSuffix(dirPath, "/") { dirPath += "/" } opts := rest.Opts{ Method: "MKCOL", Path: dirPath, NoResponse: true, } err := f.pacer.Call(func() (bool, error) { resp, err := f.srv.Call(ctx, &opts) return f.shouldRetry(ctx, resp, err) }) if apiErr, ok := err.(*api.Error); ok { // Check if it already exists. The response code for this isn't // defined in the RFC so the implementations vary wildly. 
// // owncloud returns 423/StatusLocked if the create is already in progress if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable || apiErr.StatusCode == http.StatusLocked { return nil } // 4shared returns a 409/StatusConflict here which clashes // horribly with the intermediate paths don't exist meaning. So // check to see if actually exists. This will correct other // error codes too. if f._dirExists(ctx, dirPath) { return nil } } return err } // mkdir makes the directory and parents using native paths func (f *Fs) mkdir(ctx context.Context, dirPath string) (err error) { // defer log.Trace(dirPath, "")("err=%v", &err) err = f._mkdir(ctx, dirPath) if apiErr, ok := err.(*api.Error); ok { // parent does not exist so create it first then try again if apiErr.StatusCode == http.StatusConflict { err = f.mkParentDir(ctx, dirPath) if err == nil { err = f._mkdir(ctx, dirPath) } } } return err } // Mkdir creates the directory if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { dirPath := f.dirPath(dir) return f.mkdir(ctx, dirPath) } // dirNotEmpty returns true if the directory exists and is not Empty // // if the directory does not exist then err will be ErrorDirNotFound func (f *Fs) dirNotEmpty(ctx context.Context, dir string) (found bool, err error) {
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/webdav/tus-uploader.go
backend/webdav/tus-uploader.go
package webdav

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

// Uploader holds all information about a currently running upload
type Uploader struct {
	fs                  *Fs
	url                 string
	upload              *Upload
	offset              int64
	aborted             bool
	uploadSubs          []chan Upload
	notifyChan          chan bool
	overridePatchMethod bool
}

// NotifyUploadProgress subscribes to progress updates.
func (u *Uploader) NotifyUploadProgress(c chan Upload) {
	u.uploadSubs = append(u.uploadSubs, c)
}

// shouldRetryChunk inspects the response to a chunk upload and decides
// whether the pacer should retry.
//
// On 204 (chunk accepted) the server's new offset is parsed from the
// Upload-Offset header into *newOff. The tus protocol statuses 409/412/413
// map to the corresponding sentinel errors; anything else is delegated to
// the generic shouldRetry.
func (f *Fs) shouldRetryChunk(ctx context.Context, resp *http.Response, err error, newOff *int64) (bool, error) {
	if resp == nil {
		return true, err
	}

	switch resp.StatusCode {
	case 204:
		// Chunk accepted - the server reports the new offset in Upload-Offset.
		header := resp.Header.Get("Upload-Offset")
		off, parseErr := strconv.ParseInt(header, 10, 64)
		if parseErr != nil {
			// Previously a parse failure fell through to returning the outer
			// err, which on a 204 is usually nil - silently signalling success
			// with a zero offset. Surface the parse error instead.
			return false, fmt.Errorf("tus: invalid Upload-Offset header %q: %w", header, parseErr)
		}
		*newOff = off
		return false, nil
	case 409:
		return false, ErrOffsetMismatch
	case 412:
		return false, ErrVersionMismatch
	case 413:
		return false, ErrLargeUpload
	}

	return f.shouldRetry(ctx, resp, err)
}

// uploadChunk sends size bytes from body at the given offset to the tus
// upload URL and returns the new offset reported by the server.
func (u *Uploader) uploadChunk(ctx context.Context, body io.Reader, size int64, offset int64, options ...fs.OpenOption) (int64, error) {
	var method string

	if !u.overridePatchMethod {
		method = "PATCH"
	} else {
		// Some proxies block PATCH - tunnel it through POST.
		method = "POST"
	}

	extraHeaders := map[string]string{} // FIXME: Use extraHeaders(ctx, src) from Object maybe?
	extraHeaders["Upload-Offset"] = strconv.FormatInt(offset, 10)
	extraHeaders["Tus-Resumable"] = "1.0.0"
	extraHeaders["filetype"] = u.upload.Metadata["filetype"]
	if u.overridePatchMethod {
		extraHeaders["X-HTTP-Method-Override"] = "PATCH"
	}

	// Renamed from "url" to avoid shadowing the net/url package.
	endpoint, err := url.Parse(u.url)
	if err != nil {
		// Wrap the underlying error - it was previously discarded.
		return 0, fmt.Errorf("upload Chunk failed, could not parse url: %w", err)
	}

	// FIXME: Use GetBody func as in chunking.go
	opts := rest.Opts{
		Method:        method,
		Path:          endpoint.Path,
		NoResponse:    true,
		RootURL:       fmt.Sprintf("%s://%s", endpoint.Scheme, endpoint.Host),
		ContentLength: &size,
		Body:          body,
		ContentType:   "application/offset+octet-stream",
		ExtraHeaders:  extraHeaders,
		Options:       options,
	}

	var newOffset int64
	err = u.fs.pacer.CallNoRetry(func() (bool, error) {
		res, err := u.fs.srv.Call(ctx, &opts)
		return u.fs.shouldRetryChunk(ctx, res, err, &newOffset)
	})
	if err != nil {
		return 0, fmt.Errorf("uploadChunk failed: %w", err)
		// FIXME What do we do here? Remove the entire upload?
		// See https://github.com/tus/tusd/issues/176
	}

	return newOffset, nil
}

// Upload uploads the entire body to the server, chunk by chunk, until
// the upload completes, fails or is aborted.
func (u *Uploader) Upload(ctx context.Context, options ...fs.OpenOption) error {
	cnt := 1

	fs.Debug(u.fs, "Uploaded starts")
	for u.offset < u.upload.size && !u.aborted {
		err := u.UploadChunk(ctx, cnt, options...)
		cnt++
		if err != nil {
			return err
		}
	}
	fs.Debug(u.fs, "-- Uploaded finished")

	return nil
}

// UploadChunk uploads a single chunk.
//
// It seeks the upload stream to the current offset, reads up to ChunkSize
// bytes, sends them and then advances the offset to the value reported by
// the server, notifying any progress subscribers.
func (u *Uploader) UploadChunk(ctx context.Context, cnt int, options ...fs.OpenOption) error {
	chunkSize := u.fs.opt.ChunkSize
	data := make([]byte, chunkSize)

	_, err := u.upload.stream.Seek(u.offset, 0)
	if err != nil {
		fs.Errorf(u.fs, "Chunk %d: Error seek in stream failed: %v", cnt, err)
		return err
	}

	// Read may return fewer bytes than chunkSize; the actual count is what
	// gets sent, and the server's Upload-Offset keeps us consistent.
	size, err := u.upload.stream.Read(data)
	if err != nil {
		fs.Errorf(u.fs, "Chunk %d: Error: Can not read from data stream: %v", cnt, err)
		return err
	}

	body := bytes.NewBuffer(data[:size])
	newOffset, err := u.uploadChunk(ctx, body, int64(size), u.offset, options...)
	if err == nil {
		fs.Debugf(u.fs, "Uploaded chunk no %d ok, range %d -> %d", cnt, u.offset, newOffset)
	} else {
		fs.Errorf(u.fs, "Uploaded chunk no %d failed: %v", cnt, err)
		return err
	}

	u.offset = newOffset
	u.upload.updateProgress(u.offset)
	u.notifyChan <- true

	return nil
}

// Waits for a signal to broadcast to all subscribers
func (u *Uploader) broadcastProgress() {
	for range u.notifyChan {
		for _, c := range u.uploadSubs {
			c <- *u.upload
		}
	}
}

// NewUploader creates a new Uploader and starts its progress broadcaster
// goroutine (which runs until notifyChan is closed).
func NewUploader(f *Fs, url string, upload *Upload, offset int64) *Uploader {
	notifyChan := make(chan bool)

	uploader := &Uploader{
		fs:                  f,
		url:                 url,
		upload:              upload,
		offset:              offset,
		aborted:             false,
		uploadSubs:          nil,
		notifyChan:          notifyChan,
		overridePatchMethod: false,
	}

	go uploader.broadcastProgress()

	return uploader
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/webdav/tus-errors.go
backend/webdav/tus-errors.go
package webdav import ( "errors" "fmt" ) var ( // ErrChunkSize is returned when the chunk size is zero ErrChunkSize = errors.New("tus chunk size must be greater than zero") // ErrNilLogger is returned when the logger is nil ErrNilLogger = errors.New("tus logger can't be nil") // ErrNilStore is returned when the store is nil ErrNilStore = errors.New("tus store can't be nil if resume is enable") // ErrNilUpload is returned when the upload is nil ErrNilUpload = errors.New("tus upload can't be nil") // ErrLargeUpload is returned when the upload body is to large ErrLargeUpload = errors.New("tus upload body is to large") // ErrVersionMismatch is returned when the tus protocol version is mismatching ErrVersionMismatch = errors.New("tus protocol version mismatch") // ErrOffsetMismatch is returned when the tus upload offset is mismatching ErrOffsetMismatch = errors.New("tus upload offset mismatch") // ErrUploadNotFound is returned when the tus upload is not found ErrUploadNotFound = errors.New("tus upload not found") // ErrResumeNotEnabled is returned when the tus resuming is not enabled ErrResumeNotEnabled = errors.New("tus resuming not enabled") // ErrFingerprintNotSet is returned when the tus fingerprint is not set ErrFingerprintNotSet = errors.New("tus fingerprint not set") ) // ClientError represents an error state of a client type ClientError struct { Code int Body []byte } // Error returns an error string containing the client error code func (c ClientError) Error() string { return fmt.Sprintf("unexpected status code: %d", c.Code) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/webdav/chunking.go
backend/webdav/chunking.go
package webdav

/*
   chunked update for Nextcloud
   see https://docs.nextcloud.com/server/20/developer_manual/client_apis/WebDAV/chunking.html
*/

import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/readers"
	"github.com/rclone/rclone/lib/rest"
)

// shouldRetryChunkMerge decides whether the final MOVE that assembles the
// uploaded chunks should be retried.
//
// It implements a heuristic around NextCloud's behaviour: a 423 LOCKED means
// the merge is still in progress server side, so we sleep (with *sleepTime
// doubling each attempt) and retry; a 404 seen *after* a 423 is assumed to
// mean the merge actually completed and is treated as success.
func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error, sleepTime *time.Duration, wasLocked *bool) (bool, error) {
	// Not found. Can be returned by NextCloud when merging chunks of an upload.
	if resp != nil && resp.StatusCode == 404 {
		if *wasLocked {
			// Assume a 404 error after we've received a 423 error is actually a success
			return false, nil
		}
		return true, err
	}

	// 423 LOCKED
	if resp != nil && resp.StatusCode == 423 {
		*wasLocked = true
		fs.Logf(f, "Sleeping for %v to wait for chunks to be merged after 423 error", *sleepTime)
		time.Sleep(*sleepTime)
		*sleepTime *= 2
		return true, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
	}

	return f.shouldRetry(ctx, resp, err)
}

// set the chunk size for testing
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	return
}

// getChunksUploadDir returns the name of the temporary upload directory for
// this object's chunks - a deterministic MD5 of the object's remote path, so
// an interrupted upload of the same file reuses (and can purge) the same dir.
func (o *Object) getChunksUploadDir() (string, error) {
	hasher := md5.New()
	_, err := hasher.Write([]byte(o.filePath()))
	if err != nil {
		return "", fmt.Errorf("chunked upload couldn't hash URL: %w", err)
	}
	uploadDir := "rclone-chunked-upload-" + hex.EncodeToString(hasher.Sum(nil))
	return uploadDir, nil
}

// getChunksUploadURL derives the /dav/uploads/USER/ endpoint from the
// configured /dav/files/USER endpoint; chunks are uploaded there before
// being assembled into the destination path.
func (f *Fs) getChunksUploadURL() (string, error) {
	submatch := nextCloudURLRegex.FindStringSubmatch(f.endpointURL)
	if submatch == nil {
		return "", errors.New("the remote url looks incorrect. Note that nextcloud chunked uploads require you to use the /dav/files/USER endpoint instead of /webdav. Please check 'rclone config show remotename' to verify that the url field ends in /dav/files/USERNAME")
	}

	baseURL, user := submatch[1], submatch[2]
	chunksUploadURL := fmt.Sprintf("%s/dav/uploads/%s/", baseURL, user)

	return chunksUploadURL, nil
}

// shouldUseChunkedUpload reports whether src should be uploaded in chunks:
// chunking must be supported and enabled, and the source larger than one chunk.
func (o *Object) shouldUseChunkedUpload(src fs.ObjectInfo) bool {
	return o.fs.canChunk && o.fs.opt.ChunkSize > 0 && src.Size() > int64(o.fs.opt.ChunkSize)
}

// updateChunked uploads in0 as a nextcloud chunked upload: create the
// temporary upload directory, upload the chunks, then merge them into the
// destination.
func (o *Object) updateChunked(ctx context.Context, in0 io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	var uploadDir string

	// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#starting-a-chunked-upload
	uploadDir, err = o.createChunksUploadDirectory(ctx)
	if err != nil {
		return err
	}

	partObj := &Object{
		fs: o.fs,
	}

	// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#uploading-chunks
	err = o.uploadChunks(ctx, in0, src.Size(), partObj, uploadDir, options)
	if err != nil {
		return err
	}

	// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#assembling-the-chunks
	err = o.mergeChunks(ctx, uploadDir, options, src)
	if err != nil {
		return err
	}

	return nil
}

// uploadChunks uploads size bytes from in0 as sequential chunks named
// "OFFSET-ENDOFFSET" (zero padded) inside uploadDir.
func (o *Object) uploadChunks(ctx context.Context, in0 io.Reader, size int64, partObj *Object, uploadDir string, options []fs.OpenOption) error {
	chunkSize := int64(partObj.fs.opt.ChunkSize)

	// TODO: upload chunks in parallel for faster transfer speeds
	for offset := int64(0); offset < size; offset += chunkSize {
		if err := ctx.Err(); err != nil {
			return err
		}

		// Last chunk may be smaller
		contentLength := min(size-offset, chunkSize)
		endOffset := offset + contentLength - 1

		partObj.remote = fmt.Sprintf("%s/%015d-%015d", uploadDir, offset, endOffset)
		// Enable low-level HTTP 2 retries.
		// 2022-04-28 15:59:06 ERROR : stuff/video.avi: Failed to copy: uploading chunk failed: Put "https://censored.com/remote.php/dav/uploads/Admin/rclone-chunked-upload-censored/000006113198080-000006123683840": http2: Transport: cannot retry err [http2: Transport received Server's graceful shutdown GOAWAY] after Request.Body was written; define Request.GetBody to avoid this error
		buf := make([]byte, chunkSize)
		in := readers.NewRepeatableLimitReaderBuffer(in0, buf, chunkSize)
		getBody := func() (io.ReadCloser, error) {
			// RepeatableReader{} plays well with accounting so rewinding doesn't make the progress buggy
			if _, err := in.Seek(0, io.SeekStart); err != nil {
				return nil, err
			}

			return io.NopCloser(in), nil
		}

		err := partObj.updateSimple(ctx, in, getBody, partObj.remote, contentLength, "application/x-www-form-urlencoded", nil, o.fs.chunksUploadURL, options...)
		if err != nil {
			return fmt.Errorf("uploading chunk failed: %w", err)
		}
	}
	return nil
}

// createChunksUploadDirectory purges any leftovers from a previous attempt
// and then MKCOLs a fresh temporary upload directory, returning its name.
func (o *Object) createChunksUploadDirectory(ctx context.Context) (string, error) {
	uploadDir, err := o.getChunksUploadDir()
	if err != nil {
		return uploadDir, err
	}

	err = o.purgeUploadedChunks(ctx, uploadDir)
	if err != nil {
		return "", fmt.Errorf("chunked upload couldn't purge upload directory: %w", err)
	}

	opts := rest.Opts{
		Method:     "MKCOL",
		Path:       uploadDir + "/",
		NoResponse: true,
		RootURL:    o.fs.chunksUploadURL,
	}
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := o.fs.srv.Call(ctx, &opts)
		return o.fs.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return "", fmt.Errorf("making upload directory failed: %w", err)
	}
	return uploadDir, err
}

// mergeChunks asks the server to assemble the uploaded chunks into the final
// destination path via a MOVE of the magic ".file" resource.
func (o *Object) mergeChunks(ctx context.Context, uploadDir string, options []fs.OpenOption, src fs.ObjectInfo) error {
	var resp *http.Response

	// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html?highlight=chunk#assembling-the-chunks
	opts := rest.Opts{
		Method:     "MOVE",
		Path:       path.Join(uploadDir, ".file"),
		NoResponse: true,
		Options:    options,
		RootURL:    o.fs.chunksUploadURL,
	}
	destinationURL, err := rest.URLJoin(o.fs.endpoint, o.filePath())
	if err != nil {
		return fmt.Errorf("finalize chunked upload couldn't join URL: %w", err)
	}
	opts.ExtraHeaders = o.extraHeaders(ctx, src)
	opts.ExtraHeaders["Destination"] = destinationURL.String()
	sleepTime := 5 * time.Second
	wasLocked := false
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return o.fs.shouldRetryChunkMerge(ctx, resp, err, &sleepTime, &wasLocked)
	})
	if err != nil {
		return fmt.Errorf("finalize chunked upload failed, destinationURL: \"%s\": %w", destinationURL, err)
	}
	return err
}

// purgeUploadedChunks deletes uploadDir if it exists; a 404 (nothing to
// purge) is treated as success.
func (o *Object) purgeUploadedChunks(ctx context.Context, uploadDir string) error {
	// clean the upload directory if it exists (this means that a previous try didn't clean up properly).
	opts := rest.Opts{
		Method:     "DELETE",
		Path:       uploadDir + "/",
		NoResponse: true,
		RootURL:    o.fs.chunksUploadURL,
	}

	err := o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)

		// directory doesn't exist, no need to purge
		if resp != nil && resp.StatusCode == http.StatusNotFound {
			return false, nil
		}

		return o.fs.shouldRetry(ctx, resp, err)
	})

	return err
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false