repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/premiumizeme/premiumizeme.go
backend/premiumizeme/premiumizeme.go
// Package premiumizeme provides an interface to the premiumize.me // object storage system. package premiumizeme /* Run of rclone info stringNeedsEscaping = []rune{ 0x00, 0x0A, 0x0D, 0x22, 0x2F, 0x5C, 0xBF, 0xFE 0x00, 0x0A, 0x0D, '"', '/', '\\', 0xBF, 0xFE } maxFileLength = 255 canWriteUnnormalized = true canReadUnnormalized = true canReadRenormalized = false canStream = false */ import ( "context" "encoding/json" "errors" "fmt" "io" "net" "net/http" "net/url" "path" "strings" "time" "github.com/rclone/rclone/backend/premiumizeme/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/rest" ) const ( rcloneClientID = "658922194" rcloneEncryptedClientSecret = "B5YIvQoRIhcpAYs8HYeyjb9gK-ftmZEbqdh_gNfc4RgO9Q" minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential rootID = "0" // ID of root folder is always this rootURL = "https://www.premiumize.me/api" ) // Globals var ( // Description of how to auth for this app oauthConfig = &oauthutil.Config{ Scopes: nil, AuthURL: "https://www.premiumize.me/authorize", TokenURL: "https://www.premiumize.me/token", ClientID: rcloneClientID, ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), RedirectURL: oauthutil.RedirectURL, } ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "premiumizeme", Description: "premiumize.me", NewFs: NewFs, Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { return 
oauthutil.ConfigOut("", &oauthutil.Options{ OAuth2Config: oauthConfig, }) }, Options: append(oauthutil.SharedOptions, []fs.Option{{ Name: "api_key", Help: `API Key. This is not normally used - use oauth instead. `, Hide: fs.OptionHideBoth, Default: "", Sensitive: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // Encode invalid UTF-8 bytes as json doesn't handle them properly. Default: (encoder.Display | encoder.EncodeBackSlash | encoder.EncodeDoubleQuote | encoder.EncodeInvalidUtf8), }}...), }) } // Options defines the configuration for this backend type Options struct { APIKey string `config:"api_key"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote cloud storage system type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options features *fs.Features // optional features srv *rest.Client // the connection to the server dirCache *dircache.DirCache // Map of directory path to directory id pacer *fs.Pacer // pacer for API calls tokenRenewer *oauthutil.Renew // renew the token on expiry } // Object describes a file type Object struct { fs *Fs // what this object is part of remote string // The remote path hasMetaData bool // metadata is present and correct size int64 // size of the object modTime time.Time // modification time of the object id string // ID of the object parentID string // ID of parent directory mimeType string // Mime type of object url string // URL to download file } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("premiumize.me root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { 
return f.features } // parsePath parses a premiumize.me 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // readMetaDataForPath reads the metadata from the path func (f *Fs) readMetaDataForPath(ctx context.Context, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) { // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound } return nil, err } lcLeaf := strings.ToLower(leaf) _, found, err := f.listAll(ctx, directoryID, directoriesOnly, filesOnly, func(item *api.Item) bool { if strings.ToLower(item.Name) == lcLeaf { info = item return true } return false }) if err != nil { return nil, err } if !found { return nil, fs.ErrorObjectNotFound } return info, nil } // errorHandler parses a non 2xx error response into an error func errorHandler(resp *http.Response) error { body, err := rest.ReadBody(resp) if err != nil { body = nil } var e = api.Response{ Message: string(body), Status: fmt.Sprintf("%s (%d)", resp.Status, resp.StatusCode), } if body != nil { _ = json.Unmarshal(body, &e) } return &e } // Return a url.Values with the api key in func (f *Fs) baseParams() url.Values { params := url.Values{} if f.opt.APIKey != "" { 
params.Add("apikey", f.opt.APIKey) } return params } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } root = parsePath(root) var client *http.Client var ts *oauthutil.TokenSource if opt.APIKey == "" { client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig) if err != nil { return nil, fmt.Errorf("failed to configure premiumize.me: %w", err) } } else { client = fshttp.NewClient(ctx) } f := &Fs{ name: name, root: root, opt: *opt, srv: rest.NewClient(client).SetRoot(rootURL), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } f.features = (&fs.Features{ CaseInsensitive: true, CanHaveEmptyDirectories: true, ReadMimeType: true, }).Fill(ctx, f) f.srv.SetErrorHandler(errorHandler) // Renew the token in the background if ts != nil { f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { _, err := f.About(ctx) return err }) } // Get rootID f.dirCache = dircache.New(root, rootID, f) // Find the current root err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) tempF := *f tempF.dirCache = dircache.New(newRoot, rootID, &tempF) tempF.root = newRoot // Make new Fs which is the parent err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } _, err := tempF.newObjectWithInfo(ctx, remote, nil) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f return f, nil } return nil, err } f.features.Fill(ctx, &tempF) // XXX: update the old f here instead of returning tempF, since // `features` were already filled with functions having *f as a receiver. 
// See https://github.com/rclone/rclone/issues/2182 f.dirCache = tempF.dirCache f.root = tempF.root // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } var err error if info != nil { // Set info err = o.setMetaData(info) } else { err = o.readMetaData(ctx) // reads info and meta, returning an error } if err != nil { return nil, err } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(ctx, remote, nil) } // FindLeaf finds a directory of name leaf in the folder with ID pathID func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { // Find the leaf in pathID var newDirID string newDirID, found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool { if strings.EqualFold(item.Name, leaf) { pathIDOut = item.ID return true } return false }) // Update the Root directory ID to its actual value if pathID == rootID { f.dirCache.SetRootIDAlias(newDirID) } return pathIDOut, found, err } // CreateDir makes a directory with pathID as parent and name leaf func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) var resp *http.Response var info api.FolderCreateResponse opts := rest.Opts{ Method: "POST", Path: "/folder/create", Parameters: f.baseParams(), MultipartParams: url.Values{ "name": {f.opt.Enc.FromStandardName(leaf)}, "parent_id": {pathID}, }, } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &info) return 
shouldRetry(ctx, resp, err) }) if err != nil { //fmt.Printf("...Error %v\n", err) return "", fmt.Errorf("CreateDir http: %w", err) } if err = info.AsErr(); err != nil { return "", fmt.Errorf("CreateDir: %w", err) } // fmt.Printf("...Id %q\n", *info.Id) return info.ID, nil } // list the objects into the function supplied // // If directories is set it only sends directories // User function to process a File item from listAll // // Should return true to finish processing type listAllFn func(*api.Item) bool // Lists the directory required calling the user function on each item found // // If the user fn ever returns true then it early exits with found = true // // It returns a newDirID which is what the system returned as the directory ID func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (newDirID string, found bool, err error) { opts := rest.Opts{ Method: "GET", Path: "/folder/list", Parameters: f.baseParams(), } if dirID != rootID { opts.Parameters.Set("id", dirID) } opts.Parameters.Set("includebreadcrumbs", "false") var result api.FolderListResponse var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return newDirID, found, fmt.Errorf("couldn't list files: %w", err) } if err = result.AsErr(); err != nil { return newDirID, found, fmt.Errorf("error while listing: %w", err) } newDirID = result.FolderID for i := range result.Content { item := &result.Content[i] if item.Type == api.ItemTypeFolder { if filesOnly { continue } } else if item.Type == api.ItemTypeFile { if directoriesOnly { continue } } else { fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type) continue } item.Name = f.opt.Enc.ToStandardName(item.Name) if fn(item) { found = true break } } return } // List the objects and directories in dir into entries. 
The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } var iErr error _, _, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool { remote := path.Join(dir, info.Name) if info.Type == api.ItemTypeFolder { // cache the directory ID for later lookups f.dirCache.Put(remote, info.ID) d := fs.NewDir(remote, time.Unix(info.CreatedAt, 0)).SetID(info.ID) entries = append(entries, d) } else if info.Type == api.ItemTypeFile { o, err := f.newObjectWithInfo(ctx, remote, info) if err != nil { iErr = err return true } entries = append(entries, o) } return false }) if err != nil { return nil, err } if iErr != nil { return nil, iErr } return entries, nil } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Returns the object, leaf, directoryID and error. // // Used to create new objects func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { // Create the directory for the object if it doesn't exist leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true) if err != nil { return } // Temporary Object under construction o = &Object{ fs: f, remote: remote, } return o, leaf, directoryID, nil } // Put the object // // Copy the reader in to the new object which is returned. 
// // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil) switch err { case nil: return existingObj, existingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // Not found so create it return f.PutUnchecked(ctx, in, src, options...) default: return nil, err } } // PutUnchecked the object into the container // // This will produce an error if the object already exists. // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() modTime := src.ModTime(ctx) o, _, _, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } return o, o.Update(ctx, in, src, options...) 
} // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, err := f.dirCache.FindDir(ctx, dir, true) return err } // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } dc := f.dirCache rootID, err := dc.FindDir(ctx, dir, false) if err != nil { return err } // need to check if empty as it will delete recursively by default if check { _, found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool { return true }) if err != nil { return fmt.Errorf("purgeCheck: %w", err) } if found { return fs.ErrorDirectoryNotEmpty } } opts := rest.Opts{ Method: "POST", Path: "/folder/delete", MultipartParams: url.Values{ "id": {rootID}, }, Parameters: f.baseParams(), } var resp *http.Response var result api.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("rmdir failed: %w", err) } if err = result.AsErr(); err != nil { return fmt.Errorf("rmdir: %w", err) } f.dirCache.FlushDir(dir) if err != nil { return err } return nil } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } // Purge deletes all the files in the directory // // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, false) } // move a file or folder // // This is complicated by the 
fact that there is an API to move files // between directories and a separate one to rename them. We try to // call the minimum number of API calls. func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDirectoryID, newDirectoryID string) (err error) { newLeaf = f.opt.Enc.FromStandardName(newLeaf) oldLeaf = f.opt.Enc.FromStandardName(oldLeaf) doRenameLeaf := oldLeaf != newLeaf doMove := oldDirectoryID != newDirectoryID // Now rename the leaf to a temporary name if we are moving to // another directory to make sure we don't overwrite something // in the destination directory by accident if doRenameLeaf && doMove { tmpLeaf := newLeaf + "." + random.String(8) err = f.renameLeaf(ctx, isFile, id, tmpLeaf) if err != nil { return fmt.Errorf("Move rename leaf: %w", err) } } // Move the object to a new directory (with the existing name) // if required if doMove { opts := rest.Opts{ Method: "POST", Path: "/folder/paste", Parameters: f.baseParams(), MultipartParams: url.Values{ "id": {newDirectoryID}, }, } opts.MultipartParams.Set("items[0][id]", id) if isFile { opts.MultipartParams.Set("items[0][type]", "file") } else { opts.MultipartParams.Set("items[0][type]", "folder") } //replacedLeaf := enc.FromStandardName(leaf) var resp *http.Response var result api.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("Move http: %w", err) } if err = result.AsErr(); err != nil { return fmt.Errorf("Move: %w", err) } } // Rename the leaf to its final name if required if doRenameLeaf { err = f.renameLeaf(ctx, isFile, id, newLeaf) if err != nil { return fmt.Errorf("Move rename leaf: %w", err) } } return nil } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } // Create temporary object dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } // Do the move err = f.move(ctx, true, srcObj.id, path.Base(srcObj.remote), leaf, srcObj.parentID, directoryID) if err != nil { return nil, err } err = dstObj.readMetaData(ctx) if err != nil { return nil, err } return dstObj, nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) if err != nil { return err } // Do the move err = f.move(ctx, false, srcID, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID) if err != nil { return err } srcFs.dirCache.FlushDir(srcRemote) return nil } // PublicLink adds a "readable by anyone with link" permission on the given file or folder. 
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) { _, err := f.dirCache.FindDir(ctx, remote, false) if err == nil { return "", fs.ErrorCantShareDirectories } o, err := f.NewObject(ctx, remote) if err != nil { return "", err } return o.(*Object).url, nil } // Shutdown shutdown the fs func (f *Fs) Shutdown(ctx context.Context) error { f.tokenRenewer.Shutdown() return nil } // About gets quota information func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { var resp *http.Response var info api.AccountInfoResponse opts := rest.Opts{ Method: "POST", Path: "/account/info", Parameters: f.baseParams(), } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &info) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } if err = info.AsErr(); err != nil { return nil, err } usage = &fs.Usage{ Used: fs.NewUsageValue(info.SpaceUsed), } return usage, nil } // DirCacheFlush resets the directory cache - used in testing as an // optional interface func (f *Fs) DirCacheFlush() { f.dirCache.ResetRoot() } // Hashes returns the supported hash sets. 
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns "" with hash.ErrUnsupported - the backend exposes no
// checksums (see Hashes returning hash.None).
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	err := o.readMetaData(context.TODO())
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return 0
	}
	return o.size
}

// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
	// Use the package constant instead of a bare "file" literal for
	// consistency with the item type checks in listAll.
	if info.Type != api.ItemTypeFile {
		return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
	}
	o.hasMetaData = true
	o.size = info.Size
	o.modTime = time.Unix(info.CreatedAt, 0)
	o.id = info.ID
	o.mimeType = info.MimeType
	o.url = info.Link
	return nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if o.hasMetaData {
		return nil
	}
	info, err := o.fs.readMetaDataForPath(ctx, o.remote, false, true)
	if err != nil {
		return err
	}
	return o.setMetaData(info)
}

// ModTime returns the modification time of the object
//
// This is the created_at time from the API (see setMetaData); on a
// metadata read failure it falls back to time.Now().
func (o *Object) ModTime(ctx context.Context) time.Time {
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
	}
	return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}

//
Storable returns a boolean showing whether this object storable func (o *Object) Storable() bool { return true } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { if o.url == "" { return nil, errors.New("can't download - no URL") } fs.FixRangeOption(options, o.size) var resp *http.Response opts := rest.Opts{ Path: "", RootURL: o.url, Method: "GET", Options: options, } err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } return resp.Body, err } // Update the object with the contents of the io.Reader, modTime and size // // If existing is set then it updates the object rather than creating a new one. // // The new object may have been created if an error is returned func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { remote := o.Remote() size := src.Size() // Create the directory for the object if it doesn't exist leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true) if err != nil { return err } leaf = o.fs.opt.Enc.FromStandardName(leaf) var resp *http.Response var info api.FolderUploadinfoResponse opts := rest.Opts{ Method: "POST", Path: "/folder/uploadinfo", Parameters: o.fs.baseParams(), Options: options, MultipartParams: url.Values{ "id": {directoryID}, }, } err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info) if err != nil { return shouldRetry(ctx, resp, err) } // Just check the download URL resolves - sometimes // the URLs returned by premiumize.me don't resolve so // this needs a retry. 
var u *url.URL u, err = url.Parse(info.URL) if err != nil { return true, fmt.Errorf("failed to parse download URL: %w", err) } _, err = net.LookupIP(u.Hostname()) if err != nil { return true, fmt.Errorf("failed to resolve download URL: %w", err) } return false, nil }) if err != nil { return fmt.Errorf("upload get URL http: %w", err) } if err = info.AsErr(); err != nil { return fmt.Errorf("upload get URL: %w", err) } // if file exists then rename it out the way otherwise uploads can fail uploaded := false var oldID = o.id if o.hasMetaData { newLeaf := leaf + "." + random.String(8) fs.Debugf(o, "Moving old file out the way to %q", newLeaf) err = o.fs.renameLeaf(ctx, true, oldID, newLeaf) if err != nil { return fmt.Errorf("upload rename old file: %w", err) } defer func() { // on failed upload rename old file back if !uploaded { fs.Debugf(o, "Renaming old file back (from %q to %q) since upload failed", leaf, newLeaf) newErr := o.fs.renameLeaf(ctx, true, oldID, leaf) if newErr != nil && err == nil { err = fmt.Errorf("upload renaming old file back: %w", newErr) } } }() } opts = rest.Opts{ Method: "POST", RootURL: info.URL, Body: in, MultipartParams: url.Values{ "token": {info.Token}, }, MultipartContentName: "file", // ..name of the parameter which is the attached file MultipartFileName: leaf, // ..name of the file for the attached file ContentLength: &size, } var result api.Response err = o.fs.pacer.CallNoRetry(func() (bool, error) { resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("upload file http: %w", err) } if err = result.AsErr(); err != nil { return fmt.Errorf("upload file: %w", err) } // on successful upload, remove old file if it exists uploaded = true if o.hasMetaData { fs.Debugf(o, "Removing old file") err := o.fs.remove(ctx, oldID) if err != nil { return fmt.Errorf("upload remove old file: %w", err) } } o.hasMetaData = false return o.readMetaData(ctx) } // Rename the leaf of a 
file or directory in a directory func (f *Fs) renameLeaf(ctx context.Context, isFile bool, id string, newLeaf string) (err error) { opts := rest.Opts{ Method: "POST", MultipartParams: url.Values{ "id": {id}, "name": {newLeaf}, }, Parameters: f.baseParams(), } if isFile { opts.Path = "/item/rename" } else { opts.Path = "/folder/rename" } var resp *http.Response var result api.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("rename http: %w", err) } if err = result.AsErr(); err != nil { return fmt.Errorf("rename: %w", err) } return nil } // Remove an object by ID func (f *Fs) remove(ctx context.Context, id string) (err error) { opts := rest.Opts{ Method: "POST", Path: "/item/delete", MultipartParams: url.Values{ "id": {id}, }, Parameters: f.baseParams(), } var resp *http.Response var result api.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("remove http: %w", err) } if err = result.AsErr(); err != nil { return fmt.Errorf("remove: %w", err) } return nil } // Remove an object func (o *Object) Remove(ctx context.Context) error { err := o.readMetaData(ctx) if err != nil { return fmt.Errorf("Remove: Failed to read metadata: %w", err) } return o.fs.remove(ctx, o.id) } // MimeType of an Object if known, "" otherwise func (o *Object) MimeType(ctx context.Context) string { return o.mimeType } // ID returns the ID of the Object if known, or "" if not func (o *Object) ID() string { return o.id } // Check the interfaces are satisfied var ( _ fs.Fs = (*Fs)(nil) _ fs.Purger = (*Fs)(nil) _ fs.Mover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil) _ fs.DirCacheFlusher = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil) _ fs.Shutdowner = (*Fs)(nil) _ fs.Object = (*Object)(nil) _ fs.MimeTyper = (*Object)(nil) _ fs.IDer = 
(*Object)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/premiumizeme/premiumizeme_test.go
backend/premiumizeme/premiumizeme_test.go
// Test filesystem interface
package premiumizeme_test

import (
	"testing"

	"github.com/rclone/rclone/backend/premiumizeme"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote.
// It expects a remote named "TestPremiumizeMe:" to be configured.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestPremiumizeMe:",
		NilObject:  (*premiumizeme.Object)(nil),
	})
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/premiumizeme/api/types.go
backend/premiumizeme/api/types.go
// Package api contains definitions for using the premiumize.me API package api import "fmt" // Response is returned by all messages and embedded in the // structures below type Response struct { Message string `json:"message,omitempty"` Status string `json:"status"` } // Error satisfies the error interface func (e *Response) Error() string { return fmt.Sprintf("%s: %s", e.Status, e.Message) } // AsErr checks the status and returns an err if bad or nil if good func (e *Response) AsErr() error { if e.Status != "success" { return e } return nil } // Item Types const ( ItemTypeFolder = "folder" ItemTypeFile = "file" ) // Item refers to a file or folder type Item struct { Breadcrumbs []Breadcrumb `json:"breadcrumbs"` CreatedAt int64 `json:"created_at,omitempty"` ID string `json:"id"` Link string `json:"link,omitempty"` Name string `json:"name"` Size int64 `json:"size,omitempty"` StreamLink string `json:"stream_link,omitempty"` Type string `json:"type"` TranscodeStatus string `json:"transcode_status"` IP string `json:"ip"` MimeType string `json:"mime_type"` } // Breadcrumb is part the breadcrumb trail for a file or folder. 
It // is returned as part of folder/list if required type Breadcrumb struct { ID string `json:"id,omitempty"` Name string `json:"name,omitempty"` ParentID string `json:"parent_id,omitempty"` } // FolderListResponse is the response to folder/list type FolderListResponse struct { Response Content []Item `json:"content"` Name string `json:"name,omitempty"` ParentID string `json:"parent_id,omitempty"` FolderID string `json:"folder_id,omitempty"` } // FolderCreateResponse is the response to folder/create type FolderCreateResponse struct { Response ID string `json:"id,omitempty"` } // FolderUploadinfoResponse is the response to folder/uploadinfo type FolderUploadinfoResponse struct { Response Token string `json:"token,omitempty"` URL string `json:"url,omitempty"` } // AccountInfoResponse is the response to account/info type AccountInfoResponse struct { Response CustomerID string `json:"customer_id,omitempty"` LimitUsed float64 `json:"limit_used,omitempty"` // fraction 0..1 of download traffic limit PremiumUntil int64 `json:"premium_until,omitempty"` SpaceUsed float64 `json:"space_used,omitempty"` }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/alias/alias.go
backend/alias/alias.go
// Package alias implements a virtual provider to rename existing remotes. package alias import ( "context" "errors" "strings" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fspath" ) // Register with Fs func init() { fsi := &fs.RegInfo{ Name: "alias", Description: "Alias for an existing remote", NewFs: NewFs, Options: []fs.Option{{ Name: "remote", Help: "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".", Required: true, }}, } fs.Register(fsi) } // Options defines the configuration for this backend type Options struct { Remote string `config:"remote"` } // NewFs constructs an Fs from the path. // // The returned Fs is the actual Fs, referenced by remote in the config func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } if opt.Remote == "" { return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting") } if strings.HasPrefix(opt.Remote, name+":") { return nil, errors.New("can't point alias remote at itself - check the value of the remote setting") } return cache.Get(ctx, fspath.JoinRootPath(opt.Remote, root)) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/alias/alias_internal_test.go
backend/alias/alias_internal_test.go
package alias import ( "context" "fmt" "path" "path/filepath" "sort" "testing" _ "github.com/rclone/rclone/backend/local" // pull in test backend "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configfile" "github.com/stretchr/testify/require" ) var ( remoteName = "TestAlias" ) func prepare(t *testing.T, root string) { configfile.Install() // Configure the remote config.FileSetValue(remoteName, "type", "alias") config.FileSetValue(remoteName, "remote", root) } func TestNewFS(t *testing.T) { type testEntry struct { remote string size int64 isDir bool } for testi, test := range []struct { remoteRoot string fsRoot string fsList string wantOK bool entries []testEntry }{ {"", "", "", true, []testEntry{ {"four", -1, true}, {"one%.txt", 6, false}, {"three", -1, true}, {"two.html", 7, false}, }}, {"", "four", "", true, []testEntry{ {"five", -1, true}, {"under four.txt", 9, false}, }}, {"", "", "four", true, []testEntry{ {"four/five", -1, true}, {"four/under four.txt", 9, false}, }}, {"four", "..", "", true, []testEntry{ {"five", -1, true}, {"under four.txt", 9, false}, }}, {"", "../../three", "", true, []testEntry{ {"underthree.txt", 9, false}, }}, {"four", "../../five", "", true, []testEntry{ {"underfive.txt", 6, false}, }}, } { what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList) remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot))) require.NoError(t, err, what) prepare(t, remoteRoot) f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:%s", remoteName, test.fsRoot)) require.NoError(t, err, what) gotEntries, err := f.List(context.Background(), test.fsList) require.NoError(t, err, what) sort.Sort(gotEntries) require.Equal(t, len(test.entries), len(gotEntries), what) for i, gotEntry := range gotEntries { what := fmt.Sprintf("%s, entry=%d", what, i) wantEntry := test.entries[i] _, isDir := gotEntry.(fs.Directory) 
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what) if !isDir { require.Equal(t, wantEntry.size, gotEntry.Size(), what) } require.Equal(t, wantEntry.isDir, isDir, what) } } } func TestNewFSNoRemote(t *testing.T) { prepare(t, "") f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName)) require.Error(t, err) require.Nil(t, f) } func TestNewFSInvalidRemote(t *testing.T) { prepare(t, "not_existing_test_remote:") f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName)) require.Error(t, err) require.Nil(t, f) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hidrive/hidrive_test.go
backend/hidrive/hidrive_test.go
// Test HiDrive filesystem interface package hidrive import ( "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote. func TestIntegration(t *testing.T) { name := "TestHiDrive" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":", NilObject: (*Object)(nil), ChunkedUpload: fstests.ChunkedUploadConfig{ MinChunkSize: 1, MaxChunkSize: MaximumUploadBytes, CeilChunkSize: nil, NeedMultipleChunks: false, }, }) } // Change the configured UploadChunkSize. // Will only be called while no transfer is in progress. func (f *Fs) SetUploadChunkSize(chunksize fs.SizeSuffix) (fs.SizeSuffix, error) { var old fs.SizeSuffix old, f.opt.UploadChunkSize = f.opt.UploadChunkSize, chunksize return old, nil } // Change the configured UploadCutoff. // Will only be called while no transfer is in progress. func (f *Fs) SetUploadCutoff(cutoff fs.SizeSuffix) (fs.SizeSuffix, error) { var old fs.SizeSuffix old, f.opt.UploadCutoff = f.opt.UploadCutoff, cutoff return old, nil } var ( _ fstests.SetUploadChunkSizer = (*Fs)(nil) _ fstests.SetUploadCutoffer = (*Fs)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hidrive/hidrive.go
backend/hidrive/hidrive.go
// Package hidrive provides an interface to the HiDrive object storage system. package hidrive // FIXME HiDrive only supports file or folder names of 255 characters or less. // Operations that create files or folders with longer names will throw an HTTP error: // - 422 Unprocessable Entity // A more graceful way for rclone to handle this may be desirable. import ( "context" "encoding/json" "errors" "fmt" "io" "net/http" "path" "strconv" "time" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/backend/hidrive/api" "github.com/rclone/rclone/backend/hidrive/hidrivehash" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" ) const ( rcloneClientID = "6b0258fdda630d34db68a3ce3cbf19ae" rcloneEncryptedClientSecret = "GC7UDZ3Ra4jLcmfQSagKCDJ1JEy-mU6pBBhFrS3tDEHILrK7j3TQHUrglkO5SgZ_" minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential defaultUploadChunkSize = 48 * fs.Mebi defaultUploadCutoff = 2 * defaultUploadChunkSize defaultUploadConcurrency = 4 ) // Globals var ( // Description of how to auth for this app. oauthConfig = &oauthutil.Config{ AuthURL: "https://my.hidrive.com/client/authorize", TokenURL: "https://my.hidrive.com/oauth2/token", ClientID: rcloneClientID, ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), RedirectURL: oauthutil.TitleBarRedirectURL, } // hidrivehashType is the hash.Type for HiDrive hashes. hidrivehashType hash.Type ) // Register the backend with Fs. 
func init() { hidrivehashType = hash.RegisterHash("hidrive", "HiDriveHash", 40, hidrivehash.New) fs.Register(&fs.RegInfo{ Name: "hidrive", Description: "HiDrive", NewFs: NewFs, Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, fmt.Errorf("couldn't parse config into struct: %w", err) } //fs.Debugf(nil, "hidrive: configuring oauth-token.") oauthConfig.Scopes = createHiDriveScopes(opt.ScopeRole, opt.ScopeAccess) return oauthutil.ConfigOut("", &oauthutil.Options{ OAuth2Config: oauthConfig, }) }, Options: append(oauthutil.SharedOptions, []fs.Option{{ Name: "scope_access", Help: "Access permissions that rclone should use when requesting access from HiDrive.", Default: "rw", Examples: []fs.OptionExample{{ Value: "rw", Help: "Read and write access to resources.", }, { Value: "ro", Help: "Read-only access to resources.", }}, }, { Name: "scope_role", Help: "User-level that rclone should use when requesting access from HiDrive.", Default: "user", Examples: []fs.OptionExample{{ Value: "user", Help: `User-level access to management permissions. This will be sufficient in most cases.`, }, { Value: "admin", Help: "Extensive access to management permissions.", }, { Value: "owner", Help: "Full access to management permissions.", }}, Advanced: true, }, { Name: "root_prefix", Help: `The root/parent folder for all paths. Fill in to use the specified folder as the parent for all paths given to the remote. This way rclone can use any folder as its starting point.`, Default: "/", Examples: []fs.OptionExample{{ Value: "/", Help: `The topmost directory accessible by rclone. 
This will be equivalent with "root" if rclone uses a regular HiDrive user account.`, }, { Value: "root", Help: `The topmost directory of the HiDrive user account`, }, { Value: "", Help: `This specifies that there is no root-prefix for your paths. When using this you will always need to specify paths to this remote with a valid parent e.g. "remote:/path/to/dir" or "remote:root/path/to/dir".`, }}, Advanced: true, }, { Name: "endpoint", Help: `Endpoint for the service. This is the URL that API-calls will be made to.`, Default: "https://api.hidrive.strato.com/2.1", Advanced: true, }, { Name: "disable_fetching_member_count", Help: `Do not fetch number of objects in directories unless it is absolutely necessary. Requests may be faster if the number of objects in subdirectories is not fetched.`, Default: false, Advanced: true, }, { Name: "chunk_size", Help: fmt.Sprintf(`Chunksize for chunked uploads. Any files larger than the configured cutoff (or files of unknown size) will be uploaded in chunks of this size. The upper limit for this is %v bytes (about %v). That is the maximum amount of bytes a single upload-operation will support. Setting this above the upper limit or to a negative value will cause uploads to fail. Setting this to larger values may increase the upload speed at the cost of using more memory. It can be set to smaller values smaller to save on memory.`, MaximumUploadBytes, fs.SizeSuffix(MaximumUploadBytes)), Default: defaultUploadChunkSize, Advanced: true, }, { Name: "upload_cutoff", Help: fmt.Sprintf(`Cutoff/Threshold for chunked uploads. Any files larger than this will be uploaded in chunks of the configured chunksize. The upper limit for this is %v bytes (about %v). That is the maximum amount of bytes a single upload-operation will support. 
Setting this above the upper limit will cause uploads to fail.`, MaximumUploadBytes, fs.SizeSuffix(MaximumUploadBytes)), Default: defaultUploadCutoff, Advanced: true, }, { Name: "upload_concurrency", Help: `Concurrency for chunked uploads. This is the upper limit for how many transfers for the same file are running concurrently. Setting this above to a value smaller than 1 will cause uploads to deadlock. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers.`, Default: defaultUploadConcurrency, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // HiDrive only supports file or folder names of 255 characters or less. // Names containing "/" are not supported. // The special names "." and ".." are not supported. Default: (encoder.EncodeZero | encoder.EncodeSlash | encoder.EncodeDot), }}...), }) } // Options defines the configuration for this backend. type Options struct { EndpointAPI string `config:"endpoint"` OptionalMemberCountDisabled bool `config:"disable_fetching_member_count"` UploadChunkSize fs.SizeSuffix `config:"chunk_size"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` UploadConcurrency int64 `config:"upload_concurrency"` Enc encoder.MultiEncoder `config:"encoding"` RootPrefix string `config:"root_prefix"` ScopeAccess string `config:"scope_access"` ScopeRole string `config:"scope_role"` } // Fs represents a remote hidrive. type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options features *fs.Features // optional features srv *rest.Client // the connection to the server pacer *fs.Pacer // pacer for API calls // retryOnce is NOT intended as a pacer for API calls. // The intended use case is to repeat an action that failed because // some preconditions were not previously fulfilled. 
// Code using this should then establish these preconditions // and let the pacer retry the operation. retryOnce *pacer.Pacer // pacer with no delays to retry certain operations once tokenRenewer *oauthutil.Renew // renew the token on expiry } // Object describes a hidrive object. // // Will definitely have the remote-path but may lack meta-information. type Object struct { fs *Fs // what this object is part of remote string // The remote path hasMetadata bool // whether info below has been set size int64 // size of the object modTime time.Time // modification time of the object id string // ID of the object hash string // content-hash of the object } // ------------------------------------------------------------ // Name returns the name of the remote (as passed into NewFs). func (f *Fs) Name() string { return f.name } // Root returns the name of the remote (as passed into NewFs). func (f *Fs) Root() string { return f.root } // String returns a string-representation of this Fs. func (f *Fs) String() string { return fmt.Sprintf("HiDrive root '%s'", f.root) } // Precision returns the precision of this Fs. func (f *Fs) Precision() time.Duration { return time.Second } // Hashes returns the supported hash sets. func (f *Fs) Hashes() hash.Set { return hash.Set(hidrivehashType) } // Features returns the optional features of this Fs. func (f *Fs) Features() *fs.Features { return f.features } // errorHandler parses a non 2xx error response into an error. func errorHandler(resp *http.Response) error { // Decode error response. errResponse := new(api.Error) err := rest.DecodeJSON(resp, &errResponse) if err != nil { fs.Debugf(nil, "Couldn't decode error response: %v", err) } _, err = errResponse.Code.Int64() if err != nil { errResponse.Code = json.Number(strconv.Itoa(resp.StatusCode)) } return errResponse } // NewFs creates a new file system from the path. 
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { //fs.Debugf(nil, "hidrive: creating new Fs.") // Parse config into Options struct. opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } // Clean root-prefix and root-path. // NOTE: With the default-encoding "." and ".." will be encoded, // but with custom encodings without encoder.EncodeDot // "." and ".." will be interpreted as paths. if opt.RootPrefix != "" { opt.RootPrefix = path.Clean(opt.Enc.FromStandardPath(opt.RootPrefix)) } root = path.Clean(opt.Enc.FromStandardPath(root)) client, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig) if err != nil { return nil, fmt.Errorf("failed to configure HiDrive: %w", err) } f := &Fs{ name: name, root: root, opt: *opt, srv: rest.NewClient(client).SetRoot(opt.EndpointAPI), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), retryOnce: pacer.New(pacer.RetriesOption(2), pacer.MaxConnectionsOption(-1), pacer.CalculatorOption(&pacer.ZeroDelayCalculator{})), } f.features = (&fs.Features{ CanHaveEmptyDirectories: true, }).Fill(ctx, f) f.srv.SetErrorHandler(errorHandler) if ts != nil { transaction := func() error { resolvedRoot := f.resolvePath("") _, err := f.fetchMetadataForPath(ctx, resolvedRoot, api.HiDriveObjectNoMetadataFields) return err } f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, transaction) } // Do not allow the root-prefix to be nonexistent nor a directory, // but it can be empty. 
if f.opt.RootPrefix != "" { item, err := f.fetchMetadataForPath(ctx, f.opt.RootPrefix, api.HiDriveObjectNoMetadataFields) if err != nil { return nil, fmt.Errorf("could not access root-prefix: %w", err) } if item.Type != api.HiDriveObjectTypeDirectory { return nil, errors.New("the root-prefix needs to point to a valid directory or be empty") } } resolvedRoot := f.resolvePath("") item, err := f.fetchMetadataForPath(ctx, resolvedRoot, api.HiDriveObjectNoMetadataFields) if err != nil { if isHTTPError(err, 404) { // NOTE: NewFs needs to work with paths that do not exist, // in case they will be created later (see mkdir). return f, nil } return nil, fmt.Errorf("could not access root-path: %w", err) } if item.Type != api.HiDriveObjectTypeDirectory { fs.Debugf(f, "The root is not a directory. Setting its parent-directory as the new root.") // NOTE: There is no need to check // if the parent-directory is inside the root-prefix: // If the parent-directory was outside, // then the resolved path would be the root-prefix, // therefore the root-prefix would point to a file, // which has already been checked for. // In case the root-prefix is empty, this needs not be checked, // because top-level files cannot exist. f.root = path.Dir(f.root) return f, fs.ErrorIsFile } return f, nil } // newObject constructs an Object by calling the given function metaFiller // on an Object with no metadata. // // metaFiller should set the metadata of the object or // return an appropriate error. func (f *Fs) newObject(remote string, metaFiller func(*Object) error) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } var err error if metaFiller != nil { err = metaFiller(o) } if err != nil { return nil, err } return o, nil } // newObjectFromHiDriveObject constructs an Object from the given api.HiDriveObject. 
func (f *Fs) newObjectFromHiDriveObject(remote string, info *api.HiDriveObject) (fs.Object, error) { metaFiller := func(o *Object) error { return o.setMetadata(info) } return f.newObject(remote, metaFiller) } // NewObject finds the Object at remote. // // If remote points to a directory then it returns fs.ErrorIsDir. // If it can not be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { //fs.Debugf(f, "executing NewObject(%s).", remote) metaFiller := func(o *Object) error { return o.readMetadata(ctx) } return f.newObject(remote, metaFiller) } // List the objects and directories in dir into entries. // The entries can be returned in any order, // but should be for a complete directory. // // dir should be "" to list the root, and should not have trailing slashes. // // This returns fs.ErrorDirNotFound if the directory is not found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { //fs.Debugf(f, "executing List(%s).", dir) var iErr error addEntry := func(info *api.HiDriveObject) bool { fs.Debugf(f, "found directory-element with name %s", info.Name) remote := path.Join(dir, info.Name) if info.Type == api.HiDriveObjectTypeDirectory { d := fs.NewDir(remote, info.ModTime()) d.SetID(info.ID) d.SetSize(info.Size) d.SetItems(info.MemberCount) entries = append(entries, d) } else if info.Type == api.HiDriveObjectTypeFile { o, err := f.newObjectFromHiDriveObject(remote, info) if err != nil { iErr = err return true } entries = append(entries, o) } return false } var fields []string if f.opt.OptionalMemberCountDisabled { fields = api.HiDriveObjectWithMetadataFields } else { fields = api.HiDriveObjectWithDirectoryMetadataFields } resolvedDir := f.resolvePath(dir) _, err = f.iterateOverDirectory(ctx, resolvedDir, AllMembers, addEntry, fields, Unsorted) if err != nil { if isHTTPError(err, 404) { return nil, fs.ErrorDirNotFound } return nil, err } if iErr != nil { 
return nil, iErr } return entries, nil } // Put the contents of the io.Reader into the remote path // with the modTime given of the given size. // The existing or new object is returned. // // A new object may have been created or // an existing one accessed even if an error is returned, // in which case both the object and the error will be returned. func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() //fs.Debugf(f, "executing Put(%s, %v).", remote, options) existingObj, err := f.NewObject(ctx, remote) switch err { case nil: return existingObj, existingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // Object was not found, so create a new one. return f.PutUnchecked(ctx, in, src, options...) } return nil, err } // PutStream uploads the contents of the io.Reader to the remote path // with the modTime given of indeterminate size. // The existing or new object is returned. // // A new object may have been created or // an existing one accessed even if an error is returned, // in which case both the object and the error will be returned. func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { //fs.Debugf(f, "executing PutStream(%s, %v).", src.Remote(), options) return f.Put(ctx, in, src, options...) } // PutUnchecked the contents of the io.Reader into the remote path // with the modTime given of the given size. // This guarantees that existing objects will not be overwritten. // The new object is returned. // // This will produce an error if an object already exists at that path. // // In case the upload fails and an object has been created, // this will try to delete the object at that path. // In case the failed upload could not be deleted, // both the object and the (upload-)error will be returned. 
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() modTime := src.ModTime(ctx) //fs.Debugf(f, "executing PutUnchecked(%s, %v).", remote, options) resolvedPath := f.resolvePath(remote) // NOTE: The file creation operation is a single atomic operation. // Thus uploading as much content as is reasonable // (i.e. everything up to the cutoff) in the first request, // avoids files being created on upload failure for small files. // (As opposed to creating an empty file and then uploading the content.) tmpReader, bytesRead, err := readerForChunk(in, int(f.opt.UploadCutoff)) cutoffReader := cachedReader(tmpReader) if err != nil { return nil, err } var info *api.HiDriveObject err = f.retryOnce.Call(func() (bool, error) { var createErr error // Reset the reading index (in case this is a retry). if _, createErr = cutoffReader.Seek(0, io.SeekStart); createErr != nil { return false, createErr } info, createErr = f.createFile(ctx, resolvedPath, cutoffReader, modTime, IgnoreOnExist) if createErr == fs.ErrorDirNotFound { // Create the parent-directory for the object and repeat request. _, parentErr := f.createDirectories(ctx, path.Dir(resolvedPath), IgnoreOnExist) if parentErr != nil && parentErr != fs.ErrorDirExists { fs.Errorf(f, "Tried to create parent-directory for '%s', but failed.", resolvedPath) return false, parentErr } return true, createErr } return false, createErr }) if err != nil { return nil, err } o, err := f.newObjectFromHiDriveObject(remote, info) if err != nil { return nil, err } if fs.SizeSuffix(bytesRead) < f.opt.UploadCutoff { return o, nil } // If there is more left to write, o.Update needs to skip ahead. // Use a fs.SeekOption with the current offset to do this. options = append(options, &fs.SeekOption{Offset: int64(bytesRead)}) err = o.Update(ctx, in, src, options...) 
if err == nil { return o, nil } // Try to remove object at path after the its content could not be uploaded. deleteErr := f.pacer.Call(func() (bool, error) { deleteErr := o.Remove(ctx) return deleteErr == fs.ErrorObjectNotFound, deleteErr }) if deleteErr == nil { return nil, err } fs.Errorf(f, "Tried to delete failed upload at path '%s', but failed: %v", resolvedPath, deleteErr) return o, err } // Mkdir creates the directory if it does not exist. // // This will create any missing parent directories. // // NOTE: If an error occurs while the parent directories are being created, // any directories already created will NOT be deleted again. func (f *Fs) Mkdir(ctx context.Context, dir string) error { //fs.Debugf(f, "executing Mkdir(%s).", dir) resolvedDir := f.resolvePath(dir) _, err := f.createDirectories(ctx, resolvedDir, IgnoreOnExist) if err == fs.ErrorDirExists { // NOTE: The conflict is caused by the directory already existing, // which should be ignored here. return nil } return err } // Rmdir removes the directory if empty. // // This returns fs.ErrorDirNotFound if the directory is not found. // This returns fs.ErrorDirectoryNotEmpty if the directory is not empty. func (f *Fs) Rmdir(ctx context.Context, dir string) error { //fs.Debugf(f, "executing Rmdir(%s).", dir) resolvedDir := f.resolvePath(dir) return f.deleteDirectory(ctx, resolvedDir, false) } // Purge removes the directory and all of its contents. // // This returns fs.ErrorDirectoryNotEmpty if the directory is not empty. func (f *Fs) Purge(ctx context.Context, dir string) error { //fs.Debugf(f, "executing Purge(%s).", dir) resolvedDir := f.resolvePath(dir) return f.deleteDirectory(ctx, resolvedDir, true) } // shouldRetryAndCreateParents returns a boolean as to whether the operation // should be retried after the parent-directories of the destination have been created. // If so, it will create the parent-directories. 
// // If any errors arise while finding the source or // creating the parent-directory those will be returned. // Otherwise returns the originalError. func (f *Fs) shouldRetryAndCreateParents(ctx context.Context, destinationPath string, sourcePath string, originalError error) (bool, error) { if fserrors.ContextError(ctx, &originalError) { return false, originalError } if isHTTPError(originalError, 404) { // Check if source is missing. _, srcErr := f.fetchMetadataForPath(ctx, sourcePath, api.HiDriveObjectNoMetadataFields) if srcErr != nil { return false, srcErr } // Source exists, so the parent of the destination must have been missing. // Create the parent-directory and repeat request. _, parentErr := f.createDirectories(ctx, path.Dir(destinationPath), IgnoreOnExist) if parentErr != nil && parentErr != fs.ErrorDirExists { fs.Errorf(f, "Tried to create parent-directory for '%s', but failed.", destinationPath) return false, parentErr } return true, originalError } return false, originalError } // Copy src to this remote using server-side copy operations. // // It returns the destination Object and a possible error. // // This returns fs.ErrorCantCopy if the operation cannot be performed. // // NOTE: If an error occurs when copying the Object, // any parent-directories already created will NOT be deleted again. // // NOTE: This operation will expand sparse areas in the content of the source-Object // to blocks of 0-bytes in the destination-Object. func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } // Get the absolute path to the source. 
srcPath := srcObj.fs.resolvePath(srcObj.Remote()) //fs.Debugf(f, "executing Copy(%s, %s).", srcPath, remote) dstPath := f.resolvePath(remote) var info *api.HiDriveObject err := f.retryOnce.Call(func() (bool, error) { var copyErr error info, copyErr = f.copyFile(ctx, srcPath, dstPath, OverwriteOnExist) return f.shouldRetryAndCreateParents(ctx, dstPath, srcPath, copyErr) }) if err != nil { return nil, err } dstObj, err := f.newObjectFromHiDriveObject(remote, info) if err != nil { return nil, err } return dstObj, nil } // Move src to this remote using server-side move operations. // // It returns the destination Object and a possible error. // // This returns fs.ErrorCantMove if the operation cannot be performed. // // NOTE: If an error occurs when moving the Object, // any parent-directories already created will NOT be deleted again. // // NOTE: This operation will expand sparse areas in the content of the source-Object // to blocks of 0-bytes in the destination-Object. func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } // Get the absolute path to the source. srcPath := srcObj.fs.resolvePath(srcObj.Remote()) //fs.Debugf(f, "executing Move(%s, %s).", srcPath, remote) dstPath := f.resolvePath(remote) var info *api.HiDriveObject err := f.retryOnce.Call(func() (bool, error) { var moveErr error info, moveErr = f.moveFile(ctx, srcPath, dstPath, OverwriteOnExist) return f.shouldRetryAndCreateParents(ctx, dstPath, srcPath, moveErr) }) if err != nil { return nil, err } dstObj, err := f.newObjectFromHiDriveObject(remote, info) if err != nil { return nil, err } return dstObj, nil } // DirMove moves from src at srcRemote to this remote at dstRemote // using server-side move operations. // // This returns fs.ErrorCantCopy if the operation cannot be performed. 
// This returns fs.ErrorDirExists if the destination already exists. // // NOTE: If an error occurs when moving the directory, // any parent-directories already created will NOT be deleted again. func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } // Get the absolute path to the source. srcPath := srcFs.resolvePath(srcRemote) //fs.Debugf(f, "executing DirMove(%s, %s).", srcPath, dstRemote) dstPath := f.resolvePath(dstRemote) err := f.retryOnce.Call(func() (bool, error) { var moveErr error _, moveErr = f.moveDirectory(ctx, srcPath, dstPath, IgnoreOnExist) return f.shouldRetryAndCreateParents(ctx, dstPath, srcPath, moveErr) }) if err != nil { if isHTTPError(err, 409) { return fs.ErrorDirExists } return err } return nil } // Shutdown shutdown the fs func (f *Fs) Shutdown(ctx context.Context) error { f.tokenRenewer.Shutdown() return nil } // ------------------------------------------------------------ // Fs returns the parent Fs. func (o *Object) Fs() fs.Info { return o.fs } // String returns a string-representation of this Object. func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path. func (o *Object) Remote() string { return o.remote } // ID returns the ID of the Object if known, or "" if not. func (o *Object) ID() string { err := o.readMetadata(context.TODO()) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return "" } return o.id } // Hash returns the selected checksum of the file. // If no checksum is available it returns "". 
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { err := o.readMetadata(ctx) if err != nil { return "", fmt.Errorf("failed to read hash from metadata: %w", err) } switch t { case hidrivehashType: return o.hash, nil default: return "", hash.ErrUnsupported } } // Size returns the size of an object in bytes. func (o *Object) Size() int64 { err := o.readMetadata(context.TODO()) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return -1 } return o.size } // setMetadata sets the metadata from info. func (o *Object) setMetadata(info *api.HiDriveObject) error { if info.Type == api.HiDriveObjectTypeDirectory { return fs.ErrorIsDir } if info.Type != api.HiDriveObjectTypeFile { return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile) } o.hasMetadata = true o.size = info.Size o.modTime = info.ModTime() o.id = info.ID o.hash = info.ContentHash return nil } // readMetadata fetches the metadata if it has not already been fetched. func (o *Object) readMetadata(ctx context.Context) error { if o.hasMetadata { return nil } resolvedPath := o.fs.resolvePath(o.remote) info, err := o.fs.fetchMetadataForPath(ctx, resolvedPath, api.HiDriveObjectWithMetadataFields) if err != nil { if isHTTPError(err, 404) { return fs.ErrorObjectNotFound } return err } return o.setMetadata(info) } // ModTime returns the modification time of the object. func (o *Object) ModTime(ctx context.Context) time.Time { err := o.readMetadata(ctx) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return time.Now() } return o.modTime } // SetModTime sets the metadata on the object to set the modification date. 
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { parameters := api.NewQueryParameters() resolvedPath := o.fs.resolvePath(o.remote) parameters.SetPath(resolvedPath) err := parameters.SetTime("mtime", modTime) if err != nil { return err } opts := rest.Opts{ Method: "PATCH", Path: "/meta", Parameters: parameters.Values, NoResponse: true, } var resp *http.Response err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) return o.fs.shouldRetry(ctx, resp, err) }) if err != nil { return err } o.modTime = modTime return nil } // Storable says whether this object can be stored. func (o *Object) Storable() bool { return true } // Open an object for reading. func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { parameters := api.NewQueryParameters() resolvedPath := o.fs.resolvePath(o.remote) parameters.SetPath(resolvedPath) fs.FixRangeOption(options, o.Size()) opts := rest.Opts{ Method: "GET", Path: "/file", Parameters: parameters.Values, Options: options, } var resp *http.Response var err error err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) return o.fs.shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } return resp.Body, err } // Update the existing object // with the contents of the io.Reader, modTime and size. // // For unknown-sized contents (indicated by src.Size() == -1) // this will try to properly upload it in multiple chunks. func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { //fs.Debugf(o.fs, "executing Update(%s, %v).", o.remote, options) modTime := src.ModTime(ctx) resolvedPath := o.fs.resolvePath(o.remote) if o.fs.tokenRenewer != nil { o.fs.tokenRenewer.Start() defer o.fs.tokenRenewer.Stop() } // PutUnchecked can pass a valid SeekOption to skip ahead. 
var offset uint64 for _, option := range options { if seekoption, ok := option.(*fs.SeekOption); ok { offset = uint64(seekoption.Offset) break } } var info *api.HiDriveObject var err, metaErr error if offset > 0 || src.Size() == -1 || src.Size() >= int64(o.fs.opt.UploadCutoff) { fs.Debugf(o.fs, "Uploading with chunks of size %v and %v transfers in parallel at path '%s'.", int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency, resolvedPath) // NOTE: o.fs.opt.UploadChunkSize should always // be between 0 and MaximumUploadBytes, // so the conversion to an int does not cause problems for valid inputs. if offset > 0 { // NOTE: The offset is only set // when the file was newly created, // therefore the file does not need truncating. _, err = o.fs.updateFileChunked(ctx, resolvedPath, in, offset, int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency) if err == nil { err = o.SetModTime(ctx, modTime) } } else { _, _, err = o.fs.uploadFileChunked(ctx, resolvedPath, in, modTime, int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency) } // Try to check if object was updated, either way. // Metadata should be updated even if the upload fails. info, metaErr = o.fs.fetchMetadataForPath(ctx, resolvedPath, api.HiDriveObjectWithMetadataFields) } else { info, err = o.fs.overwriteFile(ctx, resolvedPath, cachedReader(in), modTime) metaErr = err } // Update metadata of this object, // if there was no error with getting the metadata. if metaErr == nil { metaErr = o.setMetadata(info) } // Errors with the upload-process are more relevant, return those first. if err != nil { return err } return metaErr } // Remove an object. func (o *Object) Remove(ctx context.Context) error { resolvedPath := o.fs.resolvePath(o.remote) return o.fs.deleteObject(ctx, resolvedPath) } // Check the interfaces are satisfied. var (
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hidrive/helpers.go
backend/hidrive/helpers.go
package hidrive // This file is for helper-functions which may provide more general and // specialized functionality than the generic interfaces. // There are two sections: // 1. methods bound to Fs // 2. other functions independent from Fs used throughout the package // NOTE: Functions accessing paths expect any relative paths // to be resolved prior to execution with resolvePath(...). import ( "bytes" "context" "errors" "io" "net/http" "path" "strconv" "sync" "time" "github.com/rclone/rclone/backend/hidrive/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/lib/ranges" "github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/rest" "golang.org/x/sync/errgroup" "golang.org/x/sync/semaphore" ) const ( // MaximumUploadBytes represents the maximum amount of bytes // a single upload-operation will support. MaximumUploadBytes = 2147483647 // = 2GiB - 1 // iterationChunkSize represents the chunk size used to iterate directory contents. iterationChunkSize = 5000 ) var ( // retryErrorCodes is a slice of error codes that we will always retry. retryErrorCodes = []int{ 429, // Too Many Requests 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // ErrorFileExists is returned when a query tries to create a file // that already exists. ErrorFileExists = errors.New("destination file already exists") ) // MemberType represents the possible types of entries a directory can contain. type MemberType string // possible values for MemberType const ( AllMembers MemberType = "all" NoMembers MemberType = "none" DirectoryMembers MemberType = api.HiDriveObjectTypeDirectory FileMembers MemberType = api.HiDriveObjectTypeFile SymlinkMembers MemberType = api.HiDriveObjectTypeSymlink ) // SortByField represents possible fields to sort entries of a directory by. 
type SortByField string // possible values for SortByField const ( descendingSort string = "-" SortByName SortByField = "name" SortByModTime SortByField = "mtime" SortByObjectType SortByField = "type" SortBySize SortByField = "size" SortByNameDescending SortByField = SortByField(descendingSort) + SortByName SortByModTimeDescending SortByField = SortByField(descendingSort) + SortByModTime SortByObjectTypeDescending SortByField = SortByField(descendingSort) + SortByObjectType SortBySizeDescending SortByField = SortByField(descendingSort) + SortBySize ) var ( // Unsorted disables sorting and can therefore not be combined with other values. Unsorted = []SortByField{"none"} // DefaultSorted does not specify how to sort and // therefore implies the default sort order. DefaultSorted = []SortByField{} ) // CopyOrMoveOperationType represents the possible types of copy- and move-operations. type CopyOrMoveOperationType int // possible values for CopyOrMoveOperationType const ( MoveOriginal CopyOrMoveOperationType = iota CopyOriginal CopyOriginalPreserveModTime ) // OnExistAction represents possible actions the API should take, // when a request tries to create a path that already exists. type OnExistAction string // possible values for OnExistAction const ( // IgnoreOnExist instructs the API not to execute // the request in case of a conflict, but to return an error. IgnoreOnExist OnExistAction = "ignore" // AutoNameOnExist instructs the API to automatically rename // any conflicting request-objects. AutoNameOnExist OnExistAction = "autoname" // OverwriteOnExist instructs the API to overwrite any conflicting files. // This can only be used, if the request operates on files directly. // (For example when moving/copying a file.) // For most requests this action will simply be ignored. OverwriteOnExist OnExistAction = "overwrite" ) // shouldRetry returns a boolean as to whether this resp and err deserve to be retried. // It tries to expire/invalidate the token, if necessary. 
// It returns the err as a convenience. func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } if resp != nil && (resp.StatusCode == 401 || isHTTPError(err, 401)) && len(resp.Header["Www-Authenticate"]) > 0 { fs.Debugf(f, "Token might be invalid: %v", err) if f.tokenRenewer != nil { iErr := f.tokenRenewer.Expire() if iErr == nil { return true, err } } } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // resolvePath resolves the given (relative) path and // returns a path suitable for API-calls. // This will consider the root-path of the fs and any needed prefixes. // // Any relative paths passed to functions that access these paths should // be resolved with this first! func (f *Fs) resolvePath(objectPath string) string { resolved := path.Join(f.opt.RootPrefix, f.root, f.opt.Enc.FromStandardPath(objectPath)) return resolved } // iterateOverDirectory calls the given function callback // on each item found in a given directory. // // If callback ever returns true then this exits early with found = true. func (f *Fs) iterateOverDirectory(ctx context.Context, directory string, searchOnly MemberType, callback func(*api.HiDriveObject) bool, fields []string, sortBy []SortByField) (found bool, err error) { parameters := api.NewQueryParameters() parameters.SetPath(directory) parameters.AddFields("members.", fields...) parameters.AddFields("", api.DirectoryContentFields...) parameters.Set("members", string(searchOnly)) for _, v := range sortBy { // The explicit conversion is necessary for each element. 
parameters.AddList("sort", ",", string(v)) } opts := rest.Opts{ Method: "GET", Path: "/dir", Parameters: parameters.Values, } iterateContent := func(result *api.DirectoryContent, err error) (bool, error) { if err != nil { return false, err } for _, item := range result.Entries { item.Name = f.opt.Enc.ToStandardName(item.Name) if callback(&item) { return true, nil } } return false, nil } return f.paginateDirectoryAccess(ctx, &opts, iterationChunkSize, 0, iterateContent) } // paginateDirectoryAccess executes requests specified via ctx and opts // which should produce api.DirectoryContent. // This will paginate the requests using limit starting at the given offset. // // The given function callback is called on each api.DirectoryContent found // along with any errors that occurred. // If callback ever returns true then this exits early with found = true. // If callback ever returns an error then this exits early with that error. func (f *Fs) paginateDirectoryAccess(ctx context.Context, opts *rest.Opts, limit int64, offset int64, callback func(*api.DirectoryContent, error) (bool, error)) (found bool, err error) { for { opts.Parameters.Set("limit", strconv.FormatInt(offset, 10)+","+strconv.FormatInt(limit, 10)) var result api.DirectoryContent var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) found, err = callback(&result, err) if found || err != nil { return found, err } offset += int64(len(result.Entries)) if offset >= result.TotalCount || limit > int64(len(result.Entries)) { break } } return false, nil } // fetchMetadataForPath reads the metadata from the path. func (f *Fs) fetchMetadataForPath(ctx context.Context, path string, fields []string) (*api.HiDriveObject, error) { parameters := api.NewQueryParameters() parameters.SetPath(path) parameters.AddFields("", fields...) 
opts := rest.Opts{ Method: "GET", Path: "/meta", Parameters: parameters.Values, } var result api.HiDriveObject var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } return &result, nil } // copyOrMove copies or moves a directory or file // from the source-path to the destination-path. // // The operation will only be successful // if the parent-directory of the destination-path exists. // // NOTE: Use the explicit methods instead of directly invoking this method. // (Those are: copyDirectory, moveDirectory, copyFile, moveFile.) func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType CopyOrMoveOperationType, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) { parameters := api.NewQueryParameters() parameters.Set("src", source) parameters.Set("dst", destination) if onExist == AutoNameOnExist || (onExist == OverwriteOnExist && !isDirectory) { parameters.Set("on_exist", string(onExist)) } endpoint := "/" if isDirectory { endpoint += "dir" } else { endpoint += "file" } switch operationType { case MoveOriginal: endpoint += "/move" case CopyOriginalPreserveModTime: parameters.Set("preserve_mtime", strconv.FormatBool(true)) fallthrough case CopyOriginal: endpoint += "/copy" } opts := rest.Opts{ Method: "POST", Path: endpoint, Parameters: parameters.Values, } var result api.HiDriveObject var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } return &result, nil } // moveDirectory moves the directory at the source-path to the destination-path and // returns the resulting api-object if successful. // // The operation will only be successful // if the parent-directory of the destination-path exists. 
func (f *Fs) moveDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) { return f.copyOrMove(ctx, true, MoveOriginal, source, destination, onExist) } // copyFile copies the file at the source-path to the destination-path and // returns the resulting api-object if successful. // // The operation will only be successful // if the parent-directory of the destination-path exists. // // NOTE: This operation will expand sparse areas in the content of the source-file // to blocks of 0-bytes in the destination-file. func (f *Fs) copyFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) { return f.copyOrMove(ctx, false, CopyOriginalPreserveModTime, source, destination, onExist) } // moveFile moves the file at the source-path to the destination-path and // returns the resulting api-object if successful. // // The operation will only be successful // if the parent-directory of the destination-path exists. // // NOTE: This operation may expand sparse areas in the content of the source-file // to blocks of 0-bytes in the destination-file. func (f *Fs) moveFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) { return f.copyOrMove(ctx, false, MoveOriginal, source, destination, onExist) } // createDirectory creates the directory at the given path and // returns the resulting api-object if successful. // // The directory will only be created if its parent-directory exists. // This returns fs.ErrorDirNotFound if the parent-directory is not found. // This returns fs.ErrorDirExists if the directory already exists. 
func (f *Fs) createDirectory(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) { parameters := api.NewQueryParameters() parameters.SetPath(directory) if onExist == AutoNameOnExist { parameters.Set("on_exist", string(onExist)) } opts := rest.Opts{ Method: "POST", Path: "/dir", Parameters: parameters.Values, } var result api.HiDriveObject var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) switch { case err == nil: return &result, nil case isHTTPError(err, 404): return nil, fs.ErrorDirNotFound case isHTTPError(err, 409): return nil, fs.ErrorDirExists } return nil, err } // createDirectories creates the directory at the given path // along with any missing parent directories and // returns the resulting api-object (of the created directory) if successful. // // This returns fs.ErrorDirExists if the directory already exists. // // If an error occurs while the parent directories are being created, // any directories already created will NOT be deleted again. func (f *Fs) createDirectories(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) { result, err := f.createDirectory(ctx, directory, onExist) if err == nil { return result, nil } if err != fs.ErrorDirNotFound { return nil, err } parentDirectory := path.Dir(directory) _, err = f.createDirectories(ctx, parentDirectory, onExist) if err != nil && err != fs.ErrorDirExists { return nil, err } // NOTE: Ignoring fs.ErrorDirExists does no harm, // since it does not mean the child directory cannot be created. return f.createDirectory(ctx, directory, onExist) } // deleteDirectory deletes the directory at the given path. // // If recursive is false, the directory will only be deleted if it is empty. // If recursive is true, the directory will be deleted regardless of its content. 
// This returns fs.ErrorDirNotFound if the directory is not found. // This returns fs.ErrorDirectoryNotEmpty if the directory is not empty and // recursive is false. func (f *Fs) deleteDirectory(ctx context.Context, directory string, recursive bool) error { parameters := api.NewQueryParameters() parameters.SetPath(directory) parameters.Set("recursive", strconv.FormatBool(recursive)) opts := rest.Opts{ Method: "DELETE", Path: "/dir", Parameters: parameters.Values, NoResponse: true, } var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.Call(ctx, &opts) return f.shouldRetry(ctx, resp, err) }) switch { case isHTTPError(err, 404): return fs.ErrorDirNotFound case isHTTPError(err, 409): return fs.ErrorDirectoryNotEmpty } return err } // deleteObject deletes the object/file at the given path. // // This returns fs.ErrorObjectNotFound if the object is not found. func (f *Fs) deleteObject(ctx context.Context, path string) error { parameters := api.NewQueryParameters() parameters.SetPath(path) opts := rest.Opts{ Method: "DELETE", Path: "/file", Parameters: parameters.Values, NoResponse: true, } var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.Call(ctx, &opts) return f.shouldRetry(ctx, resp, err) }) if isHTTPError(err, 404) { return fs.ErrorObjectNotFound } return err } // createFile creates a file at the given path // with the content of the io.ReadSeeker. // This guarantees that existing files will not be overwritten. // The maximum size of the content is limited by MaximumUploadBytes. // The io.ReadSeeker should be resettable by seeking to its start. // If modTime is not the zero time instant, // it will be set as the file's modification time after the operation. // // This returns fs.ErrorDirNotFound // if the parent directory of the file is not found. // This returns ErrorFileExists if a file already exists at the specified path. 
func (f *Fs) createFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time, onExist OnExistAction) (*api.HiDriveObject, error) { parameters := api.NewQueryParameters() parameters.SetFileInDirectory(path) if onExist == AutoNameOnExist { parameters.Set("on_exist", string(onExist)) } var err error if !modTime.IsZero() { err = parameters.SetTime("mtime", modTime) if err != nil { return nil, err } } opts := rest.Opts{ Method: "POST", Path: "/file", Body: content, ContentType: "application/octet-stream", Parameters: parameters.Values, } var result api.HiDriveObject var resp *http.Response err = f.pacer.Call(func() (bool, error) { // Reset the reading index (in case this is a retry). if _, err = content.Seek(0, io.SeekStart); err != nil { return false, err } resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) switch { case err == nil: return &result, nil case isHTTPError(err, 404): return nil, fs.ErrorDirNotFound case isHTTPError(err, 409): return nil, ErrorFileExists } return nil, err } // overwriteFile updates the content of the file at the given path // with the content of the io.ReadSeeker. // If the file does not exist it will be created. // The maximum size of the content is limited by MaximumUploadBytes. // The io.ReadSeeker should be resettable by seeking to its start. // If modTime is not the zero time instant, // it will be set as the file's modification time after the operation. // // This returns fs.ErrorDirNotFound // if the parent directory of the file is not found. 
func (f *Fs) overwriteFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time) (*api.HiDriveObject, error) { parameters := api.NewQueryParameters() parameters.SetFileInDirectory(path) var err error if !modTime.IsZero() { err = parameters.SetTime("mtime", modTime) if err != nil { return nil, err } } opts := rest.Opts{ Method: "PUT", Path: "/file", Body: content, ContentType: "application/octet-stream", Parameters: parameters.Values, } var result api.HiDriveObject var resp *http.Response err = f.pacer.Call(func() (bool, error) { // Reset the reading index (in case this is a retry). if _, err = content.Seek(0, io.SeekStart); err != nil { return false, err } resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) switch { case err == nil: return &result, nil case isHTTPError(err, 404): return nil, fs.ErrorDirNotFound } return nil, err } // uploadFileChunked updates the content of the existing file at the given path // with the content of the io.Reader. // Returns the position of the last successfully written byte, stopping before the first failed write. // If nothing was written this will be 0. // Returns the resulting api-object if successful. // // Replaces the file contents by uploading multiple chunks of the given size in parallel. // Therefore this can and be used to upload files of any size efficiently. // The number of parallel transfers is limited by transferLimit which should larger than 0. // If modTime is not the zero time instant, // it will be set as the file's modification time after the operation. // // NOTE: This method uses updateFileChunked and may create sparse files, // if the upload of a chunk fails unexpectedly. // See note about sparse files in patchFile. // If any of the uploads fail, the process will be aborted and // the first error that occurred will be returned. // This is not an atomic operation, // therefore if the upload fails the file may be partially modified. 
// // This returns fs.ErrorObjectNotFound if the object is not found. func (f *Fs) uploadFileChunked(ctx context.Context, path string, content io.Reader, modTime time.Time, chunkSize int, transferLimit int64) (okSize uint64, info *api.HiDriveObject, err error) { okSize, err = f.updateFileChunked(ctx, path, content, 0, chunkSize, transferLimit) if err == nil { info, err = f.resizeFile(ctx, path, okSize, modTime) } return okSize, info, err } // updateFileChunked updates the content of the existing file at the given path // starting at the given offset. // Returns the position of the last successfully written byte, stopping before the first failed write. // If nothing was written this will be 0. // // Replaces the file contents starting from the given byte offset // with the content of the io.Reader. // If the offset is beyond the file end, the file is extended up to the offset. // // The upload is done multiple chunks of the given size in parallel. // Therefore this can and be used to upload files of any size efficiently. // The number of parallel transfers is limited by transferLimit which should larger than 0. // // NOTE: Because it is inefficient to set the modification time with every chunk, // setting it to a specific value must be done in a separate request // after this operation finishes. // // NOTE: This method uses patchFile and may create sparse files, // especially if the upload of a chunk fails unexpectedly. // See note about sparse files in patchFile. // If any of the uploads fail, the process will be aborted and // the first error that occurred will be returned. // This is not an atomic operation, // therefore if the upload fails the file may be partially modified. // // This returns fs.ErrorObjectNotFound if the object is not found. 
func (f *Fs) updateFileChunked(ctx context.Context, path string, content io.Reader, offset uint64, chunkSize int, transferLimit int64) (okSize uint64, err error) { var ( okChunksMu sync.Mutex // protects the variables below okChunks []ranges.Range ) g, gCtx := errgroup.WithContext(ctx) transferSemaphore := semaphore.NewWeighted(transferLimit) var readErr error startMoreTransfers := true zeroTime := time.Time{} for chunk := uint64(0); startMoreTransfers; chunk++ { // Acquire semaphore to limit number of transfers in parallel. readErr = transferSemaphore.Acquire(gCtx, 1) if readErr != nil { break } // Read a chunk of data. chunkReader, bytesRead, readErr := readerForChunk(content, chunkSize) if bytesRead < chunkSize { startMoreTransfers = false } if readErr != nil || bytesRead <= 0 { break } // Transfer the chunk. chunkOffset := uint64(chunkSize)*chunk + offset g.Go(func() error { // After this upload is done, // signal that another transfer can be started. defer transferSemaphore.Release(1) uploadErr := f.patchFile(gCtx, path, cachedReader(chunkReader), chunkOffset, zeroTime) if uploadErr == nil { // Remember successfully written chunks. okChunksMu.Lock() okChunks = append(okChunks, ranges.Range{Pos: int64(chunkOffset), Size: int64(bytesRead)}) okChunksMu.Unlock() fs.Debugf(f, "Done uploading chunk of size %v at offset %v.", bytesRead, chunkOffset) } else { fs.Infof(f, "Error while uploading chunk at offset %v. Error is %v.", chunkOffset, uploadErr) } return uploadErr }) } if readErr != nil { // Log the error in case it is later ignored because of an upload-error. fs.Infof(f, "Error while reading/preparing to upload a chunk. Error is %v.", readErr) } err = g.Wait() // Compute the first continuous range of the file content, // which does not contain any failed chunks. // Do not forget to add the file content up to the starting offset, // which is presumed to be already correct. 
rs := ranges.Ranges{} rs.Insert(ranges.Range{Pos: 0, Size: int64(offset)}) for _, chunkRange := range okChunks { rs.Insert(chunkRange) } if len(rs) > 0 && rs[0].Pos == 0 { okSize = uint64(rs[0].Size) } if err != nil { return okSize, err } if readErr != nil { return okSize, readErr } return okSize, nil } // patchFile updates the content of the existing file at the given path // starting at the given offset. // // Replaces the file contents starting from the given byte offset // with the content of the io.ReadSeeker. // If the offset is beyond the file end, the file is extended up to the offset. // The maximum size of the update is limited by MaximumUploadBytes. // The io.ReadSeeker should be resettable by seeking to its start. // If modTime is not the zero time instant, // it will be set as the file's modification time after the operation. // // NOTE: By extending the file up to the offset this may create sparse files, // which allocate less space on the file system than their apparent size indicates, // since holes between data chunks are "real" holes // and not regions made up of consecutive 0-bytes. // Subsequent operations (such as copying data) // usually expand the holes into regions of 0-bytes. // // This returns fs.ErrorObjectNotFound if the object is not found. func (f *Fs) patchFile(ctx context.Context, path string, content io.ReadSeeker, offset uint64, modTime time.Time) error { parameters := api.NewQueryParameters() parameters.SetPath(path) parameters.Set("offset", strconv.FormatUint(offset, 10)) if !modTime.IsZero() { err := parameters.SetTime("mtime", modTime) if err != nil { return err } } opts := rest.Opts{ Method: "PATCH", Path: "/file", Body: content, ContentType: "application/octet-stream", Parameters: parameters.Values, NoResponse: true, } var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { // Reset the reading index (in case this is a retry). 
_, err = content.Seek(0, io.SeekStart) if err != nil { return false, err } resp, err = f.srv.Call(ctx, &opts) if isHTTPError(err, 423) { return true, err } return f.shouldRetry(ctx, resp, err) }) if isHTTPError(err, 404) { return fs.ErrorObjectNotFound } return err } // resizeFile updates the existing file at the given path to be of the given size // and returns the resulting api-object if successful. // // If the given size is smaller than the current filesize, // the file is cut/truncated at that position. // If the given size is larger, the file is extended up to that position. // If modTime is not the zero time instant, // it will be set as the file's modification time after the operation. // // NOTE: By extending the file this may create sparse files, // which allocate less space on the file system than their apparent size indicates, // since holes between data chunks are "real" holes // and not regions made up of consecutive 0-bytes. // Subsequent operations (such as copying data) // usually expand the holes into regions of 0-bytes. // // This returns fs.ErrorObjectNotFound if the object is not found. 
func (f *Fs) resizeFile(ctx context.Context, path string, size uint64, modTime time.Time) (*api.HiDriveObject, error) {
	// Build the query: target path, desired size and (optionally) the mtime
	// to stamp onto the file after truncation/extension.
	parameters := api.NewQueryParameters()
	parameters.SetPath(path)
	parameters.Set("size", strconv.FormatUint(size, 10))
	if !modTime.IsZero() {
		err := parameters.SetTime("mtime", modTime)
		if err != nil {
			return nil, err
		}
	}
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/file/truncate",
		Parameters: parameters.Values,
	}
	var result api.HiDriveObject
	var resp *http.Response
	var err error
	// Issue the call through the pacer so rate-limits and transient
	// failures are retried according to shouldRetry.
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})
	switch {
	case err == nil:
		return &result, nil
	case isHTTPError(err, 404):
		// Map the API's 404 onto rclone's canonical not-found error.
		return nil, fs.ErrorObjectNotFound
	}
	return nil, err
}

// ------------------------------------------------------------

// isHTTPError compares the numerical status code
// of an api.Error to the given HTTP status.
//
// If the given error is not an api.Error or
// a numerical status code could not be determined, this returns false.
// Otherwise this returns whether the status code of the error is equal to the given status.
func isHTTPError(err error, status int64) bool {
	if apiErr, ok := err.(*api.Error); ok {
		// The API encodes the status as a json.Number; decoding may fail,
		// in which case we cannot match and report false.
		errStatus, decodeErr := apiErr.Code.Int64()
		if decodeErr == nil && errStatus == status {
			return true
		}
	}
	return false
}

// createHiDriveScopes creates oauth-scopes
// from the given user-role and access-permissions.
//
// If the arguments are empty, they will not be included in the result.
func createHiDriveScopes(role string, access string) []string {
	switch {
	case role != "" && access != "":
		// Both present: combine into a single "access,role" scope string.
		return []string{access + "," + role}
	case role != "":
		return []string{role}
	case access != "":
		return []string{access}
	}
	return []string{}
}

// cachedReader returns a version of the reader that caches its contents and
// can therefore be reset using Seek.
func cachedReader(reader io.Reader) io.ReadSeeker { bytesReader, ok := reader.(*bytes.Reader) if ok { return bytesReader } repeatableReader, ok := reader.(*readers.RepeatableReader) if ok { return repeatableReader } return readers.NewRepeatableReader(reader) } // readerForChunk reads a chunk of bytes from reader (after handling any accounting). // Returns a new io.Reader (chunkReader) for that chunk // and the number of bytes that have been read from reader. func readerForChunk(reader io.Reader, length int) (chunkReader io.Reader, bytesRead int, err error) { // Unwrap any accounting from the input if present. reader, wrap := accounting.UnWrap(reader) // Read a chunk of data. buffer := make([]byte, length) bytesRead, err = io.ReadFull(reader, buffer) if err == io.EOF || err == io.ErrUnexpectedEOF { err = nil } if err != nil { return nil, bytesRead, err } // Truncate unused capacity. buffer = buffer[:bytesRead] // Use wrap to put any accounting back for chunkReader. return wrap(bytes.NewReader(buffer)), bytesRead, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hidrive/api/types.go
backend/hidrive/api/types.go
// Package api has type definitions and code related to API-calls for the HiDrive-API.
package api

import (
	"encoding/json"
	"fmt"
	"net/url"
	"strconv"
	"time"
)

// Time represents date and time information for the API.
// It is transmitted as an integer number of Unix-seconds.
type Time time.Time

// MarshalJSON turns Time into JSON (in Unix-time/UTC).
func (t *Time) MarshalJSON() ([]byte, error) {
	secs := time.Time(*t).Unix()
	return []byte(strconv.FormatInt(secs, 10)), nil
}

// UnmarshalJSON turns JSON into Time.
// The input must be a plain (unquoted) integer of Unix-seconds.
func (t *Time) UnmarshalJSON(data []byte) error {
	secs, err := strconv.ParseInt(string(data), 10, 64)
	if err != nil {
		return err
	}
	*t = Time(time.Unix(secs, 0))
	return nil
}

// Error is returned from the API when things go wrong.
type Error struct {
	Code json.Number `json:"code"`
	// ContextInfo carries any additional payload the API attaches to the
	// error; it is kept raw (undecoded) and matched by its field name.
	ContextInfo json.RawMessage
	Message     string `json:"msg"`
}

// Error returns a string for the error and satisfies the error interface.
func (e *Error) Error() string {
	out := fmt.Sprintf("Error %q", e.Code.String())
	if e.Message != "" {
		out += ": " + e.Message
	}
	if e.ContextInfo != nil {
		out += fmt.Sprintf(" (%+v)", e.ContextInfo)
	}
	return out
}

// Check Error satisfies the error interface.
var _ error = (*Error)(nil)

// possible types for HiDriveObject
const (
	HiDriveObjectTypeDirectory = "dir"
	HiDriveObjectTypeFile      = "file"
	HiDriveObjectTypeSymlink   = "symlink"
)

// HiDriveObject describes a folder, a symlink or a file.
// Depending on the type and content, not all fields are present.
type HiDriveObject struct { Type string `json:"type"` ID string `json:"id"` ParentID string `json:"parent_id"` Name string `json:"name"` Path string `json:"path"` Size int64 `json:"size"` MemberCount int64 `json:"nmembers"` ModifiedAt Time `json:"mtime"` ChangedAt Time `json:"ctime"` MetaHash string `json:"mhash"` MetaOnlyHash string `json:"mohash"` NameHash string `json:"nhash"` ContentHash string `json:"chash"` IsTeamfolder bool `json:"teamfolder"` Readable bool `json:"readable"` Writable bool `json:"writable"` Shareable bool `json:"shareable"` MIMEType string `json:"mime_type"` } // ModTime returns the modification time of the HiDriveObject. func (i *HiDriveObject) ModTime() time.Time { t := time.Time(i.ModifiedAt) if t.IsZero() { t = time.Time(i.ChangedAt) } return t } // UnmarshalJSON turns JSON into HiDriveObject and // introduces specific default-values where necessary. func (i *HiDriveObject) UnmarshalJSON(data []byte) error { type objectAlias HiDriveObject defaultObject := objectAlias{ Size: -1, MemberCount: -1, } err := json.Unmarshal(data, &defaultObject) if err != nil { return err } name, err := url.PathUnescape(defaultObject.Name) if err == nil { defaultObject.Name = name } *i = HiDriveObject(defaultObject) return nil } // DirectoryContent describes the content of a directory. type DirectoryContent struct { TotalCount int64 `json:"nmembers"` Entries []HiDriveObject `json:"members"` } // UnmarshalJSON turns JSON into DirectoryContent and // introduces specific default-values where necessary. func (d *DirectoryContent) UnmarshalJSON(data []byte) error { type directoryContentAlias DirectoryContent defaultDirectoryContent := directoryContentAlias{ TotalCount: -1, } err := json.Unmarshal(data, &defaultDirectoryContent) if err != nil { return err } *d = DirectoryContent(defaultDirectoryContent) return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hidrive/api/queries.go
backend/hidrive/api/queries.go
package api

import (
	"encoding/json"
	"net/url"
	"path"
	"strings"
	"time"
)

// Some presets for different amounts of information that can be requested for fields;
// it is recommended to only request the information that is actually needed.
var (
	HiDriveObjectNoMetadataFields            = []string{"name", "type"}
	HiDriveObjectWithMetadataFields          = append(HiDriveObjectNoMetadataFields, "id", "size", "mtime", "chash")
	HiDriveObjectWithDirectoryMetadataFields = append(HiDriveObjectWithMetadataFields, "nmembers")
	DirectoryContentFields                   = []string{"nmembers"}
)

// QueryParameters represents the parameters passed to an API-call.
// It embeds url.Values, so all of its methods are available directly.
type QueryParameters struct {
	url.Values
}

// NewQueryParameters initializes an instance of QueryParameters and
// returns a pointer to it.
func NewQueryParameters() *QueryParameters {
	return &QueryParameters{url.Values{}}
}

// SetFileInDirectory sets the appropriate parameters
// to specify a path to a file in a directory.
// This is used by requests that work with paths for files that do not exist yet.
// (For example when creating a file).
// Most requests use the format produced by SetPath(...).
func (p *QueryParameters) SetFileInDirectory(filePath string) {
	// Split the cleaned path into its directory and file components;
	// the directory is cleaned again to strip the trailing slash.
	directory, file := path.Split(path.Clean(filePath))
	p.Set("dir", path.Clean(directory))
	p.Set("name", file)
	// NOTE: It would be possible to switch to pid-based requests
	// by modifying this function.
}

// SetPath sets the appropriate parameters to access the given path.
func (p *QueryParameters) SetPath(objectPath string) {
	p.Set("path", path.Clean(objectPath))
	// NOTE: It would be possible to switch to pid-based requests
	// by modifying this function.
}

// SetTime sets the key to the time-value. It replaces any existing values.
func (p *QueryParameters) SetTime(key string, value time.Time) error { valueAPI := Time(value) valueBytes, err := json.Marshal(&valueAPI) if err != nil { return err } p.Set(key, string(valueBytes)) return nil } // AddList adds the given values as a list // with each value separated by the separator. // It appends to any existing values associated with key. func (p *QueryParameters) AddList(key string, separator string, values ...string) { original := p.Get(key) p.Set(key, strings.Join(values, separator)) if original != "" { p.Set(key, original+separator+p.Get(key)) } } // AddFields sets the appropriate parameter to access the given fields. // The given fields will be appended to any other existing fields. func (p *QueryParameters) AddFields(prefix string, fields ...string) { modifiedFields := make([]string, len(fields)) for i, field := range fields { modifiedFields[i] = prefix + field } p.AddList("fields", ",", modifiedFields...) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hidrive/hidrivehash/hidrivehash_test.go
backend/hidrive/hidrivehash/hidrivehash_test.go
package hidrivehash_test import ( "crypto/sha1" "encoding" "encoding/hex" "fmt" "io" "testing" "github.com/rclone/rclone/backend/hidrive/hidrivehash" "github.com/rclone/rclone/backend/hidrive/hidrivehash/internal" "github.com/stretchr/testify/assert" ) // helper functions to set up test-tables func sha1ArrayAsSlice(sum [sha1.Size]byte) []byte { return sum[:] } func mustDecode(hexstring string) []byte { result, err := hex.DecodeString(hexstring) if err != nil { panic(err) } return result } // ------------------------------------------------------------ var testTableLevelPositionEmbedded = []struct { ins [][]byte outs [][]byte name string }{ { [][]byte{ sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}), sha1ArrayAsSlice([20]byte{78, 188, 156, 219, 173, 54, 81, 55, 47, 220, 222, 207, 201, 21, 57, 252, 255, 239, 251, 186}), }, [][]byte{ sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}), sha1ArrayAsSlice([20]byte{68, 135, 96, 187, 38, 253, 14, 167, 186, 167, 188, 210, 91, 177, 185, 13, 208, 217, 94, 18}), }, "documentation-v3.2rev27-example L0 (position-embedded)", }, { [][]byte{ sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}), sha1ArrayAsSlice([20]byte{75, 211, 153, 190, 125, 179, 67, 49, 60, 149, 98, 246, 142, 20, 11, 254, 159, 162, 129, 237}), sha1ArrayAsSlice([20]byte{150, 2, 9, 153, 97, 153, 189, 104, 147, 14, 77, 203, 244, 243, 25, 212, 67, 48, 111, 107}), }, [][]byte{ sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}), sha1ArrayAsSlice([20]byte{144, 209, 246, 100, 177, 216, 171, 229, 83, 17, 92, 135, 68, 98, 76, 72, 217, 24, 99, 176}), sha1ArrayAsSlice([20]byte{38, 211, 255, 254, 19, 114, 105, 77, 230, 31, 170, 83, 57, 85, 102, 29, 28, 72, 211, 27}), }, "documentation-example L0 
(position-embedded)", }, { [][]byte{ sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}), sha1ArrayAsSlice([20]byte{40, 34, 8, 238, 37, 5, 237, 184, 79, 105, 10, 167, 171, 254, 13, 229, 132, 112, 254, 8}), sha1ArrayAsSlice([20]byte{39, 112, 26, 86, 190, 35, 100, 101, 28, 131, 122, 191, 254, 144, 239, 107, 253, 124, 104, 203}), }, [][]byte{ sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}), sha1ArrayAsSlice([20]byte{213, 157, 141, 227, 213, 178, 25, 111, 200, 145, 77, 164, 17, 247, 202, 167, 37, 46, 0, 124}), sha1ArrayAsSlice([20]byte{253, 13, 168, 58, 147, 213, 125, 212, 229, 20, 200, 100, 16, 136, 186, 19, 34, 170, 105, 71}), }, "documentation-example L1 (position-embedded)", }, } var testTableLevel = []struct { ins [][]byte outs [][]byte name string }{ { [][]byte{ mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"), mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"), mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"), }, [][]byte{ mustDecode("44fe5ca6342568b4167bf990b64e404a3975e1c3"), mustDecode("90d1f664b1d8abe553115c8744624c48d91863b0"), mustDecode("26d3fffe1372694de61faa533955661d1c48d31b"), }, "documentation-example L0", }, { [][]byte{ mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"), mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"), mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"), }, [][]byte{ mustDecode("ad7b84f5b0ac2bb7792842fc65f9bcc1a0bd0274"), mustDecode("d59d8de3d5b2196fc8914da411f7caa7252e007c"), mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"), }, "documentation-example L1", }, { [][]byte{ mustDecode("0000000000000000000000000000000000000000"), mustDecode("0000000000000000000000000000000000000000"), mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"), mustDecode("0000000000000000000000000000000000000000"), mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"), 
mustDecode("0000000000000000000000000000000000000000"), mustDecode("0000000000000000000000000000000000000000"), mustDecode("0000000000000000000000000000000000000000"), mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"), mustDecode("0000000000000000000000000000000000000000"), }, [][]byte{ mustDecode("0000000000000000000000000000000000000000"), mustDecode("0000000000000000000000000000000000000000"), mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"), mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"), mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"), mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"), mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"), mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"), mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"), mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"), }, "mixed-with-empties", }, } var testTable = []struct { data []byte // pattern describes how to use data to construct the hash-input. // For every entry n at even indices this repeats the data n times. // For every entry m at odd indices this repeats a null-byte m times. // The input-data is constructed by concatenating the results in order. 
pattern []int64 out []byte name string }{ { []byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"), []int64{64}, mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"), "documentation-example L0", }, { []byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"), []int64{64 * 256}, mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"), "documentation-example L1", }, { []byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"), []int64{64 * 256, 0, 64 * 128, 4096 * 128, 64*2 + 32}, mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"), "documentation-example L2", }, { []byte("hello rclone\n"), []int64{316}, mustDecode("72370f9c18a2c20b31d71f3f4cee7a3cd2703737"), "not-block-aligned", }, { []byte("hello rclone\n"), []int64{13, 4096 * 3, 4}, mustDecode("a6990b81791f0d2db750b38f046df321c975aa60"), "not-block-aligned-with-null-bytes", }, { []byte{}, []int64{}, mustDecode("0000000000000000000000000000000000000000"), "empty", }, { []byte{}, []int64{0, 4096 * 256 * 256}, mustDecode("0000000000000000000000000000000000000000"), "null-bytes", }, } // ------------------------------------------------------------ func TestLevelAdd(t *testing.T) { for _, test := range testTableLevelPositionEmbedded { l := hidrivehash.NewLevel().(internal.LevelHash) t.Run(test.name, func(t *testing.T) { for i := range test.ins { l.Add(test.ins[i]) assert.Equal(t, test.outs[i], l.Sum(nil)) } }) } } func TestLevelWrite(t *testing.T) { for _, test := range testTableLevel { l := hidrivehash.NewLevel() t.Run(test.name, func(t *testing.T) { for i := range test.ins { l.Write(test.ins[i]) assert.Equal(t, test.outs[i], l.Sum(nil)) } }) } } func TestLevelIsFull(t *testing.T) { content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19} l := hidrivehash.NewLevel() for range 256 { assert.False(t, l.(internal.LevelHash).IsFull()) written, err := l.Write(content[:]) assert.Equal(t, len(content), written) if 
!assert.NoError(t, err) { t.FailNow() } } assert.True(t, l.(internal.LevelHash).IsFull()) written, err := l.Write(content[:]) assert.True(t, l.(internal.LevelHash).IsFull()) assert.Equal(t, 0, written) assert.ErrorIs(t, err, hidrivehash.ErrorHashFull) } func TestLevelReset(t *testing.T) { l := hidrivehash.NewLevel() zeroHash := l.Sum(nil) _, err := l.Write([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}) if assert.NoError(t, err) { assert.NotEqual(t, zeroHash, l.Sum(nil)) l.Reset() assert.Equal(t, zeroHash, l.Sum(nil)) } } func TestLevelSize(t *testing.T) { l := hidrivehash.NewLevel() assert.Equal(t, 20, l.Size()) } func TestLevelBlockSize(t *testing.T) { l := hidrivehash.NewLevel() assert.Equal(t, 20, l.BlockSize()) } func TestLevelBinaryMarshaler(t *testing.T) { content := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19} l := hidrivehash.NewLevel().(internal.LevelHash) l.Write(content[:10]) encoded, err := l.MarshalBinary() if assert.NoError(t, err) { d := hidrivehash.NewLevel().(internal.LevelHash) err = d.UnmarshalBinary(encoded) if assert.NoError(t, err) { assert.Equal(t, l.Sum(nil), d.Sum(nil)) l.Write(content[10:]) d.Write(content[10:]) assert.Equal(t, l.Sum(nil), d.Sum(nil)) } } } func TestLevelInvalidEncoding(t *testing.T) { l := hidrivehash.NewLevel().(internal.LevelHash) err := l.UnmarshalBinary([]byte{}) assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding) } // ------------------------------------------------------------ type infiniteReader struct { source []byte offset int } func (m *infiniteReader) Read(b []byte) (int, error) { count := copy(b, m.source[m.offset:]) m.offset += count m.offset %= len(m.source) return count, nil } func writeInChunks(writer io.Writer, chunkSize int64, data []byte, pattern []int64) error { readers := make([]io.Reader, len(pattern)) nullBytes := [4096]byte{} for i, n := range pattern { if i%2 == 0 { readers[i] = io.LimitReader(&infiniteReader{data, 0}, 
n*int64(len(data))) } else { readers[i] = io.LimitReader(&infiniteReader{nullBytes[:], 0}, n) } } reader := io.MultiReader(readers...) for { _, err := io.CopyN(writer, reader, chunkSize) if err != nil { if err == io.EOF { err = nil } return err } } } func TestWrite(t *testing.T) { for _, test := range testTable { t.Run(test.name, func(t *testing.T) { h := hidrivehash.New() err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern) if assert.NoError(t, err) { normalSum := h.Sum(nil) assert.Equal(t, test.out, normalSum) // Test if different block-sizes produce differing results. for _, blockSize := range []int64{397, 512, 4091, 8192, 10000} { t.Run(fmt.Sprintf("block-size %v", blockSize), func(t *testing.T) { h := hidrivehash.New() err := writeInChunks(h, blockSize, test.data, test.pattern) if assert.NoError(t, err) { assert.Equal(t, normalSum, h.Sum(nil)) } }) } } }) } } func TestReset(t *testing.T) { h := hidrivehash.New() zeroHash := h.Sum(nil) _, err := h.Write([]byte{1}) if assert.NoError(t, err) { assert.NotEqual(t, zeroHash, h.Sum(nil)) h.Reset() assert.Equal(t, zeroHash, h.Sum(nil)) } } func TestSize(t *testing.T) { h := hidrivehash.New() assert.Equal(t, 20, h.Size()) } func TestBlockSize(t *testing.T) { h := hidrivehash.New() assert.Equal(t, 4096, h.BlockSize()) } func TestBinaryMarshaler(t *testing.T) { for _, test := range testTable { h := hidrivehash.New() d := hidrivehash.New() half := len(test.pattern) / 2 t.Run(test.name, func(t *testing.T) { err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[:half]) assert.NoError(t, err) encoded, err := h.(encoding.BinaryMarshaler).MarshalBinary() if assert.NoError(t, err) { err = d.(encoding.BinaryUnmarshaler).UnmarshalBinary(encoded) if assert.NoError(t, err) { assert.Equal(t, h.Sum(nil), d.Sum(nil)) err = writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[half:]) assert.NoError(t, err) err = writeInChunks(d, int64(d.BlockSize()), test.data, test.pattern[half:]) 
assert.NoError(t, err) assert.Equal(t, h.Sum(nil), d.Sum(nil)) } } }) } } func TestInvalidEncoding(t *testing.T) { h := hidrivehash.New() err := h.(encoding.BinaryUnmarshaler).UnmarshalBinary([]byte{}) assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding) } func TestSum(t *testing.T) { assert.Equal(t, [hidrivehash.Size]byte{}, hidrivehash.Sum([]byte{})) content := []byte{1} h := hidrivehash.New() h.Write(content) sum := hidrivehash.Sum(content) assert.Equal(t, h.Sum(nil), sum[:]) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hidrive/hidrivehash/hidrivehash.go
backend/hidrive/hidrivehash/hidrivehash.go
// Package hidrivehash implements the HiDrive hashing algorithm which combines SHA-1 hashes hierarchically to a single top-level hash.
//
// Note: This implementation does not grant access to any partial hashes generated.
//
// See: https://developer.hidrive.com/wp-content/uploads/2021/07/HiDrive_Synchronization-v3.3-rev28.pdf
// (link to newest version: https://static.hidrive.com/dev/0001)
package hidrivehash

import (
	"bytes"
	"crypto/sha1"
	"encoding"
	"encoding/binary"
	"errors"
	"fmt"
	"hash"
	"io"

	"github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
)

const (
	// BlockSize of the checksum in bytes.
	BlockSize = 4096
	// Size of the checksum in bytes.
	Size = sha1.Size
	// sumsPerLevel is the number of checksums a single level
	// can aggregate before it is considered full.
	sumsPerLevel = 256
)

var (
	// zeroSum is a special hash consisting of 20 null-bytes.
	// This will be the hash of any empty file (or ones containing only null-bytes).
	zeroSum = [Size]byte{}
	// ErrorInvalidEncoding is returned when a hash should be decoded from a binary form that is invalid.
	ErrorInvalidEncoding = errors.New("encoded binary form is invalid for this hash")
	// ErrorHashFull is returned when a hash reached its capacity and cannot accept any more input.
	ErrorHashFull = errors.New("hash reached its capacity")
)

// writeByBlock writes len(p) bytes from p to the io.Writer in blocks of size blockSize.
// It returns the number of bytes written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
//
// A pointer bytesInBlock to a counter needs to be supplied,
// that is used to keep track how many bytes have been written to the writer already.
// A pointer onlyNullBytesInBlock to a boolean needs to be supplied,
// that is used to keep track whether the block so far only consists of null-bytes.
// The callback onBlockWritten is called whenever a full block has been written to the writer
// and is given as input the number of bytes that still need to be written.
func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *uint32, onlyNullBytesInBlock *bool, onBlockWritten func(remaining int) error) (n int, err error) {
	total := len(p)
	// Reference slice of null-bytes used to detect all-zero input.
	nullBytes := make([]byte, blockSize)
	for len(p) > 0 {
		// Write at most up to the current block boundary.
		toWrite := min(int(blockSize-*bytesInBlock), len(p))
		c, err := writer.Write(p[:toWrite])
		*bytesInBlock += uint32(c)
		*onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite])
		// Discard data written through a reslice
		p = p[c:]
		if err != nil {
			return total - len(p), err
		}
		if *bytesInBlock == blockSize {
			// A full block is complete; notify the callback with the
			// number of bytes that are still pending.
			err = onBlockWritten(len(p))
			if err != nil {
				return total - len(p), err
			}
			// Start tracking a fresh block.
			*bytesInBlock = 0
			*onlyNullBytesInBlock = true
		}
	}
	return total, nil
}

// level is a hash.Hash that is used to aggregate the checksums produced by the level hierarchically beneath it.
// It is used to represent any level-n hash, except for level-0.
type level struct {
	checksum              [Size]byte // aggregated checksum of this level
	sumCount              uint32     // number of sums contained in this level so far
	bytesInHasher         uint32     // number of bytes written into hasher so far
	onlyNullBytesInHasher bool       // whether the hasher only contains null-bytes so far
	hasher                hash.Hash
}

// NewLevel returns a new hash.Hash computing any level-n hash, except level-0.
func NewLevel() hash.Hash {
	l := &level{}
	l.Reset()
	return l
}

// Add takes a position-embedded SHA-1 checksum and adds it to the level.
// The addition is performed byte-wise (big-endian) with carry propagation;
// any carry out of the most significant byte is discarded,
// i.e. the sum is taken modulo 2^160.
func (l *level) Add(sha1sum []byte) {
	var tmp uint
	var carry bool
	for i := Size - 1; i >= 0; i-- {
		tmp = uint(sha1sum[i]) + uint(l.checksum[i])
		if carry {
			tmp++
		}
		carry = tmp > 255
		l.checksum[i] = byte(tmp)
	}
}

// IsFull returns whether the number of checksums added to this level reached its capacity.
func (l *level) IsFull() bool {
	return l.sumCount >= sumsPerLevel
}

// Write (via the embedded io.Writer interface) adds more data to the running hash.
// Contrary to the specification from hash.Hash, this DOES return an error,
// specifically ErrorHashFull if and only if IsFull() returns true.
func (l *level) Write(p []byte) (n int, err error) {
	if l.IsFull() {
		return 0, ErrorHashFull
	}
	onBlockWritten := func(remaining int) error {
		// Blocks consisting only of null-bytes contribute nothing;
		// for all others, embed the position (sumCount) into the hash
		// before folding it into the aggregated checksum.
		if !l.onlyNullBytesInHasher {
			c, err := l.hasher.Write([]byte{byte(l.sumCount)})
			l.bytesInHasher += uint32(c)
			if err != nil {
				return err
			}
			l.Add(l.hasher.Sum(nil))
		}
		l.sumCount++
		l.hasher.Reset()
		// Only report fullness as an error if there is more input pending.
		if remaining > 0 && l.IsFull() {
			return ErrorHashFull
		}
		return nil
	}
	return writeByBlock(p, l.hasher, uint32(l.BlockSize()), &l.bytesInHasher, &l.onlyNullBytesInHasher, onBlockWritten)
}

// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (l *level) Sum(b []byte) []byte {
	return append(b, l.checksum[:]...)
}

// Reset resets the Hash to its initial state.
func (l *level) Reset() {
	l.checksum = zeroSum // clear the current checksum
	l.sumCount = 0
	l.bytesInHasher = 0
	l.onlyNullBytesInHasher = true
	l.hasher = sha1.New()
}

// Size returns the number of bytes Sum will return.
func (l *level) Size() int {
	return Size
}

// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (l *level) BlockSize() int {
	return Size
}

// MarshalBinary encodes the hash into a binary form and returns the result.
// Layout: checksum (Size bytes), sumCount (4 bytes), bytesInHasher (4 bytes),
// onlyNullBytesInHasher flag (1 byte), then the encoded inner SHA-1 state.
func (l *level) MarshalBinary() ([]byte, error) {
	b := make([]byte, Size+4+4+1)
	copy(b, l.checksum[:])
	binary.BigEndian.PutUint32(b[Size:], l.sumCount)
	binary.BigEndian.PutUint32(b[Size+4:], l.bytesInHasher)
	if l.onlyNullBytesInHasher {
		b[Size+4+4] = 1
	}
	encodedHasher, err := l.hasher.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		return nil, err
	}
	b = append(b, encodedHasher...)
	return b, nil
}

// UnmarshalBinary decodes the binary form generated by MarshalBinary.
// The hash will replace its internal state accordingly.
func (l *level) UnmarshalBinary(b []byte) error {
	// The fixed-size header must be present in full.
	if len(b) < Size+4+4+1 {
		return ErrorInvalidEncoding
	}
	copy(l.checksum[:], b)
	l.sumCount = binary.BigEndian.Uint32(b[Size:])
	l.bytesInHasher = binary.BigEndian.Uint32(b[Size+4:])
	switch b[Size+4+4] {
	case 0:
		l.onlyNullBytesInHasher = false
	case 1:
		l.onlyNullBytesInHasher = true
	default:
		// The flag byte must be exactly 0 or 1.
		return ErrorInvalidEncoding
	}
	err := l.hasher.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[Size+4+4+1:])
	return err
}

// hidriveHash is the hash computing the actual checksum used by HiDrive by combining multiple level-hashes.
type hidriveHash struct {
	levels               []*level   // collection of level-hashes, one for each level starting at level-1
	lastSumWritten       [Size]byte // the last checksum written to any of the levels
	bytesInBlock         uint32     // bytes written into blockHash so far
	onlyNullBytesInBlock bool       // whether the hasher only contains null-bytes so far
	blockHash            hash.Hash
}

// New returns a new hash.Hash computing the HiDrive checksum.
func New() hash.Hash {
	h := &hidriveHash{}
	h.Reset()
	return h
}

// aggregateToLevel writes the checksum to the level at the given index
// and if necessary propagates any changes to levels above.
func (h *hidriveHash) aggregateToLevel(index int, sum []byte) {
	for i := index; ; i++ {
		// Grow the level hierarchy lazily as sums carry upwards.
		if i >= len(h.levels) {
			h.levels = append(h.levels, NewLevel().(*level))
		}
		_, err := h.levels[i].Write(sum)
		copy(h.lastSumWritten[:], sum)
		if err != nil {
			// Write was sized to never fill the level mid-call,
			// so an error here indicates a programming bug.
			panic(fmt.Errorf("level-hash should not have produced an error: %w", err))
		}
		if !h.levels[i].IsFull() {
			break
		}
		// A full level collapses into a single sum that carries
		// into the next-higher level; the level restarts empty.
		sum = h.levels[i].Sum(nil)
		h.levels[i].Reset()
	}
}

// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
func (h *hidriveHash) Write(p []byte) (n int, err error) {
	onBlockWritten := func(remaining int) error {
		// All-zero blocks hash to the special zeroSum without
		// touching the SHA-1 state.
		var sum []byte
		if h.onlyNullBytesInBlock {
			sum = zeroSum[:]
		} else {
			sum = h.blockHash.Sum(nil)
		}
		h.blockHash.Reset()
		// Feed the block checksum into the bottom of the level hierarchy.
		h.aggregateToLevel(0, sum)
		return nil
	}
	return writeByBlock(p, h.blockHash, uint32(BlockSize), &h.bytesInBlock, &h.onlyNullBytesInBlock, onBlockWritten)
}

// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (h *hidriveHash) Sum(b []byte) []byte {
	// Save internal state.
	state, err := h.MarshalBinary()
	if err != nil {
		panic(fmt.Errorf("saving the internal state should not have produced an error: %w", err))
	}
	if h.bytesInBlock > 0 {
		// Fill remainder of block with null-bytes.
		filler := make([]byte, h.BlockSize()-int(h.bytesInBlock))
		_, err = h.Write(filler)
		if err != nil {
			panic(fmt.Errorf("filling with null-bytes should not have an error: %w", err))
		}
	}
	checksum := zeroSum
	for i := range h.levels {
		level := h.levels[i]
		if i < len(h.levels)-1 {
			// Aggregate non-empty non-final levels.
			if level.sumCount >= 1 {
				h.aggregateToLevel(i+1, level.Sum(nil))
				level.Reset()
			}
		} else {
			// Determine sum of final level.
			if level.sumCount > 1 {
				copy(checksum[:], level.Sum(nil))
			} else {
				// This is needed, otherwise there is no way to return
				// the non-position-embedded checksum.
				checksum = h.lastSumWritten
			}
		}
	}
	// Restore internal state.
	err = h.UnmarshalBinary(state)
	if err != nil {
		panic(fmt.Errorf("restoring the internal state should not have produced an error: %w", err))
	}
	return append(b, checksum[:]...)
}

// Reset resets the Hash to its initial state.
func (h *hidriveHash) Reset() {
	h.levels = nil
	h.lastSumWritten = zeroSum // clear the last written checksum
	h.bytesInBlock = 0
	h.onlyNullBytesInBlock = true
	h.blockHash = sha1.New()
}

// Size returns the number of bytes Sum will return.
func (h *hidriveHash) Size() int {
	return Size
}

// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount // of data, but it may operate more efficiently if all writes // are a multiple of the block size. func (h *hidriveHash) BlockSize() int { return BlockSize } // MarshalBinary encodes the hash into a binary form and returns the result. func (h *hidriveHash) MarshalBinary() ([]byte, error) { b := make([]byte, Size+4+1+8) copy(b, h.lastSumWritten[:]) binary.BigEndian.PutUint32(b[Size:], h.bytesInBlock) if h.onlyNullBytesInBlock { b[Size+4] = 1 } binary.BigEndian.PutUint64(b[Size+4+1:], uint64(len(h.levels))) for _, level := range h.levels { encodedLevel, err := level.MarshalBinary() if err != nil { return nil, err } encodedLength := make([]byte, 8) binary.BigEndian.PutUint64(encodedLength, uint64(len(encodedLevel))) b = append(b, encodedLength...) b = append(b, encodedLevel...) } encodedBlockHash, err := h.blockHash.(encoding.BinaryMarshaler).MarshalBinary() if err != nil { return nil, err } b = append(b, encodedBlockHash...) return b, nil } // UnmarshalBinary decodes the binary form generated by MarshalBinary. // The hash will replace its internal state accordingly. func (h *hidriveHash) UnmarshalBinary(b []byte) error { if len(b) < Size+4+1+8 { return ErrorInvalidEncoding } copy(h.lastSumWritten[:], b) h.bytesInBlock = binary.BigEndian.Uint32(b[Size:]) switch b[Size+4] { case 0: h.onlyNullBytesInBlock = false case 1: h.onlyNullBytesInBlock = true default: return ErrorInvalidEncoding } amount := binary.BigEndian.Uint64(b[Size+4+1:]) h.levels = make([]*level, int(amount)) offset := Size + 4 + 1 + 8 for i := range h.levels { length := int(binary.BigEndian.Uint64(b[offset:])) offset += 8 h.levels[i] = NewLevel().(*level) err := h.levels[i].UnmarshalBinary(b[offset : offset+length]) if err != nil { return err } offset += length } err := h.blockHash.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[offset:]) return err } // Sum returns the HiDrive checksum of the data. 
func Sum(data []byte) [Size]byte { h := New().(*hidriveHash) _, _ = h.Write(data) var result [Size]byte copy(result[:], h.Sum(nil)) return result } // Check the interfaces are satisfied. var ( _ hash.Hash = (*level)(nil) _ encoding.BinaryMarshaler = (*level)(nil) _ encoding.BinaryUnmarshaler = (*level)(nil) _ internal.LevelHash = (*level)(nil) _ hash.Hash = (*hidriveHash)(nil) _ encoding.BinaryMarshaler = (*hidriveHash)(nil) _ encoding.BinaryUnmarshaler = (*hidriveHash)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hidrive/hidrivehash/internal/internal.go
backend/hidrive/hidrivehash/internal/internal.go
// Package internal provides utilities for HiDrive. package internal import ( "encoding" "hash" ) // LevelHash is an internal interface for level-hashes. type LevelHash interface { encoding.BinaryMarshaler encoding.BinaryUnmarshaler hash.Hash // Add takes a position-embedded checksum and adds it to the level. Add(sum []byte) // IsFull returns whether the number of checksums added to this level reached its capacity. IsFull() bool }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/storj/storj_unsupported.go
backend/storj/storj_unsupported.go
//go:build plan9 // Package storj provides an interface to Storj decentralized object storage. package storj
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/storj/object.go
backend/storj/object.go
//go:build !plan9 package storj import ( "context" "errors" "io" "path" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/bucket" "golang.org/x/text/unicode/norm" "storj.io/uplink" ) // Object describes a Storj object type Object struct { fs *Fs absolute string size int64 created time.Time modified time.Time } // Check the interfaces are satisfied. var _ fs.Object = &Object{} // newObjectFromUplink creates a new object from a Storj uplink object. func newObjectFromUplink(f *Fs, relative string, object *uplink.Object) *Object { // Attempt to use the modified time from the metadata. Otherwise // fallback to the server time. modified := object.System.Created if modifiedStr, ok := object.Custom["rclone:mtime"]; ok { var err error modified, err = time.Parse(time.RFC3339Nano, modifiedStr) if err != nil { modified = object.System.Created } } bucketName, _ := bucket.Split(path.Join(f.root, relative)) return &Object{ fs: f, absolute: norm.NFC.String(bucketName + "/" + object.Key), size: object.System.ContentLength, created: object.System.Created, modified: modified, } } // String returns a description of the Object func (o *Object) String() string { if o == nil { return "<nil>" } return o.Remote() } // Remote returns the remote path func (o *Object) Remote() string { // It is possible that we have an empty root (meaning the filesystem is // rooted at the project level). In this case the relative path is just // the full absolute path to the object (including the bucket name). if o.fs.root == "" { return o.absolute } // At this point we know that the filesystem itself is at least a // bucket name (and possibly a prefix path). // // . This is necessary to remove the slash. 
// | // v return o.absolute[len(o.fs.root)+1:] } // ModTime returns the modification date of the file // It should return a best guess if one isn't available func (o *Object) ModTime(ctx context.Context) time.Time { return o.modified } // Size returns the size of the file func (o *Object) Size() int64 { return o.size } // Fs returns read only access to the Fs that this object is part of func (o *Object) Fs() fs.Info { return o.fs } // Hash returns the selected checksum of the file // If no checksum is available it returns "" func (o *Object) Hash(ctx context.Context, ty hash.Type) (_ string, err error) { fs.Debugf(o, "%s", ty) return "", hash.ErrUnsupported } // Storable says whether this object can be stored func (o *Object) Storable() bool { return true } // SetModTime sets the metadata on the object to set the modification date func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) { fs.Debugf(o, "touch -d %q sj://%s", t, o.absolute) return fs.ErrorCantSetModTime } // Open opens the file for read. Call Close() on the returned io.ReadCloser func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (_ io.ReadCloser, err error) { fs.Debugf(o, "cat sj://%s # %+v", o.absolute, options) bucketName, bucketPath := bucket.Split(o.absolute) // Convert the semantics of HTTP range headers to an offset and length // that libuplink can use. 
var ( offset int64 length int64 = -1 ) for _, option := range options { switch opt := option.(type) { case *fs.RangeOption: s := opt.Start >= 0 e := opt.End >= 0 switch { case s && e: offset = opt.Start length = (opt.End + 1) - opt.Start case s && !e: offset = opt.Start case !s && e: offset = -opt.End } case *fs.SeekOption: offset = opt.Offset default: if option.Mandatory() { fs.Errorf(o, "Unsupported mandatory option: %v", option) return nil, errors.New("unsupported mandatory option") } } } fs.Debugf(o, "range %d + %d", offset, length) return o.fs.project.DownloadObject(ctx, bucketName, bucketPath, &uplink.DownloadOptions{ Offset: offset, Length: length, }) } // Update in to the object with the modTime given of the given size // // When called from outside an Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either // return an error or update the object properly (rather than e.g. calling panic). func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { fs.Debugf(o, "cp input ./%s %+v", o.Remote(), options) oNew, err := o.fs.put(ctx, in, src, o.Remote(), options...) if err == nil { *o = *(oNew.(*Object)) } return err } // Remove this object. func (o *Object) Remove(ctx context.Context) (err error) { fs.Debugf(o, "rm sj://%s", o.absolute) bucketName, bucketPath := bucket.Split(o.absolute) _, err = o.fs.project.DeleteObject(ctx, bucketName, bucketPath) return err }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/storj/storj_test.go
backend/storj/storj_test.go
//go:build !plan9 // Test Storj filesystem interface package storj_test import ( "testing" "github.com/rclone/rclone/backend/storj" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestStorj:", NilObject: (*storj.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/storj/fs.go
backend/storj/fs.go
//go:build !plan9 // Package storj provides an interface to Storj decentralized object storage. package storj import ( "context" "errors" "fmt" "io" "path" "strings" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/bucket" "golang.org/x/text/unicode/norm" "storj.io/uplink" "storj.io/uplink/edge" ) const ( existingProvider = "existing" newProvider = "new" ) var satMap = map[string]string{ "us1.storj.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us1.storj.io:7777", "eu1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@eu1.storj.io:7777", "ap1.storj.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@ap1.storj.io:7777", } // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "storj", Description: "Storj Decentralized Cloud Storage", Aliases: []string{"tardigrade"}, NewFs: NewFs, Config: func(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) { provider, _ := m.Get(fs.ConfigProvider) config.FileDeleteKey(name, fs.ConfigProvider) if provider == newProvider { satelliteString, _ := m.Get("satellite_address") apiKey, _ := m.Get("api_key") passphrase, _ := m.Get("passphrase") // satelliteString contains always default and passphrase can be empty if apiKey == "" { return nil, nil } satellite, found := satMap[satelliteString] if !found { satellite = satelliteString } access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase) if err != nil { return nil, fmt.Errorf("couldn't create access grant: %w", err) } serializedAccess, err := access.Serialize() if err != nil { return nil, fmt.Errorf("couldn't serialize access grant: %w", err) } m.Set("satellite_address", satellite) m.Set("access_grant", serializedAccess) } else if provider 
== existingProvider { config.FileDeleteKey(name, "satellite_address") config.FileDeleteKey(name, "api_key") config.FileDeleteKey(name, "passphrase") } else { return nil, fmt.Errorf("invalid provider type: %s", provider) } return nil, nil }, Options: []fs.Option{ { Name: fs.ConfigProvider, Help: "Choose an authentication method.", Default: existingProvider, Examples: []fs.OptionExample{{ Value: "existing", Help: "Use an existing access grant.", }, { Value: newProvider, Help: "Create a new access grant from satellite address, API key, and passphrase.", }, }}, { Name: "access_grant", Help: "Access grant.", Provider: "existing", Sensitive: true, }, { Name: "satellite_address", Help: "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.", Provider: newProvider, Default: "us1.storj.io", Examples: []fs.OptionExample{{ Value: "us1.storj.io", Help: "US1", }, { Value: "eu1.storj.io", Help: "EU1", }, { Value: "ap1.storj.io", Help: "AP1", }, }, }, { Name: "api_key", Help: "API key.", Provider: newProvider, Sensitive: true, }, { Name: "passphrase", Help: "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.", Provider: newProvider, Sensitive: true, }, }, }) } // Options defines the configuration for this backend type Options struct { Access string `config:"access_grant"` SatelliteAddress string `config:"satellite_address"` APIKey string `config:"api_key"` Passphrase string `config:"passphrase"` } // Fs represents a remote to Storj type Fs struct { name string // the name of the remote root string // root of the filesystem opts Options // parsed options features *fs.Features // optional features access *uplink.Access // parsed scope project *uplink.Project // project client } // Check the interfaces are satisfied. 
var ( _ fs.Fs = &Fs{} _ fs.ListRer = &Fs{} _ fs.PutStreamer = &Fs{} _ fs.Mover = &Fs{} _ fs.Copier = &Fs{} _ fs.Purger = &Fs{} _ fs.PublicLinker = &Fs{} ) // NewFs creates a filesystem backed by Storj. func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs, err error) { // Setup filesystem and connection to Storj root = norm.NFC.String(root) root = strings.Trim(root, "/") f := &Fs{ name: name, root: root, } // Parse config into Options struct err = configstruct.Set(m, &f.opts) if err != nil { return nil, err } // Parse access var access *uplink.Access if f.opts.Access != "" { access, err = uplink.ParseAccess(f.opts.Access) if err != nil { return nil, fmt.Errorf("storj: access: %w", err) } } if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" { access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase) if err != nil { return nil, fmt.Errorf("storj: access: %w", err) } serializedAccess, err := access.Serialize() if err != nil { return nil, fmt.Errorf("storj: access: %w", err) } err = config.SetValueAndSave(f.name, "access_grant", serializedAccess) if err != nil { return nil, fmt.Errorf("storj: access: %w", err) } } if access == nil { return nil, errors.New("access not found") } f.access = access f.features = (&fs.Features{ BucketBased: true, BucketBasedRootOK: true, }).Fill(ctx, f) project, err := f.connect(ctx) if err != nil { return nil, err } f.project = project // Root validation needs to check the following: If a bucket path is // specified and exists, then the object must be a directory. // // NOTE: At this point this must return the filesystem object we've // created so far even if there is an error. 
if root != "" { bucketName, bucketPath := bucket.Split(root) if bucketName != "" && bucketPath != "" { _, err = project.StatBucket(ctx, bucketName) if err != nil { return f, fmt.Errorf("storj: bucket: %w", err) } object, err := project.StatObject(ctx, bucketName, bucketPath) if err == nil { if !object.IsPrefix { // If the root is actually a file we // need to return the *parent* // directory of the root instead and an // error that the original root // requested is a file. newRoot := path.Dir(f.root) if newRoot == "." { newRoot = "" } f.root = newRoot return f, fs.ErrorIsFile } } } } return f, nil } // connect opens a connection to Storj. func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) { fs.Debugf(f, "connecting...") defer fs.Debugf(f, "connected: %+v", err) cfg := uplink.Config{ UserAgent: "rclone", } project, err = cfg.OpenProject(ctx, f.access) if err != nil { return nil, fmt.Errorf("storj: project: %w", err) } return } // absolute computes the absolute bucket name and path from the filesystem root // and the relative path provided. func (f *Fs) absolute(relative string) (bucketName, bucketPath string) { bn, bp := bucket.Split(path.Join(f.root, relative)) // NOTE: Technically libuplink does not care about the encoding. It is // happy to work with them as opaque byte sequences. However, rclone // has a test that requires two paths with the same normalized form // (but different un-normalized forms) to point to the same file. This // means we have to normalize before we interact with libuplink. 
return norm.NFC.String(bn), norm.NFC.String(bp) } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String returns a description of the FS func (f *Fs) String() string { return fmt.Sprintf("FS sj://%s", f.root) } // Precision of the ModTimes in this Fs func (f *Fs) Precision() time.Duration { return time.Nanosecond } // Hashes returns the supported hash types of the filesystem. func (f *Fs) Hashes() hash.Set { return hash.NewHashSet() } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // List the objects and directories in relative into entries. The entries can // be returned in any order but should be for a complete directory. // // relative should be "" to list the root, and should not have trailing // slashes. // // This should return fs.ErrDirNotFound if the directory isn't found. func (f *Fs) List(ctx context.Context, relative string) (entries fs.DirEntries, err error) { fs.Debugf(f, "ls ./%s", relative) bucketName, bucketPath := f.absolute(relative) defer func() { if errors.Is(err, uplink.ErrBucketNotFound) { err = fs.ErrorDirNotFound } }() if bucketName == "" { if bucketPath != "" { return nil, fs.ErrorListBucketRequired } return f.listBuckets(ctx) } return f.listObjects(ctx, relative, bucketName, bucketPath) } func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) { fs.Debugf(f, "BKT ls") buckets := f.project.ListBuckets(ctx, nil) for buckets.Next() { bucket := buckets.Item() entries = append(entries, fs.NewDir(bucket.Name, bucket.Created)) } return entries, buckets.Err() } // newDirEntry creates a directory entry from an uplink object. // // NOTE: Getting the exact behavior required by rclone is somewhat tricky. 
The // path manipulation here is necessary to cover all the different ways the // filesystem and object could be initialized and combined. func (f *Fs) newDirEntry(relative, prefix string, object *uplink.Object) fs.DirEntry { if object.IsPrefix { // . The entry must include the relative path as its prefix. Depending on // | what is being listed and how the filesystem root was initialized the // | relative path may be empty (and so we use path joining here to ensure // | we don't end up with an empty path segment). // | // | . Remove the prefix used during listing. // | | // | | . Remove the trailing slash. // | | | // v v v return fs.NewDir(path.Join(relative, object.Key[len(prefix):len(object.Key)-1]), object.System.Created) } return newObjectFromUplink(f, relative, object) } func (f *Fs) listObjects(ctx context.Context, relative, bucketName, bucketPath string) (entries fs.DirEntries, err error) { fs.Debugf(f, "OBJ ls ./%s (%q, %q)", relative, bucketName, bucketPath) opts := &uplink.ListObjectsOptions{ Prefix: newPrefix(bucketPath), System: true, Custom: true, } fs.Debugf(f, "opts %+v", opts) objects := f.project.ListObjects(ctx, bucketName, opts) for objects.Next() { entries = append(entries, f.newDirEntry(relative, opts.Prefix, objects.Item())) } err = objects.Err() if err != nil { return nil, err } return entries, nil } // ListR lists the objects and directories of the Fs starting from dir // recursively into out. // // relative should be "" to start from the root, and should not have trailing // slashes. // // This should return ErrDirNotFound if the directory isn't found. // // It should call callback for each tranche of entries read. These need not be // returned in any particular order. If callback returns an error then the // listing will stop immediately. // // Don't implement this unless you have a more efficient way of listing // recursively that doing a directory traversal. 
func (f *Fs) ListR(ctx context.Context, relative string, callback fs.ListRCallback) (err error) { fs.Debugf(f, "ls -R ./%s", relative) bucketName, bucketPath := f.absolute(relative) defer func() { if errors.Is(err, uplink.ErrBucketNotFound) { err = fs.ErrorDirNotFound } }() if bucketName == "" { if bucketPath != "" { return fs.ErrorListBucketRequired } return f.listBucketsR(ctx, callback) } return f.listObjectsR(ctx, relative, bucketName, bucketPath, callback) } func (f *Fs) listBucketsR(ctx context.Context, callback fs.ListRCallback) (err error) { fs.Debugf(f, "BKT ls -R") buckets := f.project.ListBuckets(ctx, nil) for buckets.Next() { bucket := buckets.Item() err = f.listObjectsR(ctx, bucket.Name, bucket.Name, "", callback) if err != nil { return err } } return buckets.Err() } func (f *Fs) listObjectsR(ctx context.Context, relative, bucketName, bucketPath string, callback fs.ListRCallback) (err error) { fs.Debugf(f, "OBJ ls -R ./%s (%q, %q)", relative, bucketName, bucketPath) opts := &uplink.ListObjectsOptions{ Prefix: newPrefix(bucketPath), Recursive: true, System: true, Custom: true, } objects := f.project.ListObjects(ctx, bucketName, opts) for objects.Next() { object := objects.Item() err = callback(fs.DirEntries{f.newDirEntry(relative, opts.Prefix, object)}) if err != nil { return err } } err = objects.Err() if err != nil { return err } return nil } // NewObject finds the Object at relative. If it can't be found it returns the // error ErrorObjectNotFound. 
func (f *Fs) NewObject(ctx context.Context, relative string) (_ fs.Object, err error) { fs.Debugf(f, "stat ./%s", relative) bucketName, bucketPath := f.absolute(relative) object, err := f.project.StatObject(ctx, bucketName, bucketPath) if err != nil { fs.Debugf(f, "err: %+v", err) if errors.Is(err, uplink.ErrObjectNotFound) { return nil, fs.ErrorObjectNotFound } return nil, err } return newObjectFromUplink(f, relative, object), nil } // Put in to the remote path with the modTime given of the given size // // When called from outside an Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Put should // either return an error or upload it properly (rather than e.g. calling // panic). // // May create the object even if it returns an error - if so will return the // object and the error, otherwise will return nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) { return f.put(ctx, in, src, src.Remote(), options...) } func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options ...fs.OpenOption) (_ fs.Object, err error) { fs.Debugf(f, "cp input ./%s # %+v %d", remote, options, src.Size()) // Reject options we don't support. 
for _, option := range options { if option.Mandatory() { fs.Errorf(f, "Unsupported mandatory option: %v", option) return nil, errors.New("unsupported mandatory option") } } bucketName, bucketPath := f.absolute(remote) upload, err := f.project.UploadObject(ctx, bucketName, bucketPath, nil) if err != nil { return nil, err } defer func() { if err != nil { aerr := upload.Abort() if aerr != nil && !errors.Is(aerr, uplink.ErrUploadDone) { fs.Errorf(f, "cp input ./%s %+v: %+v", remote, options, aerr) } } }() err = upload.SetCustomMetadata(ctx, uplink.CustomMetadata{ "rclone:mtime": src.ModTime(ctx).Format(time.RFC3339Nano), }) if err != nil { return nil, err } _, err = io.Copy(upload, in) if err != nil { if errors.Is(err, uplink.ErrBucketNotFound) { // Rclone assumes the backend will create the bucket if not existing yet. // Here we create the bucket and return a retry error for rclone to retry the upload. _, err = f.project.EnsureBucket(ctx, bucketName) if err != nil { return nil, err } return nil, fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried")) } err = fserrors.RetryError(err) fs.Errorf(f, "cp input ./%s %+v: %+v\n", remote, options, err) return nil, err } err = upload.Commit() if err != nil { if errors.Is(err, uplink.ErrBucketNotFound) { // Rclone assumes the backend will create the bucket if not existing yet. // Here we create the bucket and return a retry error for rclone to retry the upload. _, err = f.project.EnsureBucket(ctx, bucketName) if err != nil { return nil, err } err = fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried")) } else if errors.Is(err, uplink.ErrTooManyRequests) { // Storj has a rate limit of 1 per second of uploading to the same file. // This produces ErrTooManyRequests here, so we wait 1 second and retry. 
// // See: https://github.com/storj/uplink/issues/149 fs.Debugf(f, "uploading too fast - sleeping for 1 second: %v", err) time.Sleep(time.Second) err = fserrors.RetryError(err) } return nil, err } return newObjectFromUplink(f, remote, upload.Info()), nil } // PutStream uploads to the remote path with the modTime given of indeterminate // size. // // May create the object even if it returns an error - if so will return the // object and the error, otherwise will return nil and the error. func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) { return f.Put(ctx, in, src, options...) } // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists func (f *Fs) Mkdir(ctx context.Context, relative string) (err error) { fs.Debugf(f, "mkdir -p ./%s", relative) bucketName, _ := f.absolute(relative) _, err = f.project.EnsureBucket(ctx, bucketName) return err } // Rmdir removes the directory (container, bucket) // // NOTE: Despite code documentation to the contrary, this method should not // return an error if the directory does not exist. func (f *Fs) Rmdir(ctx context.Context, relative string) (err error) { fs.Debugf(f, "rmdir ./%s", relative) bucketName, bucketPath := f.absolute(relative) if bucketPath != "" { // If we can successfully stat it, then it is an object (and not a prefix). _, err := f.project.StatObject(ctx, bucketName, bucketPath) if err != nil { if errors.Is(err, uplink.ErrObjectNotFound) { // At this point we know it is not an object, // but we don't know if it is a prefix for one. // // We check this by doing a listing and if we // get any results back, then we know this is a // valid prefix (which implies the directory is // not empty). 
opts := &uplink.ListObjectsOptions{ Prefix: newPrefix(bucketPath), System: true, Custom: true, } objects := f.project.ListObjects(ctx, bucketName, opts) if objects.Next() { return fs.ErrorDirectoryNotEmpty } return objects.Err() } return err } return fs.ErrorIsFile } _, err = f.project.DeleteBucket(ctx, bucketName) if err != nil { if errors.Is(err, uplink.ErrBucketNotFound) { return fs.ErrorDirNotFound } if errors.Is(err, uplink.ErrBucketNotEmpty) { return fs.ErrorDirectoryNotEmpty } return err } return nil } // newPrefix returns a new prefix for listing conforming to the libuplink // requirements. In particular, libuplink requires a trailing slash for // listings, but rclone does not always provide one. Further, depending on how // the path was initially path normalization may have removed it (e.g. a // trailing slash from the CLI is removed before it ever gets to the backend // code). func newPrefix(prefix string) string { if prefix == "" { return prefix } if prefix[len(prefix)-1] == '/' { return prefix } return prefix + "/" } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } // Move parameters srcBucket, srcKey := bucket.Split(srcObj.absolute) dstBucket, dstKey := f.absolute(remote) options := uplink.MoveObjectOptions{} // Do the move err := f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options) if err != nil { // Make sure destination bucket exists _, err := f.project.EnsureBucket(ctx, dstBucket) if err != nil { return nil, fmt.Errorf("rename object failed to create destination bucket: %w", err) } // And try again err = f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options) if err != nil { return nil, fmt.Errorf("rename object failed: %w", err) } } // Read the new object return f.NewObject(ctx, remote) } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } // Copy parameters srcBucket, srcKey := bucket.Split(srcObj.absolute) dstBucket, dstKey := f.absolute(remote) options := uplink.CopyObjectOptions{} // Do the copy newObject, err := f.project.CopyObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options) if err != nil { // Make sure destination bucket exists _, err := f.project.EnsureBucket(ctx, dstBucket) if err != nil { return nil, fmt.Errorf("copy object failed to create destination bucket: %w", err) } // And try again newObject, err = f.project.CopyObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options) if err != nil { return nil, fmt.Errorf("copy object failed: %w", err) } } // Return the new object return newObjectFromUplink(f, remote, newObject), nil } // Purge all files in the directory specified // // Implement this if you have a way of deleting all the files // quicker than just running Remove() on the result of List() // // Return an error if it doesn't exist func (f *Fs) Purge(ctx context.Context, dir string) error { bucket, directory := f.absolute(dir) if bucket == "" { return errors.New("can't purge from root") } if directory == "" { _, err := f.project.DeleteBucketWithObjects(ctx, bucket) if errors.Is(err, uplink.ErrBucketNotFound) { return fs.ErrorDirNotFound } return err } fs.Infof(directory, "Quick delete is available only for entire bucket. 
Falling back to list and delete.") objects := f.project.ListObjects(ctx, bucket, &uplink.ListObjectsOptions{ Prefix: directory + "/", Recursive: true, }, ) if err := objects.Err(); err != nil { return err } empty := true for objects.Next() { empty = false _, err := f.project.DeleteObject(ctx, bucket, objects.Item().Key) if err != nil { return err } fs.Infof(objects.Item().Key, "Deleted") } if empty { return fs.ErrorDirNotFound } return nil } // PublicLink generates a public link to the remote path (usually readable by anyone) func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) { bucket, key := f.absolute(remote) if bucket == "" { return "", errors.New("path must be specified") } // Rclone requires that a link is only generated if the remote path exists if key == "" { _, err := f.project.StatBucket(ctx, bucket) if err != nil { return "", err } } else { _, err := f.project.StatObject(ctx, bucket, key) if err != nil { if !errors.Is(err, uplink.ErrObjectNotFound) { return "", err } // No object found, check if there is such a prefix iter := f.project.ListObjects(ctx, bucket, &uplink.ListObjectsOptions{Prefix: key + "/"}) if iter.Err() != nil { return "", iter.Err() } if !iter.Next() { return "", err } } } sharedPrefix := uplink.SharePrefix{Bucket: bucket, Prefix: key} permission := uplink.ReadOnlyPermission() if expire.IsSet() { permission.NotAfter = time.Now().Add(time.Duration(expire)) } sharedAccess, err := f.access.Share(permission, sharedPrefix) if err != nil { return "", fmt.Errorf("sharing access to object failed: %w", err) } creds, err := (&edge.Config{ AuthServiceAddress: "auth.storjshare.io:7777", }).RegisterAccess(ctx, sharedAccess, &edge.RegisterAccessOptions{Public: true}) if err != nil { return "", fmt.Errorf("creating public link failed: %w", err) } return edge.JoinShareURL("https://link.storjshare.io", creds.AccessKeyID, bucket, key, nil) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/imagekit/util.go
backend/imagekit/util.go
package imagekit import ( "context" "fmt" "net/http" "slices" "strconv" "time" "github.com/rclone/rclone/backend/imagekit/client" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/lib/pacer" ) func (f *Fs) getFiles(ctx context.Context, path string, includeVersions bool) (files []client.File, err error) { files = make([]client.File, 0) var hasMore = true for hasMore { err = f.pacer.Call(func() (bool, error) { var data *[]client.File var res *http.Response res, data, err = f.ik.Files(ctx, client.FilesOrFolderParam{ Skip: len(files), Limit: 100, Path: path, }, includeVersions) hasMore = !(len(*data) == 0 || len(*data) < 100) if len(*data) > 0 { files = append(files, *data...) } return f.shouldRetry(ctx, res, err) }) } if err != nil { return make([]client.File, 0), err } return files, nil } func (f *Fs) getFolders(ctx context.Context, path string) (folders []client.Folder, err error) { folders = make([]client.Folder, 0) var hasMore = true for hasMore { err = f.pacer.Call(func() (bool, error) { var data *[]client.Folder var res *http.Response res, data, err = f.ik.Folders(ctx, client.FilesOrFolderParam{ Skip: len(folders), Limit: 100, Path: path, }) hasMore = !(len(*data) == 0 || len(*data) < 100) if len(*data) > 0 { folders = append(folders, *data...) 
} return f.shouldRetry(ctx, res, err) }) } if err != nil { return make([]client.Folder, 0), err } return folders, nil } func (f *Fs) getFileByName(ctx context.Context, path string, name string) (file *client.File) { err := f.pacer.Call(func() (bool, error) { res, data, err := f.ik.Files(ctx, client.FilesOrFolderParam{ Limit: 1, Path: path, SearchQuery: fmt.Sprintf(`type = "file" AND name = %s`, strconv.Quote(name)), }, false) if len(*data) == 0 { file = nil } else { file = &(*data)[0] } return f.shouldRetry(ctx, res, err) }) if err != nil { return nil } return file } func (f *Fs) getFolderByName(ctx context.Context, path string, name string) (folder *client.Folder, err error) { err = f.pacer.Call(func() (bool, error) { res, data, err := f.ik.Folders(ctx, client.FilesOrFolderParam{ Limit: 1, Path: path, SearchQuery: fmt.Sprintf(`type = "folder" AND name = %s`, strconv.Quote(name)), }) if len(*data) == 0 { folder = nil } else { folder = &(*data)[0] } return f.shouldRetry(ctx, res, err) }) if err != nil { return nil, err } return folder, nil } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 401, // Unauthorized (e.g. "Token has expired") 408, // Request Timeout 429, // Rate exceeded. 
500, // Get occasional 500 Internal Server Error 503, // Service Unavailable 504, // Gateway Time-out } func shouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool { if resp == nil { return false } return slices.Contains(retryErrorCodes, resp.StatusCode) } func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } if resp != nil && (resp.StatusCode == 429 || resp.StatusCode == 503) { var retryAfter = 1 retryAfterString := resp.Header.Get("X-RateLimit-Reset") if retryAfterString != "" { var err error retryAfter, err = strconv.Atoi(retryAfterString) if err != nil { fs.Errorf(f, "Malformed %s header %q: %v", "X-RateLimit-Reset", retryAfterString, err) } } return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Millisecond) } return fserrors.ShouldRetry(err) || shouldRetryHTTP(resp, retryErrorCodes), err } // EncodePath encapsulates the logic for encoding a path func (f *Fs) EncodePath(str string) string { return f.opt.Enc.FromStandardPath(str) } // DecodePath encapsulates the logic for decoding a path func (f *Fs) DecodePath(str string) string { return f.opt.Enc.ToStandardPath(str) } // EncodeFileName encapsulates the logic for encoding a file name func (f *Fs) EncodeFileName(str string) string { return f.opt.Enc.FromStandardName(str) } // DecodeFileName encapsulates the logic for decoding a file name func (f *Fs) DecodeFileName(str string) string { return f.opt.Enc.ToStandardName(str) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/imagekit/imagekit_test.go
backend/imagekit/imagekit_test.go
package imagekit import ( "testing" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" ) func TestIntegration(t *testing.T) { debug := true fstest.Verbose = &debug fstests.Run(t, &fstests.Opt{ RemoteName: "TestImageKit:", NilObject: (*Object)(nil), SkipFsCheckWrap: true, }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/imagekit/imagekit.go
backend/imagekit/imagekit.go
// Package imagekit provides an interface to the ImageKit.io media library. package imagekit import ( "context" "errors" "fmt" "io" "math" "net/http" "path" "strconv" "strings" "time" "github.com/rclone/rclone/backend/imagekit/client" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/version" ) const ( minSleep = 1 * time.Millisecond maxSleep = 100 * time.Millisecond decayConstant = 2 ) var systemMetadataInfo = map[string]fs.MetadataHelp{ "btime": { Help: "Time of file birth (creation) read from Last-Modified header", Type: "RFC 3339", Example: "2006-01-02T15:04:05.999999999Z07:00", ReadOnly: true, }, "size": { Help: "Size of the object in bytes", Type: "int64", ReadOnly: true, }, "file-type": { Help: "Type of the file", Type: "string", Example: "image", ReadOnly: true, }, "height": { Help: "Height of the image or video in pixels", Type: "int", ReadOnly: true, }, "width": { Help: "Width of the image or video in pixels", Type: "int", ReadOnly: true, }, "has-alpha": { Help: "Whether the image has alpha channel or not", Type: "bool", ReadOnly: true, }, "tags": { Help: "Tags associated with the file", Type: "string", Example: "tag1,tag2", ReadOnly: true, }, "google-tags": { Help: "AI generated tags by Google Cloud Vision associated with the image", Type: "string", Example: "tag1,tag2", ReadOnly: true, }, "aws-tags": { Help: "AI generated tags by AWS Rekognition associated with the image", Type: "string", Example: "tag1,tag2", ReadOnly: true, }, "is-private-file": { Help: "Whether the file is private or not", Type: "bool", ReadOnly: true, }, "custom-coordinates": { Help: "Custom coordinates of the file", Type: "string", Example: "0,0,100,100", ReadOnly: true, }, } // Register with 
Fs func init() { fs.Register(&fs.RegInfo{ Name: "imagekit", Description: "ImageKit.io", NewFs: NewFs, MetadataInfo: &fs.MetadataInfo{ System: systemMetadataInfo, Help: `Any metadata supported by the underlying remote is read and written.`, }, Options: []fs.Option{ { Name: "endpoint", Help: "You can find your ImageKit.io URL endpoint in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)", Required: true, }, { Name: "public_key", Help: "You can find your ImageKit.io public key in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)", Required: true, Sensitive: true, }, { Name: "private_key", Help: "You can find your ImageKit.io private key in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)", Required: true, Sensitive: true, }, { Name: "only_signed", Help: "If you have configured `Restrict unsigned image URLs` in your dashboard settings, set this to true.", Default: false, Advanced: true, }, { Name: "versions", Help: "Include old versions in directory listings.", Default: false, Advanced: true, }, { Name: "upload_tags", Help: "Tags to add to the uploaded files, e.g. 
\"tag1,tag2\".", Default: "", Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.EncodeZero | encoder.EncodeSlash | encoder.EncodeQuestion | encoder.EncodeHashPercent | encoder.EncodeCtl | encoder.EncodeDel | encoder.EncodeDot | encoder.EncodeDoubleQuote | encoder.EncodePercent | encoder.EncodeBackSlash | encoder.EncodeDollar | encoder.EncodeLtGt | encoder.EncodeSquareBracket | encoder.EncodeInvalidUtf8), }, }, }) } // Options defines the configuration for this backend type Options struct { Endpoint string `config:"endpoint"` PublicKey string `config:"public_key"` PrivateKey string `config:"private_key"` OnlySigned bool `config:"only_signed"` Versions bool `config:"versions"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote to ImageKit type Fs struct { name string // name of remote root string // root path opt Options // parsed options features *fs.Features // optional features ik *client.ImageKit // ImageKit client pacer *fs.Pacer // pacer for API calls } // Object describes a ImageKit file type Object struct { fs *Fs // The Fs this object is part of remote string // The remote path filePath string // The path to the file contentType string // The content type of the object if known - may be "" timestamp time.Time // The timestamp of the object if known - may be zero file client.File // The media file if known - may be nil versionID string // If present this points to an object version } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) { opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } ik, err := client.New(ctx, client.NewParams{ URLEndpoint: opt.Endpoint, PublicKey: opt.PublicKey, PrivateKey: opt.PrivateKey, }) if err != nil { return nil, err } f := &Fs{ name: name, opt: *opt, ik: ik, pacer: fs.NewPacer(ctx, 
pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } f.root = path.Join("/", root) f.features = (&fs.Features{ CaseInsensitive: false, DuplicateFiles: false, ReadMimeType: true, WriteMimeType: false, CanHaveEmptyDirectories: true, BucketBased: false, ServerSideAcrossConfigs: false, IsLocal: false, SlowHash: true, ReadMetadata: true, WriteMetadata: false, UserMetadata: false, FilterAware: true, PartialUploads: false, NoMultiThreading: false, }).Fill(ctx, f) if f.root != "/" { r := f.root folderPath := f.EncodePath(r[:strings.LastIndex(r, "/")+1]) fileName := f.EncodeFileName(r[strings.LastIndex(r, "/")+1:]) file := f.getFileByName(ctx, folderPath, fileName) if file != nil { newRoot := path.Dir(f.root) f.root = newRoot return f, fs.ErrorIsFile } } return f, nil } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return strings.TrimLeft(f.root, "/") } // String returns a description of the FS func (f *Fs) String() string { return fmt.Sprintf("FS imagekit: %s", f.root) } // Precision of the ModTimes in this Fs func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } // Hashes returns the supported hash types of the filesystem. func (f *Fs) Hashes() hash.Set { return hash.NewHashSet() } // Features returns the optional features of this Fs. func (f *Fs) Features() *fs.Features { return f.features } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. 
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { remote := path.Join(f.root, dir) remote = f.EncodePath(remote) if remote != "/" { parentFolderPath, folderName := path.Split(remote) folderExists, err := f.getFolderByName(ctx, parentFolderPath, folderName) if err != nil { return make(fs.DirEntries, 0), err } if folderExists == nil { return make(fs.DirEntries, 0), fs.ErrorDirNotFound } } folders, folderError := f.getFolders(ctx, remote) if folderError != nil { return make(fs.DirEntries, 0), folderError } files, fileError := f.getFiles(ctx, remote, f.opt.Versions) if fileError != nil { return make(fs.DirEntries, 0), fileError } res := make([]fs.DirEntry, 0, len(folders)+len(files)) for _, folder := range folders { folderPath := f.DecodePath(strings.TrimLeft(strings.Replace(folder.FolderPath, f.EncodePath(f.root), "", 1), "/")) res = append(res, fs.NewDir(folderPath, folder.UpdatedAt)) } for _, file := range files { res = append(res, f.newObject(ctx, remote, file)) } return res, nil } func (f *Fs) newObject(ctx context.Context, remote string, file client.File) *Object { remoteFile := strings.TrimLeft(strings.Replace(file.FilePath, f.EncodePath(f.root), "", 1), "/") folderPath, fileName := path.Split(remoteFile) folderPath = f.DecodePath(folderPath) fileName = f.DecodeFileName(fileName) remoteFile = path.Join(folderPath, fileName) if file.Type == "file-version" { remoteFile = version.Add(remoteFile, file.UpdatedAt) return &Object{ fs: f, remote: remoteFile, filePath: file.FilePath, contentType: file.Mime, timestamp: file.UpdatedAt, file: file, versionID: file.VersionInfo["id"], } } return &Object{ fs: f, remote: remoteFile, filePath: file.FilePath, contentType: file.Mime, timestamp: file.UpdatedAt, file: file, } } // NewObject finds the Object at remote. If it can't be found // it returns the error ErrorObjectNotFound. 
// // If remote points to a directory then it should return // ErrorIsDir if possible without doing any extra work, // otherwise ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { r := path.Join(f.root, remote) folderPath, fileName := path.Split(r) folderPath = f.EncodePath(folderPath) fileName = f.EncodeFileName(fileName) isFolder, err := f.getFolderByName(ctx, folderPath, fileName) if err != nil { return nil, err } if isFolder != nil { return nil, fs.ErrorIsDir } file := f.getFileByName(ctx, folderPath, fileName) if file == nil { return nil, fs.ErrorObjectNotFound } return f.newObject(ctx, r, *file), nil } // Put in to the remote path with the modTime given of the given size // // When called from outside an Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Put should either // return an error or upload it properly (rather than e.g. calling panic). // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { if src.Size() == 0 { return nil, fs.ErrorCantUploadEmptyFiles } return uploadFile(ctx, f, in, src.Remote(), options...) 
} // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { remote := path.Join(f.root, dir) parentFolderPath, folderName := path.Split(remote) parentFolderPath = f.EncodePath(parentFolderPath) folderName = f.EncodeFileName(folderName) err = f.pacer.Call(func() (bool, error) { var res *http.Response res, err = f.ik.CreateFolder(ctx, client.CreateFolderParam{ ParentFolderPath: parentFolderPath, FolderName: folderName, }) return f.shouldRetry(ctx, res, err) }) return err } // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) { entries, err := f.List(ctx, dir) if err != nil { return err } if len(entries) > 0 { return errors.New("directory is not empty") } err = f.pacer.Call(func() (bool, error) { var res *http.Response res, err = f.ik.DeleteFolder(ctx, client.DeleteFolderParam{ FolderPath: f.EncodePath(path.Join(f.root, dir)), }) if res.StatusCode == http.StatusNotFound { return false, fs.ErrorDirNotFound } return f.shouldRetry(ctx, res, err) }) return err } // Purge deletes all the files and the container // // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge(ctx context.Context, dir string) (err error) { remote := path.Join(f.root, dir) err = f.pacer.Call(func() (bool, error) { var res *http.Response res, err = f.ik.DeleteFolder(ctx, client.DeleteFolderParam{ FolderPath: f.EncodePath(remote), }) if res.StatusCode == http.StatusNotFound { return false, fs.ErrorDirNotFound } return f.shouldRetry(ctx, res, err) }) return err } // PublicLink generates a public link to the remote path (usually readable by anyone) func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) 
{ duration := time.Duration(math.Abs(float64(expire))) expireSeconds := duration.Seconds() fileRemote := path.Join(f.root, remote) folderPath, fileName := path.Split(fileRemote) folderPath = f.EncodePath(folderPath) fileName = f.EncodeFileName(fileName) file := f.getFileByName(ctx, folderPath, fileName) if file == nil { return "", fs.ErrorObjectNotFound } // Pacer not needed as this doesn't use the API url, err := f.ik.URL(client.URLParam{ Src: file.URL, Signed: *file.IsPrivateFile || f.opt.OnlySigned, ExpireSeconds: int64(expireSeconds), QueryParameters: map[string]string{ "updatedAt": file.UpdatedAt.String(), }, }) if err != nil { return "", err } return url, nil } // Fs returns read only access to the Fs that this object is part of func (o *Object) Fs() fs.Info { return o.fs } // Hash returns the selected checksum of the file // If no checksum is available it returns "" func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) { return "", hash.ErrUnsupported } // Storable says whether this object can be stored func (o *Object) Storable() bool { return true } // String returns a description of the Object func (o *Object) String() string { if o == nil { return "<nil>" } return o.file.Name } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // ModTime returns the modification date of the file // It should return a best guess if one isn't available func (o *Object) ModTime(context.Context) time.Time { return o.file.UpdatedAt } // Size returns the size of the file func (o *Object) Size() int64 { return int64(o.file.Size) } // MimeType returns the MIME type of the file func (o *Object) MimeType(context.Context) string { return o.contentType } // Open opens the file for read. 
Call Close() on the returned io.ReadCloser func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { // Offset and Count for range download var offset int64 var count int64 fs.FixRangeOption(options, -1) partialContent := false for _, option := range options { switch x := option.(type) { case *fs.RangeOption: offset, count = x.Decode(-1) partialContent = true case *fs.SeekOption: offset = x.Offset partialContent = true default: if option.Mandatory() { fs.Logf(o, "Unsupported mandatory option: %v", option) } } } // Pacer not needed as this doesn't use the API url, err := o.fs.ik.URL(client.URLParam{ Src: o.file.URL, Signed: *o.file.IsPrivateFile || o.fs.opt.OnlySigned, QueryParameters: map[string]string{ "tr": "orig-true", "updatedAt": o.file.UpdatedAt.String(), }, }) if err != nil { return nil, err } client := &http.Client{} req, _ := http.NewRequest("GET", url, nil) req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+count-1)) resp, err := client.Do(req) if err != nil { return nil, err } end := resp.ContentLength if partialContent && resp.StatusCode == http.StatusOK { skip := offset if offset < 0 { skip = end + offset + 1 } _, err = io.CopyN(io.Discard, resp.Body, skip) if err != nil { if resp != nil { _ = resp.Body.Close() } return nil, err } return readers.NewLimitedReadCloser(resp.Body, end-skip), nil } return resp.Body, nil } // Update in to the object with the modTime given of the given size // // When called from outside an Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either // return an error or update the object properly (rather than e.g. calling panic). 
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { if src.Size() == 0 { return fs.ErrorCantUploadEmptyFiles } srcRemote := o.Remote() remote := path.Join(o.fs.root, srcRemote) folderPath, fileName := path.Split(remote) UseUniqueFileName := new(bool) *UseUniqueFileName = false var resp *client.UploadResult err = o.fs.pacer.CallNoRetry(func() (bool, error) { var res *http.Response res, resp, err = o.fs.ik.Upload(ctx, in, client.UploadParam{ FileName: fileName, Folder: folderPath, IsPrivateFile: o.file.IsPrivateFile, }) return o.fs.shouldRetry(ctx, res, err) }) if err != nil { return err } fileID := resp.FileID _, file, err := o.fs.ik.File(ctx, fileID) if err != nil { return err } o.file = *file return nil } // Remove this object func (o *Object) Remove(ctx context.Context) (err error) { err = o.fs.pacer.Call(func() (bool, error) { var res *http.Response res, err = o.fs.ik.DeleteFile(ctx, o.file.FileID) return o.fs.shouldRetry(ctx, res, err) }) return err } // SetModTime sets the metadata on the object to set the modification date func (o *Object) SetModTime(ctx context.Context, t time.Time) error { return fs.ErrorCantSetModTime } func uploadFile(ctx context.Context, f *Fs, in io.Reader, srcRemote string, options ...fs.OpenOption) (fs.Object, error) { remote := path.Join(f.root, srcRemote) folderPath, fileName := path.Split(remote) folderPath = f.EncodePath(folderPath) fileName = f.EncodeFileName(fileName) UseUniqueFileName := new(bool) *UseUniqueFileName = false err := f.pacer.CallNoRetry(func() (bool, error) { var res *http.Response var err error res, _, err = f.ik.Upload(ctx, in, client.UploadParam{ FileName: fileName, Folder: folderPath, IsPrivateFile: &f.opt.OnlySigned, }) return f.shouldRetry(ctx, res, err) }) if err != nil { return nil, err } return f.NewObject(ctx, srcRemote) } // Metadata returns the metadata for the object func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, 
err error) { metadata.Set("btime", o.file.CreatedAt.Format(time.RFC3339)) metadata.Set("size", strconv.FormatUint(o.file.Size, 10)) metadata.Set("file-type", o.file.FileType) metadata.Set("height", strconv.Itoa(o.file.Height)) metadata.Set("width", strconv.Itoa(o.file.Width)) metadata.Set("has-alpha", strconv.FormatBool(o.file.HasAlpha)) for k, v := range o.file.EmbeddedMetadata { metadata.Set(k, fmt.Sprint(v)) } if o.file.Tags != nil { metadata.Set("tags", strings.Join(o.file.Tags, ",")) } if o.file.CustomCoordinates != nil { metadata.Set("custom-coordinates", *o.file.CustomCoordinates) } if o.file.IsPrivateFile != nil { metadata.Set("is-private-file", strconv.FormatBool(*o.file.IsPrivateFile)) } if o.file.AITags != nil { googleTags := []string{} awsTags := []string{} for _, tag := range o.file.AITags { if tag.Source == "google-auto-tagging" { googleTags = append(googleTags, tag.Name) } else if tag.Source == "aws-auto-tagging" { awsTags = append(awsTags, tag.Name) } } if len(googleTags) > 0 { metadata.Set("google-tags", strings.Join(googleTags, ",")) } if len(awsTags) > 0 { metadata.Set("aws-tags", strings.Join(awsTags, ",")) } } return metadata, nil } // Check the interfaces are satisfied. var ( _ fs.Fs = &Fs{} _ fs.Purger = &Fs{} _ fs.PublicLinker = &Fs{} _ fs.Object = &Object{} )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/imagekit/client/client.go
backend/imagekit/client/client.go
// Package client provides a client for interacting with the ImageKit API. package client import ( "context" "fmt" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/lib/rest" ) // ImageKit main struct type ImageKit struct { Prefix string UploadPrefix string Timeout int64 UploadTimeout int64 PrivateKey string PublicKey string URLEndpoint string HTTPClient *rest.Client } // NewParams is a struct to define parameters to imagekit type NewParams struct { PrivateKey string PublicKey string URLEndpoint string } // New returns ImageKit object from environment variables func New(ctx context.Context, params NewParams) (*ImageKit, error) { privateKey := params.PrivateKey publicKey := params.PublicKey endpointURL := params.URLEndpoint switch { case privateKey == "": return nil, fmt.Errorf("ImageKit.io URL endpoint is required") case publicKey == "": return nil, fmt.Errorf("ImageKit.io public key is required") case endpointURL == "": return nil, fmt.Errorf("ImageKit.io private key is required") } cliCtx, cliCfg := fs.AddConfig(ctx) cliCfg.UserAgent = "rclone/imagekit" client := rest.NewClient(fshttp.NewClient(cliCtx)) client.SetUserPass(privateKey, "") client.SetHeader("Accept", "application/json") return &ImageKit{ Prefix: "https://api.imagekit.io/v2", UploadPrefix: "https://upload.imagekit.io/api/v2", Timeout: 60, UploadTimeout: 3600, PrivateKey: params.PrivateKey, PublicKey: params.PublicKey, URLEndpoint: params.URLEndpoint, HTTPClient: client, }, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/imagekit/client/media.go
backend/imagekit/client/media.go
package client import ( "context" "errors" "fmt" "net/http" "net/url" "time" "github.com/rclone/rclone/lib/rest" "gopkg.in/validator.v2" ) // FilesOrFolderParam struct is a parameter type to ListFiles() function to search / list media library files. type FilesOrFolderParam struct { Path string `json:"path,omitempty"` Limit int `json:"limit,omitempty"` Skip int `json:"skip,omitempty"` SearchQuery string `json:"searchQuery,omitempty"` } // AITag represents an AI tag for a media library file. type AITag struct { Name string `json:"name"` Confidence float32 `json:"confidence"` Source string `json:"source"` } // File represents media library File details. type File struct { FileID string `json:"fileId"` Name string `json:"name"` FilePath string `json:"filePath"` Type string `json:"type"` VersionInfo map[string]string `json:"versionInfo"` IsPrivateFile *bool `json:"isPrivateFile"` CustomCoordinates *string `json:"customCoordinates"` URL string `json:"url"` Thumbnail string `json:"thumbnail"` FileType string `json:"fileType"` Mime string `json:"mime"` Height int `json:"height"` Width int `json:"Width"` Size uint64 `json:"size"` HasAlpha bool `json:"hasAlpha"` CustomMetadata map[string]any `json:"customMetadata,omitempty"` EmbeddedMetadata map[string]any `json:"embeddedMetadata"` CreatedAt time.Time `json:"createdAt"` UpdatedAt time.Time `json:"updatedAt"` Tags []string `json:"tags"` AITags []AITag `json:"AITags"` } // Folder represents media library Folder details. 
type Folder struct { *File FolderPath string `json:"folderPath"` } // CreateFolderParam represents parameter to create folder api type CreateFolderParam struct { FolderName string `validate:"nonzero" json:"folderName"` ParentFolderPath string `validate:"nonzero" json:"parentFolderPath"` } // DeleteFolderParam represents parameter to delete folder api type DeleteFolderParam struct { FolderPath string `validate:"nonzero" json:"folderPath"` } // MoveFolderParam represents parameter to move folder api type MoveFolderParam struct { SourceFolderPath string `validate:"nonzero" json:"sourceFolderPath"` DestinationPath string `validate:"nonzero" json:"destinationPath"` } // JobIDResponse represents response struct with JobID for folder operations type JobIDResponse struct { JobID string `json:"jobId"` } // JobStatus represents response Data to job status api type JobStatus struct { JobID string `json:"jobId"` Type string `json:"type"` Status string `json:"status"` } // File represents media library File details. func (ik *ImageKit) File(ctx context.Context, fileID string) (*http.Response, *File, error) { data := &File{} response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{ Method: "GET", Path: fmt.Sprintf("/files/%s/details", fileID), RootURL: ik.Prefix, IgnoreStatus: true, }, nil, data) return response, data, err } // Files retrieves media library files. Filter options can be supplied as FilesOrFolderParam. 
func (ik *ImageKit) Files(ctx context.Context, params FilesOrFolderParam, includeVersion bool) (*http.Response, *[]File, error) { var SearchQuery = `type = "file"` if includeVersion { SearchQuery = `type IN ["file", "file-version"]` } if params.SearchQuery != "" { SearchQuery = params.SearchQuery } parameters := url.Values{} parameters.Set("skip", fmt.Sprintf("%d", params.Skip)) parameters.Set("limit", fmt.Sprintf("%d", params.Limit)) parameters.Set("path", params.Path) parameters.Set("searchQuery", SearchQuery) data := &[]File{} response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{ Method: "GET", Path: "/files", RootURL: ik.Prefix, Parameters: parameters, }, nil, data) return response, data, err } // DeleteFile removes file by FileID from media library func (ik *ImageKit) DeleteFile(ctx context.Context, fileID string) (*http.Response, error) { var err error if fileID == "" { return nil, errors.New("fileID can not be empty") } response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{ Method: "DELETE", Path: fmt.Sprintf("/files/%s", fileID), RootURL: ik.Prefix, NoResponse: true, }, nil, nil) return response, err } // Folders retrieves media library files. Filter options can be supplied as FilesOrFolderParam. 
func (ik *ImageKit) Folders(ctx context.Context, params FilesOrFolderParam) (*http.Response, *[]Folder, error) { var SearchQuery = `type = "folder"` if params.SearchQuery != "" { SearchQuery = params.SearchQuery } parameters := url.Values{} parameters.Set("skip", fmt.Sprintf("%d", params.Skip)) parameters.Set("limit", fmt.Sprintf("%d", params.Limit)) parameters.Set("path", params.Path) parameters.Set("searchQuery", SearchQuery) data := &[]Folder{} resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{ Method: "GET", Path: "/files", RootURL: ik.Prefix, Parameters: parameters, }, nil, data) if err != nil { return resp, data, err } return resp, data, err } // CreateFolder creates a new folder in media library func (ik *ImageKit) CreateFolder(ctx context.Context, param CreateFolderParam) (*http.Response, error) { var err error if err = validator.Validate(&param); err != nil { return nil, err } response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{ Method: "POST", Path: "/folder", RootURL: ik.Prefix, NoResponse: true, }, param, nil) return response, err } // DeleteFolder removes the folder from media library func (ik *ImageKit) DeleteFolder(ctx context.Context, param DeleteFolderParam) (*http.Response, error) { var err error if err = validator.Validate(&param); err != nil { return nil, err } response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{ Method: "DELETE", Path: "/folder", RootURL: ik.Prefix, NoResponse: true, }, param, nil) return response, err } // MoveFolder moves given folder to new path in media library func (ik *ImageKit) MoveFolder(ctx context.Context, param MoveFolderParam) (*http.Response, *JobIDResponse, error) { var err error var response = &JobIDResponse{} if err = validator.Validate(&param); err != nil { return nil, nil, err } resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{ Method: "PUT", Path: "bulkJobs/moveFolder", RootURL: ik.Prefix, }, param, response) return resp, response, err } // BulkJobStatus retrieves the status of a bulk job by job 
ID. func (ik *ImageKit) BulkJobStatus(ctx context.Context, jobID string) (*http.Response, *JobStatus, error) { var err error var response = &JobStatus{} if jobID == "" { return nil, nil, errors.New("jobId can not be blank") } resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{ Method: "GET", Path: "bulkJobs/" + jobID, RootURL: ik.Prefix, }, nil, response) return resp, response, err }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/imagekit/client/url.go
backend/imagekit/client/url.go
// Package client provides a client for the ImageKit media library API.
package client

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	neturl "net/url"
	"strconv"
	"strings"
	"time"
)

// URLParam represents parameters for generating url
type URLParam struct {
	Path            string
	Src             string
	URLEndpoint     string
	Signed          bool
	ExpireSeconds   int64
	QueryParameters map[string]string
}

// URL generates url from URLParam
func (ik *ImageKit) URL(params URLParam) (string, error) {
	// Pick the endpoint: an explicit per-call endpoint wins over the
	// client-wide one. Normalise it to exactly one trailing slash so the
	// prefix strip below behaves predictably.
	endpoint := params.URLEndpoint
	if endpoint == "" {
		endpoint = ik.URLEndpoint
	}
	endpoint = strings.TrimRight(endpoint, "/") + "/"

	parsed, err := neturl.Parse(params.Src)
	if err != nil {
		return "", err
	}

	// Merge any caller-supplied query parameters into those already on Src.
	values := parsed.Query()
	for key, value := range params.QueryParameters {
		values.Set(key, value)
	}
	parsed.RawQuery = values.Encode()

	result := parsed.String()
	if !params.Signed {
		return result, nil
	}

	// Sign the endpoint-relative URL plus the expiry timestamp with
	// HMAC-SHA1 keyed by the account's private key, then append the
	// expiry (ik-t) and signature (ik-s) as query parameters.
	expires := strconv.FormatInt(time.Now().Unix()+params.ExpireSeconds, 10)
	signedPath := strings.Replace(result, endpoint, "", 1) + expires

	mac := hmac.New(sha1.New, []byte(ik.PrivateKey))
	mac.Write([]byte(signedPath))
	signature := hex.EncodeToString(mac.Sum(nil))

	separator := "?"
	if strings.Contains(result, "?") {
		separator = "&"
	}
	return result + separator + fmt.Sprintf("ik-t=%s&ik-s=%s", expires, signature), nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/imagekit/client/upload.go
backend/imagekit/client/upload.go
package client

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"

	"github.com/rclone/rclone/lib/rest"
)

// UploadParam defines upload parameters
type UploadParam struct {
	FileName      string `json:"fileName"`
	Folder        string `json:"folder,omitempty"` // default value: /
	Tags          string `json:"tags,omitempty"`
	IsPrivateFile *bool  `json:"isPrivateFile,omitempty"` // default: false
}

// UploadResult defines the response structure for the upload API
type UploadResult struct {
	FileID       string `json:"fileId"`
	Name         string `json:"name"`
	URL          string `json:"url"`
	ThumbnailURL string `json:"thumbnailUrl"`
	Height       int    `json:"height"`
	// NOTE(review): tag case differs from the other fields — confirm
	// against the API response before changing.
	Width       int               `json:"Width"`
	Size        uint64            `json:"size"`
	FilePath    string            `json:"filePath"`
	AITags      []map[string]any  `json:"AITags"`
	VersionInfo map[string]string `json:"versionInfo"`
}

// Upload uploads an asset to a imagekit account.
//
// The asset can be:
//   - the actual data (io.Reader)
//   - the Data URI (Base64 encoded), max ~60 MB (62,910,000 chars)
//   - the remote FTP, HTTP or HTTPS URL address of an existing file
//
// https://docs.imagekit.io/api-reference/upload-file-api/server-side-file-upload
func (ik *ImageKit) Upload(ctx context.Context, file io.Reader, param UploadParam) (*http.Response, *UploadResult, error) {
	if param.FileName == "" {
		return nil, nil, errors.New("Upload: Filename is required")
	}

	// Build the multipart form fields. Optional fields are only sent
	// when set so the API's defaults apply otherwise.
	formParams := url.Values{}
	formParams.Add("useUniqueFileName", fmt.Sprint(false))
	formParams.Add("fileName", param.FileName) // guaranteed non-empty by the check above
	if param.Tags != "" {
		formParams.Add("tags", param.Tags)
	}
	if param.Folder != "" {
		formParams.Add("folder", param.Folder)
	}
	if param.IsPrivateFile != nil {
		formParams.Add("isPrivateFile", fmt.Sprintf("%v", *param.IsPrivateFile))
	}

	formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err)
	}

	response := &UploadResult{}
	opts := rest.Opts{
		Method:      "POST",
		Path:        "/files/upload",
		RootURL:     ik.UploadPrefix,
		Body:        formReader,
		ContentType: contentType,
	}

	resp, err := ik.HTTPClient.CallJSON(ctx, &opts, nil, response)
	return resp, response, err
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/b2/b2.go
backend/b2/b2.go
// Package b2 provides an interface to the Backblaze B2 object storage system. package b2 // FIXME should we remove sha1 checks from here as rclone now supports // checking SHA1s? import ( "bufio" "bytes" "context" "crypto/md5" "crypto/sha1" "encoding/base64" "encoding/json" "errors" "fmt" gohash "hash" "io" "net/http" "path" "slices" "strconv" "strings" "sync" "time" "github.com/rclone/rclone/backend/b2/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/multipart" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pool" "github.com/rclone/rclone/lib/rest" ) const ( defaultEndpoint = "https://api.backblazeb2.com" headerPrefix = "x-bz-info-" // lower case as that is what the server returns timeKey = "src_last_modified_millis" timeHeader = headerPrefix + timeKey sha1Key = "large_file_sha1" sha1Header = "X-Bz-Content-Sha1" testModeHeader = "X-Bz-Test-Mode" idHeader = "X-Bz-File-Id" nameHeader = "X-Bz-File-Name" timestampHeader = "X-Bz-Upload-Timestamp" retryAfterHeader = "Retry-After" sseAlgorithmHeader = "X-Bz-Server-Side-Encryption-Customer-Algorithm" sseKeyHeader = "X-Bz-Server-Side-Encryption-Customer-Key" sseMd5Header = "X-Bz-Server-Side-Encryption-Customer-Key-Md5" minSleep = 10 * time.Millisecond maxSleep = 5 * time.Minute decayConstant = 1 // bigger for slower decay, exponential maxParts = 10000 maxVersions = 100 // maximum number of versions we search in --b2-versions mode minChunkSize = 5 * fs.Mebi defaultChunkSize = 96 * fs.Mebi defaultUploadCutoff = 200 * fs.Mebi largeFileCopyCutoff = 4 * fs.Gibi 
// 5E9 is the max defaultMaxAge = 24 * time.Hour ) // Globals var ( errNotWithVersions = errors.New("can't modify files in --b2-versions mode") errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode") ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "b2", Description: "Backblaze B2", NewFs: NewFs, CommandHelp: commandHelp, Options: []fs.Option{{ Name: "account", Help: "Account ID or Application Key ID.", Required: true, Sensitive: true, }, { Name: "key", Help: "Application Key.", Required: true, Sensitive: true, }, { Name: "endpoint", Help: "Endpoint for the service.\n\nLeave blank normally.", Advanced: true, }, { Name: "test_mode", Help: `A flag string for X-Bz-Test-Mode header for debugging. This is for debugging purposes only. Setting it to one of the strings below will cause b2 to return specific errors: * "fail_some_uploads" * "expire_some_account_authorization_tokens" * "force_cap_exceeded" These will be set in the "X-Bz-Test-Mode" header which is documented in the [b2 integrations checklist](https://www.backblaze.com/docs/cloud-storage-integration-checklist).`, Default: "", Hide: fs.OptionHideConfigurator, Advanced: true, }, { Name: "versions", Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.", Default: false, Advanced: true, }, { Name: "version_at", Help: "Show file versions as they were at the specified time.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.", Default: fs.Time{}, Advanced: true, }, { Name: "hard_delete", Help: "Permanently delete files on remote removal, otherwise hide files.", Default: false, }, { Name: "upload_cutoff", Help: `Cutoff for switching to chunked upload. Files above this size will be uploaded in chunks of "--b2-chunk-size". 
This value should be set no larger than 4.657 GiB (== 5 GB).`, Default: defaultUploadCutoff, Advanced: true, }, { Name: "copy_cutoff", Help: `Cutoff for switching to multipart copy. Any files larger than this that need to be server-side copied will be copied in chunks of this size. The minimum is 0 and the maximum is 4.6 GiB.`, Default: largeFileCopyCutoff, Advanced: true, }, { Name: "chunk_size", Help: `Upload chunk size. When uploading large files, chunk the file into this size. Must fit in memory. These chunks are buffered in memory and there might a maximum of "--transfers" chunks in progress at once. 5,000,000 Bytes is the minimum size.`, Default: defaultChunkSize, Advanced: true, }, { Name: "upload_concurrency", Help: `Concurrency for multipart uploads. This is the number of chunks of the same file that are uploaded concurrently. Note that chunks are stored in memory and there may be up to "--transfers" * "--b2-upload-concurrency" chunks stored at once in memory.`, Default: 4, Advanced: true, }, { Name: "disable_checksum", Help: `Disable checksums for large (> upload cutoff) files. Normally rclone will calculate the SHA1 checksum of the input before uploading it so it can add it to metadata on the object. This is great for data integrity checking but can cause long delays for large files to start uploading.`, Default: false, Advanced: true, }, { Name: "download_url", Help: `Custom endpoint for downloads. This is usually set to a Cloudflare CDN URL as Backblaze offers free egress for data downloaded through the Cloudflare network. Rclone works with private buckets by sending an "Authorization" header. If the custom endpoint rewrites the requests for authentication, e.g., in Cloudflare Workers, this header needs to be handled properly. Leave blank if you want to use the endpoint provided by Backblaze. 
The URL provided here SHOULD have the protocol and SHOULD NOT have a trailing slash or specify the /file/bucket subpath as rclone will request files with "{download_url}/file/{bucket_name}/{path}". Example: > https://mysubdomain.mydomain.tld (No trailing "/", "file" or "bucket")`, Advanced: true, }, { Name: "download_auth_duration", Help: `Time before the public link authorization token will expire in s or suffix ms|s|m|h|d. This is used in combination with "rclone link" for making files accessible to the public and sets the duration before the download authorization token will expire. The minimum value is 1 second. The maximum value is one week.`, Default: fs.Duration(7 * 24 * time.Hour), Advanced: true, }, { Name: "memory_pool_flush_time", Default: fs.Duration(time.Minute), Advanced: true, Hide: fs.OptionHideBoth, Help: `How often internal memory buffer pools will be flushed. (no longer used)`, }, { Name: "memory_pool_use_mmap", Default: false, Advanced: true, Hide: fs.OptionHideBoth, Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`, }, { Name: "lifecycle", Help: `Set the number of days deleted files should be kept when creating a bucket. On bucket creation, this parameter is used to create a lifecycle rule for the entire bucket. If lifecycle is 0 (the default) it does not create a lifecycle rule so the default B2 behaviour applies. This is to create versions of files on delete and overwrite and to keep them indefinitely. If lifecycle is >0 then it creates a single rule setting the number of days before a file that is deleted or overwritten is deleted permanently. This is known as daysFromHidingToDeleting in the b2 docs. The minimum value for this parameter is 1 day. You can also enable hard_delete in the config also which will mean deletions won't cause versions but overwrites will still cause versions to be made. See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket creation. 
`, Default: 0, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // See: https://www.backblaze.com/docs/cloud-storage-files // Encode invalid UTF-8 bytes as json doesn't handle them properly. // FIXME: allow /, but not leading, trailing or double Default: (encoder.Display | encoder.EncodeBackSlash | encoder.EncodeInvalidUtf8), }, { Name: "sse_customer_algorithm", Help: "If using SSE-C, the server-side encryption algorithm used when storing this object in B2.", Advanced: true, Examples: []fs.OptionExample{{ Value: "", Help: "None", }, { Value: "AES256", Help: "Advanced Encryption Standard (256 bits key length)", }}, }, { Name: "sse_customer_key", Help: `To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data Alternatively you can provide --sse-customer-key-base64.`, Advanced: true, Examples: []fs.OptionExample{{ Value: "", Help: "None", }}, Sensitive: true, }, { Name: "sse_customer_key_base64", Help: `To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data Alternatively you can provide --sse-customer-key.`, Advanced: true, Examples: []fs.OptionExample{{ Value: "", Help: "None", }}, Sensitive: true, }, { Name: "sse_customer_key_md5", Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional). If you leave it blank, this is calculated automatically from the sse_customer_key provided. 
`, Advanced: true, Examples: []fs.OptionExample{{ Value: "", Help: "None", }}, Sensitive: true, }}, }) } // Options defines the configuration for this backend type Options struct { Account string `config:"account"` Key string `config:"key"` Endpoint string `config:"endpoint"` TestMode string `config:"test_mode"` Versions bool `config:"versions"` VersionAt fs.Time `config:"version_at"` HardDelete bool `config:"hard_delete"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` CopyCutoff fs.SizeSuffix `config:"copy_cutoff"` ChunkSize fs.SizeSuffix `config:"chunk_size"` UploadConcurrency int `config:"upload_concurrency"` DisableCheckSum bool `config:"disable_checksum"` DownloadURL string `config:"download_url"` DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"` Lifecycle int `config:"lifecycle"` Enc encoder.MultiEncoder `config:"encoding"` SSECustomerAlgorithm string `config:"sse_customer_algorithm"` SSECustomerKey string `config:"sse_customer_key"` SSECustomerKeyBase64 string `config:"sse_customer_key_base64"` SSECustomerKeyMD5 string `config:"sse_customer_key_md5"` } // Fs represents a remote b2 server type Fs struct { name string // name of this remote root string // the path we are working on if any opt Options // parsed config options ci *fs.ConfigInfo // global config features *fs.Features // optional features srv *rest.Client // the connection to the b2 server rootBucket string // bucket part of root (if any) rootDirectory string // directory part of root (if any) cache *bucket.Cache // cache for bucket creation status bucketIDMutex sync.Mutex // mutex to protect _bucketID _bucketID map[string]string // the ID of the bucket we are working on bucketTypeMutex sync.Mutex // mutex to protect _bucketType _bucketType map[string]string // the Type of the bucket we are working on info api.AuthorizeAccountResponse // result of authorize call uploadMu sync.Mutex // lock for upload variable uploads map[string][]*api.GetUploadURLResponse // Upload 
URLs by buckedID authMu sync.Mutex // lock for authorizing the account pacer *fs.Pacer // To pace and retry the API calls uploadToken *pacer.TokenDispenser // control concurrency } // Object describes a b2 object type Object struct { fs *Fs // what this object is part of remote string // The remote path id string // b2 id of the file modTime time.Time // The modified time of the object if known sha1 string // SHA-1 hash if known size int64 // Size of the object mimeType string // Content-Type of the object } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { if f.rootBucket == "" { return "B2 root" } if f.rootDirectory == "" { return fmt.Sprintf("B2 bucket %s", f.rootBucket) } return fmt.Sprintf("B2 bucket %s path %s", f.rootBucket, f.rootDirectory) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // parsePath parses a remote 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // split returns bucket and bucketPath from the rootRelativePath // relative to f.root func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) { return bucket.Split(path.Join(f.root, rootRelativePath)) } // split returns bucket and bucketPath from the object func (o *Object) split() (bucket, bucketPath string) { return o.fs.split(o.remote) } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 401, // Unauthorized (e.g. "Token has expired") 408, // Request Timeout 429, // Rate exceeded. 
500, // Get occasional 500 Internal Server Error 503, // Service Unavailable 504, // Gateway Time-out } // shouldRetryNoReauth returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } // For 429 or 503 errors look at the Retry-After: header and // set the retry appropriately, starting with a minimum of 1 // second if it isn't set. if resp != nil && (resp.StatusCode == 429 || resp.StatusCode == 503) { var retryAfter = 1 retryAfterString := resp.Header.Get(retryAfterHeader) if retryAfterString != "" { var err error retryAfter, err = strconv.Atoi(retryAfterString) if err != nil { fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err) } } return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second) } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. 
It returns the err as a convenience func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if resp != nil && resp.StatusCode == 401 { fs.Debugf(f, "Unauthorized: %v", err) // Reauth authErr := f.authorizeAccount(ctx) if authErr != nil { err = authErr } return true, err } return f.shouldRetryNoReauth(ctx, resp, err) } // errorHandler parses a non 2xx error response into an error func errorHandler(resp *http.Response) error { body, err := rest.ReadBody(resp) if err != nil { fs.Errorf(nil, "Couldn't read error out of body: %v", err) body = nil } // Decode error response if there was one - they can be blank errResponse := new(api.Error) if len(body) > 0 { err = json.Unmarshal(body, errResponse) if err != nil { fs.Errorf(nil, "Couldn't decode error response: %v", err) } } if errResponse.Code == "" { errResponse.Code = "unknown" } if errResponse.Status == 0 { errResponse.Status = resp.StatusCode } if errResponse.Message == "" { errResponse.Message = "Unknown " + resp.Status } return errResponse } func checkUploadChunkSize(cs fs.SizeSuffix) error { if cs < minChunkSize { return fmt.Errorf("%s is less than %s", cs, minChunkSize) } return nil } func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadChunkSize(cs) if err == nil { old, f.opt.ChunkSize = f.opt.ChunkSize, cs } return } func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error { if cs < opt.ChunkSize { return fmt.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize) } return nil } func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadCutoff(&f.opt, cs) if err == nil { old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs } return } func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadChunkSize(cs) if err == nil { old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs } return } // setRoot changes the root of the Fs func (f *Fs) setRoot(root string) { 
f.root = parsePath(root) f.rootBucket, f.rootDirectory = bucket.Split(f.root) } // NewFs constructs an Fs from the path, bucket:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } if opt.UploadCutoff < opt.ChunkSize { opt.UploadCutoff = opt.ChunkSize fs.Infof(nil, "b2: raising upload cutoff to chunk size: %v", opt.UploadCutoff) } err = checkUploadCutoff(opt, opt.UploadCutoff) if err != nil { return nil, fmt.Errorf("b2: upload cutoff: %w", err) } err = checkUploadChunkSize(opt.ChunkSize) if err != nil { return nil, fmt.Errorf("b2: chunk size: %w", err) } if opt.Account == "" { return nil, errors.New("account not found") } if opt.Key == "" { return nil, errors.New("key not found") } if opt.Endpoint == "" { opt.Endpoint = defaultEndpoint } if opt.SSECustomerKey != "" && opt.SSECustomerKeyBase64 != "" { return nil, errors.New("b2: can't use both sse_customer_key and sse_customer_key_base64 at the same time") } else if opt.SSECustomerKeyBase64 != "" { // Decode the Base64-encoded key and store it in the SSECustomerKey field decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKeyBase64) if err != nil { return nil, fmt.Errorf("b2: Could not decode sse_customer_key_base64: %w", err) } opt.SSECustomerKey = string(decoded) } else { // Encode the raw key as Base64 opt.SSECustomerKeyBase64 = base64.StdEncoding.EncodeToString([]byte(opt.SSECustomerKey)) } if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" { // Calculate CustomerKeyMd5 if not supplied md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey)) opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:]) } ci := fs.GetConfig(ctx) f := &Fs{ name: name, opt: *opt, ci: ci, srv: rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(errorHandler), cache: bucket.NewCache(), _bucketID: make(map[string]string, 1), _bucketType: 
make(map[string]string, 1), uploads: make(map[string][]*api.GetUploadURLResponse), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), uploadToken: pacer.NewTokenDispenser(ci.Transfers), } f.setRoot(root) f.features = (&fs.Features{ ReadMimeType: true, WriteMimeType: true, BucketBased: true, BucketBasedRootOK: true, ChunkWriterDoesntSeek: true, }).Fill(ctx, f) // Set the test flag if required if opt.TestMode != "" { testMode := strings.TrimSpace(opt.TestMode) f.srv.SetHeader(testModeHeader, testMode) fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode) } err = f.authorizeAccount(ctx) if err != nil { return nil, fmt.Errorf("failed to authorize account: %w", err) } // If this is a key limited to one or more buckets, one of them must exist // and be ours. if f.rootBucket != "" && len(f.info.APIs.Storage.Allowed.Buckets) != 0 { buckets := f.info.APIs.Storage.Allowed.Buckets var rootFound = false var rootID string for _, b := range buckets { allowedBucket := f.opt.Enc.ToStandardName(b.Name) if allowedBucket == "" { fs.Debugf(f, "bucket %q that application key is restricted to no longer exists", b.ID) continue } if allowedBucket == f.rootBucket { rootFound = true rootID = b.ID } } if !rootFound { return nil, fmt.Errorf("you must use bucket(s) %q with this application key", buckets) } f.cache.MarkOK(f.rootBucket) f.setBucketID(f.rootBucket, rootID) } if f.rootBucket != "" && f.rootDirectory != "" { // Check to see if the (bucket,directory) is actually an existing file oldRoot := f.root newRoot, leaf := path.Split(oldRoot) f.setRoot(newRoot) _, err := f.NewObject(ctx, leaf) if err != nil { // File doesn't exist so return old f f.setRoot(oldRoot) return f, nil } // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // authorizeAccount gets the API endpoint and auth token. Can be used // for reauthentication too. 
func (f *Fs) authorizeAccount(ctx context.Context) error {
	f.authMu.Lock()
	defer f.authMu.Unlock()
	opts := rest.Opts{
		Method:       "GET",
		Path:         "/b2api/v4/b2_authorize_account",
		RootURL:      f.opt.Endpoint,
		UserName:     f.opt.Account,
		Password:     f.opt.Key,
		ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
	}
	// Use shouldRetryNoReauth here to avoid recursing back into
	// authorizeAccount on a 401 from the auth call itself.
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
		return f.shouldRetryNoReauth(ctx, resp, err)
	})
	if err != nil {
		return fmt.Errorf("failed to authenticate: %w", err)
	}
	// Point the client at the account's storage API URL and install the
	// fresh auth token for all subsequent calls.
	f.srv.SetRoot(f.info.APIs.Storage.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
	return nil
}

// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
	return slices.Contains(f.info.APIs.Storage.Allowed.Capabilities, permission)
}

// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUploadURLResponse, err error) {
	f.uploadMu.Lock()
	defer f.uploadMu.Unlock()
	bucketID, err := f.getBucketID(ctx, bucket)
	if err != nil {
		return nil, err
	}
	// look for a stored upload URL for the correct bucketID
	uploads := f.uploads[bucketID]
	if len(uploads) > 0 {
		// Pop the first cached upload URL off the per-bucket list.
		upload, uploads = uploads[0], uploads[1:]
		f.uploads[bucketID] = uploads
		return upload, nil
	}
	// get a new upload URL since not found
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_get_upload_url",
	}
	var request = api.GetUploadURLRequest{
		BucketID: bucketID,
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &request, &upload)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("failed to get upload URL: %w", err)
	}
	return upload, nil
}

// returnUploadURL returns the UploadURL to the cache
func (f *Fs) returnUploadURL(upload *api.GetUploadURLResponse) {
	if upload == nil {
		return
	}
	f.uploadMu.Lock()
	f.uploads[upload.BucketID] = append(f.uploads[upload.BucketID], upload)
	f.uploadMu.Unlock()
}

// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (f *Fs) clearUploadURL(bucketID string) {
	f.uploadMu.Lock()
	delete(f.uploads, bucketID)
	f.uploadMu.Unlock()
}

// getRW gets a RW buffer and an upload token
//
// If noBuf is set then it just gets an upload token
//
// Must be paired with putRW to return the token (and buffer).
func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
	f.uploadToken.Get()
	if !noBuf {
		rw = multipart.NewRW()
	}
	return rw
}

// putRW returns a RW buffer to the memory pool and returns an upload
// token
//
// If buf is nil then it just returns the upload token
func (f *Fs) putRW(rw *pool.RW) {
	if rw != nil {
		_ = rw.Close()
	}
	f.uploadToken.Put()
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		// Metadata already listed - decode it without a round trip.
		err := o.decodeMetaData(info)
		if err != nil {
			return nil, err
		}
	} else {
		err := o.readMetaData(ctx) // reads info and headers, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// listFn is called from list to handle an object
type listFn func(remote string, object *api.File, isDirectory bool) error

// errEndList is a sentinel used to end the list iteration now.
// listFn should return it to end the iteration with no errors.
var errEndList = errors.New("end list")

// list lists the objects into the function supplied from
// the bucket and root supplied
//
// (bucket, directory) is the starting directory
//
// If prefix is set then it is removed from all file names.
// // If addBucket is set then it adds the bucket to the start of the // remotes generated. // // If recurse is set the function will recursively list. // // If limit is > 0 then it limits to that many files (must be less // than 1000). // // If hidden is set then it will list the hidden (deleted) files too. // // if findFile is set it will look for files called (bucket, directory) func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int, hidden bool, findFile bool, fn listFn) error { if !findFile { if prefix != "" { prefix += "/" } if directory != "" { directory += "/" } } delimiter := "" if !recurse { delimiter = "/" } bucketID, err := f.getBucketID(ctx, bucket) if err != nil { return err } chunkSize := 1000 if limit > 0 { chunkSize = limit } var request = api.ListFileNamesRequest{ BucketID: bucketID, MaxFileCount: chunkSize, Prefix: f.opt.Enc.FromStandardPath(directory), Delimiter: delimiter, } if directory != "" { request.StartFileName = f.opt.Enc.FromStandardPath(directory) } opts := rest.Opts{ Method: "POST", Path: "/b2_list_file_names", } if hidden || f.opt.VersionAt.IsSet() { opts.Path = "/b2_list_file_versions" } lastFileName := "" for { var response api.ListFileNamesResponse err := f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, &request, &response) return f.shouldRetry(ctx, resp, err) }) if err != nil { return err } for i := range response.Files { file := &response.Files[i] file.Name = f.opt.Enc.ToStandardPath(file.Name) // Finish if file name no longer has prefix if prefix != "" && !strings.HasPrefix(file.Name, prefix) { return nil } if !strings.HasPrefix(file.Name, prefix) { fs.Debugf(f, "Odd name received %q", file.Name) continue } remote := file.Name[len(prefix):] // Check for directory isDirectory := remote == "" || strings.HasSuffix(remote, "/") if isDirectory && len(remote) > 1 { remote = remote[:len(remote)-1] } if addBucket { remote = path.Join(bucket, remote) } 
if f.opt.VersionAt.IsSet() { if time.Time(file.UploadTimestamp).After(time.Time(f.opt.VersionAt)) { // Ignore versions that were created after the specified time continue } if file.Name == lastFileName { // Ignore versions before the already returned version continue } } // Send object lastFileName = file.Name err = fn(remote, file, isDirectory) if err != nil { if err == errEndList { return nil } return err } } // end if no NextFileName if response.NextFileName == nil { break } request.StartFileName = *response.NextFileName if response.NextFileID != nil { request.StartFileID = *response.NextFileID } } return nil } // Convert a list item into a DirEntry func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) { if isDirectory { d := fs.NewDir(remote, time.Time{}) return d, nil } if remote == *last { remote = object.UploadTimestamp.AddVersion(remote) } else { *last = remote } // hide objects represent deleted files which we don't list if object.Action == "hide" { return nil, nil } o, err := f.newObjectWithInfo(ctx, remote, object) if err != nil { return nil, err } return o, nil } // listDir lists a single directory func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) { last := "" err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error { entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last) if err != nil { return err } if entry != nil { return callback(entry) } return nil }) if err != nil { return err } // bucket must be present if listing succeeded f.cache.MarkOK(bucket) return nil } // listBuckets returns all the buckets to out func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) { err = f.listBucketsToFn(ctx, "", func(bucket *api.Bucket) error { d := 
fs.NewDir(bucket.Name, time.Time{}) entries = append(entries, d) return nil }) if err != nil { return nil, err } return entries, nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { return list.WithListP(ctx, dir, f) } // ListP lists the objects and directories of the Fs starting // from dir non recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error { list := list.NewHelper(callback) bucket, directory := f.split(dir) if bucket == "" { if directory != "" { return fs.ErrorListBucketRequired } entries, err := f.listBuckets(ctx) if err != nil { return err } for _, entry := range entries { err = list.Add(entry) if err != nil { return err } } } else { err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add) if err != nil { return err } } return list.Flush() }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/b2/b2_test.go
backend/b2/b2_test.go
// Test B2 filesystem interface
package b2

import (
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestB2:",
		NilObject:  (*Object)(nil),
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MinChunkSize:       minChunkSize,
			NeedMultipleChunks: true,
		},
	})
}

// SetUploadChunkSize exposes the private setUploadChunkSize to the
// integration test framework.
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadChunkSize(cs)
}

// SetUploadCutoff exposes the private setUploadCutoff to the
// integration test framework.
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadCutoff(cs)
}

// SetCopyCutoff exposes the private setCopyCutoff to the
// integration test framework.
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setCopyCutoff(cs)
}

// Compile-time checks that Fs implements the optional test interfaces
var (
	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
	_ fstests.SetCopyCutoffer     = (*Fs)(nil)
)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/b2/b2_internal_test.go
backend/b2/b2_internal_test.go
package b2

import (
	"context"
	"crypto/sha1"
	"fmt"
	"path"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/rclone/rclone/backend/b2/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/object"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/lib/version"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Test b2 string encoding
// https://www.backblaze.com/docs/cloud-storage-native-api-string-encoding
var encodeTest = []struct {
	fullyEncoded     string
	minimallyEncoded string
	plainText        string
}{
	{fullyEncoded: "%20", minimallyEncoded: "+", plainText: " "},
	{fullyEncoded: "%21", minimallyEncoded: "!", plainText: "!"},
	{fullyEncoded: "%22", minimallyEncoded: "%22", plainText: "\""},
	{fullyEncoded: "%23", minimallyEncoded: "%23", plainText: "#"},
	{fullyEncoded: "%24", minimallyEncoded: "$", plainText: "$"},
	{fullyEncoded: "%25", minimallyEncoded: "%25", plainText: "%"},
	{fullyEncoded: "%26", minimallyEncoded: "%26", plainText: "&"},
	{fullyEncoded: "%27", minimallyEncoded: "'", plainText: "'"},
	{fullyEncoded: "%28", minimallyEncoded: "(", plainText: "("},
	{fullyEncoded: "%29", minimallyEncoded: ")", plainText: ")"},
	{fullyEncoded: "%2A", minimallyEncoded: "*", plainText: "*"},
	{fullyEncoded: "%2B", minimallyEncoded: "%2B", plainText: "+"},
	{fullyEncoded: "%2C", minimallyEncoded: "%2C", plainText: ","},
	{fullyEncoded: "%2D", minimallyEncoded: "-", plainText: "-"},
	{fullyEncoded: "%2E", minimallyEncoded: ".", plainText: "."},
	{fullyEncoded: "%2F", minimallyEncoded: "/", plainText: "/"},
	{fullyEncoded: "%30", minimallyEncoded: "0", plainText: "0"},
	{fullyEncoded: "%31", minimallyEncoded: "1", plainText: "1"},
	{fullyEncoded: "%32", minimallyEncoded: "2", plainText: "2"},
	{fullyEncoded: "%33", minimallyEncoded: "3", plainText: "3"},
	{fullyEncoded: "%34", minimallyEncoded: "4", plainText: "4"},
	{fullyEncoded: "%35", minimallyEncoded: "5", plainText: "5"},
	{fullyEncoded: "%36", minimallyEncoded: "6", plainText: "6"},
	{fullyEncoded: "%37", minimallyEncoded: "7", plainText: "7"},
	{fullyEncoded: "%38", minimallyEncoded: "8", plainText: "8"},
	{fullyEncoded: "%39", minimallyEncoded: "9", plainText: "9"},
	{fullyEncoded: "%3A", minimallyEncoded: ":", plainText: ":"},
	{fullyEncoded: "%3B", minimallyEncoded: ";", plainText: ";"},
	{fullyEncoded: "%3C", minimallyEncoded: "%3C", plainText: "<"},
	{fullyEncoded: "%3D", minimallyEncoded: "=", plainText: "="},
	{fullyEncoded: "%3E", minimallyEncoded: "%3E", plainText: ">"},
	{fullyEncoded: "%3F", minimallyEncoded: "%3F", plainText: "?"},
	{fullyEncoded: "%40", minimallyEncoded: "@", plainText: "@"},
	{fullyEncoded: "%41", minimallyEncoded: "A", plainText: "A"},
	{fullyEncoded: "%42", minimallyEncoded: "B", plainText: "B"},
	{fullyEncoded: "%43", minimallyEncoded: "C", plainText: "C"},
	{fullyEncoded: "%44", minimallyEncoded: "D", plainText: "D"},
	{fullyEncoded: "%45", minimallyEncoded: "E", plainText: "E"},
	{fullyEncoded: "%46", minimallyEncoded: "F", plainText: "F"},
	{fullyEncoded: "%47", minimallyEncoded: "G", plainText: "G"},
	{fullyEncoded: "%48", minimallyEncoded: "H", plainText: "H"},
	{fullyEncoded: "%49", minimallyEncoded: "I", plainText: "I"},
	{fullyEncoded: "%4A", minimallyEncoded: "J", plainText: "J"},
	{fullyEncoded: "%4B", minimallyEncoded: "K", plainText: "K"},
	{fullyEncoded: "%4C", minimallyEncoded: "L", plainText: "L"},
	{fullyEncoded: "%4D", minimallyEncoded: "M", plainText: "M"},
	{fullyEncoded: "%4E", minimallyEncoded: "N", plainText: "N"},
	{fullyEncoded: "%4F", minimallyEncoded: "O", plainText: "O"},
	{fullyEncoded: "%50", minimallyEncoded: "P", plainText: "P"},
	{fullyEncoded: "%51", minimallyEncoded: "Q", plainText: "Q"},
	{fullyEncoded: "%52", minimallyEncoded: "R", plainText: "R"},
	{fullyEncoded: "%53", minimallyEncoded: "S", plainText: "S"},
	{fullyEncoded: "%54", minimallyEncoded: "T", plainText: "T"},
	{fullyEncoded: "%55", minimallyEncoded: "U", plainText: "U"},
	{fullyEncoded: "%56", minimallyEncoded: "V", plainText: "V"},
	{fullyEncoded: "%57", minimallyEncoded: "W", plainText: "W"},
	{fullyEncoded: "%58", minimallyEncoded: "X", plainText: "X"},
	{fullyEncoded: "%59", minimallyEncoded: "Y", plainText: "Y"},
	{fullyEncoded: "%5A", minimallyEncoded: "Z", plainText: "Z"},
	{fullyEncoded: "%5B", minimallyEncoded: "%5B", plainText: "["},
	{fullyEncoded: "%5C", minimallyEncoded: "%5C", plainText: "\\"},
	{fullyEncoded: "%5D", minimallyEncoded: "%5D", plainText: "]"},
	{fullyEncoded: "%5E", minimallyEncoded: "%5E", plainText: "^"},
	{fullyEncoded: "%5F", minimallyEncoded: "_", plainText: "_"},
	{fullyEncoded: "%60", minimallyEncoded: "%60", plainText: "`"},
	{fullyEncoded: "%61", minimallyEncoded: "a", plainText: "a"},
	{fullyEncoded: "%62", minimallyEncoded: "b", plainText: "b"},
	{fullyEncoded: "%63", minimallyEncoded: "c", plainText: "c"},
	{fullyEncoded: "%64", minimallyEncoded: "d", plainText: "d"},
	{fullyEncoded: "%65", minimallyEncoded: "e", plainText: "e"},
	{fullyEncoded: "%66", minimallyEncoded: "f", plainText: "f"},
	{fullyEncoded: "%67", minimallyEncoded: "g", plainText: "g"},
	{fullyEncoded: "%68", minimallyEncoded: "h", plainText: "h"},
	{fullyEncoded: "%69", minimallyEncoded: "i", plainText: "i"},
	{fullyEncoded: "%6A", minimallyEncoded: "j", plainText: "j"},
	{fullyEncoded: "%6B", minimallyEncoded: "k", plainText: "k"},
	{fullyEncoded: "%6C", minimallyEncoded: "l", plainText: "l"},
	{fullyEncoded: "%6D", minimallyEncoded: "m", plainText: "m"},
	{fullyEncoded: "%6E", minimallyEncoded: "n", plainText: "n"},
	{fullyEncoded: "%6F", minimallyEncoded: "o", plainText: "o"},
	{fullyEncoded: "%70", minimallyEncoded: "p", plainText: "p"},
	{fullyEncoded: "%71", minimallyEncoded: "q", plainText: "q"},
	{fullyEncoded: "%72", minimallyEncoded: "r", plainText: "r"},
	{fullyEncoded: "%73", minimallyEncoded: "s", plainText: "s"},
	{fullyEncoded: "%74", minimallyEncoded: "t", plainText: "t"},
	{fullyEncoded: "%75", minimallyEncoded: "u", plainText: "u"},
	{fullyEncoded: "%76", minimallyEncoded: "v", plainText: "v"},
	{fullyEncoded: "%77", minimallyEncoded: "w", plainText: "w"},
	{fullyEncoded: "%78", minimallyEncoded: "x", plainText: "x"},
	{fullyEncoded: "%79", minimallyEncoded: "y", plainText: "y"},
	{fullyEncoded: "%7A", minimallyEncoded: "z", plainText: "z"},
	{fullyEncoded: "%7B", minimallyEncoded: "%7B", plainText: "{"},
	{fullyEncoded: "%7C", minimallyEncoded: "%7C", plainText: "|"},
	{fullyEncoded: "%7D", minimallyEncoded: "%7D", plainText: "}"},
	{fullyEncoded: "%7E", minimallyEncoded: "~", plainText: "~"},
	{fullyEncoded: "%7F", minimallyEncoded: "%7F", plainText: "\u007f"},
	{fullyEncoded: "%E8%87%AA%E7%94%B1", minimallyEncoded: "%E8%87%AA%E7%94%B1", plainText: "自由"},
	{fullyEncoded: "%F0%90%90%80", minimallyEncoded: "%F0%90%90%80", plainText: "𐐀"},
}

// TestUrlEncode checks urlEncode against the B2 string encoding table above.
// Either the fully or the minimally encoded form is accepted.
func TestUrlEncode(t *testing.T) {
	for _, test := range encodeTest {
		got := urlEncode(test.plainText)
		if got != test.minimallyEncoded && got != test.fullyEncoded {
			t.Errorf("urlEncode(%q) got %q wanted %q or %q", test.plainText, got, test.minimallyEncoded, test.fullyEncoded)
		}
	}
}

// TestTimeString checks conversion of a time to B2's millisecond string form.
func TestTimeString(t *testing.T) {
	for _, test := range []struct {
		in   time.Time
		want string
	}{
		{fstest.Time("1970-01-01T00:00:00.000000000Z"), "0"},
		{fstest.Time("2001-02-03T04:05:10.123123123Z"), "981173110123"},
		{fstest.Time("2001-02-03T05:05:10.123123123+01:00"), "981173110123"},
	} {
		got := timeString(test.in)
		if test.want != got {
			// NOTE(review): t.Logf means a mismatch never fails the test -
			// looks like it should be t.Errorf; confirm this soft check is
			// intentional before changing.
			t.Logf("%v: want %v got %v", test.in, test.want, got)
		}
	}
}

// TestParseTimeString checks parsing of B2's millisecond string form back
// into Object.modTime.
func TestParseTimeString(t *testing.T) {
	for _, test := range []struct {
		in        string
		want      time.Time
		wantError string
	}{
		{"0", fstest.Time("1970-01-01T00:00:00.000000000Z"), ""},
		{"981173110123", fstest.Time("2001-02-03T04:05:10.123000000Z"), ""},
		{"", time.Time{}, ""},
		{"potato", time.Time{}, `strconv.ParseInt: parsing "potato": invalid syntax`},
	} {
		o := Object{}
		err := o.parseTimeString(test.in)
		got := o.modTime
		var gotError string
		if err != nil {
			gotError = err.Error()
		}
		// NOTE(review): as in TestTimeString, these use t.Logf so mismatches
		// never fail the test - confirm whether t.Errorf was intended.
		if test.want != got {
			t.Logf("%v: want %v got %v", test.in, test.want, got)
		}
		if test.wantError != gotError {
			t.Logf("%v: want error %v got error %v", test.in, test.wantError, gotError)
		}
	}
}

// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
	var headers = make(map[string]string)
	for _, option := range options {
		k, v := option.Header()
		k = strings.ToLower(k)
		if strings.HasPrefix(k, headerPrefix) {
			headers[k[len(headerPrefix):]] = v
		}
	}
	return headers
}

// internalTestMetadata uploads a gzipped test object of the given size
// (optionally forcing chunked upload via uploadCutoff/chunkSize) and checks
// that custom headers, Content-Type, modification time and upload time
// round-trip through the remote.
func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
	what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
	t.Run(what, func(t *testing.T) {
		ctx := context.Background()
		ss := fs.SizeSuffix(0)
		err := ss.Set(size)
		require.NoError(t, err)
		original := random.String(int(ss))
		contents := fstest.Gz(t, original)
		mimeType := "text/html"
		if chunkSize != "" {
			ss := fs.SizeSuffix(0)
			err := ss.Set(chunkSize)
			require.NoError(t, err)
			_, err = f.SetUploadChunkSize(ss)
			require.NoError(t, err)
		}
		if uploadCutoff != "" {
			ss := fs.SizeSuffix(0)
			err := ss.Set(uploadCutoff)
			require.NoError(t, err)
			_, err = f.SetUploadCutoff(ss)
			require.NoError(t, err)
		}
		item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
		btime := time.Now()
		metadata := fs.Metadata{
			// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any
			"mtime": "2009-05-06T04:05:06.499Z",
		}
		// Need to specify HTTP options with the header prefix since they are passed as-is
		options := []fs.OpenOption{
			&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
			&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
		}
		obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
		defer func() {
			assert.NoError(t, obj.Remove(ctx))
		}()
		o := obj.(*Object)
		gotMetadata, err := o.getMetaData(ctx)
		require.NoError(t, err)

		// X-Bz-Info-a & X-Bz-Info-b
		optMetadata := OpenOptionToMetaData(options)
		for k, v := range optMetadata {
			got := gotMetadata.Info[k]
			assert.Equal(t, v, got, k)
		}

		assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")

		// Modification time from the x-bz-info-src_last_modified_millis header
		var mtime api.Timestamp
		err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
		if err != nil {
			fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
		}
		assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")

		// Upload time
		gotBtime := time.Time(gotMetadata.UploadTimestamp)
		dt := gotBtime.Sub(btime)
		assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))

		t.Run("GzipEncoding", func(t *testing.T) {
			// Test that the gzipped file we uploaded can be
			// downloaded
			checkDownload := func(wantContents string, wantSize int64, wantHash string) {
				gotContents := fstests.ReadObject(ctx, t, o, -1)
				assert.Equal(t, wantContents, gotContents)
				assert.Equal(t, wantSize, o.Size())
				gotHash, err := o.Hash(ctx, hash.SHA1)
				require.NoError(t, err)
				assert.Equal(t, wantHash, gotHash)
			}

			t.Run("NoDecompress", func(t *testing.T) {
				checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
			})
		})
	})
}

// InternalTestMetadata runs internalTestMetadata for a small single-part
// file and a large chunked file.
func (f *Fs) InternalTestMetadata(t *testing.T) {
	// 1 kB regular file
	f.internalTestMetadata(t, "1kiB", "", "")

	// 10 MiB large file
	f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
}

// sha1Sum returns the hex SHA1 of s.
func sha1Sum(t *testing.T, s string) string {
	hash := sha1.Sum([]byte(s))
	return fmt.Sprintf("%x", hash)
}

// This is adapted from the s3 equivalent.
func (f *Fs) InternalTestVersions(t *testing.T) {
	ctx := context.Background()

	// Small pause to make the LastModified different since AWS
	// only seems to track them to 1 second granularity
	time.Sleep(2 * time.Second)

	// Create an object
	const dirName = "versions"
	const fileName = dirName + "/" + "test-versions.txt"
	contents := random.String(100)
	item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
	obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
	defer func() {
		assert.NoError(t, obj.Remove(ctx))
	}()
	objMetadata, err := obj.(*Object).getMetaData(ctx)
	require.NoError(t, err)

	// Small pause
	time.Sleep(2 * time.Second)

	// Remove it
	assert.NoError(t, obj.Remove(ctx))

	// Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity
	time.Sleep(2 * time.Second)

	// And create it with different size and contents
	newContents := random.String(101)
	newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z"))
	newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true)
	newObjMetadata, err := newObj.(*Object).getMetaData(ctx)
	require.NoError(t, err)

	t.Run("Versions", func(t *testing.T) {
		// Set --b2-versions for this test
		f.opt.Versions = true
		defer func() {
			f.opt.Versions = false
		}()

		// Read the contents
		entries, err := f.List(ctx, dirName)
		require.NoError(t, err)
		tests := 0
		var fileNameVersion string
		for _, entry := range entries {
			t.Log(entry)
			remote := entry.Remote()
			if remote == fileName {
				t.Run("ReadCurrent", func(t *testing.T) {
					assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
				})
				tests++
			} else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName {
				t.Run("ReadVersion", func(t *testing.T) {
					assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
				})
				assert.WithinDuration(t, time.Time(objMetadata.UploadTimestamp), versionTime, time.Second, "object time must be with 1 second of version time")
				fileNameVersion = remote
				tests++
			}
		}
		assert.Equal(t, 2, tests, "object missing from listing")

		// Check we can read the object with a version suffix
		t.Run("NewObject", func(t *testing.T) {
			o, err := f.NewObject(ctx, fileNameVersion)
			require.NoError(t, err)
			require.NotNil(t, o)
			assert.Equal(t, int64(100), o.Size(), o.Remote())
		})

		// Check we can make a NewFs from that object with a version suffix
		t.Run("NewFs", func(t *testing.T) {
			newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
			// Make sure --b2-versions is set in the config of the new remote
			fs.Debugf(nil, "oldPath = %q", newPath)
			lastColon := strings.LastIndex(newPath, ":")
			require.True(t, lastColon >= 0)
			newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
			fs.Debugf(nil, "newPath = %q", newPath)
			fNew, err := cache.Get(ctx, newPath)
			// This should return pointing to a file
			require.Equal(t, fs.ErrorIsFile, err)
			require.NotNil(t, fNew)
			// With the directory above
			assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
		})
	})

	t.Run("VersionAt", func(t *testing.T) {
		// We set --b2-version-at for this test so make sure we reset it at the end
		defer func() {
			f.opt.VersionAt = fs.Time{}
		}()

		var (
			firstObjectTime  = time.Time(objMetadata.UploadTimestamp)
			secondObjectTime = time.Time(newObjMetadata.UploadTimestamp)
		)

		for _, test := range []struct {
			what     string
			at       time.Time
			want     []fstest.Item
			wantErr  error
			wantSize int64
		}{
			{
				what:    "Before",
				at:      firstObjectTime.Add(-time.Second),
				want:    fstests.InternalTestFiles,
				wantErr: fs.ErrorObjectNotFound,
			},
			{
				what:     "AfterOne",
				at:       firstObjectTime.Add(time.Second),
				want:     append([]fstest.Item{item}, fstests.InternalTestFiles...),
				wantSize: 100,
			},
			{
				what:    "AfterDelete",
				at:      secondObjectTime.Add(-time.Second),
				want:    fstests.InternalTestFiles,
				wantErr: fs.ErrorObjectNotFound,
			},
			{
				what:     "AfterTwo",
				at:       secondObjectTime.Add(time.Second),
				want:     append([]fstest.Item{newItem}, fstests.InternalTestFiles...),
				wantSize: 101,
			},
		} {
			t.Run(test.what, func(t *testing.T) {
				f.opt.VersionAt = fs.Time(test.at)
				t.Run("List", func(t *testing.T) {
					fstest.CheckListing(t, f, test.want)
				})
				t.Run("NewObject", func(t *testing.T) {
					gotObj, gotErr := f.NewObject(ctx, fileName)
					assert.Equal(t, test.wantErr, gotErr)
					if gotErr == nil {
						assert.Equal(t, test.wantSize, gotObj.Size())
					}
				})
			})
		}
	})

	t.Run("Cleanup", func(t *testing.T) {
		t.Run("DryRun", func(t *testing.T) {
			f.opt.Versions = true
			defer func() {
				f.opt.Versions = false
			}()
			// Listing should be unchanged after dry run
			before := listAllFiles(ctx, t, f, dirName)
			ctx, ci := fs.AddConfig(ctx)
			ci.DryRun = true
			require.NoError(t, f.cleanUp(ctx, true, false, 0))
			after := listAllFiles(ctx, t, f, dirName)
			assert.Equal(t, before, after)
		})
		t.Run("RealThing", func(t *testing.T) {
			f.opt.Versions = true
			defer func() {
				f.opt.Versions = false
			}()
			// Listing should reflect current state after cleanup
			require.NoError(t, f.cleanUp(ctx, true, false, 0))
			items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
			fstest.CheckListing(t, f, items)
		})
	})

	// Purge gets tested later
}

// InternalTestCleanupUnfinished checks that cleanUp removes unfinished
// large file uploads but leaves them alone in dry-run mode.
func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
	ctx := context.Background()

	// B2CleanupHidden tests cleaning up hidden files
	t.Run("CleanupUnfinished", func(t *testing.T) {
		dirName := "unfinished"
		fileCount := 5
		expectedFiles := []string{}
		// NOTE(review): loop starts at 1 so this creates fileCount-1 (= 4)
		// unfinished uploads - confirm that is intentional.
		for i := 1; i < fileCount; i++ {
			fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
			expectedFiles = append(expectedFiles, fileName)
			obj := &Object{
				fs:     f,
				remote: fileName,
			}
			objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
			_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
			require.NoError(t, err)
		}
		checkListing(ctx, t, f, dirName, expectedFiles)

		t.Run("DryRun", func(t *testing.T) {
			// Listing should not change after dry run
			ctx, ci := fs.AddConfig(ctx)
			ci.DryRun = true
			require.NoError(t, f.cleanUp(ctx, false, true, 0))
			checkListing(ctx, t, f, dirName, expectedFiles)
		})
		t.Run("RealThing", func(t *testing.T) {
			// Listing should be empty after real cleanup
			require.NoError(t, f.cleanUp(ctx, false, true, 0))
			checkListing(ctx, t, f, dirName, []string{})
		})
	})
}

// listAllFiles returns a sorted recursive listing of all files (including
// hidden ones) under dirName.
func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
	bucket, directory := f.split(dirName)
	foundFiles := []string{}
	require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
		if !isDirectory {
			foundFiles = append(foundFiles, object.Name)
		}
		return nil
	}))
	sort.Strings(foundFiles)
	return foundFiles
}

// checkListing asserts that the files under dirName are exactly expectedFiles.
func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
	foundFiles := listAllFiles(ctx, t, f, dirName)
	sort.Strings(expectedFiles)
	assert.Equal(t, expectedFiles, foundFiles)
}

// InternalTestLifecycleRules exercises the "lifecycle" backend command,
// checking dry-run makes no changes and real runs set the expected rules.
func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
	ctx := context.Background()
	opt := map[string]string{}

	t.Run("InitState", func(t *testing.T) {
		// There should be no lifecycle rules at the outset
		lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
		lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
		require.NoError(t, err)
		assert.Equal(t, 0, len(lifecycleRules))
	})

	t.Run("DryRun", func(t *testing.T) {
		// There should still be no lifecycle rules after each dry run operation
		ctx, ci := fs.AddConfig(ctx)
		ci.DryRun = true

		opt["daysFromHidingToDeleting"] = "30"
		lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
		lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
		require.NoError(t, err)
		assert.Equal(t, 0, len(lifecycleRules))

		delete(opt, "daysFromHidingToDeleting")
		opt["daysFromUploadingToHiding"] = "40"
		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
		require.NoError(t, err)
		assert.Equal(t, 0, len(lifecycleRules))

		opt["daysFromHidingToDeleting"] = "30"
		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
		require.NoError(t, err)
		assert.Equal(t, 0, len(lifecycleRules))
	})

	t.Run("RealThing", func(t *testing.T) {
		opt["daysFromHidingToDeleting"] = "30"
		lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
		lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
		require.NoError(t, err)
		assert.Equal(t, 1, len(lifecycleRules))
		assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)

		delete(opt, "daysFromHidingToDeleting")
		opt["daysFromUploadingToHiding"] = "40"
		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
		require.NoError(t, err)
		assert.Equal(t, 1, len(lifecycleRules))
		assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)

		opt["daysFromHidingToDeleting"] = "30"
		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
		require.NoError(t, err)
		assert.Equal(t, 1, len(lifecycleRules))
		assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
		assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
	})
}

// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
	t.Run("Metadata", f.InternalTestMetadata)
	t.Run("Versions", f.InternalTestVersions)
	t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
	t.Run("LifecycleRules", f.InternalTestLifecycleRules)
}

var _ fstests.InternalTester = (*Fs)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/b2/upload.go
backend/b2/upload.go
// Upload large files for b2 // // Docs - https://www.backblaze.com/docs/cloud-storage-large-files package b2 import ( "context" "crypto/sha1" "encoding/hex" "fmt" gohash "hash" "io" "strings" "sync" "github.com/rclone/rclone/backend/b2/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/chunksize" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/pool" "github.com/rclone/rclone/lib/rest" "golang.org/x/sync/errgroup" ) type hashAppendingReader struct { h gohash.Hash in io.Reader hexSum string hexReader io.Reader } // Read returns bytes all bytes from the original reader, then the hex sum // of what was read so far, then EOF. func (har *hashAppendingReader) Read(b []byte) (int, error) { if har.hexReader == nil { n, err := har.in.Read(b) if err == io.EOF { har.in = nil // allow GC err = nil // allow reading hexSum before EOF har.hexSum = hex.EncodeToString(har.h.Sum(nil)) har.hexReader = strings.NewReader(har.hexSum) } return n, err } return har.hexReader.Read(b) } // AdditionalLength returns how many bytes the appended hex sum will take up. func (har *hashAppendingReader) AdditionalLength() int { return hex.EncodedLen(har.h.Size()) } // HexSum returns the hash sum as hex. It's only available after the original // reader has EOF'd. It's an empty string before that. func (har *hashAppendingReader) HexSum() string { return har.hexSum } // newHashAppendingReader takes a Reader and a Hash and will append the hex sum // after the original reader reaches EOF. 
The increased size depends on the // given hash, which may be queried through AdditionalLength() func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader { withHash := io.TeeReader(in, h) return &hashAppendingReader{h: h, in: withHash} } // largeUpload is used to control the upload of large files which need chunking type largeUpload struct { f *Fs // parent Fs o *Object // object being uploaded doCopy bool // doing copy rather than upload what string // text name of operation for logs in io.Reader // read the data from here wrap accounting.WrapFn // account parts being transferred id string // ID of the file being uploaded size int64 // total size parts int // calculated number of parts, if known sha1smu sync.Mutex // mutex to protect sha1s sha1s []string // slice of SHA1s for each part uploadMu sync.Mutex // lock for upload variable uploads []*api.GetUploadPartURLResponse // result of get upload URL calls chunkSize int64 // chunk size to use src *Object // if copying, object we are reading from info *api.FileInfo // final response with info about the object } // newLargeUpload starts an upload of object o from in with metadata in src // // If newInfo is set then metadata from that will be used instead of reading it from src func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) { size := src.Size() parts := 0 chunkSize := defaultChunkSize if size == -1 { fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize) } else { chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize) parts = int(size / int64(chunkSize)) if size%int64(chunkSize) != 0 { parts++ } } bucket, bucketPath := o.split() bucketID, err := f.getBucketID(ctx, bucket) if err != nil { return nil, err } 
var request = api.StartLargeFileRequest{ BucketID: bucketID, Name: f.opt.Enc.FromStandardPath(bucketPath), } optionsToSend := make([]fs.OpenOption, 0, len(options)) if newInfo == nil { modTime, err := o.getModTime(ctx, src, options) if err != nil { return nil, err } request.ContentType = fs.MimeType(ctx, src) request.Info = map[string]string{ timeKey: timeString(modTime), } // Custom upload headers - remove header prefix since they are sent in the body for _, option := range options { k, v := option.Header() k = strings.ToLower(k) if strings.HasPrefix(k, headerPrefix) { request.Info[k[len(headerPrefix):]] = v } else { optionsToSend = append(optionsToSend, option) } } // Set the SHA1 if known if !o.fs.opt.DisableCheckSum || doCopy { if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" { request.Info[sha1Key] = calculatedSha1 } } } else { request.ContentType = newInfo.ContentType request.Info = newInfo.Info } if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" { request.ServerSideEncryption = &api.ServerSideEncryption{ Mode: "SSE-C", Algorithm: o.fs.opt.SSECustomerAlgorithm, CustomerKey: o.fs.opt.SSECustomerKeyBase64, CustomerKeyMd5: o.fs.opt.SSECustomerKeyMD5, } } opts := rest.Opts{ Method: "POST", Path: "/b2_start_large_file", Options: optionsToSend, } var response api.StartLargeFileResponse err = f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, &request, &response) return f.shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } up = &largeUpload{ f: f, o: o, doCopy: doCopy, what: "upload", id: response.ID, size: size, parts: parts, sha1s: make([]string, 0, 16), chunkSize: int64(chunkSize), } // unwrap the accounting from the input, we use wrap to put it // back on after the buffering if doCopy { up.what = "copy" up.src = src.(*Object) } else { up.in, up.wrap = accounting.UnWrap(in) } return up, nil } // getUploadURL returns the upload info with the UploadURL and the 
AuthorizationToken // // This should be returned with returnUploadURL when finished func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) { up.uploadMu.Lock() if len(up.uploads) > 0 { upload, up.uploads = up.uploads[0], up.uploads[1:] up.uploadMu.Unlock() return upload, nil } up.uploadMu.Unlock() opts := rest.Opts{ Method: "POST", Path: "/b2_get_upload_part_url", } var request = api.GetUploadPartURLRequest{ ID: up.id, } err = up.f.pacer.Call(func() (bool, error) { resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload) return up.f.shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("failed to get upload URL: %w", err) } return upload, nil } // returnUploadURL returns the UploadURL to the cache func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) { if upload == nil { return } up.uploadMu.Lock() up.uploads = append(up.uploads, upload) up.uploadMu.Unlock() } // Add an sha1 to the being built up sha1s func (up *largeUpload) addSha1(chunkNumber int, sha1 string) { up.sha1smu.Lock() defer up.sha1smu.Unlock() if len(up.sha1s) < chunkNumber+1 { up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...) } up.sha1s[chunkNumber] = sha1 } // WriteChunk will write chunk number with reader bytes, where chunk number >= 0 func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) { // Only account after the checksum reads have been done if do, ok := reader.(pool.DelayAccountinger); ok { // To figure out this number, do a transfer and if the accounted size is 0 or a // multiple of what it should be, increase or decrease this number. 
do.DelayAccounting(1) } err = up.f.pacer.Call(func() (bool, error) { // Discover the size by seeking to the end size, err = reader.Seek(0, io.SeekEnd) if err != nil { return false, err } // rewind the reader on retry and after reading size _, err = reader.Seek(0, io.SeekStart) if err != nil { return false, err } fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size) // Get upload URL upload, err := up.getUploadURL(ctx) if err != nil { return false, err } in := newHashAppendingReader(reader, sha1.New()) sizeWithHash := size + int64(in.AdditionalLength()) // Authorization // // An upload authorization token, from b2_get_upload_part_url. // // X-Bz-Part-Number // // A number from 1 to 10000. The parts uploaded for one file // must have contiguous numbers, starting with 1. // // Content-Length // // The number of bytes in the file being uploaded. Note that // this header is required; you cannot leave it out and just // use chunked encoding. The minimum size of every part but // the last one is 100 MB (100,000,000 bytes) // // X-Bz-Content-Sha1 // // The SHA1 checksum of the this part of the file. B2 will // check this when the part is uploaded, to make sure that the // data arrived correctly. The same SHA1 checksum must be // passed to b2_finish_large_file. 
opts := rest.Opts{ Method: "POST", RootURL: upload.UploadURL, Body: up.wrap(in), ExtraHeaders: map[string]string{ "Authorization": upload.AuthorizationToken, "X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1), sha1Header: "hex_digits_at_end", }, ContentLength: &sizeWithHash, } if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" { opts.ExtraHeaders[sseAlgorithmHeader] = up.o.fs.opt.SSECustomerAlgorithm opts.ExtraHeaders[sseKeyHeader] = up.o.fs.opt.SSECustomerKeyBase64 opts.ExtraHeaders[sseMd5Header] = up.o.fs.opt.SSECustomerKeyMD5 } var response api.UploadPartResponse resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response) retry, err := up.f.shouldRetry(ctx, resp, err) if err != nil { fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err) } // On retryable error clear PartUploadURL if retry { fs.Debugf(up.o, "Clearing part upload URL because of error: %v", err) upload = nil } up.returnUploadURL(upload) up.addSha1(chunkNumber, in.HexSum()) return retry, err }) if err != nil { fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err) } else { fs.Debugf(up.o, "Done sending chunk %d", chunkNumber) } return size, err } // Copy a chunk func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error { err := up.f.pacer.Call(func() (bool, error) { fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize) opts := rest.Opts{ Method: "POST", Path: "/b2_copy_part", } offset := int64(part) * up.chunkSize // where we are in the source file var request = api.CopyPartRequest{ SourceID: up.src.id, LargeFileID: up.id, PartNumber: int64(part + 1), Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1), } if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" { serverSideEncryptionConfig := api.ServerSideEncryption{ Mode: "SSE-C", Algorithm: up.o.fs.opt.SSECustomerAlgorithm, CustomerKey: up.o.fs.opt.SSECustomerKeyBase64, CustomerKeyMd5: 
up.o.fs.opt.SSECustomerKeyMD5, } request.SourceServerSideEncryption = &serverSideEncryptionConfig request.DestinationServerSideEncryption = &serverSideEncryptionConfig } var response api.UploadPartResponse resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response) retry, err := up.f.shouldRetry(ctx, resp, err) if err != nil { fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err) } up.addSha1(part, response.SHA1) return retry, err }) if err != nil { fs.Debugf(up.o, "Error copying chunk %d: %v", part, err) } else { fs.Debugf(up.o, "Done copying chunk %d", part) } return err } // Close closes off the large upload func (up *largeUpload) Close(ctx context.Context) error { fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts) opts := rest.Opts{ Method: "POST", Path: "/b2_finish_large_file", } var request = api.FinishLargeFileRequest{ ID: up.id, SHA1s: up.sha1s, } var response api.FileInfo err := up.f.pacer.Call(func() (bool, error) { resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response) return up.f.shouldRetry(ctx, resp, err) }) if err != nil { return err } up.info = &response return nil } // Abort aborts the large upload func (up *largeUpload) Abort(ctx context.Context) error { fs.Debugf(up.o, "Cancelling large file %s", up.what) opts := rest.Opts{ Method: "POST", Path: "/b2_cancel_large_file", } var request = api.CancelLargeFileRequest{ ID: up.id, } var response api.CancelLargeFileResponse err := up.f.pacer.Call(func() (bool, error) { resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response) return up.f.shouldRetry(ctx, resp, err) }) if err != nil { fs.Errorf(up.o, "Failed to cancel large file %s: %v", up.what, err) } return err } // Stream uploads the chunks from the input, starting with a required initial // chunk. Assumes the file size is unknown and will upload until the input // reaches EOF. 
// // Note that initialUploadBlock must be returned to f.putBuf() func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) { defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })() fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id) var ( g, gCtx = errgroup.WithContext(ctx) hasMoreParts = true ) up.size = initialUploadBlock.Size() up.parts = 0 for part := 0; hasMoreParts; part++ { // Get a block of memory from the pool and token which limits concurrency. var rw *pool.RW if part == 0 { rw = initialUploadBlock } else { rw = up.f.getRW(false) } // Fail fast, in case an errgroup managed function returns an error // gCtx is cancelled. There is no point in uploading all the other parts. if gCtx.Err() != nil { up.f.putRW(rw) break } // Read the chunk var n int64 if part == 0 { n = rw.Size() } else { n, err = io.CopyN(rw, up.in, up.chunkSize) if err == io.EOF { if n == 0 { fs.Debugf(up.o, "Not sending empty chunk after EOF - ending.") up.f.putRW(rw) break } else { fs.Debugf(up.o, "Read less than a full chunk %d, making this the last one.", n) } hasMoreParts = false } else if err != nil { // other kinds of errors indicate failure up.f.putRW(rw) return err } } // Keep stats up to date up.parts += 1 up.size += n if part > maxParts { up.f.putRW(rw) return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts) } part := part // for the closure g.Go(func() (err error) { defer up.f.putRW(rw) _, err = up.WriteChunk(gCtx, part, rw) return err }) } err = g.Wait() if err != nil { return err } return up.Close(ctx) } // Copy the chunks from the source to the destination func (up *largeUpload) Copy(ctx context.Context) (err error) { defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })() fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id) var ( g, gCtx = errgroup.WithContext(ctx) remaining = up.size ) 
g.SetLimit(up.f.opt.UploadConcurrency) for part := range up.parts { // Fail fast, in case an errgroup managed function returns an error // gCtx is cancelled. There is no point in copying all the other parts. if gCtx.Err() != nil { break } reqSize := min(remaining, up.chunkSize) part := part // for the closure g.Go(func() (err error) { return up.copyChunk(gCtx, part, reqSize) }) remaining -= reqSize } err = g.Wait() if err != nil { return err } return up.Close(ctx) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/b2/api/types_test.go
backend/b2/api/types_test.go
package api_test import ( "testing" "time" "github.com/rclone/rclone/backend/b2/api" "github.com/rclone/rclone/fstest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( emptyT api.Timestamp t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z")) t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z")) ) func TestTimestampMarshalJSON(t *testing.T) { resB, err := t0.MarshalJSON() res := string(resB) require.NoError(t, err) assert.Equal(t, "3661123", res) resB, err = t1.MarshalJSON() res = string(resB) require.NoError(t, err) assert.Equal(t, "981173106123", res) } func TestTimestampUnmarshalJSON(t *testing.T) { var tActual api.Timestamp err := tActual.UnmarshalJSON([]byte("981173106123")) require.NoError(t, err) assert.Equal(t, (time.Time)(t1), (time.Time)(tActual)) } func TestTimestampIsZero(t *testing.T) { assert.True(t, emptyT.IsZero()) assert.False(t, t0.IsZero()) assert.False(t, t1.IsZero()) } func TestTimestampEqual(t *testing.T) { assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver assert.False(t, t0.Equal(emptyT)) assert.False(t, emptyT.Equal(t0)) assert.False(t, t0.Equal(t1)) assert.False(t, t1.Equal(t0)) assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/b2/api/types.go
backend/b2/api/types.go
// Package api provides types used by the Backblaze B2 API. package api import ( "fmt" "strconv" "time" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/lib/version" ) // Error describes a B2 error response type Error struct { Status int `json:"status"` // The numeric HTTP status code. Always matches the status in the HTTP response. Code string `json:"code"` // A single-identifier code that identifies the error. Message string `json:"message"` // A human-readable message, in English, saying what went wrong. } // Error satisfies the error interface func (e *Error) Error() string { return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code) } // Fatal satisfies the Fatal interface // // It indicates which errors should be treated as fatal func (e *Error) Fatal() bool { return e.Status == 403 // 403 errors shouldn't be retried } var _ fserrors.Fataler = (*Error)(nil) // Bucket describes a B2 bucket type Bucket struct { ID string `json:"bucketId"` AccountID string `json:"accountId"` Name string `json:"bucketName"` Type string `json:"bucketType"` LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"` } // LifecycleRule is a single lifecycle rule type LifecycleRule struct { DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"` DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"` DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"` FileNamePrefix string `json:"fileNamePrefix"` } // ServerSideEncryption is a configuration object for B2 Server-Side Encryption type ServerSideEncryption struct { Mode string `json:"mode"` Algorithm string `json:"algorithm"` // Encryption algorithm to use CustomerKey string `json:"customerKey"` // User provided Base64 encoded key that is used by the server to encrypt files CustomerKeyMd5 string `json:"customerKeyMd5"` // An MD5 hash of the decoded key } // Timestamp is a UTC time when this file was uploaded. 
It is a base // 10 number of milliseconds since midnight, January 1, 1970 UTC. This // fits in a 64 bit integer such as the type "long" in the programming // language Java. It is intended to be compatible with Java's time // long. For example, it can be passed directly into the java call // Date.setTime(long time). type Timestamp time.Time // MarshalJSON turns a Timestamp into JSON (in UTC) func (t *Timestamp) MarshalJSON() (out []byte, err error) { timestamp := (*time.Time)(t).UTC().UnixNano() return []byte(strconv.FormatInt(timestamp/1e6, 10)), nil } // UnmarshalJSON turns JSON into a Timestamp func (t *Timestamp) UnmarshalJSON(data []byte) error { timestamp, err := strconv.ParseInt(string(data), 10, 64) if err != nil { return err } *t = Timestamp(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6).UTC()) return nil } // HasVersion returns true if it looks like the passed filename has a timestamp on it. // // Note that the passed filename's timestamp may still be invalid even if this // function returns true. func HasVersion(remote string) bool { return version.Match(remote) } // AddVersion adds the timestamp as a version string into the filename passed in. func (t Timestamp) AddVersion(remote string) string { return version.Add(remote, time.Time(t)) } // RemoveVersion removes the timestamp from a filename as a version string. // // It returns the new file name and a timestamp, or the old filename // and a zero timestamp. 
func RemoveVersion(remote string) (t Timestamp, newRemote string) { time, newRemote := version.Remove(remote) t = Timestamp(time) return } // IsZero returns true if the timestamp is uninitialized func (t Timestamp) IsZero() bool { return time.Time(t).IsZero() } // Equal compares two timestamps // // If either are !IsZero then it returns false func (t Timestamp) Equal(s Timestamp) bool { if time.Time(t).IsZero() { return false } if time.Time(s).IsZero() { return false } return time.Time(t).Equal(time.Time(s)) } // File is info about a file type File struct { ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version. Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name. Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both. Size int64 `json:"size"` // The number of bytes in the file. UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded. SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file. ContentType string `json:"contentType"` // The MIME type of the file. Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file. } // StorageAPI is as returned from the b2_authorize_account call type StorageAPI struct { AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file. 
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it. Buckets []struct { // When present, access is restricted to one or more buckets. ID string `json:"id"` // ID of bucket Name string `json:"name"` // When present, name of bucket - may be empty } `json:"buckets"` Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has for every bucket. NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix } `json:"allowed"` APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files. DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files. MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead. RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance. } // AuthorizeAccountResponse is as returned from the b2_authorize_account call type AuthorizeAccountResponse struct { AccountID string `json:"accountId"` // The identifier for the account. AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header. APIs struct { // Supported APIs for this account / key. These are API-dependent JSON objects. Storage StorageAPI `json:"storageApi"` } `json:"apiInfo"` } // ListBucketsRequest is parameters for b2_list_buckets call type ListBucketsRequest struct { AccountID string `json:"accountId"` // The identifier for the account. BucketID string `json:"bucketId,omitempty"` // When specified, the result will be a list containing just this bucket. 
BucketName string `json:"bucketName,omitempty"` // When specified, the result will be a list containing just this bucket. BucketTypes []string `json:"bucketTypes,omitempty"` // If present, B2 will use it as a filter for bucket types returned in the list buckets response. } // ListBucketsResponse is as returned from the b2_list_buckets call type ListBucketsResponse struct { Buckets []Bucket `json:"buckets"` } // ListFileNamesRequest is as passed to b2_list_file_names or b2_list_file_versions type ListFileNamesRequest struct { BucketID string `json:"bucketId"` // required - The bucket to look for file names in. StartFileName string `json:"startFileName,omitempty"` // optional - The first file name to return. If there is a file with this name, it will be returned in the list. If not, the first file name after this the first one after this name. MaxFileCount int `json:"maxFileCount,omitempty"` // optional - The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 1000. StartFileID string `json:"startFileId,omitempty"` // optional - What to pass in to startFileId for the next search to continue where this one left off. Prefix string `json:"prefix,omitempty"` // optional - Files returned will be limited to those with the given prefix. Defaults to the empty string, which matches all files. Delimiter string `json:"delimiter,omitempty"` // Files returned will be limited to those within the top folder, or any one subfolder. Defaults to NULL. Folder names will also be returned. The delimiter character will be used to "break" file names into folders. } // ListFileNamesResponse is as received from b2_list_file_names or b2_list_file_versions type ListFileNamesResponse struct { Files []File `json:"files"` // An array of objects, each one describing one file. NextFileName *string `json:"nextFileName"` // What to pass in to startFileName for the next search to continue where this one left off, or null if there are no more files. 
NextFileID *string `json:"nextFileId"` // What to pass in to startFileId for the next search to continue where this one left off, or null if there are no more files. } // GetUploadURLRequest is passed to b2_get_upload_url type GetUploadURLRequest struct { BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to. } // GetUploadURLResponse is received from b2_get_upload_url type GetUploadURLResponse struct { BucketID string `json:"bucketId"` // The unique ID of the bucket. UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_file. AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file. } // GetDownloadAuthorizationRequest is passed to b2_get_download_authorization type GetDownloadAuthorizationRequest struct { BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to. FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to. ValidDurationInSeconds int64 `json:"validDurationInSeconds"` // The number of seconds before the authorization token will expire. The minimum value is 1 second. The maximum value is 604800 which is one week in seconds. B2ContentDisposition string `json:"b2ContentDisposition,omitempty"` // optional - If this is present, download requests using the returned authorization must include the same value for b2ContentDisposition. } // GetDownloadAuthorizationResponse is received from b2_get_download_authorization type GetDownloadAuthorizationResponse struct { BucketID string `json:"bucketId"` // The unique ID of the bucket. FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to. 
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when downloading files, see b2_download_file_by_name. } // FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file type FileInfo struct { ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version. Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name. Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both. AccountID string `json:"accountId"` // Your account ID. BucketID string `json:"bucketId"` // The bucket that the file is in. Size int64 `json:"contentLength"` // The number of bytes stored in the file. UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded. SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file. ContentType string `json:"contentType"` // The MIME type of the file. Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file. 
} // CreateBucketRequest is used to create a bucket type CreateBucketRequest struct { AccountID string `json:"accountId"` Name string `json:"bucketName"` Type string `json:"bucketType"` LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"` } // DeleteBucketRequest is used to create a bucket type DeleteBucketRequest struct { ID string `json:"bucketId"` AccountID string `json:"accountId"` } // DeleteFileRequest is used to delete a file version type DeleteFileRequest struct { ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions. Name string `json:"fileName"` // The name of this file. } // HideFileRequest is used to delete a file type HideFileRequest struct { BucketID string `json:"bucketId"` // The bucket containing the file to hide. Name string `json:"fileName"` // The name of the file to hide. } // GetFileInfoRequest is used to return a FileInfo struct with b2_get_file_info type GetFileInfoRequest struct { ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions. } // StartLargeFileRequest (b2_start_large_file) Prepares for uploading the parts of a large file. // // If the original source of the file being uploaded has a last // modified time concept, Backblaze recommends using // src_last_modified_millis as the name, and a string holding the base // 10 number of milliseconds since midnight, January 1, 1970 // UTC. This fits in a 64 bit integer such as the type "long" in the // programming language Java. It is intended to be compatible with // Java's time long. For example, it can be passed directly into the // Java call Date.setTime(long time). // // If the caller knows the SHA1 of the entire large file being // uploaded, Backblaze recommends using large_file_sha1 as the name, // and a 40 byte hex string representing the SHA1. 
// // Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" } type StartLargeFileRequest struct { BucketID string `json:"bucketId"` // The ID of the bucket that the file will go in. Name string `json:"fileName"` // The name of the file. See Files for requirements on file names. ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream. Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info. ServerSideEncryption *ServerSideEncryption `json:"serverSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption } // StartLargeFileResponse is the response to StartLargeFileRequest type StartLargeFileResponse struct { ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version. Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name. AccountID string `json:"accountId"` // The identifier for the account. BucketID string `json:"bucketId"` // The unique ID of the bucket. ContentType string `json:"contentType"` // The MIME type of the file. Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file. UploadTimestamp Timestamp `json:"uploadTimestamp,omitempty"` // This is a UTC time when this file was uploaded. 
} // GetUploadPartURLRequest is passed to b2_get_upload_part_url type GetUploadPartURLRequest struct { ID string `json:"fileId"` // The unique identifier of the file being uploaded. } // GetUploadPartURLResponse is received from b2_get_upload_url type GetUploadPartURLResponse struct { ID string `json:"fileId"` // The unique identifier of the file being uploaded. UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_part. AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_part. } // UploadPartResponse is the response to b2_upload_part type UploadPartResponse struct { ID string `json:"fileId"` // The unique identifier of the file being uploaded. PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1) Size int64 `json:"contentLength"` // The number of bytes stored in the file. SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file. } // FinishLargeFileRequest is passed to b2_finish_large_file // // The response is a FileInfo object (with extra AccountID and BucketID fields which we ignore). // // Large files do not have a SHA1 checksum. The value will always be "none". type FinishLargeFileRequest struct { ID string `json:"fileId"` // The unique identifier of the file being uploaded. SHA1s []string `json:"partSha1Array"` // A JSON array of hex SHA1 checksums of the parts of the large file. This is a double-check that the right parts were uploaded in the right order, and that none were missed. Note that the part numbers start at 1, and the SHA1 of the part 1 is the first string in the array, at index 0. } // CancelLargeFileRequest is passed to b2_finish_large_file // // The response is a CancelLargeFileResponse type CancelLargeFileRequest struct { ID string `json:"fileId"` // The unique identifier of the file being uploaded. 
} // CancelLargeFileResponse is the response to CancelLargeFileRequest type CancelLargeFileResponse struct { ID string `json:"fileId"` // The unique identifier of the file being uploaded. Name string `json:"fileName"` // The name of this file. AccountID string `json:"accountId"` // The identifier for the account. BucketID string `json:"bucketId"` // The unique ID of the bucket. } // CopyFileRequest is as passed to b2_copy_file type CopyFileRequest struct { SourceID string `json:"sourceFileId"` // The ID of the source file being copied. Name string `json:"fileName"` // The name of the new file being created. Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied. MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only) Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only) DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used SourceServerSideEncryption *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the source file DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file } // CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse type CopyPartRequest struct { SourceID string `json:"sourceFileId"` // The ID of the source file being copied. LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file. 
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1) Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied. SourceServerSideEncryption *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the source file DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file } // UpdateBucketRequest describes a request to modify a B2 bucket type UpdateBucketRequest struct { ID string `json:"bucketId"` AccountID string `json:"accountId"` Type string `json:"bucketType,omitempty"` LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"` }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/swift/swift_test.go
backend/swift/swift_test.go
// Test Swift filesystem interface package swift import ( "bytes" "context" "errors" "io" "testing" "github.com/ncw/swift/v2" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/readers" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestSwiftAIO:", NilObject: (*Object)(nil), }) } func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadChunkSize(cs) } var _ fstests.SetUploadChunkSizer = (*Fs)(nil) // Check that PutStream works with NoChunk as it is the major code // deviation func (f *Fs) testNoChunk(t *testing.T) { ctx := context.Background() f.opt.NoChunk = true defer func() { f.opt.NoChunk = false }() file := fstest.Item{ ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"), Path: "piped data no chunk.txt", Size: -1, // use unknown size during upload } const contentSize = 100 contents := random.String(contentSize) buf := bytes.NewBufferString(contents) uploadHash := hash.NewMultiHasher() in := io.TeeReader(buf, uploadHash) // Track how much space is used before we put our object. 
usage, err := f.About(ctx) require.NoError(t, err) usedBeforePut := *usage.Used file.Size = -1 obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil) obj, err := f.Features().PutStream(ctx, in, obji) require.NoError(t, err) file.Hashes = uploadHash.Sums() file.Size = int64(contentSize) // use correct size when checking file.Check(t, obj, f.Precision()) // Re-read the object and check again obj, err = f.NewObject(ctx, file.Path) require.NoError(t, err) file.Check(t, obj, f.Precision()) // Check how much space is used after the upload, should match the amount we // uploaded.. usage, err = f.About(ctx) require.NoError(t, err) expectedUsed := usedBeforePut + obj.Size() require.EqualValues(t, expectedUsed, *usage.Used) // Delete the object assert.NoError(t, obj.Remove(ctx)) } // Additional tests that aren't in the framework func (f *Fs) InternalTest(t *testing.T) { t.Run("PolicyDiscovery", f.testPolicyDiscovery) t.Run("NoChunk", f.testNoChunk) t.Run("WithChunk", f.testWithChunk) t.Run("WithChunkFail", f.testWithChunkFail) t.Run("CopyLargeObject", f.testCopyLargeObject) } func (f *Fs) testWithChunk(t *testing.T) { preConfChunkSize := f.opt.ChunkSize preConfChunk := f.opt.NoChunk f.opt.NoChunk = false f.opt.ChunkSize = 1024 * fs.SizeSuffixBase defer func() { //restore old config after test f.opt.ChunkSize = preConfChunkSize f.opt.NoChunk = preConfChunk }() file := fstest.Item{ ModTime: fstest.Time("2020-12-31T04:05:06.499999999Z"), Path: "piped data chunk.txt", Size: -1, // use unknown size during upload } const contentSize = 2048 contents := random.String(contentSize) buf := bytes.NewBufferString(contents) uploadHash := hash.NewMultiHasher() in := io.TeeReader(buf, uploadHash) // Track how much space is used before we put our object. 
ctx := context.TODO() usage, err := f.About(ctx) require.NoError(t, err) usedBeforePut := *usage.Used file.Size = -1 obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil) obj, err := f.Features().PutStream(ctx, in, obji) require.NoError(t, err) require.NotEmpty(t, obj) // Check how much space is used after the upload, should match the amount we // uploaded.. usage, err = f.About(ctx) require.NoError(t, err) expectedUsed := usedBeforePut + obj.Size() require.EqualValues(t, expectedUsed, *usage.Used) } func (f *Fs) testWithChunkFail(t *testing.T) { preConfChunkSize := f.opt.ChunkSize preConfChunk := f.opt.NoChunk f.opt.NoChunk = false f.opt.ChunkSize = 1024 * fs.SizeSuffixBase segmentContainer := f.root + "_segments" if !f.opt.UseSegmentsContainer.Value { segmentContainer = f.root } defer func() { //restore config f.opt.ChunkSize = preConfChunkSize f.opt.NoChunk = preConfChunk }() path := "piped data chunk with error.txt" file := fstest.Item{ ModTime: fstest.Time("2021-01-04T03:46:00.499999999Z"), Path: path, Size: -1, // use unknown size during upload } const contentSize = 4096 const errPosition = 3072 contents := random.String(contentSize) buf := bytes.NewBufferString(contents[:errPosition]) errMessage := "potato" er := &readers.ErrorReader{Err: errors.New(errMessage)} in := io.NopCloser(io.MultiReader(buf, er)) file.Size = contentSize obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil) ctx := context.TODO() _, err := f.Features().PutStream(ctx, in, obji) // error is potato require.NotNil(t, err) require.Equal(t, errMessage, err.Error()) _, _, err = f.c.Object(ctx, f.rootContainer, path) assert.Equal(t, swift.ObjectNotFound, err) prefix := path if !f.opt.UseSegmentsContainer.Value { prefix = segmentsDirectory + "/" + prefix } objs, err := f.c.Objects(ctx, segmentContainer, &swift.ObjectsOpts{ Prefix: prefix, }) require.NoError(t, err) require.Empty(t, objs) } func (f *Fs) testCopyLargeObject(t 
*testing.T) { preConfChunkSize := f.opt.ChunkSize preConfChunk := f.opt.NoChunk f.opt.NoChunk = false f.opt.ChunkSize = 1024 * fs.SizeSuffixBase defer func() { //restore old config after test f.opt.ChunkSize = preConfChunkSize f.opt.NoChunk = preConfChunk }() file := fstest.Item{ ModTime: fstest.Time("2020-12-31T04:05:06.499999999Z"), Path: "large.txt", Size: -1, // use unknown size during upload } const contentSize = 2048 contents := random.String(contentSize) buf := bytes.NewBufferString(contents) uploadHash := hash.NewMultiHasher() in := io.TeeReader(buf, uploadHash) // Track how much space is used before we put our object. ctx := context.TODO() usage, err := f.About(ctx) require.NoError(t, err) usedBeforePut := *usage.Used file.Size = -1 obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil) obj, err := f.Features().PutStream(ctx, in, obji) require.NoError(t, err) require.NotEmpty(t, obj) remoteTarget := "large.txt (copy)" objTarget, err := f.Features().Copy(ctx, obj, remoteTarget) require.NoError(t, err) require.NotEmpty(t, objTarget) require.Equal(t, obj.Size(), objTarget.Size()) // Check how much space is used after the upload, should match the amount we // uploaded *and* the copy. usage, err = f.About(ctx) require.NoError(t, err) expectedUsed := usedBeforePut + obj.Size() + objTarget.Size() require.EqualValues(t, expectedUsed, *usage.Used) } func (f *Fs) testPolicyDiscovery(t *testing.T) { ctx := context.TODO() container := "testPolicyDiscovery-1" // Reset the policy so we can test if it is populated. f.opt.StoragePolicy = "" err := f.makeContainer(ctx, container) require.NoError(t, err) _, err = f.fetchStoragePolicy(ctx, container) require.NoError(t, err) // Default policy for SAIO image is 1replica. assert.Equal(t, "1replica", f.opt.StoragePolicy) // Create a container using a non-default policy, and check to ensure // that the created segments container uses the same non-default policy. 
policy := "Policy-1" container = "testPolicyDiscovery-2" f.opt.StoragePolicy = policy err = f.makeContainer(ctx, container) require.NoError(t, err) // Reset the policy so we can test if it is populated, and set to the // non-default policy. f.opt.StoragePolicy = "" _, err = f.fetchStoragePolicy(ctx, container) require.NoError(t, err) assert.Equal(t, policy, f.opt.StoragePolicy) // Test that when a segmented upload container is made, the newly // created container inherits the non-default policy of the base // container. f.opt.StoragePolicy = "" f.opt.UseSegmentsContainer.Value = true su, err := f.newSegmentedUpload(ctx, container, "") require.NoError(t, err) // The container name we expected? segmentsContainer := container + segmentsContainerSuffix assert.Equal(t, segmentsContainer, su.container) // The policy we expected? f.opt.StoragePolicy = "" _, err = f.fetchStoragePolicy(ctx, su.container) require.NoError(t, err) assert.Equal(t, policy, f.opt.StoragePolicy) } var _ fstests.InternalTester = (*Fs)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/swift/auth.go
backend/swift/auth.go
package swift import ( "context" "net/http" "time" "github.com/ncw/swift/v2" ) // auth is an authenticator for swift. It overrides the StorageUrl // and AuthToken with fixed values. type auth struct { parentAuth swift.Authenticator storageURL string authToken string } // newAuth creates a swift authenticator wrapper to override the // StorageUrl and AuthToken values. // // Note that parentAuth can be nil func newAuth(parentAuth swift.Authenticator, storageURL string, authToken string) *auth { return &auth{ parentAuth: parentAuth, storageURL: storageURL, authToken: authToken, } } // Request creates an http.Request for the auth - return nil if not needed func (a *auth) Request(ctx context.Context, c *swift.Connection) (*http.Request, error) { if a.parentAuth == nil { return nil, nil } return a.parentAuth.Request(ctx, c) } // Response parses the http.Response func (a *auth) Response(ctx context.Context, resp *http.Response) error { if a.parentAuth == nil { return nil } return a.parentAuth.Response(ctx, resp) } // The public storage URL - set Internal to true to read // internal/service net URL func (a *auth) StorageUrl(Internal bool) string { // nolint if a.storageURL != "" { return a.storageURL } if a.parentAuth == nil { return "" } return a.parentAuth.StorageUrl(Internal) } // The access token func (a *auth) Token() string { if a.authToken != "" { return a.authToken } if a.parentAuth == nil { return "" } return a.parentAuth.Token() } // Expires returns the time the token expires if known or Zero if not. func (a *auth) Expires() (t time.Time) { if do, ok := a.parentAuth.(swift.Expireser); ok { t = do.Expires() } return t } // The CDN url if available func (a *auth) CdnUrl() string { // nolint if a.parentAuth == nil { return "" } return a.parentAuth.CdnUrl() } // Check the interfaces are satisfied var ( _ swift.Authenticator = (*auth)(nil) _ swift.Expireser = (*auth)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/swift/swift.go
backend/swift/swift.go
// Package swift provides an interface to the Swift object storage system package swift import ( "bufio" "bytes" "context" "errors" "fmt" "io" "maps" "path" "regexp" "slices" "strconv" "strings" "sync" "time" "github.com/ncw/swift/v2" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/readers" ) // Constants const ( directoryMarkerContentType = "application/directory" // content type of directory marker objects listChunks = 1000 // chunk size to read directory listings defaultChunkSize = 5 * fs.Gibi minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep. segmentsContainerSuffix = "_segments" segmentsDirectory = ".file-segments" segmentsDirectorySlash = segmentsDirectory + "/" ) // Auth URLs which imply using fileSegmentsDirectory var needFileSegmentsDirectory = regexp.MustCompile(`(?s)\.(ain?\.net|blomp\.com|praetector\.com|signmy\.name|rackfactory\.com)($|/)`) // SharedOptions are shared between swift and backends which depend on swift var SharedOptions = []fs.Option{{ Name: "chunk_size", Help: strings.ReplaceAll(`Above this size files will be chunked. Above this size files will be chunked into a a |`+segmentsContainerSuffix+`| container or a |`+segmentsDirectory+`| directory. (See the |use_segments_container| option for more info). Default for this is 5 GiB which is its maximum value, which means only files above this size will be chunked. Rclone uploads chunked files as dynamic large objects (DLO). 
`, "|", "`"), Default: defaultChunkSize, Advanced: true, }, { Name: "no_chunk", Help: strings.ReplaceAll(`Don't chunk files during streaming upload. When doing streaming uploads (e.g. using |rcat| or |mount| with |--vfs-cache-mode off|) setting this flag will cause the swift backend to not upload chunked files. This will limit the maximum streamed upload size to 5 GiB. This is useful because non chunked files are easier to deal with and have an MD5SUM. Rclone will still chunk files bigger than |chunk_size| when doing normal copy operations.`, "|", "`"), Default: false, Advanced: true, }, { Name: "no_large_objects", Help: strings.ReplaceAll(`Disable support for static and dynamic large objects Swift cannot transparently store files bigger than 5 GiB. There are two schemes for chunking large files, static large objects (SLO) or dynamic large objects (DLO), and the API does not allow rclone to determine whether a file is a static or dynamic large object without doing a HEAD on the object. Since these need to be treated differently, this means rclone has to issue HEAD requests for objects for example when reading checksums. When |no_large_objects| is set, rclone will assume that there are no static or dynamic large objects stored. This means it can stop doing the extra HEAD calls which in turn increases performance greatly especially when doing a swift to swift transfer with |--checksum| set. Setting this option implies |no_chunk| and also that no files will be uploaded in chunks, so files bigger than 5 GiB will just fail on upload. If you set this option and there **are** static or dynamic large objects, then this will give incorrect hashes for them. Downloads will succeed, but other operations such as Remove and Copy will fail. 
`, "|", "`"), Default: false, Advanced: true, }, { Name: "use_segments_container", Help: strings.ReplaceAll(`Choose destination for large object segments Swift cannot transparently store files bigger than 5 GiB and rclone will chunk files larger than |chunk_size| (default 5 GiB) in order to upload them. If this value is |true| the chunks will be stored in an additional container named the same as the destination container but with |`+segmentsContainerSuffix+`| appended. This means that there won't be any duplicated data in the original container but having another container may not be acceptable. If this value is |false| the chunks will be stored in a |`+segmentsDirectory+`| directory in the root of the container. This directory will be omitted when listing the container. Some providers (eg Blomp) require this mode as creating additional containers isn't allowed. If it is desired to see the |`+segmentsDirectory+`| directory in the root then this flag must be set to |true|. If this value is |unset| (the default), then rclone will choose the value to use. It will be |false| unless rclone detects any |auth_url|s that it knows need it to be |true|. In this case you'll see a message in the DEBUG log. 
`, "|", "`"), Default: fs.Tristate{}, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.EncodeInvalidUtf8 | encoder.EncodeSlash), }} // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "swift", Description: "OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)", NewFs: NewFs, Options: append([]fs.Option{{ Name: "env_auth", Help: "Get swift credentials from environment variables in standard OpenStack form.", Default: false, Examples: []fs.OptionExample{ { Value: "false", Help: "Enter swift credentials in the next step.", }, { Value: "true", Help: "Get swift credentials from environment vars.\nLeave other fields blank if using this.", }, }, }, { Name: "user", Help: "User name to log in (OS_USERNAME).", Sensitive: true, }, { Name: "key", Help: "API key or password (OS_PASSWORD).", Sensitive: true, }, { Name: "auth", Help: "Authentication URL for server (OS_AUTH_URL).", Examples: []fs.OptionExample{{ Value: "https://auth.api.rackspacecloud.com/v1.0", Help: "Rackspace US", }, { Value: "https://lon.auth.api.rackspacecloud.com/v1.0", Help: "Rackspace UK", }, { Value: "https://identity.api.rackspacecloud.com/v2.0", Help: "Rackspace v2", }, { Value: "https://auth.storage.memset.com/v1.0", Help: "Memset Memstore UK", }, { Value: "https://auth.storage.memset.com/v2.0", Help: "Memset Memstore UK v2", }, { Value: "https://auth.cloud.ovh.net/v3", Help: "OVH", }, { Value: "https://authenticate.ain.net", Help: "Blomp Cloud Storage", }}, }, { Name: "user_id", Help: "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).", Sensitive: true, }, { Name: "domain", Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)", Sensitive: true, }, { Name: "tenant", Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).", Sensitive: true, }, { Name: "tenant_id", Help: 
"Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).", Sensitive: true, }, { Name: "tenant_domain", Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).", Sensitive: true, }, { Name: "region", Help: "Region name - optional (OS_REGION_NAME).", }, { Name: "storage_url", Help: "Storage URL - optional (OS_STORAGE_URL).", }, { Name: "auth_token", Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).", Sensitive: true, }, { Name: "application_credential_id", Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).", Sensitive: true, }, { Name: "application_credential_name", Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).", Sensitive: true, }, { Name: "application_credential_secret", Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).", Sensitive: true, }, { Name: "auth_version", Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION).", Default: 0, }, { Name: "endpoint_type", Help: "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE).", Default: "public", Examples: []fs.OptionExample{{ Value: "public", Help: "Public (default, choose this if not sure)", }, { Value: "internal", Help: "Internal (use internal service net)", }, { Value: "admin", Help: "Admin", }}, }, { Name: "leave_parts_on_error", Help: `If true avoid calling abort upload on a failure. It should be set to true for resuming uploads across different sessions.`, Default: false, Advanced: true, }, { Name: "storage_policy", Help: `The storage policy to use when creating a new container. This applies the specified storage policy when creating a new container. The policy cannot be changed afterwards. 
The allowed configuration values and their meaning depend on your Swift storage provider.`, Default: "", Examples: []fs.OptionExample{{ Value: "", Help: "Default", }, { Value: "pcs", Help: "OVH Public Cloud Storage", }, { Value: "pca", Help: "OVH Public Cloud Archive", }}, }, { Name: "fetch_until_empty_page", Help: `When paginating, always fetch unless we received an empty page. Consider using this option if rclone listings show fewer objects than expected, or if repeated syncs copy unchanged objects. It is safe to enable this, but rclone may make more API calls than necessary. This is one of a pair of workarounds to handle implementations of the Swift API that do not implement pagination as expected. See also "partial_page_fetch_threshold".`, Default: false, Advanced: true, }, { Name: "partial_page_fetch_threshold", Help: `When paginating, fetch if the current page is within this percentage of the limit. Consider using this option if rclone listings show fewer objects than expected, or if repeated syncs copy unchanged objects. It is safe to enable this, but rclone may make more API calls than necessary. This is one of a pair of workarounds to handle implementations of the Swift API that do not implement pagination as expected. 
See also "fetch_until_empty_page".`, Default: 0, Advanced: true, }}, SharedOptions...), }) } // Options defines the configuration for this backend type Options struct { EnvAuth bool `config:"env_auth"` User string `config:"user"` Key string `config:"key"` Auth string `config:"auth"` UserID string `config:"user_id"` Domain string `config:"domain"` Tenant string `config:"tenant"` TenantID string `config:"tenant_id"` TenantDomain string `config:"tenant_domain"` Region string `config:"region"` StorageURL string `config:"storage_url"` AuthToken string `config:"auth_token"` AuthVersion int `config:"auth_version"` ApplicationCredentialID string `config:"application_credential_id"` ApplicationCredentialName string `config:"application_credential_name"` ApplicationCredentialSecret string `config:"application_credential_secret"` LeavePartsOnError bool `config:"leave_parts_on_error"` StoragePolicy string `config:"storage_policy"` EndpointType string `config:"endpoint_type"` ChunkSize fs.SizeSuffix `config:"chunk_size"` NoChunk bool `config:"no_chunk"` NoLargeObjects bool `config:"no_large_objects"` UseSegmentsContainer fs.Tristate `config:"use_segments_container"` Enc encoder.MultiEncoder `config:"encoding"` FetchUntilEmptyPage bool `config:"fetch_until_empty_page"` PartialPageFetchThreshold int `config:"partial_page_fetch_threshold"` } // Fs represents a remote swift server type Fs struct { name string // name of this remote root string // the path we are working on if any features *fs.Features // optional features opt Options // options for this backend ci *fs.ConfigInfo // global config c *swift.Connection // the connection to the swift server rootContainer string // container part of root (if any) rootDirectory string // directory part of root (if any) cache *bucket.Cache // cache of container status noCheckContainer bool // don't check the container before creating it pacer *fs.Pacer // To pace the API calls } // Object describes a swift object // // Will definitely have 
// info but maybe not meta
type Object struct {
	fs           *Fs    // what this object is part of
	remote       string // The remote path
	size         int64
	lastModified time.Time
	contentType  string
	md5          string
	headers      swift.Headers // The object headers if known
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootContainer == "" {
		return "Swift root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("Swift container %s", f.rootContainer)
	}
	return fmt.Sprintf("Swift container %s path %s", f.rootContainer, f.rootDirectory)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	401, // Unauthorized (e.g. "Token has expired")
	408, // Request Timeout
	409, // Conflict - various states that could be resolved on a retry
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	503, // Service Unavailable/Slow Down - "Reduce your request rate"
	504, // Gateway Time-out
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried.  It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	// If this is a swift.Error object extract the HTTP error code
	if swiftError, ok := err.(*swift.Error); ok {
		if slices.Contains(retryErrorCodes, swiftError.StatusCode) {
			return true, err
		}
	}
	// Check for generic failure conditions
	return fserrors.ShouldRetry(err), err
}

// shouldRetryHeaders returns a boolean as to whether this err
// deserves to be retried.  It reads the headers passed in looking for
// `Retry-After`. It returns the err as a convenience
func shouldRetryHeaders(ctx context.Context, headers swift.Headers, err error) (bool, error) {
	if swiftError, ok := err.(*swift.Error); ok && swiftError.StatusCode == 429 {
		if value := headers["Retry-After"]; value != "" {
			retryAfter, parseErr := strconv.Atoi(value)
			if parseErr != nil {
				fs.Errorf(nil, "Failed to parse Retry-After: %q: %v", value, parseErr)
			} else {
				duration := time.Second * time.Duration(retryAfter)
				if duration <= 60*time.Second {
					// Do a short sleep immediately
					fs.Debugf(nil, "Sleeping for %v to obey Retry-After", duration)
					time.Sleep(duration)
					return true, err
				}
				// Delay a long sleep for a retry
				return false, fserrors.NewErrorRetryAfter(duration)
			}
		}
	}
	return shouldRetry(ctx, err)
}

// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (container, containerPath string) {
	container, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
	return f.opt.Enc.FromStandardName(container), f.opt.Enc.FromStandardPath(containerPath)
}

// split returns container and containerPath from the object
func (o *Object) split() (container, containerPath string) {
	return o.fs.split(o.remote)
}

// swiftConnection makes a connection to swift
func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Connection, error) {
	ci := fs.GetConfig(ctx)
	c := &swift.Connection{
		// Keep these in the same order as the Config for ease of checking
		UserName:                    opt.User,
		ApiKey:                      opt.Key,
		AuthUrl:                     opt.Auth,
		UserId:                      opt.UserID,
		Domain:                      opt.Domain,
		Tenant:                      opt.Tenant,
		TenantId:                    opt.TenantID,
		TenantDomain:                opt.TenantDomain,
		Region:                      opt.Region,
		StorageUrl:                  opt.StorageURL,
		AuthToken:                   opt.AuthToken,
		AuthVersion:                 opt.AuthVersion,
		ApplicationCredentialId:     opt.ApplicationCredentialID,
		ApplicationCredentialName:   opt.ApplicationCredentialName,
		ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
		EndpointType:                swift.EndpointType(opt.EndpointType),
		ConnectTimeout:              time.Duration(10 * ci.ConnectTimeout), // Use the timeouts in the transport
		Timeout:                     time.Duration(10 * ci.Timeout),        // Use the timeouts in the transport
		Transport:                   fshttp.NewTransport(ctx),
		FetchUntilEmptyPage:         opt.FetchUntilEmptyPage,
		PartialPageFetchThreshold:   opt.PartialPageFetchThreshold,
	}
	if opt.EnvAuth {
		err := c.ApplyEnvironment()
		if err != nil {
			return nil, fmt.Errorf("failed to read environment variables: %w", err)
		}
	}
	// Remember any explicitly-configured StorageUrl/AuthToken so we can
	// re-apply them after Authenticate overwrites them below.
	StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
	if !c.Authenticated() {
		if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret == "" {
			if c.UserName == "" && c.UserId == "" {
				return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
			}
			if c.ApiKey == "" {
				return nil, errors.New("key not found")
			}
		}
		if c.AuthUrl == "" {
			return nil, errors.New("auth not found")
		}
		err := c.Authenticate(ctx) // fills in c.StorageUrl and c.AuthToken
		if err != nil {
			return nil, err
		}
	}
	// Make sure we re-auth with the AuthToken and StorageUrl
	// provided by wrapping the existing auth, so we can just
	// override one or the other or both.
	if StorageUrl != "" || AuthToken != "" {
		// Re-write StorageURL and AuthToken if they are being
		// overridden as c.Authenticate above will have
		// overwritten them.
		if StorageUrl != "" {
			c.StorageUrl = StorageUrl
		}
		if AuthToken != "" {
			c.AuthToken = AuthToken
		}
		c.Auth = newAuth(c.Auth, StorageUrl, AuthToken)
	}
	return c, nil
}

// checkUploadChunkSize validates that cs is at least one byte.
func checkUploadChunkSize(cs fs.SizeSuffix) error {
	const minChunkSize = fs.SizeSuffixBase
	if cs < minChunkSize {
		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}

// setUploadChunkSize validates and installs a new chunk size,
// returning the previous value.
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	}
	return
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}

// Fetch the base container's policy to be used if/when we need to create a
// segments container to ensure we use the same policy.
//
// NOTE(review): the fs.Fs result is always nil here — confirm callers
// only use the error return.
func (f *Fs) fetchStoragePolicy(ctx context.Context, container string) (fs.Fs, error) {
	err := f.pacer.Call(func() (bool, error) {
		var rxHeaders swift.Headers
		_, rxHeaders, err := f.c.Container(ctx, container)
		f.opt.StoragePolicy = rxHeaders["X-Storage-Policy"]
		fs.Debugf(f, "Auto set StoragePolicy to %s", f.opt.StoragePolicy)
		return shouldRetryHeaders(ctx, rxHeaders, err)
	})
	return nil, err
}

// NewFsWithConnection constructs an Fs from the path, container:path
// and authenticated connection.
//
// if noCheckContainer is set then the Fs won't check the container
// exists before creating it.
func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:             name,
		opt:              *opt,
		ci:               ci,
		c:                c,
		noCheckContainer: noCheckContainer,
		pacer:            fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
		cache:            bucket.NewCache(),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
		SlowModTime:       true,
	}).Fill(ctx, f)
	// Auto-detect whether to use a separate segments container based on
	// the auth URL unless the user set it explicitly.
	if !f.opt.UseSegmentsContainer.Valid {
		f.opt.UseSegmentsContainer.Value = !needFileSegmentsDirectory.MatchString(opt.Auth)
		f.opt.UseSegmentsContainer.Valid = true
		fs.Debugf(f, "Auto set use_segments_container to %v", f.opt.UseSegmentsContainer.Value)
	}
	if f.rootContainer != "" && f.rootDirectory != "" {
		// Check to see if the object exists - ignoring directory markers
		var info swift.Object
		var err error
		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
		err = f.pacer.Call(func() (bool, error) {
			var rxHeaders swift.Headers
			info, rxHeaders, err = f.c.Object(ctx, f.rootContainer, encodedDirectory)
			return shouldRetryHeaders(ctx, rxHeaders, err)
		})
		if err == nil && info.ContentType != directoryMarkerContentType {
			newRoot := path.Dir(f.root)
			if newRoot == "." {
				newRoot = ""
			}
			f.setRoot(newRoot)
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, fmt.Errorf("swift: chunk size: %w", err)
	}
	c, err := swiftConnection(ctx, opt, name)
	if err != nil {
		return nil, err
	}
	return NewFsWithConnection(ctx, opt, name, root, c, false)
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *swift.Object) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	// Note that due to a quirk of swift, dynamic large objects are
	// returned as 0 bytes in the listing.  Correct this here by
	// making sure we read the full metadata for all 0 byte files.
	// We don't read the metadata for directory marker objects.
	if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" && !o.fs.opt.NoLargeObjects {
		err := o.readMetaData(ctx) // reads info and headers, returning an error
		if err == fs.ErrorObjectNotFound {
			// We have a dangling large object here so just return the original metadata
			fs.Errorf(o, "dangling large object with no contents")
		} else if err != nil {
			return nil, err
		} else {
			return o, nil
		}
	}
	if info != nil {
		// Set info but not headers
		err := o.decodeMetaData(info)
		if err != nil {
			return nil, err
		}
	} else {
		err := o.readMetaData(ctx) // reads info and headers, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// NewObject finds the Object at remote.  If it can't be found it
// returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// listFn is called from list and listContainerRoot to handle an object.
type listFn func(remote string, object *swift.Object, isDirectory bool) error

// listContainerRoot lists the objects into the function supplied from
// the container and directory supplied.  The remote has prefix
// removed from it and if addContainer is set then it adds the
// container to the start.
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn listFn) error {
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}
	if directory != "" && !strings.HasSuffix(directory, "/") {
		directory += "/"
	}
	// Options for ObjectsWalk
	opts := swift.ObjectsOpts{
		Prefix: directory,
		Limit:  listChunks,
	}
	if !recurse {
		opts.Delimiter = '/'
	}
	return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (any, error) {
		var objects []swift.Object
		var err error
		err = f.pacer.Call(func() (bool, error) {
			objects, err = f.c.Objects(ctx, container, opts)
			return shouldRetry(ctx, err)
		})
		if err == nil {
			for i := range objects {
				object := &objects[i]
				if !includeDirMarkers && !f.opt.UseSegmentsContainer.Value && (object.Name == segmentsDirectory || strings.HasPrefix(object.Name, segmentsDirectorySlash)) {
					// Don't show segments in listing unless showing directory markers
					continue
				}
				isDirectory := false
				if !recurse {
					isDirectory = strings.HasSuffix(object.Name, "/")
				}
				remote := f.opt.Enc.ToStandardPath(object.Name)
				if !strings.HasPrefix(remote, prefix) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				if !includeDirMarkers && remote == prefix {
					// If we have zero length directory markers ending in / then swift
					// will return them in the listing for the directory which causes
					// duplicate directories.  Ignore them here.
					continue
				}
				remote = remote[len(prefix):]
				if addContainer {
					remote = path.Join(container, remote)
				}
				err = fn(remote, object, isDirectory)
				if err != nil {
					break
				}
			}
		}
		return objects, err
	})
}

type addEntryFn func(fs.DirEntry) error

// list the objects into the function supplied
func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn addEntryFn) error {
	err := f.listContainerRoot(ctx, container, directory, prefix, addContainer, recurse, includeDirMarkers, func(remote string, object *swift.Object, isDirectory bool) (err error) {
		if isDirectory {
			remote = strings.TrimRight(remote, "/")
			d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)
			err = fn(d)
		} else {
			// newObjectWithInfo does a full metadata read on 0 size objects which might be dynamic large objects
			var o fs.Object
			o, err = f.newObjectWithInfo(ctx, remote, object)
			if err != nil {
				return err
			}
			if includeDirMarkers || o.Storable() {
				err = fn(o)
			}
		}
		return err
	})
	if err == swift.ContainerNotFound {
		err = fs.ErrorDirNotFound
	}
	return err
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
	if container == "" {
		return fs.ErrorListBucketRequired
	}
	// List the objects
	err = f.list(ctx, container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
		return callback(entry)
	})
	if err != nil {
		return err
	}
	// container must be present if listing succeeded
	f.cache.MarkOK(container)
	return nil
}

// listContainers lists the containers
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
	var containers []swift.Container
	err = f.pacer.Call(func() (bool, error) {
		containers, err = f.c.ContainersAll(ctx, nil)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, fmt.Errorf("container listing failed: %w", err)
	}
	for _, container := range containers {
		f.cache.MarkOK(container.Name)
		d := fs.NewDir(f.opt.Enc.ToStandardName(container.Name), time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
		entries = append(entries, d)
	}
	return entries, nil
}

// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	list := list.NewHelper(callback)
	container, directory := f.split(dir)
	if container == "" {
		if directory != "" {
			return fs.ErrorListBucketRequired
		}
		entries, err := f.listContainers(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
		}
	} else {
		err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
		if err != nil {
			return err
		}
	}
	return list.Flush()
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. // // Don't implement this unless you have a more efficient way // of listing recursively than doing a directory traversal. func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { container, directory := f.split(dir) list := list.NewHelper(callback) listR := func(container, directory, prefix string, addContainer bool) error { return f.list(ctx, container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error { return list.Add(entry) }) } if container == "" { entries, err := f.listContainers(ctx) if err != nil { return err } for _, entry := range entries { err = list.Add(entry) if err != nil { return err } container := entry.Remote() err = listR(container, "", f.rootDirectory, true) if err != nil { return err } // container must be present if listing succeeded f.cache.MarkOK(container) } } else { err = listR(container, directory, f.rootDirectory, f.rootContainer == "") if err != nil { return err } // container must be present if listing succeeded f.cache.MarkOK(container) } return list.Flush() } // About gets quota information func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { var used, objects, total int64 if f.rootContainer != "" { var container swift.Container err = f.pacer.Call(func() (bool, error) { container, _, err = f.c.Container(ctx, f.rootContainer) return shouldRetry(ctx, err) }) if err != nil { return nil, fmt.Errorf("container info failed: %w", err) } used = container.Bytes objects = container.Count total = container.QuotaBytes if f.opt.UseSegmentsContainer.Value {
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/swift/swift_internal_test.go
backend/swift/swift_internal_test.go
package swift import ( "context" "testing" "time" "github.com/ncw/swift/v2" "github.com/rclone/rclone/fs/fserrors" "github.com/stretchr/testify/assert" ) func TestInternalUrlEncode(t *testing.T) { for _, test := range []struct { in string want string }{ {"", ""}, {"abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"}, {"ABCDEFGHIJKLMNOPQRSTUVWXYZ", "ABCDEFGHIJKLMNOPQRSTUVWXYZ"}, {"0123456789", "0123456789"}, {"abc/ABC/123", "abc/ABC/123"}, {" ", "%20%20%20"}, {"&", "%26"}, {"ߣ", "%C3%9F%C2%A3"}, {"Vidéo Potato Sausage?&£.mkv", "Vid%C3%A9o%20Potato%20Sausage%3F%26%C2%A3.mkv"}, } { got := urlEncode(test.in) if got != test.want { t.Logf("%q: want %q got %q", test.in, test.want, got) } } } func TestInternalShouldRetryHeaders(t *testing.T) { ctx := context.Background() headers := swift.Headers{ "Content-Length": "64", "Content-Type": "text/html; charset=UTF-8", "Date": "Mon: 18 Mar 2019 12:11:23 GMT", "Retry-After": "1", } err := &swift.Error{ StatusCode: 429, Text: "Too Many Requests", } // Short sleep should just do the sleep start := time.Now() retry, gotErr := shouldRetryHeaders(ctx, headers, err) dt := time.Since(start) assert.True(t, retry) assert.Equal(t, err, gotErr) assert.True(t, dt > time.Second/2) // Long sleep should return RetryError headers["Retry-After"] = "3600" start = time.Now() retry, gotErr = shouldRetryHeaders(ctx, headers, err) dt = time.Since(start) assert.True(t, dt < time.Second) assert.False(t, retry) assert.Equal(t, true, fserrors.IsRetryAfterError(gotErr)) after := gotErr.(fserrors.RetryAfter).RetryAfter() dt = after.Sub(start) assert.True(t, dt >= time.Hour-time.Second && dt <= time.Hour+time.Second) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/compress/compress.go
backend/compress/compress.go
// Package compress provides wrappers for Fs and Object which implement compression.
package compress

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/binary"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"path"
	"regexp"
	"strings"
	"time"

	"github.com/buengese/sgzip"
	"github.com/gabriel-vasile/mimetype"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/chunkedreader"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/fs/object"
	"github.com/rclone/rclone/fs/operations"
)

// Globals
const (
	initialChunkSize = 262144  // Initial and max sizes of chunks when reading parts of the file. Currently
	maxChunkSize     = 8388608 // at 256 KiB and 8 MiB.

	chunkStreams        = 0       // Streams to use for reading
	bufferSize          = 8388608
	heuristicBytes      = 1048576
	minCompressionRatio = 1.1

	gzFileExt           = ".gz"
	zstdFileExt         = ".zst"
	metaFileExt         = ".json"
	uncompressedFileExt = ".bin"
)

// Compression modes
const (
	Uncompressed = 0
	Gzip         = 2
	Zstd         = 4
)

// nameRegexp matches "<original name>.<11 base64 chars of size>" in data
// file names produced by makeDataName.
var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)

// Register with Fs
func init() {
	// Build compression mode options.
	// Default compression mode options
	compressionModeOptions := []fs.OptionExample{
		{
			Value: "gzip",
			Help:  "Standard gzip compression with fastest parameters.",
		}, {
			Value: "zstd",
			Help:  "Zstandard compression — fast modern algorithm offering adjustable speed-to-compression tradeoffs.",
		},
	}

	// Register our remote
	fs.Register(&fs.RegInfo{
		Name:        "compress",
		Description: "Compress a remote",
		NewFs:       NewFs,
		MetadataInfo: &fs.MetadataInfo{
			Help: `Any metadata supported by the underlying remote is read and written.`,
		},
		Options: []fs.Option{{
			Name:     "remote",
			Help:     "Remote to compress.",
			Required: true,
		}, {
			Name:     "mode",
			Help:     "Compression mode.",
			Default:  "gzip",
			Examples: compressionModeOptions,
		}, {
			Name: "level",
			Help: `GZIP (levels -2 to 9):
- -2 — Huffman encoding only. Only use if you know what you're doing.
- -1 (default) — recommended; equivalent to level 5.
- 0 — turns off compression.
- 1–9 — increase compression at the cost of speed. Going past 6 generally offers very little return.

ZSTD (levels 0 to 4):
- 0 — turns off compression entirely.
- 1 — fastest compression with the lowest ratio.
- 2 (default) — good balance of speed and compression.
- 3 — better compression, but uses about 2–3x more CPU than the default.
- 4 — best possible compression ratio (highest CPU cost).

Notes:
- Choose GZIP for wide compatibility; ZSTD for better speed/ratio tradeoffs.
- Negative gzip levels: -2 = Huffman-only, -1 = default (≈ level 5).`,
			Required: true,
		}, {
			Name: "ram_cache_limit",
			Help: `Some remotes don't allow the upload of files with unknown size.
In this case the compressed file will need to be cached to determine
it's size.

Files smaller than this limit will be cached in RAM, files larger than this limit will be cached on disk.`,
			Default:  fs.SizeSuffix(20 * 1024 * 1024),
			Advanced: true,
		}},
	})
}

// compressionModeHandler defines the interface for handling different compression modes
type compressionModeHandler interface {
	// processFileNameGetFileExtension returns the file extension for the given compression mode
	processFileNameGetFileExtension(compressionMode int) string
	// newObjectGetOriginalSize returns the original file size from the metadata
	newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error)
	// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
	// the configured threshold
	isCompressible(r io.Reader, compressionMode int) (bool, error)
	// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
	putCompress(
		ctx context.Context,
		f *Fs,
		in io.Reader,
		src fs.ObjectInfo,
		options []fs.OpenOption,
		mimeType string,
	) (fs.Object, *ObjectMetadata, error)
	// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
	openGetReadCloser(
		ctx context.Context,
		o *Object,
		offset int64,
		limit int64,
		cr chunkedreader.ChunkedReader,
		closer io.Closer,
		options ...fs.OpenOption,
	) (rc io.ReadCloser, err error)
	// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
	putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error)
	// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
	// Warning: This function panics if cmeta is not of the expected type.
	newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata
}

// Options defines the configuration for this backend
type Options struct {
	Remote           string        `config:"remote"`
	CompressionMode  string        `config:"mode"`
	CompressionLevel int           `config:"level"`
	RAMCacheLimit    fs.SizeSuffix `config:"ram_cache_limit"`
}

/*** FILESYSTEM FUNCTIONS ***/

// Fs represents a wrapped fs.Fs
type Fs struct {
	fs.Fs
	wrapper     fs.Fs
	name        string
	root        string
	opt         Options
	mode        int          // compression mode id
	features    *fs.Features // optional features
	modeHandler compressionModeHandler // compression mode handler
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	remote := opt.Remote
	if strings.HasPrefix(remote, name+":") {
		return nil, errors.New("can't point press remote at itself - check the value of the remote setting")
	}
	wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
	if err != nil {
		return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
	}
	// Strip trailing slashes if they exist in rpath
	rpath = strings.TrimRight(rpath, "\\/")

	// First, check for a file
	// If a metadata file was found, return an error. Otherwise, check for a directory
	remotePath := fspath.JoinRootPath(wPath, makeMetadataName(rpath))
	wrappedFs, err := wInfo.NewFs(ctx, wName, remotePath, wConfig)
	if err != fs.ErrorIsFile {
		remotePath = fspath.JoinRootPath(wPath, rpath)
		wrappedFs, err = wInfo.NewFs(ctx, wName, remotePath, wConfig)
	}
	if err != nil && err != fs.ErrorIsFile {
		return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
	}
	compressionMode := compressionModeFromName(opt.CompressionMode)
	var modeHandler compressionModeHandler
	switch compressionMode {
	case Gzip:
		modeHandler = &gzipModeHandler{}
	case Zstd:
		modeHandler = &zstdModeHandler{}
	case Uncompressed:
		modeHandler = &uncompressedModeHandler{}
	default:
		modeHandler = &unknownModeHandler{}
	}
	// Create the wrapping fs
	f := &Fs{
		Fs:          wrappedFs,
		name:        name,
		root:        rpath,
		opt:         *opt,
		mode:        compressionMode,
		modeHandler: modeHandler,
	}
	// Correct root if definitely pointing to a file
	if err == fs.ErrorIsFile {
		f.root = path.Dir(f.root)
		if f.root == "." || f.root == "/" {
			f.root = ""
		}
	}
	// the features here are ones we could support, and they are
	// ANDed with the ones from wrappedFs
	f.features = (&fs.Features{
		CaseInsensitive:          true,
		DuplicateFiles:           false,
		ReadMimeType:             false,
		WriteMimeType:            false,
		GetTier:                  true,
		SetTier:                  true,
		BucketBased:              true,
		CanHaveEmptyDirectories:  true,
		ReadMetadata:             true,
		WriteMetadata:            true,
		UserMetadata:             true,
		ReadDirMetadata:          true,
		WriteDirMetadata:         true,
		WriteDirSetModTime:       true,
		UserDirMetadata:          true,
		DirModTimeUpdatesOnWrite: true,
		PartialUploads:           true,
	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
	// We support reading MIME types no matter the wrapped fs
	f.features.ReadMimeType = true
	// We can only support putstream if we have serverside copy or move
	if !operations.CanServerSideMove(wrappedFs) {
		f.features.Disable("PutStream")
	}
	// Enable ListP always
	f.features.ListP = f.ListP

	return f, err
}

// compressionModeFromName converts a compression mode name to its int representation.
func compressionModeFromName(name string) int {
	switch name {
	case "gzip":
		return Gzip
	case "zstd":
		return Zstd
	default:
		return Uncompressed
	}
}

// Converts an int64 to base64
func int64ToBase64(number int64) string {
	intBytes := make([]byte, 8)
	binary.LittleEndian.PutUint64(intBytes, uint64(number))
	return base64.RawURLEncoding.EncodeToString(intBytes)
}

// Converts base64 to int64
func base64ToInt64(str string) (int64, error) {
	intBytes, err := base64.RawURLEncoding.DecodeString(str)
	if err != nil {
		return 0, err
	}
	return int64(binary.LittleEndian.Uint64(intBytes)), nil
}

// Processes a file name for a compressed file. Returns the original file name, the extension, and the size of the original file.
// Returns -2 for the original size if the file is uncompressed.
func processFileName(compressedFileName string, modeHandler compressionModeHandler) (origFileName string, extension string, origSize int64, err error) {
	// Separate the filename and size from the extension
	extensionPos := strings.LastIndex(compressedFileName, ".")
	if extensionPos == -1 {
		return "", "", 0, errors.New("file name has no extension")
	}
	extension = compressedFileName[extensionPos:]
	nameWithSize := compressedFileName[:extensionPos]
	if extension == uncompressedFileExt {
		return nameWithSize, extension, -2, nil
	}
	match := nameRegexp.FindStringSubmatch(nameWithSize)
	if match == nil || len(match) != 3 {
		return "", "", 0, errors.New("invalid filename")
	}
	size, err := base64ToInt64(match[2])
	if err != nil {
		return "", "", 0, errors.New("could not decode size")
	}
	// NOTE(review): the returned extension comes from the mode handler,
	// not the raw extension parsed above — confirm this is intentional.
	ext := modeHandler.processFileNameGetFileExtension(compressionModeFromName(compressedFileName[extensionPos+1:]))
	return match[1], ext, size, nil
}

// Generates the file name for a metadata file
func makeMetadataName(remote string) (newRemote string) {
	return remote + metaFileExt
}

// Checks whether a file is a metadata file
func isMetadataFile(filename string) bool {
	return strings.HasSuffix(filename, metaFileExt)
}

// Checks whether a file is a metadata file and returns the original
// file name and a flag indicating whether it was a metadata file or
// not.
func unwrapMetadataFile(filename string) (string, bool) {
	if !isMetadataFile(filename) {
		return "", false
	}
	return filename[:len(filename)-len(metaFileExt)], true
}

// makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) {
	switch mode {
	case Gzip:
		newRemote = remote + "." + int64ToBase64(size) + gzFileExt
	case Zstd:
		newRemote = remote + "." + int64ToBase64(size) + zstdFileExt
	default:
		newRemote = remote + uncompressedFileExt
	}
	return newRemote
}

// dataName generates the file name for data file
func (f *Fs) dataName(remote string, size int64, compressed bool) (name string) {
	if !compressed {
		return makeDataName(remote, size, Uncompressed)
	}
	return makeDataName(remote, size, f.mode)
}

// addData parses an object and adds it to the DirEntries
func (f *Fs) addData(entries *fs.DirEntries, o fs.Object) {
	origFileName, _, size, err := processFileName(o.Remote(), f.modeHandler)
	if err != nil {
		fs.Errorf(o, "Error on parsing file name: %v", err)
		return
	}
	if size == -2 { // File is uncompressed
		size = o.Size()
	}
	metaName := makeMetadataName(origFileName)
	*entries = append(*entries, f.newObjectSizeAndNameOnly(o, metaName, size))
}

// addDir adds a dir to the dir entries
func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
	*entries = append(*entries, f.newDir(dir))
}

// newDir returns a dir
func (f *Fs) newDir(dir fs.Directory) fs.Directory {
	return dir // We're using the same dir
}

// processEntries parses the file names and adds metadata to the dir entries
func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
	newEntries = entries[:0] // in place filter
	for _, entry := range entries {
		switch x := entry.(type) {
		case fs.Object:
			if !isMetadataFile(x.Remote()) {
				f.addData(&newEntries, x) // Only care about data files for now; metadata files are redundant.
			}
		case fs.Directory:
			f.addDir(&newEntries, x)
		default:
			return nil, fmt.Errorf("unknown object type %T", entry)
		}
	}
	return newEntries, nil
}

// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
// List entries and process them
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	wrappedCallback := func(entries fs.DirEntries) error {
		entries, err := f.processEntries(entries)
		if err != nil {
			return err
		}
		return callback(entries)
	}
	listP := f.Fs.Features().ListP
	if listP == nil {
		// Underlying remote has no paged listing - fall back to List
		entries, err := f.Fs.List(ctx, dir)
		if err != nil {
			return err
		}
		return wrappedCallback(entries)
	}
	return listP(ctx, dir, wrappedCallback)
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	return f.Fs.Features().ListR(ctx, dir, func(entries fs.DirEntries) error {
		newEntries, err := f.processEntries(entries)
		if err != nil {
			return err
		}
		return callback(newEntries)
	})
}

// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	// Read metadata from metadata object
	mo, err := f.Fs.NewObject(ctx, makeMetadataName(remote))
	if err != nil {
		return nil, err
	}
	meta, err := readMetadata(ctx, mo)
	if err != nil {
		return nil, fmt.Errorf("error decoding metadata: %w", err)
	}
	size, err := f.modeHandler.newObjectGetOriginalSize(meta)
	if err != nil {
		return nil, fmt.Errorf("error reading metadata: %w", err)
	}
	// Create our Object
	o, err := f.Fs.NewObject(ctx, makeDataName(remote, size, meta.Mode))
	if err != nil {
		return nil, err
	}
	return f.newObject(o, mo, meta), nil
}

// checkCompressAndType checks if an object is compressible and determines it's mime type
// returns a multireader with the bytes that were read to determine mime type
//
// NOTE(review): a single Read may return fewer than heuristicBytes even
// before EOF — confirm whether io.ReadFull semantics were intended here.
func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compressionModeHandler) (newReader io.Reader, compressible bool, mimeType string, err error) {
	in, wrap := accounting.UnWrap(in)
	buf := make([]byte, heuristicBytes)
	n, err := in.Read(buf)
	buf = buf[:n]
	if err != nil && err != io.EOF {
		return nil, false, "", err
	}
	mime := mimetype.Detect(buf)
	compressible, err = modeHandler.isCompressible(bytes.NewReader(buf), compressionMode)
	if err != nil {
		return nil, false, "", err
	}
	// Stitch the sniffed bytes back in front of the remaining stream
	in = io.MultiReader(bytes.NewReader(buf), in)
	return wrap(in), compressible, mime.String(), nil
}

// verifyObjectHash verifies the Objects hash
func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.MultiHasher, ht hash.Type) error {
	srcHash := hasher.Sums()[ht]
	dstHash, err := o.Hash(ctx, ht)
	if err != nil {
		return fmt.Errorf("failed to read destination hash: %w", err)
	}
	if srcHash != "" && dstHash != "" && srcHash != dstHash {
		// remove object
		err = o.Remove(ctx)
		if err != nil {
			fs.Errorf(o, "Failed to remove corrupted object: %v", err)
		}
		return fmt.Errorf("corrupted on transfer: %v compressed hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
	}
	return nil
}
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) type compressionResult[T sgzip.GzipMetadata | SzstdMetadata] struct { err error meta T } // replicating some of operations.Rcat functionality because we want to support remotes without streaming // support and of course cannot know the size of a compressed file before compressing it. func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, modTime time.Time, options []fs.OpenOption) (o fs.Object, err error) { // cache small files in memory and do normal upload buf := make([]byte, f.opt.RAMCacheLimit) if n, err := io.ReadFull(in, buf); err == io.EOF || err == io.ErrUnexpectedEOF { src := object.NewStaticObjectInfo(dstFileName, modTime, int64(len(buf[:n])), false, nil, f.Fs) return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...) } // Need to include what we already read in = &ReadCloserWrapper{ Reader: io.MultiReader(bytes.NewReader(buf), in), Closer: in, } canStream := f.Fs.Features().PutStream != nil if canStream { src := object.NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, f.Fs) return f.Fs.Features().PutStream(ctx, in, src, options...) 
} fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file") tempFile, err := os.CreateTemp("", "rclone-press-") defer func() { // these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish // to ignore them _ = tempFile.Close() _ = os.Remove(tempFile.Name()) }() if err != nil { return nil, fmt.Errorf("failed to create temporary local FS to spool file: %w", err) } if _, err = io.Copy(tempFile, in); err != nil { return nil, fmt.Errorf("failed to write temporary local file: %w", err) } if _, err = tempFile.Seek(0, 0); err != nil { return nil, fmt.Errorf("failed to seek temporary local file: %w", err) } finfo, err := tempFile.Stat() if err != nil { return nil, fmt.Errorf("failed to stat temporary local file: %w", err) } return f.Fs.Put(ctx, tempFile, object.NewStaticObjectInfo(dstFileName, modTime, finfo.Size(), false, nil, f.Fs)) } // Put a compressed version of a file. Returns a wrappable object and metadata. func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) { return f.modeHandler.putCompress(ctx, f, in, src, options, mimeType) } // Put an uncompressed version of a file. Returns a wrappable object and metadata. func (f *Fs) putUncompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, put putFn, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) { // Unwrap the accounting, add our metadata hasher, then wrap it back on in, wrap := accounting.UnWrap(in) hs := hash.NewHashSet(hash.MD5) ht := f.Fs.Hashes().GetOne() if !hs.Contains(ht) { hs.Add(ht) } metaHasher, err := hash.NewMultiHasherTypes(hs) if err != nil { return nil, nil, err } in = io.TeeReader(in, metaHasher) wrappedIn := wrap(in) // Put the object o, err := put(ctx, wrappedIn, f.wrapInfo(src, makeDataName(src.Remote(), src.Size(), Uncompressed), src.Size()), options...) 
if err != nil { if o != nil { removeErr := o.Remove(ctx) if removeErr != nil { fs.Errorf(o, "Failed to remove partially transferred object: %v", err) } } return nil, nil, err } // Check the hashes of the compressed data if we were comparing them if ht != hash.None { err := f.verifyObjectHash(ctx, o, metaHasher, ht) if err != nil { return nil, nil, err } } // Return our object and metadata sum, err := metaHasher.Sum(hash.MD5) if err != nil { return nil, nil, err } return f.modeHandler.putUncompressGetNewMetadata(o, Uncompressed, hex.EncodeToString(sum), mimeType, sum) } // This function will write a metadata struct to a metadata Object for an src. Returns a wrappable metadata object. func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (mo fs.Object, err error) { // Generate the metadata contents data, err := json.Marshal(meta) if err != nil { return nil, err } metaReader := bytes.NewReader(data) // Put the data mo, err = put(ctx, metaReader, f.wrapInfo(src, makeMetadataName(src.Remote()), int64(len(data))), options...) if err != nil { if mo != nil { removeErr := mo.Remove(ctx) if removeErr != nil { fs.Errorf(mo, "Failed to remove partially transferred object: %v", err) } } return nil, err } return mo, nil } // This function will put both the data and metadata for an Object. // putData is the function used for data, while putMeta is the function used for metadata. // The putData function will only be used when the object is not compressible if the // data is compressible this parameter will be ignored. 
// putWithCustomFunctions uploads both the data and the metadata for an
// Object using the supplied upload functions.
// putData is used for the data object, putMeta for the metadata object.
// putData is only consulted when the object is NOT compressible; for
// compressible data the mode handler performs the upload itself.
// If the metadata upload fails, the freshly uploaded data object is removed
// so no orphaned data is left behind.
func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, putData putFn, putMeta putFn, compressible bool, mimeType string) (*Object, error) {
	// Put file then metadata
	var dataObject fs.Object
	var meta *ObjectMetadata
	var err error
	if compressible {
		dataObject, meta, err = f.putCompress(ctx, in, src, options, mimeType)
	} else {
		dataObject, meta, err = f.putUncompress(ctx, in, src, putData, options, mimeType)
	}
	if err != nil {
		return nil, err
	}
	mo, err := f.putMetadata(ctx, meta, src, options, putMeta)
	// metadata upload may fail. in this case we try to remove the original object
	if err != nil {
		removeError := dataObject.Remove(ctx)
		if removeError != nil {
			return nil, removeError
		}
		return nil, err
	}
	return f.newObject(dataObject, mo, meta), nil
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// If there's already an existing object we need to make sure to explicitly update it to make sure we don't leave
	// orphaned data. Alternatively we could also delete it (which would be simpler) but that has the disadvantage that it
	// destroys all server-side versioning.
	o, err := f.NewObject(ctx, src.Remote())
	if err == fs.ErrorObjectNotFound {
		// Get our file compressibility
		in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler)
		if err != nil {
			return nil, err
		}
		return f.putWithCustomFunctions(ctx, in, src, options, f.Fs.Put, f.Fs.Put, compressible, mimeType)
	}
	if err != nil {
		return nil, err
	}
	return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// NOTE(review): this assumes the wrapped remote supports PutStream (the
// feature is presumably masked off at NewFs time otherwise) — confirm before
// relying on f.Fs.Features().PutStream being non-nil here.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	oldObj, err := f.NewObject(ctx, src.Remote())
	if err != nil && err != fs.ErrorObjectNotFound {
		return nil, err
	}
	found := err == nil
	in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler)
	if err != nil {
		return nil, err
	}
	newObj, err := f.putWithCustomFunctions(ctx, in, src, options, f.Fs.Features().PutStream, f.Fs.Put, compressible, mimeType)
	if err != nil {
		return nil, err
	}
	// Our transfer is now complete. We have to make sure to remove the old object because our new object will
	// have a different name except when both the old and the new object were uncompressed.
	if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
		err = oldObj.(*Object).Object.Remove(ctx)
		if err != nil {
			return nil, fmt.Errorf("couldn't remove original object: %w", err)
		}
	}
	// If our new object is compressed we have to rename it with the correct size.
	// Uncompressed objects don't store the size in the name so they'll already have the correct name.
	if compressible {
		wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
		if err != nil {
			return nil, fmt.Errorf("couldn't rename streamed object: %w", err)
		}
		newObj.Object = wrapObj
	}
	return newObj, nil
}

// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// will break stuff. Right now I can't think of a way to make this work.

// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	// Only MD5 is supported because it is the hash stored in the metadata
	// object
	return hash.Set(hash.MD5)
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return f.Fs.Mkdir(ctx, dir)
}

// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
	// Delegate to the wrapped remote if it implements the feature
	if do := f.Fs.Features().MkdirMetadata; do != nil {
		return do(ctx, dir, metadata)
	}
	return nil, fs.ErrorNotImplemented
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.Fs.Rmdir(ctx, dir)
}

// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
	do := f.Fs.Features().Purge
	if do == nil {
		return fs.ErrorCantPurge
	}
	return do(ctx, dir)
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	do := f.Fs.Features().Copy
	if do == nil {
		return nil, fs.ErrorCantCopy
	}
	// Only objects from this backend can be copied server-side: we need
	// their metadata object as well
	o, ok := src.(*Object)
	if !ok {
		return nil, fs.ErrorCantCopy
	}
	// We might be trying to overwrite a file with a newer version but due to size difference the name
	// is different. Therefore we have to remove the old file first (if it exists).
	dstFile, err := f.NewObject(ctx, remote)
	if err != nil && err != fs.ErrorObjectNotFound {
		return nil, err
	}
	if err == nil {
		err := dstFile.Remove(ctx)
		if err != nil {
			return nil, err
		}
	}

	// Copy over metadata
	err = o.loadMetadataIfNotLoaded(ctx)
	if err != nil {
		return nil, err
	}
	newFilename := makeMetadataName(remote)
	moResult, err := do(ctx, o.mo, newFilename)
	if err != nil {
		return nil, err
	}
	// Copy over data
	newFilename = makeDataName(remote, src.Size(), o.meta.Mode)
	oResult, err := do(ctx, o.Object, newFilename)
	if err != nil {
		return nil, err
	}
	return f.newObject(oResult, moResult, o.meta), nil
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	do := f.Fs.Features().Move
	if do == nil {
		return nil, fs.ErrorCantMove
	}
	// Only objects from this backend can be moved server-side: we need
	// their metadata object as well
	o, ok := src.(*Object)
	if !ok {
		return nil, fs.ErrorCantMove
	}
	// We might be trying to overwrite a file with a newer version but due to size difference the name
	// is different. Therefore we have to remove the old file first (if it exists).
	dstFile, err := f.NewObject(ctx, remote)
	if err != nil && err != fs.ErrorObjectNotFound {
		return nil, err
	}
	if err == nil {
		err := dstFile.Remove(ctx)
		if err != nil {
			return nil, err
		}
	}

	// Move metadata
	err = o.loadMetadataIfNotLoaded(ctx)
	if err != nil {
		return nil, err
	}
	newFilename := makeMetadataName(remote)
	moResult, err := do(ctx, o.mo, newFilename)
	if err != nil {
		return nil, err
	}

	// Move data
	newFilename = makeDataName(remote, src.Size(), o.meta.Mode)
	oResult, err := do(ctx, o.Object, newFilename)
	if err != nil {
		return nil, err
	}
	return f.newObject(oResult, moResult, o.meta), nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	do := f.Fs.Features().DirMove
	if do == nil {
		return fs.ErrorCantDirMove
	}
	// The source must be a compress remote too so we can hand its wrapped
	// Fs to the underlying DirMove
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	return do(ctx, srcFs.Fs, srcRemote, dstRemote)
}

// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
	if do := f.Fs.Features().DirSetModTime; do != nil {
		return do(ctx, dir, modTime)
	}
	return fs.ErrorNotImplemented
}

// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
	do := f.Fs.Features().CleanUp
	if do == nil {
		return errors.New("not supported by underlying remote")
	}
	return do(ctx)
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	do := f.Fs.Features().About
	if do == nil {
		return nil, errors.New("not supported by underlying remote")
	}
	return do(ctx)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
	return f.Fs
}

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
	return f.wrapper
}

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
	f.wrapper = wrapper
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	do := f.Fs.Features().MergeDirs
	if do == nil {
		return errors.New("MergeDirs not supported")
	}
	// Pass copies of the directory entries down to the wrapped remote
	out := make([]fs.Directory, len(dirs))
	for i, dir := range dirs {
		out[i] = fs.NewDirCopy(ctx, dir).SetRemote(dir.Remote())
	}
	return do(ctx, out)
}

// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
	do := f.Fs.Features().DirCacheFlush
	if do != nil {
		do()
	}
}

// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/compress/uncompressed_handler.go
backend/compress/uncompressed_handler.go
package compress

import (
	"context"
	"fmt"
	"io"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/chunkedreader"
)

// uncompressedModeHandler is the compressionModeHandler used when the
// backend stores data without any compression applied.
type uncompressedModeHandler struct{}

// isCompressible reports whether the data should be compressed; nothing
// ever is for this handler.
func (h *uncompressedModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
	return false, nil
}

// newObjectGetOriginalSize always reports 0 — uncompressed data file names
// don't embed a size, so none is needed to locate the data object.
func (h *uncompressedModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
	return 0, nil
}

// openGetReadCloser opens the wrapped object directly: no decompression
// wrapper, offset translation or limit handling is required here.
func (h *uncompressedModeHandler) openGetReadCloser(
	ctx context.Context,
	o *Object,
	offset int64,
	limit int64,
	cr chunkedreader.ChunkedReader,
	closer io.Closer,
	options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
	return o.Object.Open(ctx, options...)
}

// processFileNameGetFileExtension reports the extension appended to data
// file names; uncompressed data carries none.
func (h *uncompressedModeHandler) processFileNameGetFileExtension(compressionMode int) string {
	return ""
}

// putCompress is not applicable to this handler and always fails.
func (h *uncompressedModeHandler) putCompress(
	ctx context.Context,
	f *Fs,
	in io.Reader,
	src fs.ObjectInfo,
	options []fs.OpenOption,
	mimeType string,
) (fs.Object, *ObjectMetadata, error) {
	return nil, nil, fmt.Errorf("unsupported compression mode %d", f.mode)
}

// putUncompressGetNewMetadata is not applicable to this handler and always
// fails.
func (h *uncompressedModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
	return nil, nil, fmt.Errorf("unsupported compression mode %d", Uncompressed)
}

// newMetadata never produces metadata for the uncompressed handler.
func (h *uncompressedModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
	return nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/compress/zstd_handler.go
backend/compress/zstd_handler.go
package compress

import (
	"bufio"
	"bytes"
	"context"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"io"

	"github.com/klauspost/compress/zstd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/chunkedreader"
	"github.com/rclone/rclone/fs/hash"
)

// zstdModeHandler implements compressionModeHandler for zstd
type zstdModeHandler struct{}

// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (z *zstdModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
	// Trial-compress the sample into memory and compare sizes
	var b bytes.Buffer
	var n int64
	w, err := NewWriterSzstd(&b, zstd.WithEncoderLevel(zstd.SpeedDefault))
	if err != nil {
		return false, err
	}
	n, err = io.Copy(w, r)
	if err != nil {
		return false, err
	}
	err = w.Close()
	if err != nil {
		return false, err
	}
	// ratio = uncompressed bytes / compressed bytes; bigger is more
	// compressible
	ratio := float64(n) / float64(b.Len())
	return ratio > minCompressionRatio, nil
}

// newObjectGetOriginalSize returns the original file size from the metadata
func (z *zstdModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
	if meta.CompressionMetadataZstd == nil {
		return 0, errors.New("missing zstd metadata")
	}
	return meta.CompressionMetadataZstd.Size, nil
}

// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (z *zstdModeHandler) openGetReadCloser(
	ctx context.Context,
	o *Object,
	offset int64,
	limit int64,
	cr chunkedreader.ChunkedReader,
	closer io.Closer,
	options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
	var file io.Reader
	if offset != 0 {
		// Seekable reader needed to start decoding mid-stream
		file, err = NewReaderAtSzstd(cr, o.meta.CompressionMetadataZstd, offset)
	} else {
		// Plain streaming decode from the start
		file, err = zstd.NewReader(cr)
	}
	if err != nil {
		return nil, err
	}
	var fileReader io.Reader
	if limit != -1 {
		fileReader = io.LimitReader(file, limit)
	} else {
		fileReader = file
	}
	// Return a ReadCloser
	return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
}

// processFileNameGetFileExtension returns the file extension for the given compression mode
func (z *zstdModeHandler) processFileNameGetFileExtension(compressionMode int) string {
	if compressionMode == Zstd {
		return zstdFileExt
	}
	return ""
}

// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata.
//
// The compression runs in a background goroutine feeding an io.Pipe while
// the upload (rcat) consumes the compressed stream; the result channel
// carries the seekable-zstd metadata back once the writer is closed.
//
// NOTE(review): if rcat fails before draining the pipe, the compressor
// goroutine presumably blocks on pipeWriter.Write — confirm whether
// pipeReader should be closed with an error on the failure path.
func (z *zstdModeHandler) putCompress(
	ctx context.Context,
	f *Fs,
	in io.Reader,
	src fs.ObjectInfo,
	options []fs.OpenOption,
	mimeType string,
) (fs.Object, *ObjectMetadata, error) {
	// Unwrap reader accounting
	in, wrap := accounting.UnWrap(in)

	// Add the metadata hasher (MD5 of the UNCOMPRESSED data)
	metaHasher := md5.New()
	in = io.TeeReader(in, metaHasher)

	// Compress the file
	pipeReader, pipeWriter := io.Pipe()
	resultsZstd := make(chan compressionResult[SzstdMetadata])
	go func() {
		writer, err := NewWriterSzstd(pipeWriter, zstd.WithEncoderLevel(zstd.EncoderLevel(f.opt.CompressionLevel)))
		if err != nil {
			resultsZstd <- compressionResult[SzstdMetadata]{err: err}
			close(resultsZstd)
			return
		}
		_, err = io.Copy(writer, in)
		// Keep the first error but still close both writer and pipe
		if wErr := writer.Close(); wErr != nil && err == nil {
			err = wErr
		}
		if cErr := pipeWriter.Close(); cErr != nil && err == nil {
			err = cErr
		}
		resultsZstd <- compressionResult[SzstdMetadata]{err: err, meta: writer.GetMetadata()}
		close(resultsZstd)
	}()
	wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize))

	// Also hash the COMPRESSED stream with the wrapped remote's hash so we
	// can verify the upload afterwards
	ht := f.Fs.Hashes().GetOne()
	var hasher *hash.MultiHasher
	var err error
	if ht != hash.None {
		wrappedIn, wrap = accounting.UnWrap(wrappedIn)
		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
		if err != nil {
			return nil, nil, err
		}
		wrappedIn = io.TeeReader(wrappedIn, hasher)
		wrappedIn = wrap(wrappedIn)
	}

	// Upload the compressed stream; the final (sized) name is fixed up by
	// the caller once the metadata is known
	o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
	if err != nil {
		return nil, nil, err
	}
	result := <-resultsZstd
	if result.err != nil {
		// Compression failed - remove the partially uploaded object
		if o != nil {
			_ = o.Remove(ctx)
		}
		return nil, nil, result.err
	}

	// Build metadata using uncompressed size for filename
	meta := z.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
	if ht != hash.None && hasher != nil {
		err = f.verifyObjectHash(ctx, o, hasher, ht)
		if err != nil {
			return nil, nil, err
		}
	}
	return o, meta, nil
}

// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (z *zstdModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
	return o, z.newMetadata(o.Size(), mode, SzstdMetadata{}, hex.EncodeToString(sum), mimeType), nil
}

// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (z *zstdModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
	meta, ok := cmeta.(SzstdMetadata)
	if !ok {
		panic("invalid cmeta type: expected SzstdMetadata")
	}
	objMeta := new(ObjectMetadata)
	objMeta.Size = size
	objMeta.Mode = mode
	objMeta.CompressionMetadataGzip = nil
	objMeta.CompressionMetadataZstd = &meta
	objMeta.MD5 = md5
	objMeta.MimeType = mimeType
	return objMeta
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/compress/unknown_handler.go
backend/compress/unknown_handler.go
package compress

import (
	"context"
	"fmt"
	"io"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/chunkedreader"
)

// unknownModeHandler implements compressionModeHandler for unknown
// compression types. Every operation fails with a descriptive error so an
// unrecognised mode is surfaced immediately instead of producing bogus
// results.
type unknownModeHandler struct{}

// isCompressible always fails for an unknown compression mode.
func (unk *unknownModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
	return false, fmt.Errorf("unknown compression mode %d", compressionMode)
}

// newObjectGetOriginalSize fails for an unknown compression mode.
// Previously this silently returned (0, nil); failing fast is consistent
// with the other methods and avoids building a wrong data object name
// downstream.
func (unk *unknownModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
	return 0, fmt.Errorf("unknown compression mode %d", meta.Mode)
}

// openGetReadCloser always fails for an unknown compression mode.
func (unk *unknownModeHandler) openGetReadCloser(
	ctx context.Context,
	o *Object,
	offset int64,
	limit int64,
	cr chunkedreader.ChunkedReader,
	closer io.Closer,
	options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
	return nil, fmt.Errorf("unknown compression mode %d", o.meta.Mode)
}

// processFileNameGetFileExtension returns the file extension for the given
// compression mode; unknown modes have none.
func (unk *unknownModeHandler) processFileNameGetFileExtension(compressionMode int) string {
	return ""
}

// putCompress always fails for an unknown compression mode.
func (unk *unknownModeHandler) putCompress(
	ctx context.Context,
	f *Fs,
	in io.Reader,
	src fs.ObjectInfo,
	options []fs.OpenOption,
	mimeType string,
) (fs.Object, *ObjectMetadata, error) {
	return nil, nil, fmt.Errorf("unknown compression mode %d", f.mode)
}

// putUncompressGetNewMetadata always fails for an unknown compression mode.
// The offending mode is included in the error for diagnosability,
// consistent with the other methods of this handler.
func (unk *unknownModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
	return nil, nil, fmt.Errorf("unknown compression mode %d", mode)
}

// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
// The unknown handler never produces metadata.
func (unk *unknownModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
	return nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/compress/szstd_helper.go
backend/compress/szstd_helper.go
package compress

import (
	"context"
	"errors"
	"io"
	"runtime"
	"sync"

	szstd "github.com/a1ex3/zstd-seekable-format-go/pkg"
	"github.com/klauspost/compress/zstd"
)

const szstdChunkSize int = 1 << 20 // 1 MiB chunk size

// SzstdMetadata holds metadata for szstd compressed files.
type SzstdMetadata struct {
	BlockSize int      // BlockSize is the size of the blocks in the zstd file
	Size      int64    // Size is the uncompressed size of the file
	BlockData []uint32 // BlockData is the block data for the zstd file, used for seeking
}

// SzstdWriter is a writer that compresses data in szstd format.
type SzstdWriter struct {
	enc      *zstd.Encoder         // underlying zstd encoder
	w        szstd.ConcurrentWriter // seekable-format frame writer wrapping enc
	metadata SzstdMetadata          // accumulated block/size info for seeking
	mu       sync.Mutex             // guards metadata updates from write callbacks
}

// NewWriterSzstd creates a new szstd writer with the specified options.
// It initializes the szstd writer with a zstd encoder and returns a pointer to the SzstdWriter.
// The writer can be used to write data in chunks, and it will automatically handle block sizes and metadata.
func NewWriterSzstd(w io.Writer, opts ...zstd.EOption) (*SzstdWriter, error) {
	encoder, err := zstd.NewWriter(nil, opts...)
	if err != nil {
		return nil, err
	}
	sw, err := szstd.NewWriter(w, encoder)
	if err != nil {
		// Best-effort cleanup of the encoder.
		// NOTE(review): if Close itself fails, its error replaces the
		// original szstd.NewWriter error — confirm this masking is intended.
		if err := encoder.Close(); err != nil {
			return nil, err
		}
		return nil, err
	}
	return &SzstdWriter{
		enc: encoder,
		w:   sw,
		metadata: SzstdMetadata{
			BlockSize: szstdChunkSize,
			Size:      0,
		},
	}, nil
}

// Write writes data to the szstd writer in chunks of szstdChunkSize.
// It handles the block size and metadata updates automatically.
func (w *SzstdWriter) Write(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	if w.metadata.BlockData == nil {
		// First write: pre-size the block offset table. Entry 0 is the
		// compressed offset of the first block (always 0); one entry is
		// appended per compressed block by the write callback below.
		numBlocks := (len(p) + w.metadata.BlockSize - 1) / w.metadata.BlockSize
		w.metadata.BlockData = make([]uint32, 1, numBlocks+1)
		w.metadata.BlockData[0] = 0
	}
	start := 0
	total := len(p)
	// writerFunc feeds p to the szstd writer one BlockSize chunk at a time;
	// returning (nil, nil) signals end of input.
	// NOTE(review): this assumes callers write in multiples of BlockSize
	// (except the final write) - a short chunk mid-stream would break the
	// fixed-size block assumption used by SzstdReaderAt.ReadAt; confirm
	// against the callers in this backend.
	var writerFunc szstd.FrameSource = func() ([]byte, error) {
		if start >= total {
			return nil, nil
		}
		end := min(start+w.metadata.BlockSize, total)
		chunk := p[start:end]
		size := end - start
		w.mu.Lock()
		w.metadata.Size += int64(size)
		w.mu.Unlock()
		start = end
		return chunk, nil
	}
	// write sizes of compressed blocks in the callback
	err := w.w.WriteMany(context.Background(), writerFunc,
		szstd.WithWriteCallback(func(size uint32) {
			// Record the running compressed offset of each block so that
			// SzstdReaderAt can later locate block boundaries.
			w.mu.Lock()
			lastOffset := w.metadata.BlockData[len(w.metadata.BlockData)-1]
			w.metadata.BlockData = append(w.metadata.BlockData, lastOffset+size)
			w.mu.Unlock()
		}),
	)
	if err != nil {
		return 0, err
	}
	return total, nil
}

// Close closes the SzstdWriter and its underlying encoder.
// The szstd writer is closed first so it can flush its seek table.
func (w *SzstdWriter) Close() error {
	if err := w.w.Close(); err != nil {
		return err
	}
	if err := w.enc.Close(); err != nil {
		return err
	}
	return nil
}

// GetMetadata returns the metadata of the szstd writer.
// Only valid after Close (or at least after all writes have completed).
func (w *SzstdWriter) GetMetadata() SzstdMetadata {
	return w.metadata
}

// SzstdReaderAt is a reader that allows random access in szstd compressed data.
type SzstdReaderAt struct {
	r        szstd.Reader   // seekable zstd reader over the compressed stream
	decoder  *zstd.Decoder  // used to decode individual blocks in ReadAt
	metadata *SzstdMetadata // block offsets and uncompressed size
	pos      int64          // current sequential read position
	mu       sync.Mutex     // guards r and pos for Seek/Read
}

// NewReaderAtSzstd creates a new SzstdReaderAt at the specified io.ReadSeeker.
func NewReaderAtSzstd(rs io.ReadSeeker, meta *SzstdMetadata, offset int64, opts ...zstd.DOption) (*SzstdReaderAt, error) {
	decoder, err := zstd.NewReader(nil, opts...)
	if err != nil {
		return nil, err
	}
	r, err := szstd.NewReader(rs, decoder)
	if err != nil {
		decoder.Close()
		return nil, err
	}
	sr := &SzstdReaderAt{
		r:        r,
		decoder:  decoder,
		metadata: meta,
		pos:      0,
	}
	// Set initial position to the provided offset
	if _, err := sr.Seek(offset, io.SeekStart); err != nil {
		if err := sr.Close(); err != nil {
			return nil, err
		}
		return nil, err
	}
	return sr, nil
}

// Seek sets the offset for the next Read.
func (s *SzstdReaderAt) Seek(offset int64, whence int) (int64, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	pos, err := s.r.Seek(offset, whence)
	if err == nil {
		s.pos = pos
	}
	return pos, err
}

// Read reads sequentially from the current position, tracking pos.
func (s *SzstdReaderAt) Read(p []byte) (int, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	n, err := s.r.Read(p)
	if err == nil {
		s.pos += int64(n)
	}
	return n, err
}

// ReadAt reads data at the specified offset.
//
// It maps the requested uncompressed range [off, off+len(p)) onto the
// fixed-size blocks recorded in metadata, decodes the covered blocks in
// parallel (bounded by NumCPU), and copies the decoded bytes into p in
// block-index order. It does not use or update pos.
func (s *SzstdReaderAt) ReadAt(p []byte, off int64) (int, error) {
	if off < 0 {
		return 0, errors.New("invalid offset")
	}
	if off >= s.metadata.Size {
		return 0, io.EOF
	}
	endOff := min(off+int64(len(p)), s.metadata.Size)
	// Find all blocks covered by the range
	type blockInfo struct {
		index         int   // Block index
		offsetInBlock int64 // Offset within the block for starting reading
		bytesToRead   int64 // How many bytes to read from this block
	}
	var blocks []blockInfo
	uncompressedOffset := int64(0)
	currentOff := off
	// Every block except possibly the last holds exactly BlockSize
	// uncompressed bytes (see SzstdWriter.Write).
	for i := 0; i < len(s.metadata.BlockData)-1; i++ {
		blockUncompressedEnd := min(uncompressedOffset+int64(s.metadata.BlockSize), s.metadata.Size)
		if currentOff < blockUncompressedEnd && endOff > uncompressedOffset {
			offsetInBlock := max(0, currentOff-uncompressedOffset)
			bytesToRead := min(blockUncompressedEnd-uncompressedOffset-offsetInBlock, endOff-currentOff)
			blocks = append(blocks, blockInfo{
				index:         i,
				offsetInBlock: offsetInBlock,
				bytesToRead:   bytesToRead,
			})
			currentOff += bytesToRead
			if currentOff >= endOff {
				break
			}
		}
		uncompressedOffset = blockUncompressedEnd
	}
	if len(blocks) == 0 {
		return 0, io.EOF
	}
	// Parallel block decoding
	type decodeResult struct {
		index int
		data  []byte
		err   error
	}
	resultCh := make(chan decodeResult, len(blocks))
	var wg sync.WaitGroup
	// Bound in-flight decodes to the number of CPUs.
	sem := make(chan struct{}, runtime.NumCPU())
	for _, block := range blocks {
		wg.Add(1)
		go func(block blockInfo) {
			defer wg.Done()
			sem <- struct{}{}
			defer func() { <-sem }()
			// BlockData holds cumulative compressed offsets, so the
			// compressed bytes of block i live at
			// [BlockData[i], BlockData[i+1]).
			// NOTE(review): this relies on s.r.ReadAt returning raw
			// compressed bytes at compressed offsets - confirm against
			// the szstd package's Reader contract.
			startOffset := int64(s.metadata.BlockData[block.index])
			endOffset := int64(s.metadata.BlockData[block.index+1])
			compressedSize := endOffset - startOffset
			compressed := make([]byte, compressedSize)
			_, err := s.r.ReadAt(compressed, startOffset)
			if err != nil && err != io.EOF {
				resultCh <- decodeResult{index: block.index, err: err}
				return
			}
			decoded, err := s.decoder.DecodeAll(compressed, nil)
			if err != nil {
				resultCh <- decodeResult{index: block.index, err: err}
				return
			}
			resultCh <- decodeResult{index: block.index, data: decoded, err: nil}
		}(block)
	}
	go func() {
		wg.Wait()
		close(resultCh)
	}()
	// Collect results in block index order
	// Results may arrive out of order; buffer them in a map and copy into
	// p only when the next expected block (minIndex) has arrived.
	totalRead := 0
	results := make(map[int]decodeResult)
	expected := len(blocks)
	minIndex := blocks[0].index
	for res := range resultCh {
		results[res.index] = res
		for {
			if result, ok := results[minIndex]; ok {
				if result.err != nil {
					return 0, result.err
				}
				// find the corresponding blockInfo
				var blk blockInfo
				for _, b := range blocks {
					if b.index == result.index {
						blk = b
						break
					}
				}
				start := blk.offsetInBlock
				end := start + blk.bytesToRead
				copy(p[totalRead:totalRead+int(blk.bytesToRead)], result.data[start:end])
				totalRead += int(blk.bytesToRead)
				minIndex++
				if minIndex-blocks[0].index >= len(blocks) {
					break
				}
			} else {
				break
			}
		}
		if len(results) == expected && minIndex-blocks[0].index >= len(blocks) {
			break
		}
	}
	return totalRead, nil
}

// Close closes the SzstdReaderAt and underlying decoder.
func (s *SzstdReaderAt) Close() error {
	if err := s.r.Close(); err != nil {
		return err
	}
	s.decoder.Close()
	return nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/compress/gzip_handler.go
backend/compress/gzip_handler.go
package compress

import (
	"bufio"
	"bytes"
	"context"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"io"

	"github.com/buengese/sgzip"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/chunkedreader"
	"github.com/rclone/rclone/fs/hash"
)

// gzipModeHandler implements compressionModeHandler for gzip
type gzipModeHandler struct{}

// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold.
//
// It does this by actually gzip-compressing all of r into memory and
// comparing input to output size. compressionMode is unused here -
// presumably present to satisfy the compressionModeHandler interface;
// TODO confirm.
func (g *gzipModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
	var b bytes.Buffer
	var n int64
	w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
	if err != nil {
		return false, err
	}
	n, err = io.Copy(w, r)
	if err != nil {
		return false, err
	}
	err = w.Close()
	if err != nil {
		return false, err
	}
	ratio := float64(n) / float64(b.Len())
	return ratio > minCompressionRatio, nil
}

// newObjectGetOriginalSize returns the original file size from the metadata
func (g *gzipModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
	if meta.CompressionMetadataGzip == nil {
		return 0, errors.New("missing gzip metadata")
	}
	return meta.CompressionMetadataGzip.Size, nil
}

// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (g *gzipModeHandler) openGetReadCloser(
	ctx context.Context,
	o *Object,
	offset int64,
	limit int64,
	cr chunkedreader.ChunkedReader,
	closer io.Closer,
	options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
	var file io.Reader
	if offset != 0 {
		// Non-zero offset: use the seekable gzip reader, which needs the
		// stored block metadata to start decompression mid-stream.
		file, err = sgzip.NewReaderAt(cr, o.meta.CompressionMetadataGzip, offset)
	} else {
		file, err = sgzip.NewReader(cr)
	}
	if err != nil {
		return nil, err
	}
	var fileReader io.Reader
	if limit != -1 {
		fileReader = io.LimitReader(file, limit)
	} else {
		fileReader = file
	}
	// Return a ReadCloser
	return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
}

// processFileNameGetFileExtension returns the file extension for the given compression
// mode
func (g *gzipModeHandler) processFileNameGetFileExtension(compressionMode int) string {
	if compressionMode == Gzip {
		return gzFileExt
	}
	return ""
}

// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata.
//
// Compression runs in a background goroutine feeding a pipe so that the
// upload streams; the compression result (including the seekable gzip
// metadata) is collected from resultsGzip after the transfer completes.
func (g *gzipModeHandler) putCompress(
	ctx context.Context,
	f *Fs,
	in io.Reader,
	src fs.ObjectInfo,
	options []fs.OpenOption,
	mimeType string,
) (fs.Object, *ObjectMetadata, error) {
	// Unwrap reader accounting
	in, wrap := accounting.UnWrap(in)

	// Add the metadata hasher - MD5 of the UNCOMPRESSED data, stored in the metadata
	metaHasher := md5.New()
	in = io.TeeReader(in, metaHasher)

	// Compress the file
	pipeReader, pipeWriter := io.Pipe()
	resultsGzip := make(chan compressionResult[sgzip.GzipMetadata])
	go func() {
		gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
		if err != nil {
			resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: sgzip.GzipMetadata{}}
			close(resultsGzip)
			return
		}
		_, err = io.Copy(gz, in)
		// Keep the first error but always close both the gzip writer and
		// the pipe so the uploading side doesn't block forever.
		gzErr := gz.Close()
		if gzErr != nil && err == nil {
			err = gzErr
		}
		closeErr := pipeWriter.Close()
		if closeErr != nil && err == nil {
			err = closeErr
		}
		resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: gz.MetaData()}
		close(resultsGzip)
	}()
	wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has it's own buffering

	// Find a hash the destination supports to compute a hash of
	// the compressed data.
	ht := f.Fs.Hashes().GetOne()
	var hasher *hash.MultiHasher
	var err error
	if ht != hash.None {
		// unwrap the accounting again
		wrappedIn, wrap = accounting.UnWrap(wrappedIn)
		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
		if err != nil {
			return nil, nil, err
		}
		// add the hasher and re-wrap the accounting
		wrappedIn = io.TeeReader(wrappedIn, hasher)
		wrappedIn = wrap(wrappedIn)
	}

	// Transfer the data
	o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
	if err != nil {
		if o != nil {
			if removeErr := o.Remove(ctx); removeErr != nil {
				fs.Errorf(o, "Failed to remove partially transferred object: %v", removeErr)
			}
		}
		return nil, nil, err
	}
	// Check whether we got an error during compression
	result := <-resultsGzip
	if result.err != nil {
		if o != nil {
			if removeErr := o.Remove(ctx); removeErr != nil {
				fs.Errorf(o, "Failed to remove partially compressed object: %v", removeErr)
			}
		}
		return nil, nil, result.err
	}

	// Generate metadata
	meta := g.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)

	// Check the hashes of the compressed data if we were comparing them
	if ht != hash.None && hasher != nil {
		err = f.verifyObjectHash(ctx, o, hasher, ht)
		if err != nil {
			return nil, nil, err
		}
	}

	return o, meta, nil
}

// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (g *gzipModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
	return o, g.newMetadata(o.Size(), mode, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
}

// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (g *gzipModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata { meta, ok := cmeta.(sgzip.GzipMetadata) if !ok { panic("invalid cmeta type: expected sgzip.GzipMetadata") } objMeta := new(ObjectMetadata) objMeta.Size = size objMeta.Mode = mode objMeta.CompressionMetadataGzip = &meta objMeta.CompressionMetadataZstd = nil objMeta.MD5 = md5 objMeta.MimeType = mimeType return objMeta }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/compress/compress_test.go
backend/compress/compress_test.go
// Test Crypt filesystem interface package compress import ( "os" "path/filepath" "testing" _ "github.com/rclone/rclone/backend/drive" _ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/s3" _ "github.com/rclone/rclone/backend/swift" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" ) var defaultOpt = fstests.Opt{ RemoteName: "TestCompress:", NilObject: (*Object)(nil), UnimplementableFsMethods: []string{ "OpenWriterAt", "OpenChunkWriter", "MergeDirs", "DirCacheFlush", "PutUnchecked", "PutStream", "UserInfo", "Disconnect", }, TiersToTest: []string{"STANDARD", "STANDARD_IA"}, UnimplementableObjectMethods: []string{}, } // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &defaultOpt) } // TestRemoteGzip tests GZIP compression func TestRemoteGzip(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("Skipping as -remote set") } tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip") name := "TestCompressGzip" opt := defaultOpt opt.RemoteName = name + ":" opt.ExtraConfig = []fstests.ExtraConfigItem{ {Name: name, Key: "type", Value: "compress"}, {Name: name, Key: "remote", Value: tempdir}, {Name: name, Key: "mode", Value: "gzip"}, {Name: name, Key: "level", Value: "-1"}, } opt.QuickTestOK = true fstests.Run(t, &opt) } // TestRemoteZstd tests ZSTD compression func TestRemoteZstd(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("Skipping as -remote set") } tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-zstd") name := "TestCompressZstd" opt := defaultOpt opt.RemoteName = name + ":" opt.ExtraConfig = []fstests.ExtraConfigItem{ {Name: name, Key: "type", Value: "compress"}, {Name: name, Key: "remote", Value: tempdir}, {Name: name, Key: "mode", Value: "zstd"}, {Name: name, Key: "level", Value: "2"}, } opt.QuickTestOK = true fstests.Run(t, &opt) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/ftp/ftp_internal_test.go
backend/ftp/ftp_internal_test.go
package ftp

import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/object"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/rclone/rclone/lib/readers"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// settings is a set of backend option overrides keyed by option name.
type settings map[string]any

// deriveFs makes a new Fs pointing at the same remote and root as f but
// with the given option overrides applied via connection-string syntax.
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
	fsName := strings.Split(f.Name(), "{")[0] // strip off hash
	configMap := configmap.Simple{}
	for key, val := range opts {
		configMap[key] = fmt.Sprintf("%v", val)
	}
	remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), f.Root())
	fixFs, err := fs.NewFs(ctx, remote)
	require.NoError(t, err)
	return fixFs
}

// test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) {
	const (
		fileSize    = 100000000        // 100 MB (~95 MiB)
		idleTimeout = 1 * time.Second  // small because test server is local
		maxTime     = 10 * time.Second // prevent test hangup
	)

	if testing.Short() {
		t.Skip("not running with -short")
	}

	ctx := context.Background()
	// Temporarily tighten the global config so an idle-timeout failure
	// shows up quickly; restored on exit.
	ci := fs.GetConfig(ctx)
	saveLowLevelRetries := ci.LowLevelRetries
	saveTimeout := ci.Timeout
	defer func() {
		ci.LowLevelRetries = saveLowLevelRetries
		ci.Timeout = saveTimeout
	}()
	ci.LowLevelRetries = 1
	ci.Timeout = fs.Duration(idleTimeout)

	// upload puts a large pattern file via an Fs derived from f with the
	// given concurrency and shut_timeout, failing the test if the Put
	// does not finish within maxTime.
	upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
		fixFs := deriveFs(ctx, t, f, settings{
			"concurrency":  concurrency,
			"shut_timeout": shutTimeout,
		})

		// Make test object
		fileTime := fstest.Time("2020-03-08T09:30:00.000000000Z")
		meta := object.NewStaticObjectInfo("upload-timeout.test", fileTime, int64(fileSize), true, nil, nil)
		data := readers.NewPatternReader(int64(fileSize))

		// Run upload and ensure maximum time
		done := make(chan bool)
		deadline := time.After(maxTime)
		go func() {
			obj, err = fixFs.Put(ctx, data, meta)
			done <- true
		}()
		select {
		case <-done:
		case <-deadline:
			t.Fatalf("Upload got stuck for %v !", maxTime)
		}

		return obj, err
	}

	// non-zero shut_timeout should fix i/o errors
	obj, err := upload(f.opt.Concurrency, time.Second)
	assert.NoError(t, err)
	assert.NotNil(t, obj)
	if obj != nil {
		_ = obj.Remove(ctx)
	}
}

// rclone must support precise time with ProFtpd and PureFtpd out of the box.
// The VsFtpd server does not support the MFMT command to set file time like
// other servers but by default supports the MDTM command in the non-standard
// two-argument form for the same purpose.
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) testTimePrecision(t *testing.T) {
	name := f.Name()
	if pos := strings.Index(name, "{"); pos != -1 {
		name = name[:pos] // strip off hash
	}
	switch name {
	case "TestFTPProftpd", "TestFTPPureftpd", "TestFTPVsftpd":
		assert.LessOrEqual(t, f.Precision(), time.Second)
	}
}

// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
	t.Run("UploadTimeout", f.testUploadTimeout)
	t.Run("TimePrecision", f.testTimePrecision)
}

var _ fstests.InternalTester = (*Fs)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/ftp/ftp.go
backend/ftp/ftp.go
// Package ftp interfaces with FTP servers package ftp import ( "context" "crypto/tls" "errors" "fmt" "io" "net" "net/textproto" "net/url" "path" "runtime" "strings" "sync" "time" "github.com/jlaffaye/ftp" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/proxy" "github.com/rclone/rclone/lib/readers" ) var ( currentUser = env.CurrentUser() ) const ( minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "ftp", Description: "FTP", NewFs: NewFs, Options: []fs.Option{{ Name: "host", Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".", Required: true, Sensitive: true, }, { Name: "user", Help: "FTP username.", Default: currentUser, Sensitive: true, }, { Name: "port", Help: "FTP port number.", Default: 21, }, { Name: "pass", Help: "FTP password.", IsPassword: true, }, { Name: "tls", Help: `Use Implicit FTPS (FTP over TLS). When using implicit FTP over TLS the client connects using TLS right from the start which breaks compatibility with non-TLS-aware servers. This is usually served over port 990 rather than port 21. Cannot be used in combination with explicit FTPS.`, Default: false, }, { Name: "explicit_tls", Help: `Use Explicit FTPS (FTP over TLS). When using explicit FTP over TLS the client explicitly requests security from the server in order to upgrade a plain text connection to an encrypted one. 
Cannot be used in combination with implicit FTPS.`, Default: false, }, { Name: "concurrency", Help: strings.ReplaceAll(`Maximum number of FTP simultaneous connections, 0 for unlimited. Note that setting this is very likely to cause deadlocks so it should be used with care. If you are doing a sync or copy then make sure concurrency is one more than the sum of |--transfers| and |--checkers|. If you use |--check-first| then it just needs to be one more than the maximum of |--checkers| and |--transfers|. So for |concurrency 3| you'd use |--checkers 2 --transfers 2 --check-first| or |--checkers 1 --transfers 1|. `, "|", "`"), Default: 0, Advanced: true, }, { Name: "no_check_certificate", Help: "Do not verify the TLS certificate of the server.", Default: false, Advanced: true, }, { Name: "disable_epsv", Help: "Disable using EPSV even if server advertises support.", Default: false, Advanced: true, }, { Name: "disable_mlsd", Help: "Disable using MLSD even if server advertises support.", Default: false, Advanced: true, }, { Name: "disable_utf8", Help: "Disable using UTF-8 even if server advertises support.", Default: false, Advanced: true, }, { Name: "writing_mdtm", Help: "Use MDTM to set modification time (VsFtpd quirk)", Default: false, Advanced: true, }, { Name: "force_list_hidden", Help: "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.", Default: false, Advanced: true, }, { Name: "idle_timeout", Default: fs.Duration(60 * time.Second), Help: `Max time before closing idle connections. If no connections have been returned to the connection pool in the time given, rclone will empty the connection pool. Set to 0 to keep connections indefinitely. `, Advanced: true, }, { Name: "close_timeout", Help: "Maximum time to wait for a response to close.", Default: fs.Duration(60 * time.Second), Advanced: true, }, { Name: "tls_cache_size", Help: `Size of TLS session cache for all control and data connections. 
TLS cache allows to resume TLS sessions and reuse PSK between connections. Increase if default size is not enough resulting in TLS resumption errors. Enabled by default. Use 0 to disable.`, Default: 32, Advanced: true, }, { Name: "disable_tls13", Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)", Default: false, Advanced: true, }, { Name: "allow_insecure_tls_ciphers", Help: `Allow insecure TLS ciphers Setting this flag will allow the usage of the following TLS ciphers in addition to the secure defaults: - TLS_RSA_WITH_AES_128_GCM_SHA256 `, Default: false, Advanced: true, }, { Name: "shut_timeout", Help: "Maximum time to wait for data connection closing status.", Default: fs.Duration(60 * time.Second), Advanced: true, }, { Name: "ask_password", Default: false, Help: `Allow asking for FTP password when needed. If this is set and no password is supplied then rclone will ask for a password `, Advanced: true, }, { Name: "socks_proxy", Default: "", Help: `Socks 5 proxy host. Supports the format user:pass@host:port, user@host:port, host:port. Example: myUser:myPass@localhost:9005 `, Advanced: true, }, { Name: "http_proxy", Default: "", Help: `URL for HTTP CONNECT proxy Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb. `, Advanced: true, }, { Name: "no_check_upload", Default: false, Help: `Don't check the upload is OK Normally rclone will try to check the upload exists after it has uploaded a file to make sure the size and modification time are as expected. This flag stops rclone doing these checks. This enables uploading to folders which are write only. You will likely need to use the --inplace flag also if uploading to a write only folder. 
`, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // The FTP protocol can't handle trailing spaces // (for instance, pureftpd turns them into '_') Default: (encoder.Display | encoder.EncodeRightSpace), Examples: []fs.OptionExample{{ Value: "Asterisk,Ctl,Dot,Slash", Help: "ProFTPd can't handle '*' in file names", }, { Value: "BackSlash,Ctl,Del,Dot,RightSpace,Slash,SquareBracket", Help: "PureFTPd can't handle '[]' or '*' in file names", }, { Value: "Ctl,LeftPeriod,Slash", Help: "VsFTPd can't handle file names starting with dot", }}, }}, }) } // Options defines the configuration for this backend type Options struct { Host string `config:"host"` User string `config:"user"` Pass string `config:"pass"` Port string `config:"port"` TLS bool `config:"tls"` ExplicitTLS bool `config:"explicit_tls"` TLSCacheSize int `config:"tls_cache_size"` DisableTLS13 bool `config:"disable_tls13"` AllowInsecureTLSCiphers bool `config:"allow_insecure_tls_ciphers"` Concurrency int `config:"concurrency"` SkipVerifyTLSCert bool `config:"no_check_certificate"` DisableEPSV bool `config:"disable_epsv"` DisableMLSD bool `config:"disable_mlsd"` DisableUTF8 bool `config:"disable_utf8"` WritingMDTM bool `config:"writing_mdtm"` ForceListHidden bool `config:"force_list_hidden"` IdleTimeout fs.Duration `config:"idle_timeout"` CloseTimeout fs.Duration `config:"close_timeout"` ShutTimeout fs.Duration `config:"shut_timeout"` AskPassword bool `config:"ask_password"` Enc encoder.MultiEncoder `config:"encoding"` SocksProxy string `config:"socks_proxy"` HTTPProxy string `config:"http_proxy"` NoCheckUpload bool `config:"no_check_upload"` } // Fs represents a remote FTP server type Fs struct { name string // name of this remote root string // the path we are working on if any opt Options // parsed options ci *fs.ConfigInfo // global config features *fs.Features // optional features url string user string pass string dialAddr string tlsConf *tls.Config // default 
TLS client config poolMu sync.Mutex pool []*ftp.ServerConn drain *time.Timer // used to drain the pool when we stop using the connections tokens *pacer.TokenDispenser proxyURL *url.URL // address of HTTP proxy read from environment pacer *fs.Pacer // pacer for FTP connections fGetTime bool // true if the ftp library accepts GetTime fSetTime bool // true if the ftp library accepts SetTime fLstTime bool // true if the List call returns precise time } // Object describes an FTP file type Object struct { fs *Fs remote string info *FileInfo } // FileInfo is the metadata known about an FTP file type FileInfo struct { Name string Size uint64 ModTime time.Time precise bool // true if the time is precise IsDir bool } // ------------------------------------------------------------ // Name of this fs func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String returns a description of the FS func (f *Fs) String() string { return f.url } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // Enable debugging output type debugLog struct { mu sync.Mutex auth bool } // Write writes len(p) bytes from p to the underlying data stream. It returns // the number of bytes written from p (0 <= n <= len(p)) and any error // encountered that caused the write to stop early. Write must return a non-nil // error if it returns n < len(p). Write must not modify the slice data, even // temporarily. // // Implementations must not retain p. 
// // This writes debug info to the log func (dl *debugLog) Write(p []byte) (n int, err error) { dl.mu.Lock() defer dl.mu.Unlock() _, file, _, ok := runtime.Caller(1) direction := "FTP Rx" if ok && strings.Contains(file, "multi") { direction = "FTP Tx" } lines := strings.Split(string(p), "\r\n") if lines[len(lines)-1] == "" { lines = lines[:len(lines)-1] } for _, line := range lines { if !dl.auth && strings.HasPrefix(line, "PASS") { fs.Debugf(direction, "PASS *****") continue } fs.Debugf(direction, "%q", line) } return len(p), nil } // Return a *textproto.Error if err contains one or nil otherwise func textprotoError(err error) (errX *textproto.Error) { if errors.As(err, &errX) { return errX } return nil } // returns true if this FTP error should be retried func isRetriableFtpError(err error) bool { if errX := textprotoError(err); errX != nil { switch errX.Code { case ftp.StatusNotAvailable, ftp.StatusTransfertAborted: return true } } return false } // shouldRetry returns a boolean as to whether this err deserve to be // retried. It returns the err as a convenience func shouldRetry(ctx context.Context, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } if isRetriableFtpError(err) { return true, err } return fserrors.ShouldRetry(err), err } // Get a TLS config with a unique session cache. // // We can't share session caches between connections. 
// // See: https://github.com/rclone/rclone/issues/7234 func (f *Fs) tlsConfig() *tls.Config { var tlsConfig *tls.Config if f.opt.TLS || f.opt.ExplicitTLS { if f.tlsConf != nil { tlsConfig = f.tlsConf.Clone() } else { tlsConfig = new(tls.Config) } tlsConfig.ServerName = f.opt.Host if f.opt.SkipVerifyTLSCert { tlsConfig.InsecureSkipVerify = true } if f.opt.TLSCacheSize > 0 { tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize) } if f.opt.DisableTLS13 { tlsConfig.MaxVersion = tls.VersionTLS12 } if f.opt.AllowInsecureTLSCiphers { var ids []uint16 // Read default ciphers for _, cs := range tls.CipherSuites() { ids = append(ids, cs.ID) } tlsConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256) } } return tlsConfig } // Open a new connection to the FTP server. func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) { fs.Debugf(f, "Connecting to FTP server") // tls.Config for this connection only. Will be used for data // and control connections. tlsConfig := f.tlsConfig() // Make ftp library dial with fshttp dialer optionally using TLS initialConnection := true dial := func(network, address string) (conn net.Conn, err error) { fs.Debugf(f, "dial(%q,%q)", network, address) defer func() { if err != nil { fs.Debugf(f, "> dial: conn=%v, err=%v", conn, err) } else { fs.Debugf(f, "> dial: conn=%s->%s, err=%v", conn.LocalAddr(), conn.RemoteAddr(), err) } }() baseDialer := fshttp.NewDialer(ctx) if f.opt.SocksProxy != "" || f.proxyURL != nil { // We need to make the onward connection to f.opt.Host. However the FTP // library sets the host to the proxy IP after using EPSV or PASV so we need // to correct that here. 
var dialPort string _, dialPort, err = net.SplitHostPort(address) if err != nil { return nil, err } dialAddress := net.JoinHostPort(f.opt.Host, dialPort) if f.opt.SocksProxy != "" { conn, err = proxy.SOCKS5Dial(network, dialAddress, f.opt.SocksProxy, baseDialer) } else { conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer) } } else { conn, err = baseDialer.Dial(network, address) } if err != nil { return nil, err } // Connect using cleartext only for non TLS if tlsConfig == nil { return conn, nil } // Initial connection only needs to be cleartext for explicit TLS if f.opt.ExplicitTLS && initialConnection { initialConnection = false return conn, nil } // Upgrade connection to TLS tlsConn := tls.Client(conn, tlsConfig) // Do the initial handshake - tls.Client doesn't do it for us // If we do this then connections to proftpd/pureftpd lock up // See: https://github.com/rclone/rclone/issues/6426 // See: https://github.com/jlaffaye/ftp/issues/282 if false { err = tlsConn.HandshakeContext(ctx) if err != nil { _ = conn.Close() return nil, err } } return tlsConn, nil } ftpConfig := []ftp.DialOption{ ftp.DialWithContext(ctx), ftp.DialWithDialFunc(dial), } if f.opt.TLS { // Our dialer takes care of TLS but ftp library also needs tlsConf // as a trigger for sending PSBZ and PROT options to server. 
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig)) } else if f.opt.ExplicitTLS { ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig)) } if f.opt.DisableEPSV { ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true)) } if f.opt.DisableMLSD { ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true)) } if f.opt.DisableUTF8 { ftpConfig = append(ftpConfig, ftp.DialWithDisabledUTF8(true)) } if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff { ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout))) } if f.opt.WritingMDTM { ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true)) } if f.opt.ForceListHidden { ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true)) } if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 { ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0})) } err = f.pacer.Call(func() (bool, error) { c, err = ftp.Dial(f.dialAddr, ftpConfig...) 
if err != nil { return shouldRetry(ctx, err) } err = c.Login(f.user, f.pass) if err != nil { _ = c.Quit() return shouldRetry(ctx, err) } return false, nil }) if err != nil { err = fmt.Errorf("failed to make FTP connection to %q: %w", f.dialAddr, err) } return c, err } // Get an FTP connection from the pool, or open a new one func (f *Fs) getFtpConnection(ctx context.Context) (c *ftp.ServerConn, err error) { if f.opt.Concurrency > 0 { f.tokens.Get() } accounting.LimitTPS(ctx) f.poolMu.Lock() if len(f.pool) > 0 { c = f.pool[0] f.pool = f.pool[1:] } f.poolMu.Unlock() if c != nil { return c, nil } c, err = f.ftpConnection(ctx) if err != nil && f.opt.Concurrency > 0 { f.tokens.Put() } return c, err } // Return an FTP connection to the pool // // It nils the pointed to connection out so it can't be reused // // if err is not nil then it checks the connection is alive using a // NOOP request func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) { if f.opt.Concurrency > 0 { defer f.tokens.Put() } if pc == nil { return } c := *pc if c == nil { return } *pc = nil if err != nil { // If not a regular FTP error code then check the connection if tpErr := textprotoError(err); tpErr != nil { nopErr := c.NoOp() if nopErr != nil { fs.Debugf(f, "Connection failed, closing: %v", nopErr) _ = c.Quit() return } } } f.poolMu.Lock() f.pool = append(f.pool, c) if f.opt.IdleTimeout > 0 { f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer } f.poolMu.Unlock() } // Drain the pool of any connections func (f *Fs) drainPool(ctx context.Context) (err error) { f.poolMu.Lock() defer f.poolMu.Unlock() if f.opt.IdleTimeout > 0 { f.drain.Stop() } if len(f.pool) != 0 { fs.Debugf(f, "closing %d unused connections", len(f.pool)) } for i, c := range f.pool { if cErr := c.Quit(); cErr != nil { err = cErr } f.pool[i] = nil } f.pool = nil return err } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m 
configmap.Mapper) (ff fs.Fs, err error) { // defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err) // Parse config into Options struct opt := new(Options) err = configstruct.Set(m, opt) if err != nil { return nil, err } pass := "" if opt.AskPassword && opt.Pass == "" { pass = config.GetPassword("FTP server password") } else { pass, err = obscure.Reveal(opt.Pass) if err != nil { return nil, fmt.Errorf("NewFS decrypt password: %w", err) } } user := opt.User if user == "" { user = currentUser } port := opt.Port if port == "" { port = "21" } dialAddr := opt.Host + ":" + port protocol := "ftp://" if opt.TLS { protocol = "ftps://" } if opt.TLS && opt.ExplicitTLS { return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config") } u := protocol + path.Join(dialAddr+"/", root) ci := fs.GetConfig(ctx) f := &Fs{ name: name, root: root, opt: *opt, ci: ci, url: u, user: user, pass: pass, dialAddr: dialAddr, tokens: pacer.NewTokenDispenser(opt.Concurrency), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), tlsConf: fshttp.NewTransport(ctx).TLSClientConfig, } f.features = (&fs.Features{ CanHaveEmptyDirectories: true, PartialUploads: true, }).Fill(ctx, f) // get proxy URL if set if opt.HTTPProxy != "" { proxyURL, err := url.Parse(opt.HTTPProxy) if err != nil { return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err) } f.proxyURL = proxyURL } // set the pool drainer timer going if f.opt.IdleTimeout > 0 { f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) }) } // Make a connection and pool it to return errors early c, err := f.getFtpConnection(ctx) if err != nil { return nil, fmt.Errorf("NewFs: %w", err) } f.fGetTime = c.IsGetTimeSupported() f.fSetTime = c.IsSetTimeSupported() f.fLstTime = c.IsTimePreciseInList() if !f.fLstTime && f.fGetTime { f.features.SlowModTime = true } 
f.putFtpConnection(&c, nil) if root != "" { // Check to see if the root actually an existing file remote := path.Base(root) f.root = path.Dir(root) if f.root == "." { f.root = "" } _, err := f.NewObject(ctx, remote) if err != nil { if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) { // File doesn't exist so return old f f.root = root return f, nil } return nil, err } // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, err } // Shutdown the backend, closing any background tasks and any // cached connections. func (f *Fs) Shutdown(ctx context.Context) error { return f.drainPool(ctx) } // translateErrorFile turns FTP errors into rclone errors if possible for a file func translateErrorFile(err error) error { if errX := textprotoError(err); errX != nil { switch errX.Code { case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored: err = fs.ErrorObjectNotFound } } return err } // translateErrorDir turns FTP errors into rclone errors if possible for a directory func translateErrorDir(err error) error { if errX := textprotoError(err); errX != nil { switch errX.Code { case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored: err = fs.ErrorDirNotFound } } return err } // entryToStandard converts an incoming ftp.Entry to Standard encoding func (f *Fs) entryToStandard(entry *ftp.Entry) { // Skip . and .. as we don't want these encoded if entry.Name == "." || entry.Name == ".." { return } entry.Name = f.opt.Enc.ToStandardName(entry.Name) entry.Target = f.opt.Enc.ToStandardPath(entry.Target) } // dirFromStandardPath returns dir in encoded form. func (f *Fs) dirFromStandardPath(dir string) string { // Skip . and .. as we don't want these encoded if dir == "." || dir == ".." 
{ return dir } return f.opt.Enc.FromStandardPath(dir) } // findItem finds a directory entry for the name in its parent directory func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) { // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err) if remote == "" || remote == "." || remote == "/" { // if root, assume exists and synthesize an entry return &ftp.Entry{ Name: "", Type: ftp.EntryTypeFolder, Time: time.Now(), }, nil } c, err := f.getFtpConnection(ctx) if err != nil { return nil, fmt.Errorf("findItem: %w", err) } // returns TRUE if MLST is supported which is required to call GetEntry if c.IsTimePreciseInList() { entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote)) f.putFtpConnection(&c, err) if err != nil { err = translateErrorFile(err) if err == fs.ErrorObjectNotFound { return nil, nil } if errX := textprotoError(err); errX != nil { switch errX.Code { case ftp.StatusBadArguments: err = nil } } return nil, err } if entry != nil { f.entryToStandard(entry) } return entry, nil } dir := path.Dir(remote) base := path.Base(remote) files, err := c.List(f.dirFromStandardPath(dir)) f.putFtpConnection(&c, err) if err != nil { return nil, translateErrorFile(err) } for _, file := range files { f.entryToStandard(file) if file.Name == base { return file, nil } } return nil, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) { // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err) entry, err := f.findItem(ctx, path.Join(f.root, remote)) if err != nil { return nil, err } if entry != nil && entry.Type != ftp.EntryTypeFolder { o := &Object{ fs: f, remote: remote, } o.info = &FileInfo{ Name: remote, Size: entry.Size, ModTime: entry.Time, precise: f.fLstTime, } return o, nil } return nil, fs.ErrorObjectNotFound } // dirExists checks the directory pointed to by remote exists or not func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) { entry, err := f.findItem(ctx, path.Join(f.root, remote)) if err != nil { return false, fmt.Errorf("dirExists: %w", err) } if entry != nil && entry.Type == ftp.EntryTypeFolder { return true, nil } return false, nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. 
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { // defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err) c, err := f.getFtpConnection(ctx) if err != nil { return nil, fmt.Errorf("list: %w", err) } var listErr error var files []*ftp.Entry resultchan := make(chan []*ftp.Entry, 1) errchan := make(chan error, 1) go func() { result, err := c.List(f.dirFromStandardPath(path.Join(f.root, dir))) f.putFtpConnection(&c, err) if err != nil { errchan <- err return } resultchan <- result }() // Wait for List for up to Timeout seconds timer := time.NewTimer(f.ci.TimeoutOrInfinite()) select { case listErr = <-errchan: timer.Stop() return nil, translateErrorDir(listErr) case files = <-resultchan: timer.Stop() case <-timer.C: // if timer fired assume no error but connection dead fs.Errorf(f, "Timeout when waiting for List") return nil, errors.New("timeout when waiting for List") } // Annoyingly FTP returns success for a directory which // doesn't exist, so check it really doesn't exist if no // entries found. if len(files) == 0 { exists, err := f.dirExists(ctx, dir) if err != nil { return nil, fmt.Errorf("list: %w", err) } if !exists { return nil, fs.ErrorDirNotFound } } for i := range files { object := files[i] f.entryToStandard(object) newremote := path.Join(dir, object.Name) switch object.Type { case ftp.EntryTypeFolder: if object.Name == "." || object.Name == ".." 
{ continue } d := fs.NewDir(newremote, object.Time) entries = append(entries, d) default: o := &Object{ fs: f, remote: newremote, } info := &FileInfo{ Name: newremote, Size: object.Size, ModTime: object.Time, precise: f.fLstTime, } o.info = info entries = append(entries, o) } } return entries, nil } // Hashes are not supported func (f *Fs) Hashes() hash.Set { return 0 } // Precision shows whether modified time is supported or not depending on the // FTP server capabilities, namely whether FTP server: // - accepts the MDTM command to get file time (fGetTime) // or supports MLSD returning precise file time in the list (fLstTime) // - accepts the MFMT command to set file time (fSetTime) // or non-standard form of the MDTM command (fSetTime, too) // used by VsFtpd for the same purpose (WritingMDTM) // // See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html func (f *Fs) Precision() time.Duration { if (f.fGetTime || f.fLstTime) && f.fSetTime { return time.Second } return fs.ModTimeNotSupported } // Put in to the remote path with the modTime given of the given size // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // fs.Debugf(f, "Trying to put file %s", src.Remote()) err := f.mkParentDir(ctx, src.Remote()) if err != nil { return nil, fmt.Errorf("Put mkParentDir failed: %w", err) } o := &Object{ fs: f, remote: src.Remote(), } err = o.Update(ctx, in, src, options...) return o, err } // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) 
} // getInfo reads the FileInfo for a path func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) { // defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err) file, err := f.findItem(ctx, remote) if err != nil { return nil, err } else if file != nil { info := &FileInfo{ Name: remote, Size: file.Size, ModTime: file.Time, precise: f.fLstTime, IsDir: file.Type == ftp.EntryTypeFolder, } return info, nil } return nil, fs.ErrorObjectNotFound } // mkdir makes the directory and parents using unrooted paths func (f *Fs) mkdir(ctx context.Context, abspath string) error { abspath = path.Clean(abspath) if abspath == "." || abspath == "/" { return nil } fi, err := f.getInfo(ctx, abspath) if err == nil { if fi.IsDir { return nil } return fs.ErrorIsFile } else if err != fs.ErrorObjectNotFound { return fmt.Errorf("mkdir %q failed: %w", abspath, err) } parent := path.Dir(abspath) err = f.mkdir(ctx, parent) if err != nil { return err } c, connErr := f.getFtpConnection(ctx) if connErr != nil { return fmt.Errorf("mkdir: %w", connErr) } err = c.MakeDir(f.dirFromStandardPath(abspath)) f.putFtpConnection(&c, err) if errX := textprotoError(err); errX != nil { switch errX.Code { case ftp.StatusRequestedFileActionOK: // some ftp servers apparently return 250 instead of 257 err = nil // see: https://forum.rclone.org/t/rclone-pop-up-an-i-o-error-when-creating-a-folder-in-a-mounted-ftp-drive/44368/ case ftp.StatusFileUnavailable: // dir already exists: see issue #2181 err = nil case 521: // dir already exists: error number according to RFC 959: issue #2363 err = nil } } return err } // mkParentDir makes the parent of remote if necessary and any // directories above that func (f *Fs) mkParentDir(ctx context.Context, remote string) error { parent := path.Dir(remote) return f.mkdir(ctx, path.Join(f.root, parent)) } // Mkdir creates the directory if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { // defer fs.Trace(dir, "")("err=%v", 
&err) root := path.Join(f.root, dir) return f.mkdir(ctx, root) } // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { c, err := f.getFtpConnection(ctx) if err != nil { return fmt.Errorf("Rmdir: %w", translateErrorFile(err)) } err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir))) f.putFtpConnection(&c, err) return translateErrorDir(err) } // Move renames a remote file object func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } err := f.mkParentDir(ctx, remote) if err != nil { return nil, fmt.Errorf("Move mkParentDir failed: %w", err) } c, err := f.getFtpConnection(ctx) if err != nil { return nil, fmt.Errorf("Move: %w", err) } err = c.Rename( f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)), f.opt.Enc.FromStandardPath(path.Join(f.root, remote)), ) f.putFtpConnection(&c, err) if err != nil { return nil, fmt.Errorf("Move Rename failed: %w", err) } dstObj, err := f.NewObject(ctx, remote) if err != nil { return nil, fmt.Errorf("Move NewObject failed: %w", err) } return dstObj, nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } srcPath := path.Join(srcFs.root, srcRemote)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/ftp/ftp_test.go
backend/ftp/ftp_test.go
// Test FTP filesystem interface package ftp_test import ( "testing" "github.com/rclone/rclone/backend/ftp" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against rclone FTP server func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestFTPRclone:", NilObject: (*ftp.Object)(nil), }) } // TestIntegrationProftpd runs integration tests against proFTPd func TestIntegrationProftpd(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("skipping as -remote is set") } fstests.Run(t, &fstests.Opt{ RemoteName: "TestFTPProftpd:", NilObject: (*ftp.Object)(nil), }) } // TestIntegrationPureftpd runs integration tests against pureFTPd func TestIntegrationPureftpd(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("skipping as -remote is set") } fstests.Run(t, &fstests.Opt{ RemoteName: "TestFTPPureftpd:", NilObject: (*ftp.Object)(nil), }) } // TestIntegrationVsftpd runs integration tests against vsFTPd func TestIntegrationVsftpd(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("skipping as -remote is set") } fstests.Run(t, &fstests.Opt{ RemoteName: "TestFTPVsftpd:", NilObject: (*ftp.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/shade/shade.go
backend/shade/shade.go
// Package shade provides an interface to the Shade storage system. package shade import ( "bytes" "context" "encoding/base64" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "path" "path/filepath" "strings" "sync" "time" "github.com/rclone/rclone/backend/shade/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" ) const ( defaultEndpoint = "https://fs.shade.inc" // Default local development endpoint apiEndpoint = "https://api.shade.inc" // API endpoint for getting tokens minSleep = 10 * time.Millisecond // Minimum sleep time for the pacer maxSleep = 5 * time.Minute // Maximum sleep time for the pacer decayConstant = 1 // Bigger for slower decay, exponential defaultChunkSize = int64(64 * 1024 * 1024) // Default chunk size (64MB) minChunkSize = int64(5 * 1024 * 1024) // Minimum chunk size (5MB) - S3 requirement maxChunkSize = int64(5 * 1024 * 1024 * 1024) // Maximum chunk size (5GB) maxUploadParts = 10000 // maximum allowed number of parts in a multipart upload ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "shade", Description: "Shade FS", NewFs: NewFS, Options: []fs.Option{{ Name: "drive_id", Help: "The ID of your drive, see this in the drive settings. 
Individual rclone configs must be made per drive.", Required: true, Sensitive: false, }, { Name: "api_key", Help: "An API key for your account.", Required: true, Sensitive: true, }, { Name: "endpoint", Help: "Endpoint for the service.\n\nLeave blank normally.", Advanced: true, }, { Name: "chunk_size", Help: "Chunk size to use for uploading.\n\nAny files larger than this will be uploaded in chunks of this size.\n\nNote that this is stored in memory per transfer, so increasing it will\nincrease memory usage.\n\nMinimum is 5MB, maximum is 5GB.", Default: fs.SizeSuffix(defaultChunkSize), Advanced: true, }, { Name: "upload_concurrency", Help: `Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded concurrently for multipart uploads and copies.`, Default: 4, Advanced: true, }, { Name: "max_upload_parts", Help: "Maximum amount of parts in a multipart upload.", Default: maxUploadParts, Advanced: true, }, { Name: "token", Help: "JWT Token for performing Shade FS operations. Don't set this value - rclone will set it automatically", Default: "", Advanced: true, }, { Name: "token_expiry", Help: "JWT Token Expiration time. 
Don't set this value - rclone will set it automatically", Default: "", Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: encoder.Display | encoder.EncodeBackSlash | encoder.EncodeInvalidUtf8, }}, }) } // refreshJWTToken retrieves or refreshes the ShadeFS token func (f *Fs) refreshJWTToken(ctx context.Context) (string, error) { f.tokenMu.Lock() defer f.tokenMu.Unlock() // Return existing token if it's still valid checkTime := f.tokenExp.Add(-2 * time.Minute) //If the token expires in less than two minutes, just get a new one if f.token != "" && time.Now().Before(checkTime) { return f.token, nil } // Token has expired or doesn't exist, get a new one opts := rest.Opts{ Method: "GET", RootURL: apiEndpoint, Path: fmt.Sprintf("/workspaces/drives/%s/shade-fs-token", f.drive), ExtraHeaders: map[string]string{ "Authorization": f.opt.APIKey, }, } var err error var tokenStr string err = f.pacer.Call(func() (bool, error) { res, err := f.apiSrv.Call(ctx, &opts) if err != nil { fs.Debugf(f, "Token request failed: %v", err) return false, err } defer fs.CheckClose(res.Body, &err) if res.StatusCode != http.StatusOK { fs.Debugf(f, "Token request failed with code: %d", res.StatusCode) return res.StatusCode == http.StatusTooManyRequests, fmt.Errorf("failed to get ShadeFS token, status: %d", res.StatusCode) } // Read token directly as plain text tokenBytes, err := io.ReadAll(res.Body) if err != nil { return false, err } tokenStr = strings.TrimSpace(string(tokenBytes)) return false, nil }) if err != nil { return "", err } if tokenStr == "" { return "", fmt.Errorf("empty token received from server") } parts := strings.Split(tokenStr, ".") if len(parts) < 2 { return "", fmt.Errorf("invalid token received from server") } // Decode the payload (2nd part of the token) payload, err := base64.RawURLEncoding.DecodeString(parts[1]) if err != nil { return "", fmt.Errorf("invalid token received from server") } var claims 
map[string]interface{} if err := json.Unmarshal(payload, &claims); err != nil { return "", err } var exp int64 // Extract exp/ if v, ok := claims["exp"].(float64); ok { exp = int64(v) } f.token = tokenStr f.tokenExp = time.Unix(exp, 0) f.m.Set("token", f.token) f.m.Set("token_expiry", f.tokenExp.Format(time.RFC3339)) return f.token, nil } func (f *Fs) callAPI(ctx context.Context, method, path string, response interface{}) (*http.Response, error) { token, err := f.refreshJWTToken(ctx) if err != nil { return nil, err } opts := rest.Opts{ Method: method, Path: path, RootURL: f.endpoint, ExtraHeaders: map[string]string{ "Authorization": "Bearer " + token, }, } var res *http.Response err = f.pacer.Call(func() (bool, error) { if response != nil { res, err = f.srv.CallJSON(ctx, &opts, nil, response) } else { res, err = f.srv.Call(ctx, &opts) } if err != nil { return res != nil && res.StatusCode == http.StatusTooManyRequests, err } return false, nil }) return res, err } // Options defines the configuration for this backend type Options struct { Drive string `config:"drive_id"` APIKey string `config:"api_key"` Endpoint string `config:"endpoint"` ChunkSize fs.SizeSuffix `config:"chunk_size"` MaxUploadParts int `config:"max_upload_parts"` Concurrency int `config:"upload_concurrency"` Token string `config:"token"` TokenExpiry string `config:"token_expiry"` Encoding encoder.MultiEncoder } // Fs represents a shade remote type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options features *fs.Features // optional features srv *rest.Client // REST client for ShadeFS API apiSrv *rest.Client // REST client for Shade API endpoint string // endpoint for ShadeFS drive string // drive ID pacer *fs.Pacer // pacer for API calls token string // ShadeFS token tokenExp time.Time // Token expiration time tokenMu sync.Mutex m configmap.Mapper //Config Mapper to store tokens for future use recursive bool createdDirs 
map[string]bool // Cache of directories we've created createdDirMu sync.RWMutex // Mutex for createdDirs map } // Object describes a ShadeFS object type Object struct { fs *Fs // what this object is part of remote string // The remote path mtime int64 // Modified time size int64 // Size of the object original string //Presigned download link } // Directory describes a ShadeFS directory type Directory struct { fs *Fs // Reference to the filesystem remote string // Path to the directory mtime int64 // Modification time size int64 // Size (typically 0 for directories) } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String returns a description of the FS func (f *Fs) String() string { return fmt.Sprintf("Shade drive %s path %s", f.opt.Drive, f.root) } // Precision returns the precision of the ModTimes func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } token, err := f.refreshJWTToken(ctx) if err != nil { return nil, err } //Need to make sure destination exists err = f.ensureParentDirectories(ctx, remote) if err != nil { return nil, err } // Create temporary object o := &Object{ fs: f, remote: remote, mtime: srcObj.mtime, size: srcObj.size, } fromFullPath := path.Join(src.Fs().Root(), srcObj.remote) toFullPath := path.Join(f.root, remote) // Build query parameters params := url.Values{} params.Set("path", remote) params.Set("from", fromFullPath) params.Set("to", toFullPath) opts := rest.Opts{ Method: "POST", Path: fmt.Sprintf("/%s/fs/move?%s", f.drive, params.Encode()), ExtraHeaders: map[string]string{ "Authorization": "Bearer " + token, }, } err = o.fs.pacer.Call(func() (bool, error) { resp, err := f.srv.Call(ctx, &opts) if err != nil && resp.StatusCode == http.StatusBadRequest { fs.Debugf(f, "Bad token from server: %v", token) } return resp != nil && resp.StatusCode == http.StatusTooManyRequests, err }) if err != nil { return nil, err } return o, nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } //Need to check if destination exists fullPath := f.buildFullPath(dstRemote) var response api.ListDirResponse res, _ := f.callAPI(ctx, "GET", fmt.Sprintf("/%s/fs/attr?path=%s", f.drive, fullPath), &response) if res.StatusCode != http.StatusNotFound { return fs.ErrorDirExists } fullPathSrc := f.buildFullPath(srcRemote) fullPathSrcUnencoded, err := url.QueryUnescape(fullPathSrc) if err != nil { return err } fullPathDstUnencoded, err := url.QueryUnescape(fullPath) if err != nil { return err } err = f.ensureParentDirectories(ctx, dstRemote) if err != nil { return err } o := &Object{ fs: srcFs, remote: srcRemote, } _, err = f.Move(ctx, o, dstRemote) if err == nil { f.createdDirMu.Lock() f.createdDirs[fullPathSrcUnencoded] = false f.createdDirs[fullPathDstUnencoded] = true f.createdDirMu.Unlock() } return err } // Hashes returns the supported hash types func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // NewFS constructs an FS from the path, container:path func NewFS(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } fs.Debugf(nil, "Creating new ShadeFS backend with drive: %s", opt.Drive) f := &Fs{ name: name, root: root, opt: *opt, drive: opt.Drive, m: m, srv: rest.NewClient(fshttp.NewClient(ctx)).SetRoot(defaultEndpoint), apiSrv: rest.NewClient(fshttp.NewClient(ctx)), pacer: fs.NewPacer(ctx, 
pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), recursive: true, createdDirs: make(map[string]bool), token: opt.Token, } f.features = &fs.Features{ // Initially set minimal features // We'll expand this in a future iteration CanHaveEmptyDirectories: true, Move: f.Move, DirMove: f.DirMove, OpenChunkWriter: f.OpenChunkWriter, } if opt.TokenExpiry != "" { tokenExpiry, err := time.Parse(time.RFC3339, opt.TokenExpiry) if err != nil { fs.Errorf(nil, "Failed to parse token_expiry option: %v", err) } else { f.tokenExp = tokenExpiry } } // Set the endpoint if opt.Endpoint == "" { f.endpoint = defaultEndpoint } else { f.endpoint = opt.Endpoint } // Validate and set chunk size if opt.ChunkSize == 0 { opt.ChunkSize = fs.SizeSuffix(defaultChunkSize) } else if opt.ChunkSize < fs.SizeSuffix(minChunkSize) { return nil, fmt.Errorf("chunk_size %d is less than minimum %d", opt.ChunkSize, minChunkSize) } else if opt.ChunkSize > fs.SizeSuffix(maxChunkSize) { return nil, fmt.Errorf("chunk_size %d is greater than maximum %d", opt.ChunkSize, maxChunkSize) } // Ensure root doesn't have trailing slash f.root = strings.Trim(f.root, "/") // Check that we can log in by getting a token _, err = f.refreshJWTToken(ctx) if err != nil { return nil, fmt.Errorf("failed to get ShadeFS token: %w", err) } var response api.ListDirResponse _, _ = f.callAPI(ctx, "GET", fmt.Sprintf("/%s/fs/attr?path=%s", f.drive, url.QueryEscape(root)), &response) if response.Type == "file" { //Specified a single file path, not a directory. 
f.root = filepath.Dir(f.root) return f, fs.ErrorIsFile } return f, nil } // NewObject finds the Object at remote func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { fullPath := f.buildFullPath(remote) var response api.ListDirResponse res, err := f.callAPI(ctx, "GET", fmt.Sprintf("/%s/fs/attr?path=%s", f.drive, fullPath), &response) if res != nil && res.StatusCode == http.StatusNotFound { return nil, fs.ErrorObjectNotFound } if err != nil { return nil, err } if res != nil && res.StatusCode != http.StatusOK { return nil, fmt.Errorf("attr failed with status code: %d", res.StatusCode) } if response.Type == "tree" { return nil, fs.ErrorIsDir } if response.Type != "file" { return nil, fmt.Errorf("path is not a file: %s", remote) } return &Object{ fs: f, remote: remote, mtime: response.Mtime, size: response.Size, }, nil } // Put uploads a file func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // Create temporary object o := &Object{ fs: f, remote: src.Remote(), } return o, o.Update(ctx, in, src, options...) 
} // List the objects and directories in dir into entries func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { fullPath := f.buildFullPath(dir) var response []api.ListDirResponse res, err := f.callAPI(ctx, "GET", fmt.Sprintf("/%s/fs/listdir?path=%s", f.drive, fullPath), &response) if err != nil { fs.Debugf(f, "Error from List call: %v", err) return nil, fs.ErrorDirNotFound } if res.StatusCode == http.StatusNotFound { fs.Debugf(f, "Directory not found") return nil, fs.ErrorDirNotFound } if res.StatusCode != http.StatusOK { return nil, fmt.Errorf("listdir failed with status code: %d", res.StatusCode) } for _, r := range response { if r.Draft { continue } // Make path relative to f.root entryPath := strings.TrimPrefix(r.Path, "/") if f.root != "" { if !strings.HasPrefix(entryPath, f.root) { continue } entryPath = strings.TrimPrefix(strings.TrimPrefix(entryPath, f.root), "/") } if r.Type == "file" { entries = append(entries, &Object{ fs: f, remote: entryPath, mtime: r.Mtime, size: r.Size, }) } else if r.Type == "tree" { dirEntry := &Directory{ fs: f, remote: entryPath, mtime: r.Mtime, size: r.Size, // Typically 0 for directories } entries = append(entries, dirEntry) } else { fs.Debugf(f, "Unknown entry type: %s for path: %s", r.Type, entryPath) } } return entries, nil } // ensureParentDirectories creates all parent directories for a given path func (f *Fs) ensureParentDirectories(ctx context.Context, remotePath string) error { // Build the full path including root fullPath := remotePath if f.root != "" { fullPath = path.Join(f.root, remotePath) } // Get the parent directory path parentDir := path.Dir(fullPath) // If parent is root, empty, or current dir, nothing to create if parentDir == "" || parentDir == "." 
|| parentDir == "/" { return nil } // Ensure the full parent directory path exists return f.ensureDirectoryPath(ctx, parentDir) } // ensureDirectoryPath creates all directories in a path func (f *Fs) ensureDirectoryPath(ctx context.Context, dirPath string) error { // Check cache first f.createdDirMu.RLock() if f.createdDirs[dirPath] { f.createdDirMu.RUnlock() return nil } f.createdDirMu.RUnlock() // Build list of all directories that need to be created var dirsToCreate []string currentPath := dirPath for currentPath != "" && currentPath != "." && currentPath != "/" { // Check if this directory is already in cache f.createdDirMu.RLock() inCache := f.createdDirs[currentPath] f.createdDirMu.RUnlock() if !inCache { dirsToCreate = append([]string{currentPath}, dirsToCreate...) } currentPath = path.Dir(currentPath) } // If all directories are cached, we're done if len(dirsToCreate) == 0 { return nil } // Create each directory in order for _, dir := range dirsToCreate { fullPath := url.QueryEscape(dir) res, err := f.callAPI(ctx, "POST", fmt.Sprintf("/%s/fs/mkdir?path=%s", f.drive, fullPath), nil) // If directory already exists, that's fine if err == nil && res != nil { if res.StatusCode == http.StatusConflict || res.StatusCode == http.StatusUnprocessableEntity { f.createdDirMu.Lock() f.createdDirs[dir] = true f.createdDirMu.Unlock() } else if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated { fs.Debugf(f, "Failed to create directory %s: status code %d", dir, res.StatusCode) } else { f.createdDirMu.Lock() f.createdDirs[dir] = true f.createdDirMu.Unlock() } fs.CheckClose(res.Body, &err) } else if err != nil { fs.Debugf(f, "Error creating directory %s: %v", dir, err) // Continue anyway continue } } // Mark the full path as created in cache f.createdDirMu.Lock() f.createdDirs[dirPath] = true f.createdDirMu.Unlock() return nil } // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { // Build the 
full path for the directory fullPath := dir if dir == "" { // If dir is empty, we're creating the root directory if f.root != "" && f.root != "/" && f.root != "." { fullPath = f.root } else { // Nothing to create return nil } } else if f.root != "" { fullPath = path.Join(f.root, dir) } // Ensure all parent directories exist first if err := f.ensureDirectoryPath(ctx, fullPath); err != nil { return fmt.Errorf("failed to create directory path: %w", err) } // Add to cache f.createdDirMu.Lock() f.createdDirs[fullPath] = true f.createdDirMu.Unlock() return nil } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { fullPath := f.buildFullPath(dir) if fullPath == "" { return errors.New("cannot delete root directory") } var response []api.ListDirResponse res, err := f.callAPI(ctx, "GET", fmt.Sprintf("/%s/fs/listdir?path=%s", f.drive, fullPath), &response) if res != nil && res.StatusCode != http.StatusOK { return err } if len(response) > 0 { return fs.ErrorDirectoryNotEmpty } // Use the delete endpoint which handles both files and directories res, err = f.callAPI(ctx, "POST", fmt.Sprintf("/%s/fs/delete?path=%s", f.drive, fullPath), nil) if err != nil { return err } defer fs.CheckClose(res.Body, &err) if res.StatusCode == http.StatusNotFound { return fs.ErrorDirNotFound } if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated { return fmt.Errorf("rmdir failed with status code: %d", res.StatusCode) } f.createdDirMu.Lock() defer f.createdDirMu.Unlock() unescapedPath, err := url.QueryUnescape(fullPath) if err != nil { return err } f.createdDirs[unescapedPath] = false return nil } // Attempts to construct the full path for an object query-escaped func (f *Fs) buildFullPath(remote string) string { if f.root == "" { return url.QueryEscape(remote) } return url.QueryEscape(path.Join(f.root, remote)) } // ------------------------------------------------- // Object implementation // 
------------------------------------------------- // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // String returns a description of the Object func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Hash returns the requested hash of the object content func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { return "", hash.ErrUnsupported } // Size returns the size of the object func (o *Object) Size() int64 { return o.size } // ModTime returns the modification date of the object func (o *Object) ModTime(context.Context) time.Time { return time.Unix(0, o.mtime*int64(time.Millisecond)) } // SetModTime sets the modification time of the object func (o *Object) SetModTime(context.Context, time.Time) error { // Not implemented for now return fs.ErrorCantSetModTime } // Storable returns whether this object is storable func (o *Object) Storable() bool { return true } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { if o.size == 0 { // Empty file: return an empty reader return io.NopCloser(bytes.NewReader(nil)), nil } fs.FixRangeOption(options, o.size) token, err := o.fs.refreshJWTToken(ctx) if err != nil { return nil, err } fullPath := o.fs.buildFullPath(o.remote) // Construct the initial request URL downloadURL := fmt.Sprintf("%s/%s/fs/download?path=%s", o.fs.endpoint, o.fs.drive, fullPath) // Create HTTP request manually req, err := http.NewRequestWithContext(ctx, "GET", downloadURL, nil) if err != nil { fs.Debugf(o.fs, "Failed to create request: %v", err) return nil, fmt.Errorf("failed to create request: %w", err) } req.Header.Set("Authorization", "Bearer "+token) // Use pacer to manage retries and rate limiting var res *http.Response err = o.fs.pacer.Call(func() (bool, error) { if res != nil { err = res.Body.Close() if err != nil { return 
false, err } } client := http.Client{ CheckRedirect: func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse // Don't follow redirects }, } res, err = client.Do(req) if err != nil { return false, err } return res.StatusCode == http.StatusTooManyRequests, nil }) if err != nil { return nil, fmt.Errorf("download request failed: %w", err) } if res == nil { return nil, fmt.Errorf("no response received from initial request") } // Handle response based on status code switch res.StatusCode { case http.StatusOK: return res.Body, nil case http.StatusTemporaryRedirect: // Read the presigned URL from the body bodyBytes, err := io.ReadAll(res.Body) fs.CheckClose(res.Body, &err) // Close body after reading if err != nil { return nil, fmt.Errorf("failed to read redirect body: %w", err) } presignedURL := strings.TrimSpace(string(bodyBytes)) o.original = presignedURL //Save for later for hashing client := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(presignedURL) var downloadRes *http.Response opts := rest.Opts{ Method: "GET", Path: "", Options: options, } err = o.fs.pacer.Call(func() (bool, error) { downloadRes, err = client.Call(ctx, &opts) if err != nil { return false, err } if downloadRes == nil { return false, fmt.Errorf("failed to fetch presigned URL") } return downloadRes.StatusCode == http.StatusTooManyRequests, nil }) if err != nil { return nil, fmt.Errorf("presigned URL request failed: %w", err) } if downloadRes == nil { return nil, fmt.Errorf("no response received from presigned URL request") } if downloadRes.StatusCode != http.StatusOK && downloadRes.StatusCode != http.StatusPartialContent { body, _ := io.ReadAll(downloadRes.Body) fs.CheckClose(downloadRes.Body, &err) return nil, fmt.Errorf("presigned URL request failed with status %d: %q", downloadRes.StatusCode, string(body)) } return downloadRes.Body, nil default: body, _ := io.ReadAll(res.Body) fs.CheckClose(res.Body, &err) return nil, fmt.Errorf("download failed with status %d: %q", 
res.StatusCode, string(body)) } } // Update in to the object with the modTime given of the given size // // When called from outside an Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either // return an error or update the object properly (rather than e.g. calling panic). func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { //Need to ensure parent directories exist before updating err := o.fs.ensureParentDirectories(ctx, o.remote) if err != nil { return err } //If the source remote is different from this object's remote, as in we're updating a file with some other file's data, //need to construct a new object info in order to correctly upload to THIS object, not the src one var srcInfo fs.ObjectInfo if o.remote != src.Remote() { srcInfo = object.NewStaticObjectInfo(o.remote, src.ModTime(ctx), src.Size(), true, nil, o.Fs()) } else { srcInfo = src } return o.uploadMultipart(ctx, srcInfo, in, options...) 
} // Remove removes the object func (o *Object) Remove(ctx context.Context) error { fullPath := o.fs.buildFullPath(o.remote) res, err := o.fs.callAPI(ctx, "POST", fmt.Sprintf("/%s/fs/delete?path=%s", o.fs.drive, fullPath), nil) if err != nil { return err } defer fs.CheckClose(res.Body, &err) // Ensure body is closed if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated { return fmt.Errorf("object removal failed with status code: %d", res.StatusCode) } return nil } // ------------------------------------------------- // Directory implementation // ------------------------------------------------- // Remote returns the remote path func (d *Directory) Remote() string { return d.remote } // ModTime returns the modification time func (d *Directory) ModTime(context.Context) time.Time { return time.Unix(0, d.mtime*int64(time.Millisecond)) } // Size returns the size (0 for directories) func (d *Directory) Size() int64 { return d.size } // Fs returns the filesystem info func (d *Directory) Fs() fs.Info { return d.fs } // Hash is unsupported for directories func (d *Directory) Hash(context.Context, hash.Type) (string, error) { return "", hash.ErrUnsupported } // SetModTime is unsupported for directories func (d *Directory) SetModTime(context.Context, time.Time) error { return fs.ErrorCantSetModTime } // Storable indicates directories aren’t storable as files func (d *Directory) Storable() bool { return false } // Open returns an error for directories func (d *Directory) Open() (io.ReadCloser, error) { return nil, fs.ErrorIsDir } // Items returns the number of items in the directory (-1 if unknown) func (d *Directory) Items() int64 { return -1 // Unknown } // ID returns the directory ID (empty if not applicable) func (d *Directory) ID() string { return "" } func (d *Directory) String() string { return fmt.Sprintf("Directory: %s", d.remote) } var ( _ fs.Fs = &Fs{} _ fs.Object = &Object{} _ fs.Directory = &Directory{} _ fs.Mover = &Fs{} _ fs.DirMover = 
&Fs{} )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/shade/upload.go
backend/shade/upload.go
//multipart upload for shade package shade import ( "bytes" "context" "fmt" "io" "net/http" "net/url" "path" "sort" "sync" "github.com/rclone/rclone/backend/shade/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/chunksize" "github.com/rclone/rclone/lib/multipart" "github.com/rclone/rclone/lib/rest" ) var warnStreamUpload sync.Once type shadeChunkWriter struct { initToken string chunkSize int64 size int64 f *Fs o *Object completedParts []api.CompletedPart completedPartsMu sync.Mutex } // uploadMultipart handles multipart upload for larger files func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error { chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{ Open: o.fs, OpenOptions: options, }) if err != nil { return err } var shadeWriter = chunkWriter.(*shadeChunkWriter) o.size = shadeWriter.size return nil } // OpenChunkWriter returns the chunk size and a ChunkWriter // // Pass in the remote and the src object // You can also use options to hint at the desired chunk size func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) { // Temporary Object under construction o := &Object{ fs: f, remote: remote, } uploadParts := f.opt.MaxUploadParts if uploadParts < 1 { uploadParts = 1 } else if uploadParts > maxUploadParts { uploadParts = maxUploadParts } size := src.Size() fs.FixRangeOption(options, size) // calculate size of parts chunkSize := f.opt.ChunkSize // size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize // buffers here (default 64 MB). With a maximum number of parts (10,000) this will be a file of // 640 GB. 
if size == -1 { warnStreamUpload.Do(func() { fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v", chunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts))) }) } else { chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize) } token, err := o.fs.refreshJWTToken(ctx) if err != nil { return info, nil, fmt.Errorf("failed to get token: %w", err) } err = f.ensureParentDirectories(ctx, remote) if err != nil { return info, nil, fmt.Errorf("failed to ensure parent directories: %w", err) } fullPath := remote if f.root != "" { fullPath = path.Join(f.root, remote) } // Initiate multipart upload type initRequest struct { Path string `json:"path"` PartSize int64 `json:"partSize"` } reqBody := initRequest{ Path: fullPath, PartSize: int64(chunkSize), } var initResp struct { Token string `json:"token"` } opts := rest.Opts{ Method: "POST", Path: fmt.Sprintf("/%s/upload/multipart", o.fs.drive), RootURL: o.fs.endpoint, ExtraHeaders: map[string]string{ "Authorization": "Bearer " + token, }, Options: options, } err = o.fs.pacer.Call(func() (bool, error) { res, err := o.fs.srv.CallJSON(ctx, &opts, reqBody, &initResp) if err != nil { return res != nil && res.StatusCode == http.StatusTooManyRequests, err } return false, nil }) if err != nil { return info, nil, fmt.Errorf("failed to initiate multipart upload: %w", err) } chunkWriter := &shadeChunkWriter{ initToken: initResp.Token, chunkSize: int64(chunkSize), size: size, f: f, o: o, } info = fs.ChunkWriterInfo{ ChunkSize: int64(chunkSize), Concurrency: f.opt.Concurrency, LeavePartsOnError: false, } return info, chunkWriter, err } // WriteChunk will write chunk number with reader bytes, where chunk number >= 0 func (s *shadeChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) { token, err := s.f.refreshJWTToken(ctx) if err != nil { return 0, err } // Read chunk var chunk bytes.Buffer n, err := io.Copy(&chunk, reader) if n == 0 { 
return 0, nil } if err != nil { return 0, fmt.Errorf("failed to read chunk: %w", err) } // Get presigned URL for this part var partURL api.PartURL partOpts := rest.Opts{ Method: "POST", Path: fmt.Sprintf("/%s/upload/multipart/part/%d?token=%s", s.f.drive, chunkNumber+1, url.QueryEscape(s.initToken)), RootURL: s.f.endpoint, ExtraHeaders: map[string]string{ "Authorization": "Bearer " + token, }, } err = s.f.pacer.Call(func() (bool, error) { res, err := s.f.srv.CallJSON(ctx, &partOpts, nil, &partURL) if err != nil { return res != nil && res.StatusCode == http.StatusTooManyRequests, err } return false, nil }) if err != nil { return 0, fmt.Errorf("failed to get part URL: %w", err) } opts := rest.Opts{ Method: "PUT", RootURL: partURL.URL, Body: &chunk, ContentType: "", ContentLength: &n, } // Add headers var uploadRes *http.Response if len(partURL.Headers) > 0 { opts.ExtraHeaders = make(map[string]string) for k, v := range partURL.Headers { opts.ExtraHeaders[k] = v } } err = s.f.pacer.Call(func() (bool, error) { uploadRes, err = s.f.srv.Call(ctx, &opts) if err != nil { return uploadRes != nil && uploadRes.StatusCode == http.StatusTooManyRequests, err } return false, nil }) if err != nil { return 0, fmt.Errorf("failed to upload part %d: %w", chunk, err) } if uploadRes.StatusCode != http.StatusOK && uploadRes.StatusCode != http.StatusCreated { body, _ := io.ReadAll(uploadRes.Body) fs.CheckClose(uploadRes.Body, &err) return 0, fmt.Errorf("part upload failed with status %d: %s", uploadRes.StatusCode, string(body)) } // Get ETag from response etag := uploadRes.Header.Get("ETag") fs.CheckClose(uploadRes.Body, &err) s.completedPartsMu.Lock() defer s.completedPartsMu.Unlock() s.completedParts = append(s.completedParts, api.CompletedPart{ PartNumber: int32(chunkNumber + 1), ETag: etag, }) return n, nil } // Close complete chunked writer finalising the file. 
func (s *shadeChunkWriter) Close(ctx context.Context) error { // Complete multipart upload sort.Slice(s.completedParts, func(i, j int) bool { return s.completedParts[i].PartNumber < s.completedParts[j].PartNumber }) type completeRequest struct { Parts []api.CompletedPart `json:"parts"` } var completeBody completeRequest if s.completedParts == nil { completeBody = completeRequest{Parts: []api.CompletedPart{}} } else { completeBody = completeRequest{Parts: s.completedParts} } token, err := s.f.refreshJWTToken(ctx) if err != nil { return err } completeOpts := rest.Opts{ Method: "POST", Path: fmt.Sprintf("/%s/upload/multipart/complete?token=%s", s.f.drive, url.QueryEscape(s.initToken)), RootURL: s.f.endpoint, ExtraHeaders: map[string]string{ "Authorization": "Bearer " + token, }, } var response http.Response err = s.f.pacer.Call(func() (bool, error) { res, err := s.f.srv.CallJSON(ctx, &completeOpts, completeBody, &response) if err != nil && res == nil { return false, err } if res.StatusCode == http.StatusTooManyRequests { return true, err // Retry on 429 } if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated { body, _ := io.ReadAll(res.Body) return false, fmt.Errorf("complete multipart failed with status %d: %s", res.StatusCode, string(body)) } return false, nil }) if err != nil { return fmt.Errorf("failed to complete multipart upload: %w", err) } return nil } // Abort chunk write // // You can and should call Abort without calling Close. 
func (s *shadeChunkWriter) Abort(ctx context.Context) error { token, err := s.f.refreshJWTToken(ctx) if err != nil { return err } opts := rest.Opts{ Method: "POST", Path: fmt.Sprintf("/%s/upload/abort/multipart?token=%s", s.f.drive, url.QueryEscape(s.initToken)), RootURL: s.f.endpoint, ExtraHeaders: map[string]string{ "Authorization": "Bearer " + token, }, } err = s.f.pacer.Call(func() (bool, error) { res, err := s.f.srv.Call(ctx, &opts) if err != nil { fs.Debugf(s.f, "Failed to abort multipart upload: %v", err) return false, nil // Don't retry abort } if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated { fs.Debugf(s.f, "Abort returned status %d", res.StatusCode) } return false, nil }) if err != nil { return fmt.Errorf("failed to abort multipart upload: %w", err) } return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/shade/shade_test.go
backend/shade/shade_test.go
// Package shade_test runs rclone's generic integration test suite
// against the shade backend.
package shade_test

import (
	"testing"

	"github.com/rclone/rclone/backend/shade"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
//
// fstests.Run drives the standard backend test matrix against a
// remote named "TestShade:"; the remote must be configured for the
// tests to do real work.
func TestIntegration(t *testing.T) {
	name := "TestShade"
	fstests.Run(t, &fstests.Opt{
		RemoteName:      name + ":",
		NilObject:       (*shade.Object)(nil), // typed nil for the suite's nil-object checks
		SkipInvalidUTF8: true,                 // skip test cases using invalid UTF-8 names
		ExtraConfig: []fstests.ExtraConfigItem{
			// Delay to absorb eventual consistency after writes —
			// presumably seconds; confirm against fstests docs.
			{Name: name, Key: "eventually_consistent_delay", Value: "7"},
		},
	})
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/shade/api/types.go
backend/shade/api/types.go
// Package api has type definitions for shade
package api

// ListDirResponse -------------------------------------------------
// ListDirResponse is a single entry as returned by the shade
// listdir/attr API calls.
//
// Format from shade api
type ListDirResponse struct {
	Type  string `json:"type"`  // "file" or "tree"
	Path  string `json:"path"`  // Full path including root
	Ino   int    `json:"ino"`   // inode number
	Mtime int64  `json:"mtime"` // Modified time in milliseconds
	Ctime int64  `json:"ctime"` // Created time in milliseconds
	Size  int64  `json:"size"`  // Size in bytes
	Hash  string `json:"hash"`  // MD5 hash
	Draft bool   `json:"draft"` // Whether this is a draft file
}

// PartURL is a presigned URL (plus any extra headers the URL
// requires) used to transfer a single part of a multipart
// upload/download.
type PartURL struct {
	URL     string            `json:"url"`
	Headers map[string]string `json:"headers,omitempty"`
}

// CompletedPart records one successfully uploaded part of a
// multipart upload, for inclusion in the completion request.
type CompletedPart struct {
	ETag       string // ETag header returned by the server for the part
	PartNumber int32  // 1-based part index
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/all/all.go
backend/all/all.go
// Package all imports all the backends package all import ( // Active file systems _ "github.com/rclone/rclone/backend/alias" _ "github.com/rclone/rclone/backend/archive" _ "github.com/rclone/rclone/backend/azureblob" _ "github.com/rclone/rclone/backend/azurefiles" _ "github.com/rclone/rclone/backend/b2" _ "github.com/rclone/rclone/backend/box" _ "github.com/rclone/rclone/backend/cache" _ "github.com/rclone/rclone/backend/chunker" _ "github.com/rclone/rclone/backend/cloudinary" _ "github.com/rclone/rclone/backend/combine" _ "github.com/rclone/rclone/backend/compress" _ "github.com/rclone/rclone/backend/crypt" _ "github.com/rclone/rclone/backend/doi" _ "github.com/rclone/rclone/backend/drive" _ "github.com/rclone/rclone/backend/dropbox" _ "github.com/rclone/rclone/backend/fichier" _ "github.com/rclone/rclone/backend/filefabric" _ "github.com/rclone/rclone/backend/filelu" _ "github.com/rclone/rclone/backend/filescom" _ "github.com/rclone/rclone/backend/ftp" _ "github.com/rclone/rclone/backend/gofile" _ "github.com/rclone/rclone/backend/googlecloudstorage" _ "github.com/rclone/rclone/backend/googlephotos" _ "github.com/rclone/rclone/backend/hasher" _ "github.com/rclone/rclone/backend/hdfs" _ "github.com/rclone/rclone/backend/hidrive" _ "github.com/rclone/rclone/backend/http" _ "github.com/rclone/rclone/backend/iclouddrive" _ "github.com/rclone/rclone/backend/imagekit" _ "github.com/rclone/rclone/backend/internetarchive" _ "github.com/rclone/rclone/backend/jottacloud" _ "github.com/rclone/rclone/backend/koofr" _ "github.com/rclone/rclone/backend/linkbox" _ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/mailru" _ "github.com/rclone/rclone/backend/mega" _ "github.com/rclone/rclone/backend/memory" _ "github.com/rclone/rclone/backend/netstorage" _ "github.com/rclone/rclone/backend/onedrive" _ "github.com/rclone/rclone/backend/opendrive" _ "github.com/rclone/rclone/backend/oracleobjectstorage" _ "github.com/rclone/rclone/backend/pcloud" _ 
"github.com/rclone/rclone/backend/pikpak" _ "github.com/rclone/rclone/backend/pixeldrain" _ "github.com/rclone/rclone/backend/premiumizeme" _ "github.com/rclone/rclone/backend/protondrive" _ "github.com/rclone/rclone/backend/putio" _ "github.com/rclone/rclone/backend/qingstor" _ "github.com/rclone/rclone/backend/quatrix" _ "github.com/rclone/rclone/backend/s3" _ "github.com/rclone/rclone/backend/seafile" _ "github.com/rclone/rclone/backend/sftp" _ "github.com/rclone/rclone/backend/shade" _ "github.com/rclone/rclone/backend/sharefile" _ "github.com/rclone/rclone/backend/sia" _ "github.com/rclone/rclone/backend/smb" _ "github.com/rclone/rclone/backend/storj" _ "github.com/rclone/rclone/backend/sugarsync" _ "github.com/rclone/rclone/backend/swift" _ "github.com/rclone/rclone/backend/ulozto" _ "github.com/rclone/rclone/backend/union" _ "github.com/rclone/rclone/backend/uptobox" _ "github.com/rclone/rclone/backend/webdav" _ "github.com/rclone/rclone/backend/yandex" _ "github.com/rclone/rclone/backend/zoho" )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/filefabric/filefabric.go
backend/filefabric/filefabric.go
// Package filefabric provides an interface to Storage Made Easy's // Enterprise File Fabric storage system. package filefabric /* Docs: https://product-demo.smestorage.com/?p=apidoc Missing features: - M-Stream support - Oauth-like flow (soon being changed to oauth) // TestFileFabric maxFileLength = 14094 */ import ( "bytes" "context" "encoding/base64" "errors" "fmt" "io" "net/http" "net/url" "path" "strings" "sync" "sync/atomic" "time" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/backend/filefabric/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" ) const ( minSleep = 20 * time.Millisecond maxSleep = 10 * time.Second decayConstant = 2 // bigger for slower decay, exponential listChunks = 1000 // chunk size to read directory listings tokenLifeTime = 55 * time.Minute // 1 hour minus a bit of leeway defaultRootID = "" // default root ID emptyMimeType = "application/vnd.rclone.empty.file" ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "filefabric", Description: "Enterprise File Fabric", NewFs: NewFs, Options: []fs.Option{{ Name: "url", Help: "URL of the Enterprise File Fabric to connect to.", Required: true, Examples: []fs.OptionExample{{ Value: "https://storagemadeeasy.com", Help: "Storage Made Easy US", }, { Value: "https://eu.storagemadeeasy.com", Help: "Storage Made Easy EU", }, { Value: "https://yourfabric.smestorage.com", Help: "Connect to your Enterprise File Fabric", }}, }, { Name: "root_folder_id", Help: `ID of the root folder. Leave blank normally. 
Fill in to make rclone start with directory of a given ID. `, Sensitive: true, }, { Name: "permanent_token", Help: `Permanent Authentication Token. A Permanent Authentication Token can be created in the Enterprise File Fabric, on the users Dashboard under Security, there is an entry you'll see called "My Authentication Tokens". Click the Manage button to create one. These tokens are normally valid for several years. For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens `, Sensitive: true, }, { Name: "token", Help: `Session Token. This is a session token which rclone caches in the config file. It is usually valid for 1 hour. Don't set this value - rclone will set it automatically. `, Advanced: true, Sensitive: true, }, { Name: "token_expiry", Help: `Token expiry time. Don't set this value - rclone will set it automatically. `, Advanced: true, }, { Name: "version", Help: `Version read from the file fabric. Don't set this value - rclone will set it automatically. 
`, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.Display | encoder.EncodeInvalidUtf8), }}, }) } // Options defines the configuration for this backend type Options struct { URL string `config:"url"` RootFolderID string `config:"root_folder_id"` PermanentToken string `config:"permanent_token"` Token string `config:"token"` TokenExpiry string `config:"token_expiry"` Version string `config:"version"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote filefabric type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options features *fs.Features // optional features m configmap.Mapper // to save config srv *rest.Client // the connection to the server dirCache *dircache.DirCache // Map of directory path to directory id pacer *fs.Pacer // pacer for API calls tokenMu sync.Mutex // hold when reading the token token string // current access token tokenExpiry time.Time // time the current token expires tokenExpired atomic.Int32 canCopyWithName bool // set if detected that can use fi_name in copy precision time.Duration // precision reported } // Object describes a filefabric object // // Will definitely have info but maybe not meta type Object struct { fs *Fs // what this object is part of remote string // The remote path hasMetaData bool // whether info below has been set size int64 // size of the object modTime time.Time // modification time of the object id string // ID of the object contentType string // ContentType of object } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("filefabric root '%s'", f.root) } // Features returns the optional 
features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // parsePath parses a filefabric 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // Retry any of these var retryStatusCodes = []struct { code string sleep time.Duration }{ { // Can not create folder now. We are not able to complete the // requested operation with such name. We are processing // delete in that folder. Please try again later or use // another name. (error_background) code: "error_background", sleep: 1 * time.Second, }, } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience // try should be the number of the tries so far, counting up from 1 func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError, try int) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } if err != nil { return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } if status != nil && !status.OK() { err = status // return the error from the RPC code := status.GetCode() if code == "login_token_expired" { f.tokenExpired.Add(1) } else { for _, retryCode := range retryStatusCodes { if code == retryCode.code { if retryCode.sleep > 0 { // make this thread only sleep exponentially increasing extra time sleepTime := retryCode.sleep << (try - 1) fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", sleepTime, retryCode.code) time.Sleep(sleepTime) } return true, err } } } } return false, err } // readMetaDataForPath reads the metadata from the path func (f *Fs) readMetaDataForPath(ctx context.Context, rootID string, path string) (info *api.Item, err 
error) { var resp api.FileResponse _, err = f.rpc(ctx, "checkPathExists", params{ "path": f.opt.Enc.FromStandardPath(path), "pid": rootID, }, &resp, nil) if err != nil { return nil, fmt.Errorf("failed to check path exists: %w", err) } if resp.Exists != "y" { return nil, fs.ErrorObjectNotFound } return &resp.Item, nil /* // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound } return nil, err } found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool { if item.Name == leaf { info = item return true } return false }) if err != nil { return nil, err } if !found { return nil, fs.ErrorObjectNotFound } return info, nil */ } // Get the appliance info so we can set Version func (f *Fs) getApplianceInfo(ctx context.Context) error { var applianceInfo api.ApplianceInfo _, err := f.rpc(ctx, "getApplianceInfo", params{ "token": "*", }, &applianceInfo, nil) if err != nil { return fmt.Errorf("failed to read appliance version: %w", err) } f.opt.Version = applianceInfo.SoftwareVersionLabel f.m.Set("version", f.opt.Version) return nil } // Gets the token or gets a new one if necessary func (f *Fs) getToken(ctx context.Context) (token string, err error) { f.tokenMu.Lock() var refreshed = false defer func() { if refreshed { f.tokenExpired.Store(0) } f.tokenMu.Unlock() }() expired := f.tokenExpired.Load() != 0 if expired { fs.Debugf(f, "Token invalid - refreshing") } if f.token == "" { fs.Debugf(f, "Empty token - refreshing") expired = true } now := time.Now() if f.tokenExpiry.IsZero() || now.After(f.tokenExpiry) { fs.Debugf(f, "Token expired - refreshing") expired = true } if !expired { return f.token, nil } var info api.GetTokenByAuthTokenResponse _, err = f.rpc(ctx, "getTokenByAuthToken", params{ "token": "*", "authtoken": f.opt.PermanentToken, }, &info, nil) if err != nil { return "", 
fmt.Errorf("failed to get session token: %w", err) } refreshed = true now = now.Add(tokenLifeTime) f.token = info.Token f.tokenExpiry = now f.m.Set("token", f.token) f.m.Set("token_expiry", now.Format(time.RFC3339)) // Read appliance info when we update the token err = f.getApplianceInfo(ctx) if err != nil { return "", err } f.setCapabilities() return f.token, nil } // params for rpc type params map[string]any // rpc calls the rpc.php method of the SME file fabric // // This is an entry point to all the method calls. // // If result is nil then resp.Body will need closing func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKError, options []fs.OpenOption) (resp *http.Response, err error) { defer log.Trace(f, "%s(%+v) options=%+v", function, p, options)("result=%+v, err=%v", &result, &err) // Get the token from params if present otherwise call getToken var token string if tokenI, ok := p["token"]; !ok { token, err = f.getToken(ctx) if err != nil { return resp, err } } else { token = tokenI.(string) } var data = url.Values{ "function": {function}, "token": {token}, "apiformat": {"json"}, } for k, v := range p { data.Set(k, fmt.Sprint(v)) } opts := rest.Opts{ Method: "POST", Path: "/api/rpc.php", ContentType: "application/x-www-form-urlencoded", Options: options, } try := 0 err = f.pacer.Call(func() (bool, error) { try++ // Refresh the body each retry opts.Body = strings.NewReader(data.Encode()) resp, err = f.srv.CallJSON(ctx, &opts, nil, result) return f.shouldRetry(ctx, resp, err, result, try) }) if err != nil { return resp, err } return resp, nil } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } opt.URL = strings.TrimSuffix(opt.URL, "/") if opt.URL == "" { return nil, errors.New("url must be set") } root = parsePath(root) 
client := fshttp.NewClient(ctx) f := &Fs{ name: name, root: root, opt: *opt, m: m, srv: rest.NewClient(client).SetRoot(opt.URL), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), token: opt.Token, } f.features = (&fs.Features{ CaseInsensitive: true, CanHaveEmptyDirectories: true, ReadMimeType: true, WriteMimeType: true, }).Fill(ctx, f) if f.opt.Version == "" { err = f.getApplianceInfo(ctx) if err != nil { return nil, err } } f.setCapabilities() if opt.TokenExpiry != "" { tokenExpiry, err := time.Parse(time.RFC3339, opt.TokenExpiry) if err != nil { fs.Errorf(nil, "Failed to parse token_expiry option: %v", err) } else { f.tokenExpiry = tokenExpiry } } if opt.RootFolderID == "" { opt.RootFolderID = defaultRootID } f.dirCache = dircache.New(f.root, opt.RootFolderID, f) // Find out whether the root is a file or a directory or doesn't exist var errReturn error if f.root != "" { info, err := f.readMetaDataForPath(ctx, f.opt.RootFolderID, f.root) if err == nil && info != nil { if info.Type == api.ItemTypeFile { // Root is a file // Point the root to the parent directory f.root, _ = dircache.SplitPath(root) f.dirCache = dircache.New(f.root, opt.RootFolderID, f) errReturn = fs.ErrorIsFile // Cache the ID of the parent of the file as the root ID f.dirCache.Put(f.root, info.PID) } else if info.Type == api.ItemTypeFolder { // Root is a dir - cache its ID f.dirCache.Put(f.root, info.ID) } //} else { // Root is not found so a directory } } return f, errReturn } // set the capabilities of this version of software func (f *Fs) setCapabilities() { version := f.opt.Version if version == "" { version = "0000.00" } if version >= "2006.02" { f.precision = time.Second f.canCopyWithName = true } else { // times can be altered this much on renames f.precision = 1 * time.Hour f.canCopyWithName = false } } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	var err error
	if info != nil {
		// Set info from the item we were given - no API call needed
		err = o.setMetaData(info)
	} else {
		err = o.readMetaData(ctx) // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
//
// The name comparison is case insensitive to match the backend's
// CaseInsensitive feature flag.
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	// Find the leaf in pathID
	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
		if strings.EqualFold(item.Name, leaf) {
			pathIDOut = item.ID
			return true
		}
		return false
	})
	return pathIDOut, found, err
}

// CreateDir makes a directory with pathID as parent and name leaf
//
// Returns the ID of the newly created directory.
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
	//fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
	var info api.DoCreateNewFolderResponse
	_, err = f.rpc(ctx, "doCreateNewFolder", params{
		"fi_pid":  pathID,
		"fi_name": f.opt.Enc.FromStandardName(leaf),
	}, &info, nil)
	if err != nil {
		return "", fmt.Errorf("failed to create directory: %w", err)
	}
	// fmt.Printf("...Id %q\n", *info.Id)
	return info.Item.ID, nil
}

// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*api.Item) bool

// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// The listing is paginated listChunks items at a time, advancing the
// "from" parameter until info.Total items have been seen. Trashed
// items and items of unknown type are skipped.
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	var (
		p = params{
			"fi_pid":     dirID,
			"count":      listChunks,
			"subfolders": "y",
			// Cut down the things that are returned
			"options": "filelist|" + api.ItemFields,
		}
		n = 0
	)
OUTER:
	for {
		var info api.GetFolderContentsResponse
		_, err = f.rpc(ctx, "getFolderContents", p, &info, nil)
		if err != nil {
			return false, fmt.Errorf("failed to list directory: %w", err)
		}
		for i := range info.Items {
			item := &info.Items[i]
			if item.Type == api.ItemTypeFolder {
				if filesOnly {
					continue
				}
			} else if item.Type == api.ItemTypeFile {
				if directoriesOnly {
					continue
				}
			} else {
				fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
				continue
			}
			if item.Trash {
				continue
			}
			item.Name = f.opt.Enc.ToStandardName(item.Name)
			if fn(item) {
				found = true
				break OUTER
			}
		}
		// if didn't get any items then exit
		if len(info.Items) == 0 {
			break
		}
		n += len(info.Items)
		if n >= info.Total {
			break
		}
		p["from"] = n
	}
	return found, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } var iErr error _, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool { remote := path.Join(dir, info.Name) if info.Type == api.ItemTypeFolder { // cache the directory ID for later lookups f.dirCache.Put(remote, info.ID) d := fs.NewDir(remote, time.Time(info.Modified)).SetID(info.ID).SetItems(info.SubFolders) entries = append(entries, d) } else if info.Type == api.ItemTypeFile { o, err := f.newObjectWithInfo(ctx, remote, info) if err != nil { iErr = err return true } entries = append(entries, o) } return false }) if err != nil { return nil, err } if iErr != nil { return nil, iErr } return entries, nil } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Returns the object, leaf, directoryID and error. // // Used to create new objects func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { // Create the directory for the object if it doesn't exist leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true) if err != nil { return } // Temporary Object under construction o = &Object{ fs: f, remote: remote, } return o, leaf, directoryID, nil } // Put the object // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() modTime := src.ModTime(ctx) o, _, _, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } return o, o.Update(ctx, in, src, options...) 
} // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, err := f.dirCache.FindDir(ctx, dir, true) return err } // deleteObject removes an object by ID func (f *Fs) deleteObject(ctx context.Context, id string) (err error) { var info api.DeleteResponse _, err = f.rpc(ctx, "doDeleteFile", params{ "fi_id": id, "completedeletion": "n", }, &info, nil) if err != nil { return fmt.Errorf("failed to delete file: %w", err) } return nil } // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } dc := f.dirCache rootID, err := dc.FindDir(ctx, dir, false) if err != nil { return err } if check { found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool { fs.Debugf(dir, "Rmdir: contains file: %q", item.Name) return true }) if err != nil { return err } if found { return fs.ErrorDirectoryNotEmpty } } var info api.EmptyResponse _, err = f.rpc(ctx, "doDeleteFolder", params{ "fi_id": rootID, }, &info, nil) f.dirCache.FlushDir(dir) if err != nil { return fmt.Errorf("failed to remove directory: %w", err) } return nil } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { //fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) return f.purgeCheck(ctx, dir, true) } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return f.precision } // Copy src to this remote using server side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } err := srcObj.readMetaData(ctx) if err != nil { return nil, err } // Create temporary object dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } if !f.canCopyWithName && leaf != path.Base(srcObj.remote) { fs.Debugf(src, "Can't copy - can't change the name of files") return nil, fs.ErrorCantCopy } // Copy the object var info api.FileResponse p := params{ "fi_id": srcObj.id, "fi_pid": directoryID, "force": "y", "options": "allownoextension", // without this the filefabric adds extensions to files without } if f.canCopyWithName { p["fi_name"] = f.opt.Enc.FromStandardName(leaf) } _, err = f.rpc(ctx, "doCopyFile", p, &info, nil) if err != nil { return nil, fmt.Errorf("failed to copy file: %w", err) } err = dstObj.setMetaData(&info.Item) if err != nil { return nil, err } return dstObj, nil } // Purge deletes all the files and the container // // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, false) } // Wait for the background task to complete if necessary func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) { if taskID == "" || taskID == "0" { // No task to wait for return nil } start := time.Now() sleepTime := time.Second for { var info api.TasksResponse _, err = f.rpc(ctx, "getUserBackgroundTasks", params{ "taskid": taskID, }, &info, nil) if err != nil { return fmt.Errorf("failed to wait for task %s to complete: %w", taskID, err) } if len(info.Tasks) == 0 { 
// task has finished break } if len(info.Tasks) > 1 { fs.Errorf(f, "Unexpected number of tasks returned %d", len(info.Tasks)) } task := info.Tasks[0] if task.BtStatus == "c" { // task completed break } dt := time.Since(start) fs.Debugf(f, "Waiting for task ID %s: %s: to completed for %v - waited %v already", task.BtID, task.BtTitle, sleepTime, dt) time.Sleep(sleepTime) } return nil } // Rename the leaf of a file or directory in a directory func (f *Fs) renameLeaf(ctx context.Context, isDir bool, id string, newLeaf string) (item *api.Item, err error) { var info api.FileResponse method := "doRenameFile" if isDir { method = "doRenameFolder" } _, err = f.rpc(ctx, method, params{ "fi_id": id, "fi_name": newLeaf, }, &info, nil) if err != nil { return nil, fmt.Errorf("failed to rename leaf: %w", err) } err = f.waitForBackgroundTask(ctx, info.Status.TaskID) if err != nil { return nil, err } return &info.Item, nil } // move a file or folder // // This is complicated by the fact that there is an API to move files // between directories and a separate one to rename them. We try to // call the minimum number of API calls. func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDirectoryID, newDirectoryID string) (item *api.Item, err error) { newLeaf = f.opt.Enc.FromStandardName(newLeaf) oldLeaf = f.opt.Enc.FromStandardName(oldLeaf) doRenameLeaf := oldLeaf != newLeaf doMove := oldDirectoryID != newDirectoryID // Now rename the leaf to a temporary name if we are moving to // another directory to make sure we don't overwrite something // in the destination directory by accident if doRenameLeaf && doMove { tmpLeaf := newLeaf + "." 
+ random.String(8) item, err = f.renameLeaf(ctx, isDir, id, tmpLeaf) if err != nil { return nil, err } } // Move the object to a new directory (with the existing name) // if required if doMove { var info api.MoveFilesResponse method := "doMoveFiles" if isDir { method = "doMoveFolders" } _, err = f.rpc(ctx, method, params{ "fi_ids": id, "dir_id": newDirectoryID, }, &info, nil) if err != nil { return nil, fmt.Errorf("failed to move file to new directory: %w", err) } item = &info.Item err = f.waitForBackgroundTask(ctx, info.Status.TaskID) if err != nil { return nil, err } } // Rename the leaf to its final name if required if doRenameLeaf { item, err = f.renameLeaf(ctx, isDir, id, newLeaf) if err != nil { return nil, err } } return item, nil } // Move src to this remote using server side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } // find the source directoryID srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false) if err != nil { return nil, err } // Create temporary object dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } // Do the move item, err := f.move(ctx, false, srcObj.id, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID) if err != nil { return nil, err } // Set the metadata from what was returned or read it fresh if item == nil { err = dstObj.readMetaData(ctx) if err != nil { return nil, err } } else { err = dstObj.setMetaData(item) if err != nil { return nil, err } } return dstObj, nil } // DirMove moves src, srcRemote to this remote at dstRemote // 
using server side move operations. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) if err != nil { return err } // Do the move _, err = f.move(ctx, true, srcID, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID) if err != nil { return err } srcFs.dirCache.FlushDir(srcRemote) return nil } // CleanUp empties the trash func (f *Fs) CleanUp(ctx context.Context) (err error) { var info api.EmptyResponse _, err = f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil) if err != nil { return fmt.Errorf("failed to empty trash: %w", err) } return nil } // DirCacheFlush resets the directory cache - used in testing as an // optional interface func (f *Fs) DirCacheFlush() { f.dirCache.ResetRoot() } // Hashes returns the supported hash sets. 
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } // ------------------------------------------------------------ // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Hash of the object in the requested format as a lowercase hex string func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { return "", hash.ErrUnsupported } // Size returns the size of an object in bytes func (o *Object) Size() int64 { err := o.readMetaData(context.TODO()) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return 0 } if o.contentType == emptyMimeType { return 0 } return o.size } // setMetaData sets the metadata from info func (o *Object) setMetaData(info *api.Item) (err error) { if info.Type != api.ItemTypeFile { return fs.ErrorIsDir } o.hasMetaData = true o.size = info.Size o.modTime = time.Time(info.Modified) if !time.Time(info.LocalTime).IsZero() { o.modTime = time.Time(info.LocalTime) } o.id = info.ID o.contentType = info.ContentType return nil } // readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info func (o *Object) readMetaData(ctx context.Context) (err error) { if o.hasMetaData { return nil } rootID, err := o.fs.dirCache.RootID(ctx, false) if err != nil { if err == fs.ErrorDirNotFound { err = fs.ErrorObjectNotFound } return err } info, err := o.fs.readMetaDataForPath(ctx, rootID, o.remote) if err != nil { if apiErr, ok := err.(*api.Status); ok { if apiErr.Code == "not_found" || apiErr.Code == "trashed" { return fs.ErrorObjectNotFound } } return err } return o.setMetaData(info) } // ModTime returns the modification time of the object // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) ModTime(ctx 
context.Context) time.Time { err := o.readMetaData(ctx) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return time.Now() } return o.modTime } // modifyFile updates file metadata // // keyValues should be key, value pairs func (o *Object) modifyFile(ctx context.Context, keyValues [][2]string) error { var info api.FileResponse var data strings.Builder for _, keyValue := range keyValues { data.WriteString(keyValue[0]) data.WriteRune('=') data.WriteString(keyValue[1]) data.WriteRune('\n') } _, err := o.fs.rpc(ctx, "doModifyFile", params{ "fi_id": o.id, "data": data.String(), }, &info, nil) if err != nil { return fmt.Errorf("failed to update metadata: %w", err) } return o.setMetaData(&info.Item) } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { return o.modifyFile(ctx, [][2]string{ {"fi_localtime", api.Time(modTime).String()}, }) } // Storable returns a boolean showing whether this object storable func (o *Object) Storable() bool { return true } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { if o.id == "" { return nil, errors.New("can't download - no id") } if o.contentType == emptyMimeType { return io.NopCloser(bytes.NewReader([]byte{})), nil } fs.FixRangeOption(options, o.size) resp, err := o.fs.rpc(ctx, "getFile", params{ "fi_id": o.id, }, nil, options) if err != nil { return nil, err } return resp.Body, nil } // Update the object with the contents of the io.Reader, modTime and size // // If existing is set then it updates the object rather than creating a new one. // // The new object may have been created if an error is returned func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/filefabric/filefabric_test.go
backend/filefabric/filefabric_test.go
// Test filefabric filesystem interface
package filefabric_test

import (
	"testing"

	"github.com/rclone/rclone/backend/filefabric"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
//
// RemoteName names the remote in the test config that the fstests
// framework drives; NilObject lets the framework check typed-nil
// handling for this backend's Object type.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFileFabric:",
		NilObject:  (*filefabric.Object)(nil),
	})
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/filefabric/api/types.go
backend/filefabric/api/types.go
// Package api has type definitions for filefabric // // Converted from the API responses with help from https://mholt.github.io/json-to-go/ package api import ( "bytes" "encoding/json" "fmt" "reflect" "strings" "time" ) const ( // TimeFormat for parameters (UTC) timeFormatParameters = `2006-01-02 15:04:05` // "2020-08-11 10:10:04" for JSON parsing timeFormatJSON = `"` + timeFormatParameters + `"` ) // Time represents date and time information for the // filefabric API type Time time.Time // MarshalJSON turns a Time into JSON (in UTC) func (t *Time) MarshalJSON() (out []byte, err error) { timeString := (*time.Time)(t).UTC().Format(timeFormatJSON) return []byte(timeString), nil } var zeroTime = []byte(`"0000-00-00 00:00:00"`) // UnmarshalJSON turns JSON into a Time (in UTC) func (t *Time) UnmarshalJSON(data []byte) error { // Set a Zero time.Time if we receive a zero time input if bytes.Equal(data, zeroTime) { *t = Time(time.Time{}) return nil } newT, err := time.Parse(timeFormatJSON, string(data)) if err != nil { return err } *t = Time(newT) return nil } // String turns a Time into a string in UTC suitable for the API // parameters func (t Time) String() string { return time.Time(t).UTC().Format(timeFormatParameters) } // Int represents an integer which can be represented in JSON as a // quoted integer or an integer. type Int int // MarshalJSON turns a Int into JSON func (i *Int) MarshalJSON() (out []byte, err error) { return json.Marshal((*int)(i)) } // UnmarshalJSON turns JSON into a Int func (i *Int) UnmarshalJSON(data []byte) error { if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' { data = data[1 : len(data)-1] } return json.Unmarshal(data, (*int)(i)) } // String represents an string which can be represented in JSON as a // quoted string or an integer. 
type String string // MarshalJSON turns a String into JSON func (s *String) MarshalJSON() (out []byte, err error) { return json.Marshal((*string)(s)) } // UnmarshalJSON turns JSON into a String func (s *String) UnmarshalJSON(data []byte) error { err := json.Unmarshal(data, (*string)(s)) if err != nil { *s = String(data) } return nil } // Status return returned in all status responses type Status struct { Code string `json:"status"` Message string `json:"statusmessage"` TaskID String `json:"taskid"` // Warning string `json:"warning"` // obsolete } // Status satisfies the error interface func (e *Status) Error() string { return fmt.Sprintf("%s (%s)", e.Message, e.Code) } // OK returns true if the status is all good func (e *Status) OK() bool { return e.Code == "ok" } // GetCode returns the status code if any func (e *Status) GetCode() string { return e.Code } // OKError defines an interface for items which can be OK or be an error type OKError interface { error OK() bool GetCode() string } // Check Status satisfies the OKError interface var _ OKError = (*Status)(nil) // EmptyResponse is response which just returns the error condition type EmptyResponse struct { Status } // GetTokenByAuthTokenResponse is the response to getTokenByAuthToken type GetTokenByAuthTokenResponse struct { Status Token string `json:"token"` UserID string `json:"userid"` AllowLoginRemember string `json:"allowloginremember"` LastLogin Time `json:"lastlogin"` AutoLoginCode string `json:"autologincode"` } // ApplianceInfo is the response to getApplianceInfo type ApplianceInfo struct { Status Sitetitle string `json:"sitetitle"` OauthLoginSupport string `json:"oauthloginsupport"` IsAppliance string `json:"isappliance"` SoftwareVersion string `json:"softwareversion"` SoftwareVersionLabel string `json:"softwareversionlabel"` } // GetFolderContentsResponse is returned from getFolderContents type GetFolderContentsResponse struct { Status Total int `json:"total,string"` Items []Item `json:"filelist"` 
Folder Item `json:"folder"` From Int `json:"from"` //Count int `json:"count"` Pid string `json:"pid"` RefreshResult Status `json:"refreshresult"` // Curfolder Item `json:"curfolder"` - sometimes returned as "ROOT"? Parents []Item `json:"parents"` CustomPermissions CustomPermissions `json:"custompermissions"` } // ItemType determine whether it is a file or a folder type ItemType uint8 // Types of things in Item const ( ItemTypeFile ItemType = 0 ItemTypeFolder ItemType = 1 ) // Item ia a File or a Folder type Item struct { ID string `json:"fi_id"` PID string `json:"fi_pid"` // UID string `json:"fi_uid"` Name string `json:"fi_name"` // S3Name string `json:"fi_s3name"` // Extension string `json:"fi_extension"` // Description string `json:"fi_description"` Type ItemType `json:"fi_type,string"` // Created Time `json:"fi_created"` Size int64 `json:"fi_size,string"` ContentType string `json:"fi_contenttype"` // Tags string `json:"fi_tags"` // MainCode string `json:"fi_maincode"` // Public int `json:"fi_public,string"` // Provider string `json:"fi_provider"` // ProviderFolder string `json:"fi_providerfolder"` // folder // Encrypted int `json:"fi_encrypted,string"` // StructType string `json:"fi_structtype"` // Bname string `json:"fi_bname"` // folder // OrgID string `json:"fi_orgid"` // Favorite int `json:"fi_favorite,string"` // IspartOf string `json:"fi_ispartof"` // folder Modified Time `json:"fi_modified"` // LastAccessed Time `json:"fi_lastaccessed"` // Hits int64 `json:"fi_hits,string"` // IP string `json:"fi_ip"` // folder // BigDescription string `json:"fi_bigdescription"` LocalTime Time `json:"fi_localtime"` // OrgfolderID string `json:"fi_orgfolderid"` // StorageIP string `json:"fi_storageip"` // folder // RemoteTime Time `json:"fi_remotetime"` // ProviderOptions string `json:"fi_provideroptions"` // Access string `json:"fi_access"` // Hidden string `json:"fi_hidden"` // folder // VersionOf string `json:"fi_versionof"` Trash bool `json:"trash"` // Isbucket string 
`json:"isbucket"` // filelist SubFolders int64 `json:"subfolders"` // folder } // ItemFields is a | separated list of fields in Item var ItemFields = mustFields(Item{}) // fields returns the JSON fields in use by opt as a | separated // string. func fields(opt any) (pipeTags string, err error) { var tags []string def := reflect.ValueOf(opt) defType := def.Type() for i := range def.NumField() { field := defType.Field(i) tag, ok := field.Tag.Lookup("json") if !ok { continue } if comma := strings.IndexRune(tag, ','); comma >= 0 { tag = tag[:comma] } if tag == "" { continue } tags = append(tags, tag) } return strings.Join(tags, "|"), nil } // mustFields returns the JSON fields in use by opt as a | separated // string. It panics on failure. func mustFields(opt any) string { tags, err := fields(opt) if err != nil { panic(err) } return tags } // CustomPermissions is returned as part of GetFolderContentsResponse type CustomPermissions struct { Upload string `json:"upload"` CreateSubFolder string `json:"createsubfolder"` Rename string `json:"rename"` Delete string `json:"delete"` Move string `json:"move"` ManagePermissions string `json:"managepermissions"` ListOnly string `json:"listonly"` VisibleInTrash string `json:"visibleintrash"` } // DoCreateNewFolderResponse is response from foCreateNewFolder type DoCreateNewFolderResponse struct { Status Item Item `json:"file"` } // DoInitUploadResponse is response from doInitUpload type DoInitUploadResponse struct { Status ProviderID string `json:"providerid"` UploadCode string `json:"uploadcode"` FileType string `json:"filetype"` DirectUploadSupport string `json:"directuploadsupport"` ResumeAllowed string `json:"resumeallowed"` } // UploaderResponse is returned from /cgi-bin/uploader/uploader1.cgi // // Sometimes the response is returned as XML and sometimes as JSON type UploaderResponse struct { FileSize int64 `xml:"filesize" json:"filesize,string"` MD5 string `xml:"md5" json:"md5"` Success string `xml:"success" json:"success"` } 
// UploadStatus is returned from getUploadStatus
//
// Numeric fields tagged with ",string" arrive quoted in the JSON.
type UploadStatus struct {
	Status
	UploadCode     string `json:"uploadcode"`
	Metafile       string `json:"metafile"`
	Percent        int    `json:"percent,string"`
	Uploaded       int64  `json:"uploaded,string"`
	Size           int64  `json:"size,string"`
	Filename       string `json:"filename"`
	Nofile         string `json:"nofile"`
	Completed      string `json:"completed"`
	Completsuccess string `json:"completsuccess"`
	Completerror   string `json:"completerror"`
}

// DoCompleteUploadResponse is the response to doCompleteUpload
type DoCompleteUploadResponse struct {
	Status
	UploadedSize int64  `json:"uploadedsize,string"`
	StorageIP    string `json:"storageip"`
	UploadedName string `json:"uploadedname"`
	// Versioned []interface{} `json:"versioned"`
	// VersionedID int `json:"versionedid"`
	// Comment interface{} `json:"comment"`
	File Item `json:"file"`
	// UsSize string `json:"us_size"`
	// PaSize string `json:"pa_size"`
	// SpaceInfo SpaceInfo `json:"spaceinfo"`
}

// Providers is returned as part of UploadResponse
type Providers struct {
	Max     string `json:"max"`
	Used    string `json:"used"`
	ID      string `json:"id"`
	Private string `json:"private"`
	Limit   string `json:"limit"`
	Percent int    `json:"percent"`
}

// Total is returned as part of UploadResponse
type Total struct {
	Max        string `json:"max"`
	Used       string `json:"used"`
	ID         string `json:"id"`
	Priused    string `json:"priused"`
	Primax     string `json:"primax"`
	Limit      string `json:"limit"`
	Percent    int    `json:"percent"`
	Pripercent int    `json:"pripercent"`
}

// UploadResponse is returned as part of SpaceInfo
type UploadResponse struct {
	Providers []Providers `json:"providers"`
	Total     Total       `json:"total"`
}

// SpaceInfo is returned as part of DoCompleteUploadResponse
type SpaceInfo struct {
	Response UploadResponse `json:"response"`
	Status   string         `json:"status"`
}

// DeleteResponse is returned from doDeleteFile
type DeleteResponse struct {
	Status
	Deleted        []string `json:"deleted"`
	Errors         []any    `json:"errors"`
	ID             string   `json:"fi_id"`
	BackgroundTask int      `json:"backgroundtask"`
	UsSize         string   `json:"us_size"`
	PaSize         string   `json:"pa_size"`
	//SpaceInfo SpaceInfo `json:"spaceinfo"`
}

// FileResponse is returned from doRenameFile
type FileResponse struct {
	Status
	Item   Item   `json:"file"`
	Exists string `json:"exists"`
}

// MoveFilesResponse is returned from doMoveFiles
type MoveFilesResponse struct {
	Status
	Filesleft         string   `json:"filesleft"`
	Addedtobackground string   `json:"addedtobackground"`
	Moved             string   `json:"moved"`
	Item              Item     `json:"file"`
	IDs               []string `json:"fi_ids"`
	Length            int      `json:"length"`
	DirID             string   `json:"dir_id"`
	MovedObjects      []Item   `json:"movedobjects"`
	// FolderTasks []interface{} `json:"foldertasks"`
}

// TasksResponse is the response to getUserBackgroundTasks
type TasksResponse struct {
	Status
	Tasks []Task `json:"tasks"`
	Total string `json:"total"`
}

// BtData is part of TasksResponse
type BtData struct {
	Callback string `json:"callback"`
}

// Task describes a task returned in TasksResponse
//
// Fields mirror the API's bt_* columns; all are transported as strings.
type Task struct {
	BtID             string `json:"bt_id"`
	UsID             string `json:"us_id"`
	BtType           string `json:"bt_type"`
	BtData           BtData `json:"bt_data"`
	BtStatustext     string `json:"bt_statustext"`
	BtStatusdata     string `json:"bt_statusdata"`
	BtMessage        string `json:"bt_message"`
	BtProcent        string `json:"bt_procent"`
	BtAdded          string `json:"bt_added"`
	BtStatus         string `json:"bt_status"`
	BtCompleted      string `json:"bt_completed"`
	BtTitle          string `json:"bt_title"`
	BtCredentials    string `json:"bt_credentials"`
	BtHidden         string `json:"bt_hidden"`
	BtAutoremove     string `json:"bt_autoremove"`
	BtDevsite        string `json:"bt_devsite"`
	BtPriority       string `json:"bt_priority"`
	BtReport         string `json:"bt_report"`
	BtSitemarker     string `json:"bt_sitemarker"`
	BtExecuteafter   string `json:"bt_executeafter"`
	BtCompletestatus string `json:"bt_completestatus"`
	BtSubtype        string `json:"bt_subtype"`
	BtCanceled       string `json:"bt_canceled"`
	Callback         string `json:"callback"`
	CanBeCanceled    bool   `json:"canbecanceled"`
	CanBeRestarted   bool   `json:"canberestarted"`
	Type             string `json:"type"`
	Status           string `json:"status"`
	Settings         string `json:"settings"`
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/internetarchive/internetarchive.go
backend/internetarchive/internetarchive.go
// Package internetarchive provides an interface to Internet Archive's Item
// via their native API rather than using S3-compatible endpoints.
package internetarchive

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"regexp"
	"slices"
	"strconv"
	"strings"
	"time"

	"github.com/ncw/swift/v2"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/lib/rest"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "internetarchive",
		Description: "Internet Archive",
		NewFs:       NewFs,
		MetadataInfo: &fs.MetadataInfo{
			System: map[string]fs.MetadataHelp{
				"name": {
					Help:     "Full file path, without the bucket part",
					Type:     "filename",
					Example:  "backend/internetarchive/internetarchive.go",
					ReadOnly: true,
				},
				"source": {
					Help:     "The source of the file",
					Type:     "string",
					Example:  "original",
					ReadOnly: true,
				},
				"mtime": {
					Help:     "Time of last modification, managed by Rclone",
					Type:     "RFC 3339",
					Example:  "2006-01-02T15:04:05.999999999Z",
					ReadOnly: true,
				},
				"size": {
					Help:     "File size in bytes",
					Type:     "decimal number",
					Example:  "123456",
					ReadOnly: true,
				},
				"md5": {
					Help:     "MD5 hash calculated by Internet Archive",
					Type:     "string",
					Example:  "01234567012345670123456701234567",
					ReadOnly: true,
				},
				"crc32": {
					Help:     "CRC32 calculated by Internet Archive",
					Type:     "string",
					Example:  "01234567",
					ReadOnly: true,
				},
				"sha1": {
					Help:     "SHA1 hash calculated by Internet Archive",
					Type:     "string",
					Example:  "0123456701234567012345670123456701234567",
					ReadOnly: true,
				},
				"format": {
					Help:     "Name of format identified by Internet Archive",
					Type:     "string",
					Example:  "Comma-Separated Values",
					ReadOnly: true,
				},
				"old_version": {
					Help:     "Whether the file was replaced and moved by keep-old-version flag",
					Type:     "boolean",
					Example:  "true",
					ReadOnly: true,
				},
				"viruscheck": {
					Help:     "The last time viruscheck process was run for the file (?)",
					Type:     "unixtime",
					Example:  "1654191352",
					ReadOnly: true,
				},
				"summation": {
					Help:     "Check https://forum.rclone.org/t/31922 for how it is used",
					Type:     "string",
					Example:  "md5",
					ReadOnly: true,
				},
				"rclone-ia-mtime": {
					Help:    "Time of last modification, managed by Internet Archive",
					Type:    "RFC 3339",
					Example: "2006-01-02T15:04:05.999999999Z",
				},
				"rclone-mtime": {
					Help:    "Time of last modification, managed by Rclone",
					Type:    "RFC 3339",
					Example: "2006-01-02T15:04:05.999999999Z",
				},
				"rclone-update-track": {
					Help:    "Random value used by Rclone for tracking changes inside Internet Archive",
					Type:    "string",
					Example: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
				},
			},
			Help: `Metadata fields provided by Internet Archive. If there are multiple values for a key, only the first one is returned. This is a limitation of Rclone, that supports one value per one key. Owner is able to add custom keys. Metadata feature grabs all the keys including them.
`,
		},
		Options: []fs.Option{{
			Name:      "access_key_id",
			Help:      "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
			Sensitive: true,
		}, {
			Name:      "secret_access_key",
			Help:      "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
			Sensitive: true,
		}, {
			// their official client (https://github.com/jjjake/internetarchive) hardcodes following the two
			Name:     "endpoint",
			Help:     "IAS3 Endpoint.\n\nLeave blank for default value.",
			Default:  "https://s3.us.archive.org",
			Advanced: true,
		}, {
			Name:     "front_endpoint",
			Help:     "Host of InternetArchive Frontend.\n\nLeave blank for default value.",
			Default:  "https://archive.org",
			Advanced: true,
		}, {
			Name:     "item_metadata",
			Help:     `Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set. Format is key=value and the 'x-archive-meta-' prefix is automatically added.`,
			Default:  []string{},
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,
		}, {
			Name:    "item_derive",
			Help:    `Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload. The derive process produces a number of secondary files from an upload to make an upload more usable on the web. Setting this to false is useful for uploading files that are already in a format that IA can display or reduce burden on IA's infrastructure.`,
			Default: true,
		}, {
			Name:     "disable_checksum",
			Help:     `Don't ask the server to test against MD5 checksum calculated by rclone. Normally rclone will calculate the MD5 checksum of the input before uploading it so it can ask the server to check the object against checksum. This is great for data integrity checking but can cause long delays for large files to start uploading.`,
			Default:  true,
			Advanced: true,
		}, {
			Name: "wait_archive",
			Help: `Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish.
Only enable if you need to be guaranteed to be reflected after write operations. 0 to disable waiting. No errors to be thrown in case of timeout.`,
			Default:  fs.Duration(0),
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			Default: encoder.EncodeZero |
				encoder.EncodeSlash |
				encoder.EncodeLtGt |
				encoder.EncodeCrLf |
				encoder.EncodeDel |
				encoder.EncodeCtl |
				encoder.EncodeInvalidUtf8 |
				encoder.EncodeDot,
		}},
	})
}

// maximum size of an item. this is constant across all items
const iaItemMaxSize int64 = 1099511627776

// metadata keys that are not writeable
var roMetadataKey = map[string]any{
	// do not add mtime here, it's a documented exception
	"name": nil, "source": nil, "size": nil, "md5": nil,
	"crc32": nil, "sha1": nil, "format": nil, "old_version": nil,
	"viruscheck": nil, "summation": nil,
}

// Options defines the configuration for this backend
type Options struct {
	AccessKeyID     string               `config:"access_key_id"`
	SecretAccessKey string               `config:"secret_access_key"`
	Endpoint        string               `config:"endpoint"`
	FrontEndpoint   string               `config:"front_endpoint"`
	DisableChecksum bool                 `config:"disable_checksum"`
	ItemMetadata    []string             `config:"item_metadata"`
	ItemDerive      bool                 `config:"item_derive"`
	WaitArchive     fs.Duration          `config:"wait_archive"`
	Enc             encoder.MultiEncoder `config:"encoding"`
}

// Fs represents an IAS3 remote
type Fs struct {
	name     string          // name of this remote
	root     string          // the path we are working on if any
	opt      Options         // parsed config options
	features *fs.Features    // optional features
	srv      *rest.Client    // the connection to IAS3
	front    *rest.Client    // the connection to frontend
	pacer    *fs.Pacer       // pacer for API calls
	ctx      context.Context // saved from NewFs
}

// Object describes a file at IA
type Object struct {
	fs      *Fs             // reference to Fs
	remote  string          // the remote path
	modTime time.Time       // last modified time
	size    int64           // size of the file in bytes
	md5     string          // md5 hash of the file presented by the server
	sha1    string          // sha1 hash of the file presented by the server
	crc32   string          // crc32 of the file presented by the server
	rawData json.RawMessage // raw file entry as returned by the metadata API
}

// IAFile represents a subset of object in MetadataResponse.Files
type IAFile struct {
	Name string `json:"name"`
	// Source string `json:"source"`
	Mtime       string          `json:"mtime"`
	RcloneMtime json.RawMessage `json:"rclone-mtime"`
	UpdateTrack json.RawMessage `json:"rclone-update-track"`
	Size        string          `json:"size"`
	Md5         string          `json:"md5"`
	Crc32       string          `json:"crc32"`
	Sha1        string          `json:"sha1"`
	Summation   string          `json:"summation"`

	rawData json.RawMessage
}

// MetadataResponse represents subset of the JSON object returned by (frontend)/metadata/
type MetadataResponse struct {
	Files    []IAFile `json:"files"`
	ItemSize int64    `json:"item_size"`
}

// MetadataResponseRaw is the form of MetadataResponse to deal with metadata
type MetadataResponseRaw struct {
	Files    []json.RawMessage `json:"files"`
	ItemSize int64             `json:"item_size"`
}

// ModMetadataResponse represents response for amending metadata
type ModMetadataResponse struct {
	// https://archive.org/services/docs/api/md-write.html#example
	Success bool   `json:"success"`
	Error   string `json:"error"`
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	bucket, file := f.split("")
	if bucket == "" {
		return "Internet Archive root"
	}
	if file == "" {
		return fmt.Sprintf("Internet Archive item %s", bucket)
	}
	return fmt.Sprintf("Internet Archive item %s path %s", bucket, file)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Hashes returns type of hashes supported by IA
func (f *Fs) Hashes() hash.Set {
	return hash.NewHashSet(hash.MD5, hash.SHA1, hash.CRC32)
}

// Precision returns the precision of mtime that the server responds
func (f *Fs) Precision() time.Duration {
	// without wait_archive the stored mtime is not reflected back reliably,
	// so advertise that mod times are unsupported
	if f.opt.WaitArchive == 0 {
		return fs.ModTimeNotSupported
	}
	return time.Nanosecond
}

// retryErrorCodes is a slice of error codes that we will retry
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var retryErrorCodes = []int{
	429, // Too Many Requests
	500, // Internal Server Error - "We encountered an internal error. Please try again."
	503, // Service Unavailable/Slow Down - "Reduce your request rate"
}

// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	// Parse the endpoints
	ep, err := url.Parse(opt.Endpoint)
	if err != nil {
		return nil, err
	}
	fe, err := url.Parse(opt.FrontEndpoint)
	if err != nil {
		return nil, err
	}

	root = strings.Trim(root, "/")

	f := &Fs{
		name: name,
		opt:  *opt,
		ctx:  ctx,
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		BucketBased:   true,
		ReadMetadata:  true,
		WriteMetadata: true,
		UserMetadata:  true,
	}).Fill(ctx, f)

	f.srv = rest.NewClient(fshttp.NewClient(ctx))
	f.srv.SetRoot(ep.String())

	f.front = rest.NewClient(fshttp.NewClient(ctx))
	f.front.SetRoot(fe.String())

	if opt.AccessKeyID != "" && opt.SecretAccessKey != "" {
		// IAS3 uses a "LOW <key>:<secret>" Authorization header
		auth := fmt.Sprintf("LOW %s:%s", opt.AccessKeyID, opt.SecretAccessKey)
		f.srv.SetHeader("Authorization", auth)
		f.front.SetHeader("Authorization", auth)
	}

	f.pacer = fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(10*time.Millisecond)))

	// test if the root exists as a file; if so, point the Fs at the parent
	// directory and signal this with fs.ErrorIsFile (rclone convention)
	_, err = f.NewObject(ctx, "/")
	if err == nil {
		f.setRoot(betterPathDir(root))
		return f, fs.ErrorIsFile
	}
	return f, nil
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	f.root = strings.Trim(root, "/")
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// ModTime is the last modified time (read-only)
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// Size is the file length
func (o *Object) Size() int64 {
	return o.size
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Hash returns the hash value presented by IA
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
	if ty == hash.MD5 {
		return o.md5, nil
	}
	if ty == hash.SHA1 {
		return o.sha1, nil
	}
	if ty == hash.CRC32 {
		return o.crc32, nil
	}
	return "", hash.ErrUnsupported
}

// Storable returns if this object is storable
func (o *Object) Storable() bool {
	return true
}

// SetModTime sets modTime on a particular file
func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
	bucket, reqDir := o.split()
	if bucket == "" {
		return fs.ErrorCantSetModTime
	}
	if reqDir == "" {
		return fs.ErrorCantSetModTime
	}

	// https://archive.org/services/docs/api/md-write.html
	// the following code might be useful for modifying metadata of an uploaded file
	patch := []map[string]string{
		// we should drop it first to clear all rclone-provided mtimes
		{
			"op":   "remove",
			"path": "/rclone-mtime",
		}, {
			"op":    "add",
			"path":  "/rclone-mtime",
			"value": t.Format(time.RFC3339Nano),
		}}
	res, err := json.Marshal(patch)
	if err != nil {
		return err
	}
	params := url.Values{}
	params.Add("-target", fmt.Sprintf("files/%s", reqDir))
	params.Add("-patch", string(res))
	body := []byte(params.Encode())
	bodyLen := int64(len(body))

	var resp *http.Response
	var result ModMetadataResponse
	// make a POST request to (frontend)/metadata/:item/
	opts := rest.Opts{
		Method:        "POST",
		Path:          path.Join("/metadata/", bucket),
		Body:          bytes.NewReader(body),
		ContentLength: &bodyLen,
		ContentType:   "application/x-www-form-urlencoded",
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.front.CallJSON(ctx, &opts, nil, &result)
		return o.fs.shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}

	if result.Success {
		o.modTime = t
		return nil
	}
	return errors.New(result.Error)
}

// List files and directories in a directory
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	bucket, reqDir := f.split(dir)
	if bucket == "" {
		if reqDir != "" {
			return nil, fs.ErrorListBucketRequired
		}
		return entries, nil
	}
	// prefix (with trailing slash) that direct children of dir must have
	grandparent := f.opt.Enc.ToStandardPath(strings.Trim(path.Join(bucket, reqDir), "/") + "/")

	allEntries, err := f.listAllUnconstrained(ctx, bucket)
	if err != nil {
		return entries, err
	}
	for _, ent := range allEntries {
		obj, ok := ent.(*Object)
		if ok && strings.HasPrefix(obj.remote, grandparent) {
			// only keep immediate children (no further "/" in the rest of the path)
			path := trimPathPrefix(obj.remote, grandparent, f.opt.Enc)
			if !strings.Contains(path, "/") {
				obj.remote = trimPathPrefix(obj.remote, f.root, f.opt.Enc)
				entries = append(entries, obj)
			}
		}
		dire, ok := ent.(*fs.Dir)
		if ok && strings.HasPrefix(dire.Remote(), grandparent) {
			path := trimPathPrefix(dire.Remote(), grandparent, f.opt.Enc)
			if !strings.Contains(path, "/") {
				dire.SetRemote(trimPathPrefix(dire.Remote(), f.root, f.opt.Enc))
				entries = append(entries, dire)
			}
		}
	}
	return entries, nil
}

// Mkdir can't be performed on IA like git repositories
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	return nil
}

// Rmdir as well, unless we're asked for recursive deletion
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (ret fs.Object, err error) { bucket, filepath := f.split(remote) filepath = strings.Trim(filepath, "/") if bucket == "" { if filepath != "" { return nil, fs.ErrorListBucketRequired } return nil, fs.ErrorIsDir } grandparent := f.opt.Enc.ToStandardPath(strings.Trim(path.Join(bucket, filepath), "/")) allEntries, err := f.listAllUnconstrained(ctx, bucket) if err != nil { return nil, err } for _, ent := range allEntries { obj, ok := ent.(*Object) if ok && obj.remote == grandparent { obj.remote = trimPathPrefix(obj.remote, f.root, f.opt.Enc) return obj, nil } } return nil, fs.ErrorObjectNotFound } // Put uploads a file func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { o := &Object{ fs: f, remote: src.Remote(), modTime: src.ModTime(ctx), size: src.Size(), } err := o.Update(ctx, in, src, options...) if err == nil { return o, nil } return nil, err } // PublicLink generates a public link to the remote path (usually readable by anyone) func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) { if strings.HasSuffix(remote, "/") { return "", fs.ErrorCantShareDirectories } if _, err := f.NewObject(ctx, remote); err != nil { return "", err } bucket, bucketPath := f.split(remote) return path.Join(f.opt.FrontEndpoint, "/download/", bucket, rest.URLPathEscapeAll(bucketPath)), nil } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Object, err error) {
	dstBucket, dstPath := f.split(remote)
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcBucket, srcPath := srcObj.split()

	if dstBucket == srcBucket && dstPath == srcPath {
		// https://github.com/jjjake/internetarchive/blob/2456376533251df9d05e0a14d796ec1ced4959f5/internetarchive/cli/ia_copy.py#L68
		fs.Debugf(src, "Can't copy - the source and destination files cannot be the same!")
		return nil, fs.ErrorCantCopy
	}

	// random token written as file metadata so the completion of the
	// server-side copy can be recognised later (see waitFileUpload)
	updateTracker := random.String(32)
	headers := map[string]string{
		"x-archive-auto-make-bucket": "1",
		"x-archive-queue-derive":     "0",
		"x-archive-keep-old-version": "0",
		"x-amz-copy-source":          rest.URLPathEscapeAll(path.Join("/", srcBucket, srcPath)),
		"x-amz-metadata-directive":   "COPY",
		"x-archive-filemeta-sha1":    srcObj.sha1,
		"x-archive-filemeta-md5":     srcObj.md5,
		"x-archive-filemeta-crc32":   srcObj.crc32,
		"x-archive-filemeta-size":    fmt.Sprint(srcObj.size),
		// add this too for sure
		"x-archive-filemeta-rclone-mtime":        srcObj.modTime.Format(time.RFC3339Nano),
		"x-archive-filemeta-rclone-update-track": updateTracker,
	}

	// make a PUT request at (IAS3)/:item/:path without body
	var resp *http.Response
	opts := rest.Opts{
		Method:       "PUT",
		Path:         "/" + url.PathEscape(path.Join(dstBucket, dstPath)),
		ExtraHeaders: headers,
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}

	// we can't update/find metadata here as IA will also
	// queue server-side copy as well as upload/delete.
	return f.waitFileUpload(ctx, trimPathPrefix(path.Join(dstBucket, dstPath), f.root, f.opt.Enc), updateTracker, srcObj.size)
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
// // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. // // Don't implement this unless you have a more efficient way // of listing recursively than doing a directory traversal. func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { var allEntries, entries fs.DirEntries bucket, reqDir := f.split(dir) if bucket == "" { if reqDir != "" { return fs.ErrorListBucketRequired } return callback(entries) } grandparent := f.opt.Enc.ToStandardPath(strings.Trim(path.Join(bucket, reqDir), "/") + "/") allEntries, err = f.listAllUnconstrained(ctx, bucket) if err != nil { return err } for _, ent := range allEntries { obj, ok := ent.(*Object) if ok && strings.HasPrefix(obj.remote, grandparent) { obj.remote = trimPathPrefix(obj.remote, f.root, f.opt.Enc) entries = append(entries, obj) } dire, ok := ent.(*fs.Dir) if ok && strings.HasPrefix(dire.Remote(), grandparent) { dire.SetRemote(trimPathPrefix(dire.Remote(), f.root, f.opt.Enc)) entries = append(entries, dire) } } return callback(entries) } // CleanUp removes all files inside history/ func (f *Fs) CleanUp(ctx context.Context) (err error) { bucket, _ := f.split("/") if bucket == "" { return fs.ErrorListBucketRequired } entries, err := f.listAllUnconstrained(ctx, bucket) if err != nil { return err } for _, ent := range entries { obj, ok := ent.(*Object) if ok && strings.HasPrefix(obj.remote, bucket+"/history/") { err = obj.Remove(ctx) if err != nil { return err } } // we can fully ignore directories, as they're just virtual entries to // comply with rclone's requirement } return nil } // About returns things about remaining and used spaces func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err 
error) { bucket, _ := f.split("/") if bucket == "" { return nil, fs.ErrorListBucketRequired } result, err := f.requestMetadata(ctx, bucket) if err != nil { return nil, err } // perform low-level operation here since it's ridiculous to make 2 same requests var historySize int64 for _, ent := range result.Files { if strings.HasPrefix(ent.Name, "history/") { size := parseSize(ent.Size) if size < 0 { // parse error can be ignored since it's not fatal continue } historySize += size } } usage := &fs.Usage{ Total: fs.NewUsageValue(iaItemMaxSize), Free: fs.NewUsageValue(iaItemMaxSize - result.ItemSize), Used: fs.NewUsageValue(result.ItemSize), Trashed: fs.NewUsageValue(historySize), // bytes in trash } return usage, nil } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { var optionsFixed []fs.OpenOption for _, opt := range options { if optRange, ok := opt.(*fs.RangeOption); ok { // Ignore range option if file is empty if o.Size() == 0 && optRange.Start == 0 && optRange.End > 0 { continue } } optionsFixed = append(optionsFixed, opt) } var resp *http.Response // make a GET request to (frontend)/download/:item/:path opts := rest.Opts{ Method: "GET", Path: path.Join("/download/", o.fs.root, rest.URLPathEscapeAll(o.fs.opt.Enc.FromStandardPath(o.remote))), Options: optionsFixed, } err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.front.Call(ctx, &opts) return o.fs.shouldRetry(resp, err) }) if err != nil { return nil, err } return resp.Body, nil } // Update the Object from in with modTime and size func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { bucket, bucketPath := o.split() modTime := src.ModTime(ctx) size := src.Size() updateTracker := random.String(32) // Set the mtime in the metadata // internetarchive backend builds at header level as IAS3 has extension outside X-Amz- headers := map[string]string{ // 
https://github.com/jjjake/internetarchive/blob/2456376533251df9d05e0a14d796ec1ced4959f5/internetarchive/iarequest.py#L158 "x-amz-filemeta-rclone-mtime": modTime.Format(time.RFC3339Nano), "x-amz-filemeta-rclone-update-track": updateTracker, // we add some more headers for intuitive actions "x-amz-auto-make-bucket": "1", // create an item if does not exist, do nothing if already "x-archive-auto-make-bucket": "1", // same as above in IAS3 original way "x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds) "x-archive-cascade-delete": "1", // enable "cascate delete" (delete all derived files in addition to the file itself) } if size >= 0 { headers["Content-Length"] = fmt.Sprintf("%d", size) headers["x-archive-size-hint"] = fmt.Sprintf("%d", size) } // This is IA's ITEM metadata, not file metadata headers, err = o.appendItemMetadataHeaders(headers, o.fs.opt) if err != nil { return err } var mdata fs.Metadata mdata, err = fs.GetMetadataOptions(ctx, o.fs, src, options) if err == nil && mdata != nil { for mk, mv := range mdata { mk = strings.ToLower(mk) if strings.HasPrefix(mk, "rclone-") { fs.LogPrintf(fs.LogLevelWarning, o, "reserved metadata key %s is about to set", mk) } else if _, ok := roMetadataKey[mk]; ok { fs.LogPrintf(fs.LogLevelWarning, o, "setting or modifying read-only key %s is requested, skipping", mk) continue } else if mk == "mtime" { // redirect to make it work mk = "rclone-mtime" } headers[fmt.Sprintf("x-amz-filemeta-%s", mk)] = mv } } // read the md5sum if available var md5sumHex string if !o.fs.opt.DisableChecksum { md5sumHex, err = src.Hash(ctx, hash.MD5) if err == nil && matchMd5.MatchString(md5sumHex) { // Set the md5sum in header on the object if // the user wants it // https://github.com/jjjake/internetarchive/blob/245637653/internetarchive/item.py#L969 headers["Content-MD5"] = md5sumHex } } // make a PUT request at (IAS3)/encoded(:item/:path) var resp *http.Response opts := rest.Opts{ Method: "PUT", Path: 
"/" + url.PathEscape(path.Join(bucket, bucketPath)), Body: in, ContentLength: &size, ExtraHeaders: headers, } err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) return o.fs.shouldRetry(resp, err) }) // we can't update/find metadata here as IA will "ingest" uploaded file(s) // upon uploads. (you can find its progress at https://archive.org/history/ItemNameHere ) // or we have to wait for finish? (needs polling (frontend)/metadata/:item or scraping (frontend)/history/:item) var newObj *Object if err == nil { newObj, err = o.fs.waitFileUpload(ctx, o.remote, updateTracker, size) } else { newObj = &Object{} } o.crc32 = newObj.crc32 o.md5 = newObj.md5 o.sha1 = newObj.sha1 o.modTime = newObj.modTime o.size = newObj.size return err } func (o *Object) appendItemMetadataHeaders(headers map[string]string, options Options) (newHeaders map[string]string, err error) { metadataCounter := make(map[string]int) metadataValues := make(map[string][]string) // First pass: count occurrences and collect values for _, v := range options.ItemMetadata { parts := strings.SplitN(v, "=", 2) if len(parts) != 2 { return newHeaders, errors.New("item metadata key=value should be in the form key=value") } key, value := parts[0], parts[1] metadataCounter[key]++ metadataValues[key] = append(metadataValues[key], value) } // Second pass: add headers with appropriate prefixes for key, count := range metadataCounter { if count == 1 { // Only one occurrence, use x-archive-meta- headers[fmt.Sprintf("x-archive-meta-%s", key)] = metadataValues[key][0] } else { // Multiple occurrences, use x-archive-meta01-, x-archive-meta02-, etc. 
for i, value := range metadataValues[key] { headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, key)] = value } } } if o.fs.opt.ItemDerive { headers["x-archive-queue-derive"] = "1" } else { headers["x-archive-queue-derive"] = "0" } fs.Debugf(o, "Setting IA item derive: %t", o.fs.opt.ItemDerive) for k, v := range headers { if strings.HasPrefix(k, "x-archive-meta") { fs.Debugf(o, "Setting IA item metadata: %s=%s", k, v) } } return headers, nil } // Remove an object func (o *Object) Remove(ctx context.Context) (err error) { bucket, bucketPath := o.split() // make a DELETE request at (IAS3)/:item/:path var resp *http.Response opts := rest.Opts{ Method: "DELETE", Path: "/" + url.PathEscape(path.Join(bucket, bucketPath)), } err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) return o.fs.shouldRetry(resp, err) }) // deleting files can take bit longer as // it'll be processed on same queue as uploads if err == nil { err = o.fs.waitDelete(ctx, bucket, bucketPath) } return err } // String converts this Fs to a string func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Metadata returns all file metadata provided by Internet Archive func (o *Object) Metadata(ctx context.Context) (m fs.Metadata, err error) { if o.rawData == nil { return nil, nil } raw := make(map[string]json.RawMessage) err = json.Unmarshal(o.rawData, &raw) if err != nil { // fatal: json parsing failed return } for k, v := range raw { items, err := listOrString(v) if len(items) == 0 || err != nil { // skip: an entry failed to parse continue } m.Set(k, items[0]) } // move the old mtime to an another key if v, ok := m["mtime"]; ok { m["rclone-ia-mtime"] = v } // overwrite with a correct mtime m["mtime"] = o.modTime.Format(time.RFC3339Nano) return } func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) { if resp != nil { if slices.Contains(retryErrorCodes, resp.StatusCode) { return true, err } } // Ok, not an awserr, check for generic 
failure conditions return fserrors.ShouldRetry(err), err } var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`) // split returns bucket and bucketPath from the rootRelativePath // relative to f.root func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) { bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath)) return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath) } // split returns bucket and bucketPath from the object func (o *Object) split() (bucket, bucketPath string) { return o.fs.split(o.remote) } func (f *Fs) requestMetadata(ctx context.Context, bucket string) (result *MetadataResponse, err error) { var resp *http.Response // make a GET request to (frontend)/metadata/:item/ opts := rest.Opts{ Method: "GET", Path: path.Join("/metadata/", bucket), } var temp MetadataResponseRaw err = f.pacer.Call(func() (bool, error) { resp, err = f.front.CallJSON(ctx, &opts, nil, &temp) return f.shouldRetry(resp, err) }) if err != nil { return } return temp.unraw() } // list up all files/directories without any filters func (f *Fs) listAllUnconstrained(ctx context.Context, bucket string) (entries fs.DirEntries, err error) { result, err := f.requestMetadata(ctx, bucket) if err != nil { return nil, err } knownDirs := map[string]time.Time{ "": time.Unix(0, 0), } for _, file := range result.Files { dir := strings.Trim(betterPathDir(file.Name), "/") nameWithBucket := path.Join(bucket, file.Name) mtimeTime := file.parseMtime() // populate children directories child := dir for { if _, ok := knownDirs[child]; ok { break } // directory d := fs.NewDir(f.opt.Enc.ToStandardPath(path.Join(bucket, child)), mtimeTime) entries = append(entries, d) knownDirs[child] = mtimeTime child = strings.Trim(betterPathDir(child), "/") } if _, ok := knownDirs[betterPathDir(file.Name)]; !ok { continue } size := parseSize(file.Size) o := makeValidObject(f, f.opt.Enc.ToStandardPath(nameWithBucket), file, mtimeTime, size) entries = 
append(entries, o) } return entries, nil } func (f *Fs) waitFileUpload(ctx context.Context, reqPath, tracker string, newSize int64) (ret *Object, err error) { bucket, bucketPath := f.split(reqPath) ret = &Object{ fs: f, remote: trimPathPrefix(path.Join(bucket, bucketPath), f.root, f.opt.Enc), modTime: time.Unix(0, 0), size: -1, } if f.opt.WaitArchive == 0 { // user doesn't want to poll, let's not ret2, err := f.NewObject(ctx, reqPath) if err == nil { ret2, ok := ret2.(*Object) if ok { ret = ret2 ret.crc32 = "" ret.md5 = "" ret.sha1 = "" ret.size = -1 } } return ret, nil } retC := make(chan struct { *Object error }, 1) go func() { isFirstTime := true existed := false for {
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/internetarchive/internetarchive_test.go
backend/internetarchive/internetarchive_test.go
// Test internetarchive filesystem interface package internetarchive_test import ( "testing" "github.com/rclone/rclone/backend/internetarchive" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestIA:lesmi-rclone-test/", NilObject: (*internetarchive.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/azurefiles/azurefiles.go
backend/azurefiles/azurefiles.go
//go:build !plan9 && !js // Package azurefiles provides an interface to Microsoft Azure Files package azurefiles /* TODO This uses LastWriteTime which seems to work. The API return also has LastModified - needs investigation Needs pacer to have retries HTTP headers need to be passed Could support Metadata FIXME write mime type See FIXME markers Optional interfaces for Object - ID */ import ( "bytes" "context" "crypto/md5" "encoding/hex" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "os" "path" "strings" "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/readers" ) const ( maxFileSize = 4 * fs.Tebi defaultChunkSize = 4 * fs.Mebi storageDefaultBaseURL = "file.core.windows.net" ) func init() { fs.Register(&fs.RegInfo{ Name: "azurefiles", Description: "Microsoft Azure Files", NewFs: NewFs, Options: []fs.Option{{ Name: "account", Help: `Azure Storage Account Name. Set this to the Azure Storage Account Name in use. Leave blank to use SAS URL or connection string, otherwise it needs to be set. If this is blank and if env_auth is set it will be read from the environment variable ` + "`AZURE_STORAGE_ACCOUNT_NAME`" + ` if possible. 
`, Sensitive: true, }, { Name: "share_name", Help: `Azure Files Share Name. This is required and is the name of the share to access. `, }, { Name: "env_auth", Help: `Read credentials from runtime (environment variables, CLI or MSI). See the [authentication docs](/azurefiles#authentication) for full info.`, Default: false, }, { Name: "key", Help: `Storage Account Shared Key. Leave blank to use SAS URL or connection string.`, Sensitive: true, }, { Name: "sas_url", Help: `SAS URL. Leave blank if using account/key or connection string.`, Sensitive: true, }, { Name: "connection_string", Help: `Azure Files Connection String.`, Sensitive: true, }, { Name: "tenant", Help: `ID of the service principal's tenant. Also called its directory ID. Set this if using - Service principal with client secret - Service principal with certificate - User with username and password `, Sensitive: true, }, { Name: "client_id", Help: `The ID of the client in use. Set this if using - Service principal with client secret - Service principal with certificate - User with username and password `, Sensitive: true, }, { Name: "client_secret", Help: `One of the service principal's client secrets Set this if using - Service principal with client secret `, Sensitive: true, }, { Name: "client_certificate_path", Help: `Path to a PEM or PKCS12 certificate file including the private key. Set this if using - Service principal with certificate `, }, { Name: "client_certificate_password", Help: `Password for the certificate file (optional). Optionally set this if using - Service principal with certificate And the certificate has a password. `, IsPassword: true, }, { Name: "client_send_certificate_chain", Help: `Send the certificate chain when using certificate auth. Specifies whether an authentication request will include an x5c header to support subject name / issuer based authentication. When set to true, authentication requests include the x5c header. 
Optionally set this if using - Service principal with certificate `, Default: false, Advanced: true, }, { Name: "username", Help: `User name (usually an email address) Set this if using - User with username and password `, Advanced: true, Sensitive: true, }, { Name: "password", Help: `The user's password Set this if using - User with username and password `, IsPassword: true, Advanced: true, }, { Name: "service_principal_file", Help: `Path to file containing credentials for use with a service principal. Leave blank normally. Needed only if you want to use a service principal instead of interactive login. $ az ad sp create-for-rbac --name "<name>" \ --role "Storage Files Data Owner" \ --scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \ > azure-principal.json See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to files data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details. **NB** this section needs updating for Azure Files - pull requests appreciated! It may be more convenient to put the credentials directly into the rclone config file under the ` + "`client_id`, `tenant` and `client_secret`" + ` keys instead of setting ` + "`service_principal_file`" + `. `, Advanced: true, }, { Name: "use_msi", Help: `Use a managed service identity to authenticate (only works in Azure). When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/) to authenticate to Azure Storage instead of a SAS token or account key. If the VM(SS) on which this program is running has a system-assigned identity, it will be used by default. 
If the resource has no system-assigned but exactly one user-assigned identity, the user-assigned identity will be used by default. If the resource has multiple user-assigned identities, the identity to use must be explicitly specified using exactly one of the msi_object_id, msi_client_id, or msi_mi_res_id parameters.`, Default: false, Advanced: true, }, { Name: "msi_object_id", Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.", Advanced: true, Sensitive: true, }, { Name: "msi_client_id", Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.", Advanced: true, Sensitive: true, }, { Name: "msi_mi_res_id", Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.", Advanced: true, Sensitive: true, }, { Name: "disable_instance_discovery", Help: `Skip requesting Microsoft Entra instance metadata This should be set true only by applications authenticating in disconnected clouds, or private clouds such as Azure Stack. It determines whether rclone requests Microsoft Entra instance metadata from ` + "`https://login.microsoft.com/`" + ` before authenticating. Setting this to true will skip this request, making you responsible for ensuring the configured authority is valid and trustworthy. `, Default: false, Advanced: true, }, { Name: "use_az", Help: `Use Azure CLI tool az for authentication Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/) as the sole means of authentication. Setting this can be useful if you wish to use the az CLI on a host with a System Managed Identity that you do not want to use. Don't set env_auth at the same time. `, Default: false, Advanced: true, }, { Name: "endpoint", Help: "Endpoint for the service.\n\nLeave blank normally.", Advanced: true, }, { Name: "chunk_size", Help: `Upload chunk size. 
Note that this is stored in memory and there may be up to "--transfers" * "--azurefile-upload-concurrency" chunks stored at once in memory.`, Default: defaultChunkSize, Advanced: true, }, { Name: "upload_concurrency", Help: `Concurrency for multipart uploads. This is the number of chunks of the same file that are uploaded concurrently. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers. Note that chunks are stored in memory and there may be up to "--transfers" * "--azurefile-upload-concurrency" chunks stored at once in memory.`, Default: 16, Advanced: true, }, { Name: "max_stream_size", Help: strings.ReplaceAll(`Max size for streamed files. Azure files needs to know in advance how big the file will be. When rclone doesn't know it uses this value instead. This will be used when rclone is streaming data, the most common uses are: - Uploading files with |--vfs-cache-mode off| with |rclone mount| - Using |rclone rcat| - Copying files with unknown length You will need this much free space in the share as the file will be this size temporarily. 
`, "|", "`"), Default: 10 * fs.Gibi, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.EncodeDoubleQuote | encoder.EncodeBackSlash | encoder.EncodeSlash | encoder.EncodeColon | encoder.EncodePipe | encoder.EncodeLtGt | encoder.EncodeAsterisk | encoder.EncodeQuestion | encoder.EncodeInvalidUtf8 | encoder.EncodeCtl | encoder.EncodeDel | encoder.EncodeDot | encoder.EncodeRightPeriod), }}, }) } // Options defines the configuration for this backend type Options struct { Account string `config:"account"` ShareName string `config:"share_name"` EnvAuth bool `config:"env_auth"` Key string `config:"key"` SASURL string `config:"sas_url"` ConnectionString string `config:"connection_string"` Tenant string `config:"tenant"` ClientID string `config:"client_id"` ClientSecret string `config:"client_secret"` ClientCertificatePath string `config:"client_certificate_path"` ClientCertificatePassword string `config:"client_certificate_password"` ClientSendCertificateChain bool `config:"client_send_certificate_chain"` Username string `config:"username"` Password string `config:"password"` ServicePrincipalFile string `config:"service_principal_file"` DisableInstanceDiscovery bool `config:"disable_instance_discovery"` UseMSI bool `config:"use_msi"` MSIObjectID string `config:"msi_object_id"` MSIClientID string `config:"msi_client_id"` MSIResourceID string `config:"msi_mi_res_id"` UseAZ bool `config:"use_az"` Endpoint string `config:"endpoint"` ChunkSize fs.SizeSuffix `config:"chunk_size"` MaxStreamSize fs.SizeSuffix `config:"max_stream_size"` UploadConcurrency int `config:"upload_concurrency"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a root directory inside a share. 
The root directory can be "" type Fs struct { name string // name of this remote root string // the path we are working on if any opt Options // parsed config options features *fs.Features // optional features shareClient *share.Client // a client for the share itself svc *directory.Client // the root service } // Object describes a Azure File Share File type Object struct { fs *Fs // what this object is part of remote string // The remote path size int64 // Size of the object md5 []byte // MD5 hash if known modTime time.Time // The modified time of the object if known contentType string // content type if known } // Wrap the http.Transport to satisfy the Transporter interface type transporter struct { http.RoundTripper } // Make a new transporter func newTransporter(ctx context.Context) transporter { return transporter{ RoundTripper: fshttp.NewTransport(ctx), } } // Do sends the HTTP request and returns the HTTP response or error. func (tr transporter) Do(req *http.Request) (*http.Response, error) { return tr.RoundTripper.RoundTrip(req) } type servicePrincipalCredentials struct { AppID string `json:"appId"` Password string `json:"password"` Tenant string `json:"tenant"` } // parseServicePrincipalCredentials unmarshals a service principal credentials JSON file as generated by az cli. 
func parseServicePrincipalCredentials(ctx context.Context, credentialsData []byte) (*servicePrincipalCredentials, error) { var spCredentials servicePrincipalCredentials if err := json.Unmarshal(credentialsData, &spCredentials); err != nil { return nil, fmt.Errorf("error parsing credentials from JSON file: %w", err) } // TODO: support certificate credentials // Validate all fields present if spCredentials.AppID == "" || spCredentials.Password == "" || spCredentials.Tenant == "" { return nil, fmt.Errorf("missing fields in credentials file") } return &spCredentials, nil } // Factored out from NewFs so that it can be tested with opt *Options and without m configmap.Mapper func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.Fs, error) { // Client options specifying our own transport policyClientOptions := policy.ClientOptions{ Transport: newTransporter(ctx), } backup := service.ShareTokenIntentBackup clientOpt := service.ClientOptions{ ClientOptions: policyClientOptions, FileRequestIntent: &backup, } // Here we auth by setting one of cred, sharedKeyCred or f.client var ( cred azcore.TokenCredential sharedKeyCred *service.SharedKeyCredential client *service.Client err error ) switch { case opt.EnvAuth: // Read account from environment if needed if opt.Account == "" { opt.Account, _ = os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") } // Read credentials from the environment options := azidentity.DefaultAzureCredentialOptions{ ClientOptions: policyClientOptions, DisableInstanceDiscovery: opt.DisableInstanceDiscovery, } cred, err = azidentity.NewDefaultAzureCredential(&options) if err != nil { return nil, fmt.Errorf("create azure environment credential failed: %w", err) } case opt.Account != "" && opt.Key != "": sharedKeyCred, err = service.NewSharedKeyCredential(opt.Account, opt.Key) if err != nil { return nil, fmt.Errorf("create new shared key credential failed: %w", err) } case opt.UseAZ: options := azidentity.AzureCLICredentialOptions{} cred, err 
= azidentity.NewAzureCLICredential(&options) fmt.Println(cred) if err != nil { return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err) } case opt.SASURL != "": client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt) if err != nil { return nil, fmt.Errorf("unable to create SAS URL client: %w", err) } case opt.ConnectionString != "": client, err = service.NewClientFromConnectionString(opt.ConnectionString, &clientOpt) if err != nil { return nil, fmt.Errorf("unable to create connection string client: %w", err) } case opt.ClientID != "" && opt.Tenant != "" && opt.ClientSecret != "": // Service principal with client secret options := azidentity.ClientSecretCredentialOptions{ ClientOptions: policyClientOptions, } cred, err = azidentity.NewClientSecretCredential(opt.Tenant, opt.ClientID, opt.ClientSecret, &options) if err != nil { return nil, fmt.Errorf("error creating a client secret credential: %w", err) } case opt.ClientID != "" && opt.Tenant != "" && opt.ClientCertificatePath != "": // Service principal with certificate // // Read the certificate data, err := os.ReadFile(env.ShellExpand(opt.ClientCertificatePath)) if err != nil { return nil, fmt.Errorf("error reading client certificate file: %w", err) } // NewClientCertificateCredential requires at least one *x509.Certificate, and a // crypto.PrivateKey. // // ParseCertificates returns these given certificate data in PEM or PKCS12 format. // It handles common scenarios but has limitations, for example it doesn't load PEM // encrypted private keys. 
var password []byte if opt.ClientCertificatePassword != "" { pw, err := obscure.Reveal(opt.Password) if err != nil { return nil, fmt.Errorf("certificate password decode failed - did you obscure it?: %w", err) } password = []byte(pw) } certs, key, err := azidentity.ParseCertificates(data, password) if err != nil { return nil, fmt.Errorf("failed to parse client certificate file: %w", err) } options := azidentity.ClientCertificateCredentialOptions{ ClientOptions: policyClientOptions, SendCertificateChain: opt.ClientSendCertificateChain, } cred, err = azidentity.NewClientCertificateCredential( opt.Tenant, opt.ClientID, certs, key, &options, ) if err != nil { return nil, fmt.Errorf("create azure service principal with client certificate credential failed: %w", err) } case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "": // User with username and password //nolint:staticcheck // this is deprecated due to Azure policy options := azidentity.UsernamePasswordCredentialOptions{ ClientOptions: policyClientOptions, } password, err := obscure.Reveal(opt.Password) if err != nil { return nil, fmt.Errorf("user password decode failed - did you obscure it?: %w", err) } cred, err = azidentity.NewUsernamePasswordCredential( opt.Tenant, opt.ClientID, opt.Username, password, &options, ) if err != nil { return nil, fmt.Errorf("authenticate user with password failed: %w", err) } case opt.ServicePrincipalFile != "": // Loading service principal credentials from file. 
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServicePrincipalFile)) if err != nil { return nil, fmt.Errorf("error opening service principal credentials file: %w", err) } parsedCreds, err := parseServicePrincipalCredentials(ctx, loadedCreds) if err != nil { return nil, fmt.Errorf("error parsing service principal credentials file: %w", err) } options := azidentity.ClientSecretCredentialOptions{ ClientOptions: policyClientOptions, } cred, err = azidentity.NewClientSecretCredential(parsedCreds.Tenant, parsedCreds.AppID, parsedCreds.Password, &options) if err != nil { return nil, fmt.Errorf("error creating a client secret credential: %w", err) } case opt.UseMSI: // Specifying a user-assigned identity. Exactly one of the above IDs must be specified. // Validate and ensure exactly one is set. (To do: better validation.) b2i := map[bool]int{false: 0, true: 1} set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""] if set > 1 { return nil, errors.New("more than one user-assigned identity ID is set") } var options azidentity.ManagedIdentityCredentialOptions switch { case opt.MSIClientID != "": options.ID = azidentity.ClientID(opt.MSIClientID) case opt.MSIObjectID != "": // FIXME this doesn't appear to be in the new SDK? 
return nil, fmt.Errorf("MSI object ID is currently unsupported") case opt.MSIResourceID != "": options.ID = azidentity.ResourceID(opt.MSIResourceID) } cred, err = azidentity.NewManagedIdentityCredential(&options) if err != nil { return nil, fmt.Errorf("failed to acquire MSI token: %w", err) } case opt.ClientID != "" && opt.Tenant != "" && opt.MSIClientID != "": // Workload Identity based authentication var options azidentity.ManagedIdentityCredentialOptions options.ID = azidentity.ClientID(opt.MSIClientID) msiCred, err := azidentity.NewManagedIdentityCredential(&options) if err != nil { return nil, fmt.Errorf("failed to acquire MSI token: %w", err) } getClientAssertions := func(context.Context) (string, error) { token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{ Scopes: []string{"api://AzureADTokenExchange"}, }) if err != nil { return "", fmt.Errorf("failed to acquire MSI token: %w", err) } return token.Token, nil } assertOpts := &azidentity.ClientAssertionCredentialOptions{} cred, err = azidentity.NewClientAssertionCredential( opt.Tenant, opt.ClientID, getClientAssertions, assertOpts) if err != nil { return nil, fmt.Errorf("failed to acquire client assertion token: %w", err) } default: return nil, errors.New("no authentication method configured") } // Make the client if not already created if client == nil { // Work out what the endpoint is if it is still unset if opt.Endpoint == "" { if opt.Account == "" { return nil, fmt.Errorf("account must be set: can't make service URL") } u, err := url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, storageDefaultBaseURL)) if err != nil { return nil, fmt.Errorf("failed to make azure storage URL from account: %w", err) } opt.Endpoint = u.String() } if sharedKeyCred != nil { // Shared key cred client, err = service.NewClientWithSharedKeyCredential(opt.Endpoint, sharedKeyCred, &clientOpt) if err != nil { return nil, fmt.Errorf("create client with shared key failed: %w", err) } } else if cred != nil 
{ // Azidentity cred client, err = service.NewClient(opt.Endpoint, cred, &clientOpt) if err != nil { return nil, fmt.Errorf("create client failed: %w", err) } } } if client == nil { return nil, fmt.Errorf("internal error: auth failed to make credentials or client") } shareClient := client.NewShareClient(opt.ShareName) svc := shareClient.NewRootDirectoryClient() f := &Fs{ shareClient: shareClient, svc: svc, name: name, root: root, opt: *opt, } f.features = (&fs.Features{ CanHaveEmptyDirectories: true, PartialUploads: true, // files are visible as they are being uploaded CaseInsensitive: true, SlowHash: true, // calling Hash() generally takes an extra transaction ReadMimeType: true, WriteMimeType: true, }).Fill(ctx, f) // Check whether a file exists at this location _, propsErr := f.fileClient("").GetProperties(ctx, nil) if propsErr == nil { f.root = path.Dir(root) return f, fs.ErrorIsFile } return f, nil } // NewFs constructs an Fs from the root func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } return newFsFromOptions(ctx, name, root, opt) } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("azurefiles root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // Precision return the precision of this Fs // // One second. 
FileREST API times are in RFC1123 which in the example shows a precision of seconds // Source: https://learn.microsoft.com/en-us/rest/api/storageservices/representation-of-date-time-values-in-headers func (f *Fs) Precision() time.Duration { return time.Second } // Hashes returns the supported hash sets. // // MD5: since it is listed as header in the response for file properties // Source: https://learn.microsoft.com/en-us/rest/api/storageservices/get-file-properties func (f *Fs) Hashes() hash.Set { return hash.NewHashSet(hash.MD5) } // Encode remote and turn it into an absolute path in the share func (f *Fs) absPath(remote string) string { return f.opt.Enc.FromStandardPath(path.Join(f.root, remote)) } // Make a directory client from the dir func (f *Fs) dirClient(dir string) *directory.Client { return f.svc.NewSubdirectoryClient(f.absPath(dir)) } // Make a file client from the remote func (f *Fs) fileClient(remote string) *file.Client { return f.svc.NewFileClient(f.absPath(remote)) } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. // // Does not return ErrorIsDir when a directory exists instead of file. since the documentation // for [rclone.fs.Fs.NewObject] rqeuires no extra work to determine whether it is directory // // This initiates a network request and returns an error if object is not found. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { resp, err := f.fileClient(remote).GetProperties(ctx, nil) if fileerror.HasCode(err, fileerror.ParentNotFound, fileerror.ResourceNotFound) { return nil, fs.ErrorObjectNotFound } else if err != nil { return nil, fmt.Errorf("unable to find object remote %q: %w", remote, err) } o := &Object{ fs: f, remote: remote, } o.setMetadata(&resp) return o, nil } // Make a directory using the absolute path from the root of the share // // This recursiely creating parent directories all the way to the root // of the share. 
func (f *Fs) absMkdir(ctx context.Context, absPath string) error { if absPath == "" { return nil } dirClient := f.svc.NewSubdirectoryClient(absPath) // now := time.Now() // smbProps := &file.SMBProperties{ // LastWriteTime: &now, // } // dirCreateOptions := &directory.CreateOptions{ // FileSMBProperties: smbProps, // } _, createDirErr := dirClient.Create(ctx, nil) if fileerror.HasCode(createDirErr, fileerror.ParentNotFound) { parentDir := path.Dir(absPath) if parentDir == absPath { return fmt.Errorf("internal error: infinite recursion since parent and remote are equal") } makeParentErr := f.absMkdir(ctx, parentDir) if makeParentErr != nil { return fmt.Errorf("could not make parent of %q: %w", absPath, makeParentErr) } return f.absMkdir(ctx, absPath) } else if fileerror.HasCode(createDirErr, fileerror.ResourceAlreadyExists) { return nil } else if createDirErr != nil { return fmt.Errorf("unable to MkDir: %w", createDirErr) } return nil } // Mkdir creates nested directories func (f *Fs) Mkdir(ctx context.Context, remote string) error { return f.absMkdir(ctx, f.absPath(remote)) } // Make the parent directory of remote func (f *Fs) mkParentDir(ctx context.Context, remote string) error { // Can't make the parent of root if remote == "" { return nil } return f.Mkdir(ctx, path.Dir(remote)) } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { dirClient := f.dirClient(dir) _, err := dirClient.Delete(ctx, nil) if err != nil { if fileerror.HasCode(err, fileerror.DirectoryNotEmpty) { return fs.ErrorDirectoryNotEmpty } else if fileerror.HasCode(err, fileerror.ResourceNotFound) { return fs.ErrorDirNotFound } return fmt.Errorf("could not rmdir dir %q: %w", dir, err) } return nil } // Put the object // // Copies the reader in to the new object. This new object is returned. 
// // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // Temporary Object under construction fs := &Object{ fs: f, remote: src.Remote(), } return fs, fs.Update(ctx, in, src, options...) } // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) } // List the objects and directories in dir into entries. The entries can be // returned in any order but should be for a complete directory. // // dir should be "" to list the root, and should not have trailing slashes. // // This should return ErrDirNotFound if the directory isn't found. func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) { return list.WithListP(ctx, dir, f) } // ListP lists the objects and directories of the Fs starting // from dir non recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. 
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error { list := list.NewHelper(callback) subDirClient := f.dirClient(dir) // Checking whether directory exists _, err := subDirClient.GetProperties(ctx, nil) if fileerror.HasCode(err, fileerror.ParentNotFound, fileerror.ResourceNotFound) { return fs.ErrorDirNotFound } else if err != nil { return err } opt := &directory.ListFilesAndDirectoriesOptions{ Include: directory.ListFilesInclude{ Timestamps: true, }, } pager := subDirClient.NewListFilesAndDirectoriesPager(opt) for pager.More() { resp, err := pager.NextPage(ctx) if err != nil { return err } for _, directory := range resp.Segment.Directories { // Name *string `xml:"Name"` // Attributes *string `xml:"Attributes"` // ID *string `xml:"FileId"` // PermissionKey *string `xml:"PermissionKey"` // Properties.ContentLength *int64 `xml:"Content-Length"` // Properties.ChangeTime *time.Time `xml:"ChangeTime"` // Properties.CreationTime *time.Time `xml:"CreationTime"` // Properties.ETag *azcore.ETag `xml:"Etag"` // Properties.LastAccessTime *time.Time `xml:"LastAccessTime"` // Properties.LastModified *time.Time `xml:"Last-Modified"` // Properties.LastWriteTime *time.Time `xml:"LastWriteTime"` var modTime time.Time if directory.Properties.LastWriteTime != nil { modTime = *directory.Properties.LastWriteTime } leaf := f.opt.Enc.ToStandardPath(*directory.Name) entry := fs.NewDir(path.Join(dir, leaf), modTime) if directory.ID != nil { entry.SetID(*directory.ID) } if directory.Properties.ContentLength != nil { entry.SetSize(*directory.Properties.ContentLength) } err = list.Add(entry) if err != nil { return err } } for _, file := range resp.Segment.Files { leaf := f.opt.Enc.ToStandardPath(*file.Name) entry := &Object{ fs: f, remote: path.Join(dir, leaf), } if file.Properties.ContentLength != nil { entry.size = *file.Properties.ContentLength } if file.Properties.LastWriteTime != nil { entry.modTime = *file.Properties.LastWriteTime } err = 
list.Add(entry) if err != nil { return err } } } return list.Flush() } // ------------------------------------------------------------ // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Size of object in bytes func (o *Object) Size() int64 { return o.size } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // fileClient makes a specialized client for this object func (o *Object) fileClient() *file.Client { return o.fs.fileClient(o.remote) } // set the metadata from file.GetPropertiesResponse func (o *Object) setMetadata(resp *file.GetPropertiesResponse) { if resp.ContentLength != nil { o.size = *resp.ContentLength }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/azurefiles/azurefiles_unsupported.go
backend/azurefiles/azurefiles_unsupported.go
// Build for azurefiles for unsupported platforms to stop go complaining // about "no buildable Go source files " //go:build plan9 || js // Package azurefiles provides an interface to Microsoft Azure Files package azurefiles
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/azurefiles/azurefiles_internal_test.go
backend/azurefiles/azurefiles_internal_test.go
//go:build !plan9 && !js package azurefiles import ( "context" "math/rand" "strings" "testing" "github.com/rclone/rclone/fstest/fstests" "github.com/stretchr/testify/assert" ) func (f *Fs) InternalTest(t *testing.T) { t.Run("Authentication", f.InternalTestAuth) } var _ fstests.InternalTester = (*Fs)(nil) func (f *Fs) InternalTestAuth(t *testing.T) { t.Skip("skipping since this requires authentication credentials which are not part of repo") shareName := "test-rclone-oct-2023" testCases := []struct { name string options *Options }{ { name: "ConnectionString", options: &Options{ ShareName: shareName, ConnectionString: "", }, }, { name: "AccountAndKey", options: &Options{ ShareName: shareName, Account: "", Key: "", }}, { name: "SASUrl", options: &Options{ ShareName: shareName, SASURL: "", }}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { fs, err := newFsFromOptions(context.TODO(), "TestAzureFiles", "", tc.options) assert.NoError(t, err) dirName := randomString(10) assert.NoError(t, fs.Mkdir(context.TODO(), dirName)) }) } } const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX" func randomString(charCount int) string { strBldr := strings.Builder{} for range charCount { randPos := rand.Int63n(52) strBldr.WriteByte(chars[randPos]) } return strBldr.String() }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/azurefiles/azurefiles_test.go
backend/azurefiles/azurefiles_test.go
//go:build !plan9 && !js package azurefiles import ( "testing" "github.com/rclone/rclone/fstest/fstests" ) func TestIntegration(t *testing.T) { var objPtr *Object fstests.Run(t, &fstests.Opt{ RemoteName: "TestAzureFiles:", NilObject: objPtr, }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/protondrive/protondrive_test.go
backend/protondrive/protondrive_test.go
package protondrive_test import ( "testing" "github.com/rclone/rclone/backend/protondrive" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestProtonDrive:", NilObject: (*protondrive.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/protondrive/protondrive.go
backend/protondrive/protondrive.go
// Package protondrive implements the Proton Drive backend package protondrive import ( "context" "errors" "fmt" "io" "path" "strings" "time" protonDriveAPI "github.com/henrybear327/Proton-API-Bridge" "github.com/henrybear327/go-proton-api" "github.com/pquerna/otp/totp" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/readers" ) /* - dirCache operates on relative path to root - path sanitization - rule of thumb: sanitize before use, but store things as-is - the paths cached in dirCache are after sanitizing - the remote/dir passed in aren't, and are stored as-is */ const ( minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential clientUIDKey = "client_uid" clientAccessTokenKey = "client_access_token" clientRefreshTokenKey = "client_refresh_token" clientSaltedKeyPassKey = "client_salted_key_pass" ) var ( errCanNotUploadFileWithUnknownSize = errors.New("proton Drive can't upload files with unknown size") errCanNotPurgeRootDirectory = errors.New("can't purge root directory") // for the auth/deauth handler _mapper configmap.Mapper _saltedKeyPass string ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "protondrive", Description: "Proton Drive", NewFs: NewFs, Options: []fs.Option{{ Name: "username", Help: `The username of your proton account`, Required: true, }, { Name: "password", Help: "The password of your proton account.", Required: true, IsPassword: true, }, { Name: "mailbox_password", Help: `The mailbox password of your two-password proton account. 
For more information regarding the mailbox password, please check the following official knowledge base article: https://proton.me/support/the-difference-between-the-mailbox-password-and-login-password `, IsPassword: true, Advanced: true, }, { Name: "2fa", Help: `The 2FA code The value can also be provided with --protondrive-2fa=000000 The 2FA code of your proton drive account if the account is set up with two-factor authentication`, Required: false, }, { Name: "otp_secret_key", Help: `The OTP secret key The value can also be provided with --protondrive-otp-secret-key=ABCDEFGHIJKLMNOPQRSTUVWXYZ234567 The OTP secret key of your proton drive account if the account is set up with two-factor authentication`, Required: false, Sensitive: true, IsPassword: true, }, { Name: clientUIDKey, Help: "Client uid key (internal use only)", Required: false, Advanced: true, Sensitive: true, Hide: fs.OptionHideBoth, }, { Name: clientAccessTokenKey, Help: "Client access token key (internal use only)", Required: false, Advanced: true, Sensitive: true, Hide: fs.OptionHideBoth, }, { Name: clientRefreshTokenKey, Help: "Client refresh token key (internal use only)", Required: false, Advanced: true, Sensitive: true, Hide: fs.OptionHideBoth, }, { Name: clientSaltedKeyPassKey, Help: "Client salted key pass key (internal use only)", Required: false, Advanced: true, Sensitive: true, Hide: fs.OptionHideBoth, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.Base | encoder.EncodeInvalidUtf8 | encoder.EncodeLeftSpace | encoder.EncodeRightSpace), }, { Name: "original_file_size", Help: `Return the file size before encryption The size of the encrypted file will be different from (bigger than) the original file size. 
Unless there is a reason to return the file size after encryption is performed, otherwise, set this option to true, as features like Open() which will need to be supplied with original content size, will fail to operate properly`, Advanced: true, Default: true, }, { Name: "app_version", Help: `The app version string The app version string indicates the client that is currently performing the API request. This information is required and will be sent with every API request.`, Advanced: true, Default: "macos-drive@1.0.0-alpha.1+rclone", }, { Name: "replace_existing_draft", Help: `Create a new revision when filename conflict is detected When a file upload is cancelled or failed before completion, a draft will be created and the subsequent upload of the same file to the same location will be reported as a conflict. The value can also be set by --protondrive-replace-existing-draft=true If the option is set to true, the draft will be replaced and then the upload operation will restart. If there are other clients also uploading at the same file location at the same time, the behavior is currently unknown. Need to set to true for integration tests. If the option is set to false, an error "a draft exist - usually this means a file is being uploaded at another client, or, there was a failed upload attempt" will be returned, and no upload will happen.`, Advanced: true, Default: false, }, { Name: "enable_caching", Help: `Caches the files and folders metadata to reduce API calls Notice: If you are mounting ProtonDrive as a VFS, please disable this feature, as the current implementation doesn't update or clear the cache when there are external changes. The files and folders on ProtonDrive are represented as links with keyrings, which can be cached to improve performance and be friendly to the API server. The cache is currently built for the case when the rclone is the only instance performing operations to the mount point. 
The event system, which is the proton API system that provides visibility of what has changed on the drive, is yet to be implemented, so updates from other clients won’t be reflected in the cache. Thus, if there are concurrent clients accessing the same mount point, then we might have a problem with caching the stale data.`, Advanced: true, Default: true, }}, }) } // Options defines the configuration for this backend type Options struct { Username string `config:"username"` Password string `config:"password"` MailboxPassword string `config:"mailbox_password"` TwoFA string `config:"2fa"` OtpSecretKey string `config:"otp_secret_key"` // advanced Enc encoder.MultiEncoder `config:"encoding"` ReportOriginalSize bool `config:"original_file_size"` AppVersion string `config:"app_version"` ReplaceExistingDraft bool `config:"replace_existing_draft"` EnableCaching bool `config:"enable_caching"` } // Fs represents a remote proton drive type Fs struct { name string // name of this remote // Notice that for ProtonDrive, it's attached under rootLink (usually /root) root string // the path we are working on. 
opt Options // parsed config options ci *fs.ConfigInfo // global config features *fs.Features // optional features pacer *fs.Pacer // pacer for API calls dirCache *dircache.DirCache // Map of directory path to directory id protonDrive *protonDriveAPI.ProtonDrive // the Proton API bridging library } // Object describes an object type Object struct { fs *Fs // what this object is part of remote string // The remote path (relative to the fs.root) size int64 // size of the object (on server, after encryption) originalSize *int64 // size of the object (after decryption) digests *string // object original content blockSizes []int64 // the block sizes of the encrypted file modTime time.Time // modification time of the object createdTime time.Time // creation time of the object id string // ID of the object mimetype string // mimetype of the file link *proton.Link // link data on proton server } // shouldRetry returns a boolean as to whether this err deserves to be // retried. It returns the err as a convenience func shouldRetry(ctx context.Context, err error) (bool, error) { return false, err } //------------------------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.opt.Enc.ToStandardPath(f.root) } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("proton drive root link ID '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // run all the dir/remote through this func (f *Fs) sanitizePath(_path string) string { _path = path.Clean(_path) if _path == "." 
|| _path == "/" { return "" } return f.opt.Enc.FromStandardPath(_path) } func getConfigMap(m configmap.Mapper) (uid, accessToken, refreshToken, saltedKeyPass string, ok bool) { if accessToken, ok = m.Get(clientAccessTokenKey); !ok { return } if uid, ok = m.Get(clientUIDKey); !ok { return } if refreshToken, ok = m.Get(clientRefreshTokenKey); !ok { return } if saltedKeyPass, ok = m.Get(clientSaltedKeyPassKey); !ok { return } _saltedKeyPass = saltedKeyPass // empty strings are considered "ok" by m.Get, which is not true business-wise ok = accessToken != "" && uid != "" && refreshToken != "" && saltedKeyPass != "" return } func setConfigMap(m configmap.Mapper, uid, accessToken, refreshToken, saltedKeyPass string) { m.Set(clientUIDKey, uid) m.Set(clientAccessTokenKey, accessToken) m.Set(clientRefreshTokenKey, refreshToken) m.Set(clientSaltedKeyPassKey, saltedKeyPass) _saltedKeyPass = saltedKeyPass } func clearConfigMap(m configmap.Mapper) { setConfigMap(m, "", "", "", "") _saltedKeyPass = "" } func authHandler(auth proton.Auth) { // fs.Debugf("authHandler called") setConfigMap(_mapper, auth.UID, auth.AccessToken, auth.RefreshToken, _saltedKeyPass) } func deAuthHandler() { // fs.Debugf("deAuthHandler called") clearConfigMap(_mapper) } func newProtonDrive(ctx context.Context, f *Fs, opt *Options, m configmap.Mapper) (*protonDriveAPI.ProtonDrive, error) { config := protonDriveAPI.NewDefaultConfig() config.AppVersion = opt.AppVersion config.UserAgent = f.ci.UserAgent // opt.UserAgent config.ReplaceExistingDraft = opt.ReplaceExistingDraft config.EnableCaching = opt.EnableCaching // let's see if we have the cached access credential uid, accessToken, refreshToken, saltedKeyPass, hasUseReusableLoginCredentials := getConfigMap(m) _saltedKeyPass = saltedKeyPass if hasUseReusableLoginCredentials { fs.Debugf(f, "Has cached credentials") config.UseReusableLogin = true config.ReusableCredential.UID = uid config.ReusableCredential.AccessToken = accessToken 
config.ReusableCredential.RefreshToken = refreshToken config.ReusableCredential.SaltedKeyPass = saltedKeyPass protonDrive /* credential will be nil since access credentials are passed in */, _, err := protonDriveAPI.NewProtonDrive(ctx, config, authHandler, deAuthHandler) if err != nil { fs.Debugf(f, "Cached credential doesn't work, clearing and using the fallback login method") // clear the access token on failure clearConfigMap(m) fs.Debugf(f, "couldn't initialize a new proton drive instance using cached credentials: %v", err) // we fallback to username+password login -> don't throw an error here // return nil, fmt.Errorf("couldn't initialize a new proton drive instance: %w", err) } else { fs.Debugf(f, "Used cached credential to initialize the ProtonDrive API") return protonDrive, nil } } // if not, let's try to log the user in using username and password (and 2FA if required) fs.Debugf(f, "Using username and password to log in") config.UseReusableLogin = false config.FirstLoginCredential.Username = opt.Username config.FirstLoginCredential.Password = opt.Password config.FirstLoginCredential.MailboxPassword = opt.MailboxPassword // if 2FA code is provided, use it; otherwise, generate one using the OTP secret key if provided config.FirstLoginCredential.TwoFA = opt.TwoFA if opt.TwoFA == "" && opt.OtpSecretKey != "" { code, err := totp.GenerateCode(opt.OtpSecretKey, time.Now()) if err != nil { return nil, fmt.Errorf("couldn't generate 2FA code: %w", err) } config.FirstLoginCredential.TwoFA = code } protonDrive, auth, err := protonDriveAPI.NewProtonDrive(ctx, config, authHandler, deAuthHandler) if err != nil { return nil, fmt.Errorf("couldn't initialize a new proton drive instance: %w", err) } fs.Debugf(f, "Used username and password to initialize the ProtonDrive API") setConfigMap(m, auth.UID, auth.AccessToken, auth.RefreshToken, auth.SaltedKeyPass) return protonDrive, nil } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, 
root string, m configmap.Mapper) (fs.Fs, error) { // pacer is not used in NewFs() _mapper = m // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } if opt.Password != "" { var err error opt.Password, err = obscure.Reveal(opt.Password) if err != nil { return nil, fmt.Errorf("couldn't decrypt password: %w", err) } } if opt.MailboxPassword != "" { var err error opt.MailboxPassword, err = obscure.Reveal(opt.MailboxPassword) if err != nil { return nil, fmt.Errorf("couldn't decrypt mailbox password: %w", err) } } if opt.OtpSecretKey != "" { var err error opt.OtpSecretKey, err = obscure.Reveal(opt.OtpSecretKey) if err != nil { return nil, fmt.Errorf("couldn't decrypt OtpSecretKey: %w", err) } } ci := fs.GetConfig(ctx) root = strings.Trim(root, "/") f := &Fs{ name: name, root: root, opt: *opt, ci: ci, pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } f.features = (&fs.Features{ ReadMimeType: true, CanHaveEmptyDirectories: true, /* can't have multiple threads downloading The raw file is split into equally-sized (currently 4MB, but it might change in the future, say to 8MB, 16MB, etc.) blocks, except the last one which might be smaller than 4MB. Each block is encrypted separately, where the size and sha1 after the encryption is performed on the block is added to the metadata of the block, but the original block size and sha1 is not in the metadata. We can make assumption and implement the chunker, but for now, we would rather be safe about it, and let the block being concurrently downloaded and decrypted in the background, to speed up the download operation! 
*/ NoMultiThreading: true, }).Fill(ctx, f) protonDrive, err := newProtonDrive(ctx, f, opt, m) if err != nil { return nil, err } f.protonDrive = protonDrive root = f.sanitizePath(root) f.dirCache = dircache.New( root, /* root folder path */ protonDrive.MainShare.LinkID, /* real root ID is the root folder, since we can't go past this folder */ f, ) err = f.dirCache.FindRoot(ctx, false) if err != nil { // if the root directory is not found, the initialization will still work // but if it's other kinds of error, then we raise it if err != fs.ErrorDirNotFound { return nil, fmt.Errorf("couldn't initialize a new root remote: %w", err) } // Assume it is a file (taken and modified from box.go) newRoot, remote := dircache.SplitPath(root) tempF := *f tempF.dirCache = dircache.New(newRoot, protonDrive.MainShare.LinkID, &tempF) tempF.root = newRoot // Make new Fs which is the parent err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } _, err := tempF.newObject(ctx, remote) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f return f, nil } return nil, err } f.features.Fill(ctx, &tempF) // XXX: update the old f here instead of returning tempF, since // `features` were already filled with functions having *f as a receiver. // See https://github.com/rclone/rclone/issues/2182 f.dirCache = tempF.dirCache f.root = tempF.root // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } //------------------------------------------------------------------------------ // CleanUp deletes all files currently in trash func (f *Fs) CleanUp(ctx context.Context) error { return f.pacer.Call(func() (bool, error) { err := f.protonDrive.EmptyTrash(ctx) return shouldRetry(ctx, err) }) } // NewObject finds the Object at remote. If it can't be found // it returns the error ErrorObjectNotFound. 
// // If remote points to a directory then it should return // ErrorIsDir if possible without doing any extra work, // otherwise ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObject(ctx, remote) } func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, error) { // attempt to locate the file leaf, folderLinkID, err := f.dirCache.FindPath(ctx, f.sanitizePath(remote), false) if err != nil { if err == fs.ErrorDirNotFound { // parent folder of the file not found, we for sure can't find the file return nil, fs.ErrorObjectNotFound } // other error has occurred return nil, err } var link *proton.Link if err = f.pacer.Call(func() (bool, error) { link, err = f.protonDrive.SearchByNameInActiveFolderByID(ctx, folderLinkID, leaf, true, false, proton.LinkStateActive) return shouldRetry(ctx, err) }); err != nil { return nil, err } if link == nil { // both link and err are nil, file not found return nil, fs.ErrorObjectNotFound } return link, nil } // readMetaDataForLink reads the metadata from the remote func (f *Fs) readMetaDataForLink(ctx context.Context, link *proton.Link) (*protonDriveAPI.FileSystemAttrs, error) { var fileSystemAttrs *protonDriveAPI.FileSystemAttrs var err error if err = f.pacer.Call(func() (bool, error) { fileSystemAttrs, err = f.protonDrive.GetActiveRevisionAttrs(ctx, link) return shouldRetry(ctx, err) }); err != nil { return nil, err } return fileSystemAttrs, nil } // Return an Object from a path and link // // If it can't be found it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } o.id = link.LinkID o.size = link.Size o.modTime = time.Unix(link.ModifyTime, 0) o.createdTime = time.Unix(link.CreateTime, 0) o.mimetype = link.MIMEType o.link = link fileSystemAttrs, err := o.fs.readMetaDataForLink(ctx, link) if err != nil { return nil, err } if fileSystemAttrs != nil { o.modTime = fileSystemAttrs.ModificationTime o.originalSize = &fileSystemAttrs.Size o.blockSizes = fileSystemAttrs.BlockSizes o.digests = &fileSystemAttrs.Digests } return o, nil } // Return an Object from a path only // // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObject(ctx context.Context, remote string) (fs.Object, error) { link, err := f.getObjectLink(ctx, remote) if err != nil { return nil, err } return f.newObjectWithLink(ctx, remote, link) } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // Notice that this function is expensive since everything on proton is encrypted // So having a remote with 10k files, during operations like sync, might take a while and lots of bandwidth! 
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) { folderLinkID, err := f.dirCache.FindDir(ctx, f.sanitizePath(dir), false) // will handle ErrDirNotFound here if err != nil { return nil, err } var foldersAndFiles []*protonDriveAPI.ProtonDirectoryData if err = f.pacer.Call(func() (bool, error) { foldersAndFiles, err = f.protonDrive.ListDirectory(ctx, folderLinkID) return shouldRetry(ctx, err) }); err != nil { return nil, err } entries := make(fs.DirEntries, 0) for i := range foldersAndFiles { remote := path.Join(dir, f.opt.Enc.ToStandardName(foldersAndFiles[i].Name)) if foldersAndFiles[i].IsFolder { f.dirCache.Put(remote, foldersAndFiles[i].Link.LinkID) d := fs.NewDir(remote, time.Unix(foldersAndFiles[i].Link.ModifyTime, 0)).SetID(foldersAndFiles[i].Link.LinkID) entries = append(entries, d) } else { obj, err := f.newObjectWithLink(ctx, remote, foldersAndFiles[i].Link) if err != nil { return nil, err } entries = append(entries, obj) } } return entries, nil } // FindLeaf finds a directory of name leaf in the folder with ID pathID // // This should be implemented by the backend and will be called by the // dircache package when appropriate. func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (string, bool, error) { /* f.opt.Enc.FromStandardName(leaf) not required since the DirCache only process sanitized path */ var link *proton.Link var err error if err = f.pacer.Call(func() (bool, error) { link, err = f.protonDrive.SearchByNameInActiveFolderByID(ctx, pathID, leaf, false, true, proton.LinkStateActive) return shouldRetry(ctx, err) }); err != nil { return "", false, err } if link == nil { return "", false, nil } return link.LinkID, true, nil } // CreateDir makes a directory with pathID as parent and name leaf // // This should be implemented by the backend and will be called by the // dircache package when appropriate. 
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error) { /* f.opt.Enc.FromStandardName(leaf) not required since the DirCache only process sanitized path */ var newID string var err error if err = f.pacer.Call(func() (bool, error) { newID, err = f.protonDrive.CreateNewFolderByID(ctx, pathID, leaf) return shouldRetry(ctx, err) }); err != nil { return "", err } return newID, err } // Put in to the remote path with the modTime given of the given size // // When called from outside an Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Put should either // return an error or upload it properly (rather than e.g. calling panic). // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { size := src.Size() if size < 0 { return nil, errCanNotUploadFileWithUnknownSize } existingObj, err := f.NewObject(ctx, src.Remote()) switch err { case nil: // object is found, we add an revision to it return existingObj, existingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // object not found, so we need to create it remote := src.Remote() size := src.Size() modTime := src.ModTime(ctx) obj, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } return obj, obj.Update(ctx, in, src, options...) default: // real error caught return nil, err } } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Returns the object, leaf, directoryID and error. // // Used to create new objects func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (*Object, error) { // ˇ-------ˇ filename // e.g. 
/root/a/b/c/test.txt // ^~~~~~~~~~~^ dirPath // Create the directory for the object if it doesn't exist _, _, err := f.dirCache.FindPath(ctx, f.sanitizePath(remote), true) if err != nil { return nil, err } // Temporary Object under construction obj := &Object{ fs: f, remote: remote, size: size, originalSize: nil, id: "", modTime: modTime, mimetype: "", link: nil, } return obj, nil } // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, err := f.dirCache.FindDir(ctx, f.sanitizePath(dir), true) return err } // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { folderLinkID, err := f.dirCache.FindDir(ctx, f.sanitizePath(dir), false) if err == fs.ErrorDirNotFound { return fmt.Errorf("[Rmdir] cannot find LinkID for dir %s (%s)", dir, f.sanitizePath(dir)) } else if err != nil { return err } if err = f.pacer.Call(func() (bool, error) { err = f.protonDrive.MoveFolderToTrashByID(ctx, folderLinkID, true) return shouldRetry(ctx, err) }); err != nil { return err } f.dirCache.FlushDir(f.sanitizePath(dir)) return nil } // Precision of the ModTimes in this Fs func (f *Fs) Precision() time.Duration { return time.Second } // DirCacheFlush an optional interface to flush internal directory cache // DirCacheFlush resets the directory cache - used in testing // as an optional interface func (f *Fs) DirCacheFlush() { f.dirCache.ResetRoot() f.protonDrive.ClearCache() } // Hashes returns the supported hash types of the filesystem func (f *Fs) Hashes() hash.Set { return hash.Set(hash.SHA1) } // About gets quota information func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { var user *proton.User var err error if err = f.pacer.Call(func() (bool, error) { user, err = f.protonDrive.About(ctx) return shouldRetry(ctx, err) }); err != nil { return nil, err 
} total := user.MaxSpace used := user.UsedSpace free := total - used usage := &fs.Usage{ Total: &total, Used: &used, Free: &free, } return usage, nil } // ------------------------------------------------------------ // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Hash returns the hashes of an object func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.SHA1 { return "", hash.ErrUnsupported } if o.digests != nil { return *o.digests, nil } // sha1 not cached: we fetch and try to obtain the sha1 of the link fileSystemAttrs, err := o.fs.protonDrive.GetActiveRevisionAttrsByID(ctx, o.ID()) if err != nil { return "", err } if fileSystemAttrs == nil || fileSystemAttrs.Digests == "" { fs.Debugf(o, "file sha1 digest missing") return "", nil } return fileSystemAttrs.Digests, nil } // Size returns the size of an object in bytes func (o *Object) Size() int64 { if o.fs.opt.ReportOriginalSize { // if ReportOriginalSize is set, we will generate an error when the original size failed to be parsed // this is crucial as features like Open() will need to use the proper size to operate the seek/range operator if o.originalSize != nil { return *o.originalSize } fs.Debugf(o, "Original file size missing") } return o.size } // ModTime returns the modification time of the object // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) ModTime(ctx context.Context) time.Time { return o.modTime } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { return fs.ErrorCantSetModTime } // Storable returns a boolean showing whether this object storable func (o *Object) Storable() 
bool { return true } // Open opens the file for read. Call Close() on the returned io.ReadCloser func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { fs.FixRangeOption(options, *o.originalSize) var offset, limit int64 = 0, -1 for _, option := range options { // if the caller passes in nil for options, it will become array of nil switch x := option.(type) { case *fs.SeekOption: offset = x.Offset case *fs.RangeOption: offset, limit = x.Decode(o.Size()) default: if option.Mandatory() { fs.Logf(o, "Unsupported mandatory option: %v", option) } } } // download and decrypt the file var reader io.ReadCloser var fileSystemAttrs *protonDriveAPI.FileSystemAttrs var sizeOnServer int64 var err error if err = o.fs.pacer.Call(func() (bool, error) { reader, sizeOnServer, fileSystemAttrs, err = o.fs.protonDrive.DownloadFileByID(ctx, o.id, offset) return shouldRetry(ctx, err) }); err != nil { return nil, err } if fileSystemAttrs != nil { o.originalSize = &fileSystemAttrs.Size o.modTime = fileSystemAttrs.ModificationTime o.digests = &fileSystemAttrs.Digests o.blockSizes = fileSystemAttrs.BlockSizes } else { fs.Debugf(o, "fileSystemAttrs is nil: using fallback size, and now digests and blocksizes available") o.originalSize = &sizeOnServer o.size = sizeOnServer o.digests = nil o.blockSizes = nil } retReader := io.NopCloser(reader) // the NewLimitedReadCloser will deal with the limit // deal with limit return readers.NewLimitedReadCloser(retReader, limit), nil } // Update in to the object with the modTime given of the given size // // When called from outside an Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either // return an error or update the object properly (rather than e.g. calling panic). 
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { size := src.Size() if size < 0 { return errCanNotUploadFileWithUnknownSize } remote := o.Remote() leaf, folderLinkID, err := o.fs.dirCache.FindPath(ctx, o.fs.sanitizePath(remote), true) if err != nil { return err } modTime := src.ModTime(ctx) var linkID string var fileSystemAttrs *proton.RevisionXAttrCommon if err = o.fs.pacer.Call(func() (bool, error) { linkID, fileSystemAttrs, err = o.fs.protonDrive.UploadFileByReader(ctx, folderLinkID, leaf, modTime, in, 0) return shouldRetry(ctx, err) }); err != nil { return err } var sha1Hash string if val, ok := fileSystemAttrs.Digests["SHA1"]; ok { sha1Hash = val } else { sha1Hash = "" } o.id = linkID o.originalSize = &fileSystemAttrs.Size o.modTime = modTime o.blockSizes = fileSystemAttrs.BlockSizes o.digests = &sha1Hash return nil } // Remove an object func (o *Object) Remove(ctx context.Context) error { return o.fs.pacer.Call(func() (bool, error) { err := o.fs.protonDrive.MoveFileToTrashByID(ctx, o.id) return shouldRetry(ctx, err) }) } // ID returns the ID of the Object if known, or "" if not func (o *Object) ID() string { return o.id } // Purge all files in the directory specified // // Implement this if you have a way of deleting all the files // quicker than just running Remove() on the result of List() // // Return an error if it doesn't exist func (f *Fs) Purge(ctx context.Context, dir string) error { root := path.Join(f.root, dir) if root == "" { // we can't remove the root directory, but we can list the directory and delete every folder and file in here return errCanNotPurgeRootDirectory } folderLinkID, err := f.dirCache.FindDir(ctx, f.sanitizePath(dir), false) if err != nil { return err } if err = f.pacer.Call(func() (bool, error) { err = f.protonDrive.MoveFolderToTrashByID(ctx, folderLinkID, false) return shouldRetry(ctx, err) }); err != nil { return err } f.dirCache.FlushDir(dir) return nil } // 
MimeType of an Object if known, "" otherwise func (o *Object) MimeType(ctx context.Context) string { return o.mimetype } // Disconnect the current user
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/seafile/seafile.go
backend/seafile/seafile.go
// Package seafile provides an interface to the Seafile storage system. package seafile import ( "context" "errors" "fmt" "io" "net/http" "net/url" "path" "strconv" "strings" "sync" "time" "github.com/coreos/go-semver/semver" "github.com/rclone/rclone/backend/seafile/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/cache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/rest" ) const ( librariesCacheKey = "all" retryAfterHeader = "Retry-After" configURL = "url" configUser = "user" configPassword = "pass" config2FA = "2fa" configLibrary = "library" configLibraryKey = "library_key" configCreateLibrary = "create_library" configAuthToken = "auth_token" ) // This is global to all instances of fs // (copying from a seafile remote to another remote would create 2 fs) var ( rangeDownloadNotice sync.Once // Display the notice only once createLibraryMutex sync.Mutex // Mutex to protect library creation ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "seafile", Description: "seafile", NewFs: NewFs, Config: Config, Options: []fs.Option{{ Name: configURL, Help: "URL of seafile host to connect to.", Required: true, Examples: []fs.OptionExample{{ Value: "https://cloud.seafile.com/", Help: "Connect to cloud.seafile.com.", }}, Sensitive: true, }, { Name: configUser, Help: "User name (usually email address).", Required: true, Sensitive: true, }, { // Password is not required, it will be left blank for 2FA Name: configPassword, Help: "Password.", IsPassword: true, Sensitive: true, }, { Name: config2FA, Help: "Two-factor authentication 
('true' if the account has 2FA enabled).", Default: false, }, { Name: configLibrary, Help: "Name of the library.\n\nLeave blank to access all non-encrypted libraries.", }, { Name: configLibraryKey, Help: "Library password (for encrypted libraries only).\n\nLeave blank if you pass it through the command line.", IsPassword: true, Sensitive: true, }, { Name: configCreateLibrary, Help: "Should rclone create a library if it doesn't exist.", Advanced: true, Default: false, }, { // Keep the authentication token after entering the 2FA code Name: configAuthToken, Help: "Authentication token.", Hide: fs.OptionHideBoth, Sensitive: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.EncodeZero | encoder.EncodeCtl | encoder.EncodeSlash | encoder.EncodeBackSlash | encoder.EncodeDoubleQuote | encoder.EncodeInvalidUtf8 | encoder.EncodeDot), }}, }) } // Options defines the configuration for this backend type Options struct { URL string `config:"url"` User string `config:"user"` Password string `config:"pass"` Is2FA bool `config:"2fa"` AuthToken string `config:"auth_token"` LibraryName string `config:"library"` LibraryKey string `config:"library_key"` CreateLibrary bool `config:"create_library"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote seafile type Fs struct { name string // name of this remote root string // the path we are working on libraryName string // current library encrypted bool // Is this an encrypted library rootDirectory string // directory part of root (if any) opt Options // parsed options libraries *cache.Cache // Keep a cache of libraries librariesMutex sync.Mutex // Mutex to protect getLibraryID features *fs.Features // optional features endpoint *url.URL // URL of the host endpointURL string // endpoint as a string srv *rest.Client // the connection to the server pacer *fs.Pacer // pacer for API calls authMu sync.Mutex // Mutex to protect library decryption createDirMutex 
sync.Mutex // Protect creation of directories useOldDirectoryAPI bool // Use the old API v2 if seafile < 7 moveDirNotAvailable bool // Version < 7.0 don't have an API to move a directory renew *Renew // Renew an encrypted library token } // ------------------------------------------------------------ // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } root = strings.Trim(root, "/") isLibraryRooted := opt.LibraryName != "" var libraryName, rootDirectory string if isLibraryRooted { libraryName = opt.LibraryName rootDirectory = root } else { libraryName, rootDirectory = bucket.Split(root) } if !strings.HasSuffix(opt.URL, "/") { opt.URL += "/" } if opt.Password != "" { var err error opt.Password, err = obscure.Reveal(opt.Password) if err != nil { return nil, fmt.Errorf("couldn't decrypt user password: %w", err) } } if opt.LibraryKey != "" { var err error opt.LibraryKey, err = obscure.Reveal(opt.LibraryKey) if err != nil { return nil, fmt.Errorf("couldn't decrypt library password: %w", err) } } // Parse the endpoint u, err := url.Parse(opt.URL) if err != nil { return nil, err } f := &Fs{ name: name, root: root, libraryName: libraryName, rootDirectory: rootDirectory, libraries: cache.New(), opt: *opt, endpoint: u, endpointURL: u.String(), srv: rest.NewClient(fshttp.NewClient(ctx)).SetRoot(u.String()), pacer: getPacer(ctx, opt.URL), } f.features = (&fs.Features{ CanHaveEmptyDirectories: true, BucketBased: opt.LibraryName == "", }).Fill(ctx, f) serverInfo, err := f.getServerInfo(ctx) if err != nil { return nil, err } fs.Debugf(nil, "Seafile server version %s", serverInfo.Version) // We don't support lower than seafile v6.0 (version 6.0 is already more than 3 years old) serverVersion := semver.New(serverInfo.Version) if serverVersion.Major < 6 { return nil, 
errors.New("unsupported Seafile server (version < 6.0)") } if serverVersion.Major < 7 { // Seafile 6 does not support recursive listing f.useOldDirectoryAPI = true f.features.ListR = nil // It also does no support moving directories f.moveDirNotAvailable = true } // Take the authentication token from the configuration first token := f.opt.AuthToken if token == "" { // If not available, send the user/password instead token, err = f.authorizeAccount(ctx) if err != nil { return nil, err } } f.setAuthorizationToken(token) if f.libraryName != "" { // Check if the library exists exists, err := f.libraryExists(ctx, f.libraryName) if err != nil { return f, err } if !exists { if f.opt.CreateLibrary { err := f.mkLibrary(ctx, f.libraryName, "") if err != nil { return f, err } } else { return f, fmt.Errorf("library '%s' was not found, and the option to create it is not activated (advanced option)", f.libraryName) } } libraryID, err := f.getLibraryID(ctx, f.libraryName) if err != nil { return f, err } f.encrypted, err = f.isEncrypted(ctx, libraryID) if err != nil { return f, err } if f.encrypted { // If we're inside an encrypted library, let's decrypt it now err = f.authorizeLibrary(ctx, libraryID) if err != nil { return f, err } // And remove the public link feature f.features.PublicLink = nil // renew the library password every 45 minutes f.renew = NewRenew(45*time.Minute, func() error { return f.authorizeLibrary(context.Background(), libraryID) }) } } else { // Deactivate the cleaner feature since there's no library selected f.features.CleanUp = nil } if f.rootDirectory != "" { // Check to see if the root is an existing file remote := path.Base(rootDirectory) f.rootDirectory = path.Dir(rootDirectory) if f.rootDirectory == "." 
{ f.rootDirectory = "" } _, err := f.NewObject(ctx, remote) if err != nil { if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorNotAFile) { // File doesn't exist so return the original f f.rootDirectory = rootDirectory return f, nil } return f, err } // Correct root if definitely pointing to a file f.root = path.Dir(f.root) if f.root == "." || f.root == "/" { f.root = "" } // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // Config callback for 2FA func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { serverURL, ok := m.Get(configURL) if !ok || serverURL == "" { // If there's no server URL, it means we're trying an operation at the backend level, like a "rclone authorize seafile" return nil, errors.New("operation not supported on this remote. If you need a 2FA code on your account, use the command: rclone config reconnect <remote name>: ") } u, err := url.Parse(serverURL) if err != nil { return nil, fmt.Errorf("invalid server URL %s", serverURL) } is2faEnabled, _ := m.Get(config2FA) if is2faEnabled != "true" { // no need to do anything here return nil, nil } username, _ := m.Get(configUser) if username == "" { return nil, errors.New("a username is required") } password, _ := m.Get(configPassword) if password != "" { password, _ = obscure.Reveal(password) } switch config.State { case "": // Empty state means it's the first call to the Config function if password == "" { return fs.ConfigPassword("password", "config_password", "Two-factor authentication: please enter your password (it won't be saved in the configuration)") } // password was successfully loaded from the config return fs.ConfigGoto("2fa") case "password": // password should be coming from the previous state (entered by the user) password = config.Result if password == "" { return fs.ConfigError("", "Password can't be blank") } // save it into the configuration file and keep going 
m.Set(configPassword, obscure.MustObscure(password)) return fs.ConfigGoto("2fa") case "2fa": return fs.ConfigInput("2fa_do", "config_2fa", "Two-factor authentication: please enter your 2FA code") case "2fa_do": code := config.Result if code == "" { return fs.ConfigError("2fa", "2FA codes can't be blank") } // Create rest client for getAuthorizationToken url := u.String() if !strings.HasPrefix(url, "/") { url += "/" } srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(url) // We loop asking for a 2FA code ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() token, err := getAuthorizationToken(ctx, srv, username, password, code) if err != nil { return fs.ConfigConfirm("2fa_error", true, "config_retry", fmt.Sprintf("Authentication failed: %v\n\nTry Again?", err)) } if token == "" { return fs.ConfigConfirm("2fa_error", true, "config_retry", "Authentication failed - no token returned.\n\nTry Again?") } // Let's save the token into the configuration m.Set(configAuthToken, token) // And delete any previous entry for password m.Set(configPassword, "") // And we're done here return nil, nil case "2fa_error": if config.Result == "true" { return fs.ConfigGoto("2fa") } return nil, errors.New("2fa authentication failed") } return nil, fmt.Errorf("unknown state %q", config.State) } // Shutdown the Fs func (f *Fs) Shutdown(ctx context.Context) error { if f.renew == nil { return nil } f.renew.Shutdown() return nil } // sets the AuthorizationToken up func (f *Fs) setAuthorizationToken(token string) { f.srv.SetHeader("Authorization", "Token "+token) } // authorizeAccount gets the auth token. func (f *Fs) authorizeAccount(ctx context.Context) (string, error) { f.authMu.Lock() defer f.authMu.Unlock() token, err := f.getAuthorizationToken(ctx) if err != nil { return "", err } return token, nil } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 408, // Request Timeout 429, // Rate exceeded. 
500, // Get occasional 500 Internal Server Error 503, // Service Unavailable 504, // Gateway Time-out 520, // Operation failed (We get them sometimes when running tests in parallel) } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } // For 429 errors look at the Retry-After: header and // set the retry appropriately, starting with a minimum of 1 // second if it isn't set. if resp != nil && (resp.StatusCode == 429) { var retryAfter = 1 retryAfterString := resp.Header.Get(retryAfterHeader) if retryAfterString != "" { var err error retryAfter, err = strconv.Atoi(retryAfterString) if err != nil { fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err) } } return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second) } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } func (f *Fs) shouldRetryUpload(ctx context.Context, resp *http.Response, err error) (bool, error) { if err != nil || (resp != nil && resp.StatusCode > 400) { return true, err } return false, nil } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { if f.libraryName == "" { return "seafile root" } library := "library" if f.encrypted { library = "encrypted " + library } if f.rootDirectory == "" { return fmt.Sprintf("seafile %s '%s'", library, f.libraryName) } return fmt.Sprintf("seafile %s '%s' path '%s'", library, f.libraryName, f.rootDirectory) } // Precision of the ModTimes in this Fs func (f *Fs) Precision() time.Duration { // The API doesn't support setting the modified time return 
fs.ModTimeNotSupported } // Hashes returns the supported hash sets. func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return fs.ErrorDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { if dir == "" && f.libraryName == "" { return f.listLibraries(ctx) } return f.listDir(ctx, dir, false) } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { libraryName, filePath := f.splitPath(remote) libraryID, err := f.getLibraryID(ctx, libraryName) if err != nil { return nil, err } err = f.authorizeLibrary(ctx, libraryID) if err != nil { return nil, err } fileDetails, err := f.getFileDetails(ctx, libraryID, filePath) if err != nil { return nil, err } modTime, err := time.Parse(time.RFC3339, fileDetails.Modified) if err != nil { fs.LogPrintf(fs.LogLevelWarning, fileDetails.Modified, "Cannot parse datetime") } o := &Object{ fs: f, libraryID: libraryID, id: fileDetails.ID, remote: remote, pathInLibrary: filePath, modTime: modTime, size: fileDetails.Size, } return o, nil } // Put in to the remote path with the modTime given of the given size // // When called from outside an Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Put should either // return an error or upload it properly (rather than e.g. calling panic). 
// // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { object := f.newObject(ctx, src.Remote(), src.Size(), src.ModTime(ctx)) // Check if we need to create a new library at that point if object.libraryID == "" { library, _ := f.splitPath(object.remote) err := f.Mkdir(ctx, library) if err != nil { return object, err } libraryID, err := f.getLibraryID(ctx, library) if err != nil { return object, err } object.libraryID = libraryID } err := object.Update(ctx, in, src, options...) if err != nil { return object, err } return object, nil } // PutStream uploads to the remote path with the modTime given but of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) } // Mkdir makes the directory or library // // Shouldn't return an error if it already exists func (f *Fs) Mkdir(ctx context.Context, dir string) error { libraryName, folder := f.splitPath(dir) if strings.HasPrefix(dir, libraryName) { err := f.mkLibrary(ctx, libraryName, "") if err != nil { return err } if folder == "" { // No directory to create after the library return nil } } err := f.mkDir(ctx, dir) if err != nil { return err } return nil } // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { libraryName, dirPath := f.splitPath(dir) libraryID, err := f.getLibraryID(ctx, libraryName) if err != nil { return err } if check { directoryEntries, err := f.getDirectoryEntries(ctx, libraryID, dirPath, false) if err != nil { return err } if len(directoryEntries) > 0 { return fs.ErrorDirectoryNotEmpty } } if dirPath == "" || dirPath == "/" { return 
f.deleteLibrary(ctx, libraryID) } return f.deleteDir(ctx, libraryID, dirPath) } // Rmdir removes the directory or library if empty // // Return an error if it doesn't exist or isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) } // ==================== Optional Interface fs.ListRer ==================== // ListR lists the objects and directories of the Fs starting // from dir recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) error { var err error if dir == "" && f.libraryName == "" { libraries, err := f.listLibraries(ctx) if err != nil { return err } // Send the library list as folders err = callback(libraries) if err != nil { return err } // Then list each library for _, library := range libraries { err = f.listDirCallback(ctx, library.Remote(), callback) if err != nil { return err } } return nil } err = f.listDirCallback(ctx, dir, callback) if err != nil { return err } return nil } // ==================== Optional Interface fs.Copier ==================== // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { return nil, fs.ErrorCantCopy } srcLibraryName, srcPath := srcObj.fs.splitPath(src.Remote()) srcLibraryID, err := srcObj.fs.getLibraryID(ctx, srcLibraryName) if err != nil { return nil, err } dstLibraryName, dstPath := f.splitPath(remote) dstLibraryID, err := f.getLibraryID(ctx, dstLibraryName) if err != nil { return nil, err } // Seafile does not accept a file name as a destination, only a path. // The destination filename will be the same as the original, or with (1) added in case it was already existing dstDir, dstFilename := path.Split(dstPath) // We have to make sure the destination path exists on the server or it's going to bomb out with an obscure error message err = f.mkMultiDir(ctx, dstLibraryID, dstDir) if err != nil { return nil, err } op, err := f.copyFile(ctx, srcLibraryID, srcPath, dstLibraryID, dstDir) if err != nil { return nil, err } if op.Name != dstFilename { // Destination was existing, so we need to move the file back into place err = f.adjustDestination(ctx, dstLibraryID, op.Name, dstPath, dstDir, dstFilename) if err != nil { return nil, err } } // Create a new object from the result return f.NewObject(ctx, remote) } // ==================== Optional Interface fs.Mover ==================== // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { return nil, fs.ErrorCantMove } srcLibraryName, srcPath := srcObj.fs.splitPath(src.Remote()) srcLibraryID, err := srcObj.fs.getLibraryID(ctx, srcLibraryName) if err != nil { return nil, err } dstLibraryName, dstPath := f.splitPath(remote) dstLibraryID, err := f.getLibraryID(ctx, dstLibraryName) if err != nil { return nil, err } // anchor both source and destination paths from the root so we can compare them srcPath = path.Join("/", srcPath) dstPath = path.Join("/", dstPath) srcDir := path.Dir(srcPath) dstDir, dstFilename := path.Split(dstPath) if srcLibraryID == dstLibraryID && srcDir == dstDir { // It's only a simple case of renaming the file _, err := f.renameFile(ctx, srcLibraryID, srcPath, dstFilename) if err != nil { return nil, err } return f.NewObject(ctx, remote) } // We have to make sure the destination path exists on the server err = f.mkMultiDir(ctx, dstLibraryID, dstDir) if err != nil { return nil, err } // Seafile does not accept a file name as a destination, only a path. // The destination filename will be the same as the original, or with (1) added in case it already exists op, err := f.moveFile(ctx, srcLibraryID, srcPath, dstLibraryID, dstDir) if err != nil { return nil, err } if op.Name != dstFilename { // Destination was existing, so we need to move the file back into place err = f.adjustDestination(ctx, dstLibraryID, op.Name, dstPath, dstDir, dstFilename) if err != nil { return nil, err } } // Create a new object from the result return f.NewObject(ctx, remote) } // adjustDestination rename the file func (f *Fs) adjustDestination(ctx context.Context, libraryID, srcFilename, dstPath, dstDir, dstFilename string) error { // Seafile seems to be acting strangely if the renamed file already exists (some cache issue maybe?) 
// It's better to delete the destination if it already exists fileDetail, err := f.getFileDetails(ctx, libraryID, dstPath) if err != nil && err != fs.ErrorObjectNotFound { return err } if fileDetail != nil { err = f.deleteFile(ctx, libraryID, dstPath) if err != nil { return err } } _, err = f.renameFile(ctx, libraryID, path.Join(dstDir, srcFilename), dstFilename) if err != nil { return err } return nil } // ==================== Optional Interface fs.DirMover ==================== // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { // Cast into a seafile Fs srcFs, ok := src.(*Fs) if !ok { return fs.ErrorCantDirMove } srcLibraryName, srcPath := srcFs.splitPath(srcRemote) srcLibraryID, err := srcFs.getLibraryID(ctx, srcLibraryName) if err != nil { return err } dstLibraryName, dstPath := f.splitPath(dstRemote) dstLibraryID, err := f.getLibraryID(ctx, dstLibraryName) if err != nil { return err } srcDir := path.Dir(srcPath) dstDir, dstName := path.Split(dstPath) // anchor both source and destination to the root so we can compare them srcDir = path.Join("/", srcDir) dstDir = path.Join("/", dstDir) // The destination should not exist entries, err := f.getDirectoryEntries(ctx, dstLibraryID, dstDir, false) if err != nil && err != fs.ErrorDirNotFound { return err } if err == nil { for _, entry := range entries { if entry.Name == dstName { // Destination exists return fs.ErrorDirExists } } } if srcLibraryID == dstLibraryID && srcDir == dstDir { // It's only renaming err = srcFs.renameDir(ctx, dstLibraryID, srcPath, dstName) if err != nil { return err } return nil } // Seafile < 7 does not support moving directories if f.moveDirNotAvailable { return fs.ErrorCantDirMove } // 
Make sure the destination path exists err = f.mkMultiDir(ctx, dstLibraryID, dstDir) if err != nil { return err } // If the destination already exists, seafile will add a " (n)" to the name. // Sadly this API call will not return the new given name like the move file version does // So the trick is to rename the directory to something random before moving it // After the move we rename the random name back to the expected one // Hopefully there won't be anything with the same name existing at destination ;) tempName := ".rclone-move-" + random.String(32) // 1- rename source err = srcFs.renameDir(ctx, srcLibraryID, srcPath, tempName) if err != nil { return fmt.Errorf("cannot rename source directory to a temporary name: %w", err) } // 2- move source to destination err = f.moveDir(ctx, srcLibraryID, srcDir, tempName, dstLibraryID, dstDir) if err != nil { // Doh! Let's rename the source back to its original name _ = srcFs.renameDir(ctx, srcLibraryID, path.Join(srcDir, tempName), path.Base(srcPath)) return err } // 3- rename destination back to source name err = f.renameDir(ctx, dstLibraryID, path.Join(dstDir, tempName), dstName) if err != nil { return fmt.Errorf("cannot rename temporary directory to destination name: %w", err) } return nil } // ==================== Optional Interface fs.Purger ==================== // Purge all files in the directory // // Implement this if you have a way of deleting all the files // quicker than just running Remove() on the result of List() // // Return an error if it doesn't exist func (f *Fs) Purge(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, false) } // ==================== Optional Interface fs.CleanUpper ==================== // CleanUp the trash in the Fs func (f *Fs) CleanUp(ctx context.Context) error { if f.libraryName == "" { return errors.New("cannot clean up at the root of the seafile server, please select a library to clean up") } libraryID, err := f.getLibraryID(ctx, f.libraryName) if err != nil { 
return err } return f.emptyLibraryTrash(ctx, libraryID) } // ==================== Optional Interface fs.Abouter ==================== // About gets quota information func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { accountInfo, err := f.getUserAccountInfo(ctx) if err != nil { return nil, err } usage = &fs.Usage{ Used: fs.NewUsageValue(accountInfo.Usage), // bytes in use } if accountInfo.Total > 0 { usage.Total = fs.NewUsageValue(accountInfo.Total) // quota of bytes that can be used usage.Free = fs.NewUsageValue(accountInfo.Total - accountInfo.Usage) // bytes which can be uploaded before reaching the quota } return usage, nil } // ==================== Optional Interface fs.UserInfoer ==================== // UserInfo returns info about the connected user func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) { accountInfo, err := f.getUserAccountInfo(ctx) if err != nil { return nil, err } return map[string]string{ "Name": accountInfo.Name, "Email": accountInfo.Email, }, nil } // ==================== Optional Interface fs.PublicLinker ==================== // PublicLink generates a public link to the remote path (usually readable by anyone) func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) { libraryName, filePath := f.splitPath(remote) if libraryName == "" { // We cannot share the whole seafile server, we need at least a library return "", errors.New("cannot share the root of the seafile server, please select a library to share") } libraryID, err := f.getLibraryID(ctx, libraryName) if err != nil { return "", err } // List existing links first shareLinks, err := f.listShareLinks(ctx, libraryID, filePath) if err != nil { return "", err } if len(shareLinks) > 0 { for _, shareLink := range shareLinks { if !shareLink.IsExpired { return shareLink.Link, nil } } } // No link was found shareLink, err := f.createShareLink(ctx, libraryID, filePath) if err != nil { return "", err } if 
shareLink.IsExpired { return "", nil } return shareLink.Link, nil } func (f *Fs) listLibraries(ctx context.Context) (entries fs.DirEntries, err error) { libraries, err := f.getCachedLibraries(ctx) if err != nil { return nil, errors.New("cannot load libraries") } for _, library := range libraries { d := fs.NewDir(library.Name, time.Unix(library.Modified, 0)) d.SetSize(library.Size) entries = append(entries, d) } return entries, nil } func (f *Fs) libraryExists(ctx context.Context, libraryName string) (bool, error) { libraries, err := f.getCachedLibraries(ctx) if err != nil { return false, err } for _, library := range libraries { if library.Name == libraryName { return true, nil } } return false, nil } func (f *Fs) getLibraryID(ctx context.Context, name string) (string, error) { libraries, err := f.getCachedLibraries(ctx) if err != nil { return "", err } for _, library := range libraries { if library.Name == name { return library.ID, nil } } return "", fmt.Errorf("cannot find library '%s'", name) } func (f *Fs) isLibraryInCache(libraryName string) bool { f.librariesMutex.Lock() defer f.librariesMutex.Unlock() if f.libraries == nil { return false } value, found := f.libraries.GetMaybe(librariesCacheKey) if !found { return false } libraries := value.([]api.Library) for _, library := range libraries { if library.Name == libraryName { return true } } return false } func (f *Fs) isEncrypted(ctx context.Context, libraryID string) (bool, error) { libraries, err := f.getCachedLibraries(ctx) if err != nil { return false, err } for _, library := range libraries { if library.ID == libraryID { return library.Encrypted, nil } } return false, fmt.Errorf("cannot find library ID %s", libraryID) } func (f *Fs) authorizeLibrary(ctx context.Context, libraryID string) error { if libraryID == "" { return errors.New("a library ID is needed") } if f.opt.LibraryKey == "" { // We have no password to send
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/seafile/webapi.go
backend/seafile/webapi.go
package seafile import ( "bytes" "context" "errors" "fmt" "io" "net/http" "net/url" "path" "strings" "github.com/rclone/rclone/backend/seafile/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/rest" ) // Start of the API URLs const ( APIv20 = "api2/repos/" APIv21 = "api/v2.1/repos/" ) // Errors specific to seafile fs var ( ErrorInternalDuringUpload = errors.New("internal server error during file upload") ) // ==================== Seafile API ==================== func (f *Fs) getAuthorizationToken(ctx context.Context) (string, error) { return getAuthorizationToken(ctx, f.srv, f.opt.User, f.opt.Password, "") } // getAuthorizationToken can be called outside of an fs (during configuration of the remote to get the authentication token) // it's doing a single call (no pacer involved) func getAuthorizationToken(ctx context.Context, srv *rest.Client, user, password, oneTimeCode string) (string, error) { // API Documentation // https://download.seafile.com/published/web-api/home.md#user-content-Quick%20Start opts := rest.Opts{ Method: "POST", Path: "api2/auth-token/", ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request IgnoreStatus: true, // so we can load the error messages back into result } // 2FA if oneTimeCode != "" { opts.ExtraHeaders["X-SEAFILE-OTP"] = oneTimeCode } request := api.AuthenticationRequest{ Username: user, Password: password, } result := api.AuthenticationResult{} _, err := srv.CallJSON(ctx, &opts, &request, &result) if err != nil { // This is only going to be http errors here return "", fmt.Errorf("failed to authenticate: %w", err) } if len(result.Errors) > 0 { return "", errors.New(strings.Join(result.Errors, ", ")) } if result.Token == "" { // No error in "non_field_errors" field but still empty token return "", errors.New("failed to authenticate") } return result.Token, nil } func (f *Fs) getServerInfo(ctx context.Context) (account *api.ServerInfo, err 
error) { // API Documentation // https://download.seafile.com/published/web-api/v2.1/server-info.md#user-content-Get%20Server%20Information opts := rest.Opts{ Method: "GET", Path: "api2/server-info/", } result := api.ServerInfo{} var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, fs.ErrorPermissionDenied } } return nil, fmt.Errorf("failed to get server info: %w", err) } return &result, nil } func (f *Fs) getUserAccountInfo(ctx context.Context) (account *api.AccountInfo, err error) { // API Documentation // https://download.seafile.com/published/web-api/v2.1/account.md#user-content-Check%20Account%20Info opts := rest.Opts{ Method: "GET", Path: "api2/account/info/", } result := api.AccountInfo{} var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, fs.ErrorPermissionDenied } } return nil, fmt.Errorf("failed to get account info: %w", err) } return &result, nil } func (f *Fs) getLibraries(ctx context.Context) ([]api.Library, error) { // API Documentation // https://download.seafile.com/published/web-api/v2.1/libraries.md#user-content-List%20Libraries opts := rest.Opts{ Method: "GET", Path: APIv20, } result := make([]api.Library, 1) var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, fs.ErrorPermissionDenied } } return nil, fmt.Errorf("failed to get libraries: %w", err) } return result, nil } func (f *Fs) createLibrary(ctx context.Context, 
libraryName, password string) (library *api.CreateLibrary, err error) { // API Documentation // https://download.seafile.com/published/web-api/v2.1/libraries.md#user-content-Create%20Library opts := rest.Opts{ Method: "POST", Path: APIv20, } request := api.CreateLibraryRequest{ Name: f.opt.Enc.FromStandardName(libraryName), Description: "Created by rclone", Password: password, } result := &api.CreateLibrary{} var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, fs.ErrorPermissionDenied } } return nil, fmt.Errorf("failed to create library: %w", err) } return result, nil } func (f *Fs) deleteLibrary(ctx context.Context, libraryID string) error { // API Documentation // https://download.seafile.com/published/web-api/v2.1/libraries.md#user-content-Create%20Library opts := rest.Opts{ Method: "DELETE", Path: APIv20 + libraryID + "/", } result := "" var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return fs.ErrorPermissionDenied } } return fmt.Errorf("failed to delete library: %w", err) } return nil } func (f *Fs) decryptLibrary(ctx context.Context, libraryID, password string) error { // API Documentation // https://download.seafile.com/published/web-api/v2.1/library-encryption.md#user-content-Decrypt%20Library if libraryID == "" { return errors.New("cannot list files without a library") } // This is another call that cannot accept a JSON input so we have to build it manually opts := rest.Opts{ Method: "POST", Path: APIv20 + libraryID + "/", ContentType: "application/x-www-form-urlencoded", Body: bytes.NewBuffer([]byte("password=" + 
f.opt.Enc.FromStandardName(password))), NoResponse: true, } var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.Call(ctx, &opts) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 400 { return errors.New("incorrect password") } if resp.StatusCode == 409 { fs.Debugf(nil, "library is not encrypted") return nil } } return fmt.Errorf("failed to decrypt library: %w", err) } return nil } func (f *Fs) getDirectoryEntriesAPIv21(ctx context.Context, libraryID, dirPath string, recursive bool) ([]api.DirEntry, error) { // API Documentation // https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-List%20Items%20in%20Directory // This is using the undocumented version 2.1 of the API (so we can use the recursive option which is not available in the version 2) if libraryID == "" { return nil, errors.New("cannot list files without a library") } dirPath = path.Join("/", dirPath) recursiveFlag := "0" if recursive { recursiveFlag = "1" } opts := rest.Opts{ Method: "GET", Path: APIv21 + libraryID + "/dir/", Parameters: url.Values{ "recursive": {recursiveFlag}, "p": {f.opt.Enc.FromStandardPath(dirPath)}, }, } result := &api.DirEntries{} var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, fs.ErrorPermissionDenied } if resp.StatusCode == 404 { return nil, fs.ErrorDirNotFound } if resp.StatusCode == 440 { // Encrypted library and password not provided return nil, fs.ErrorPermissionDenied } } return nil, fmt.Errorf("failed to get directory contents: %w", err) } // Clean up encoded names for index, fileInfo := range result.Entries { fileInfo.Name = f.opt.Enc.ToStandardName(fileInfo.Name) fileInfo.Path = f.opt.Enc.ToStandardPath(fileInfo.Path) 
result.Entries[index] = fileInfo } return result.Entries, nil } func (f *Fs) getDirectoryDetails(ctx context.Context, libraryID, dirPath string) (*api.DirectoryDetail, error) { // API Documentation // https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-Get%20Directory%20Detail if libraryID == "" { return nil, errors.New("cannot read directory without a library") } dirPath = path.Join("/", dirPath) opts := rest.Opts{ Method: "GET", Path: APIv21 + libraryID + "/dir/detail/", Parameters: url.Values{"path": {f.opt.Enc.FromStandardPath(dirPath)}}, } result := &api.DirectoryDetail{} var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, fs.ErrorPermissionDenied } if resp.StatusCode == 404 { return nil, fs.ErrorDirNotFound } } return nil, fmt.Errorf("failed to get directory details: %w", err) } result.Name = f.opt.Enc.ToStandardName(result.Name) result.Path = f.opt.Enc.ToStandardPath(result.Path) return result, nil } // createDir creates a new directory. 
The API will add a number to the directory name if it already exist func (f *Fs) createDir(ctx context.Context, libraryID, dirPath string) error { // API Documentation // https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-Create%20New%20Directory if libraryID == "" { return errors.New("cannot create directory without a library") } dirPath = path.Join("/", dirPath) // This call *cannot* handle json parameters in the body, so we have to build the request body manually opts := rest.Opts{ Method: "POST", Path: APIv20 + libraryID + "/dir/", Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(dirPath)}}, NoRedirect: true, ContentType: "application/x-www-form-urlencoded", Body: bytes.NewBuffer([]byte("operation=mkdir")), NoResponse: true, } var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.Call(ctx, &opts) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return fs.ErrorPermissionDenied } } return fmt.Errorf("failed to create directory: %w", err) } return nil } func (f *Fs) renameDir(ctx context.Context, libraryID, dirPath, newName string) error { // API Documentation // https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-Rename%20Directory if libraryID == "" { return errors.New("cannot rename directory without a library") } dirPath = path.Join("/", dirPath) // This call *cannot* handle json parameters in the body, so we have to build the request body manually postParameters := url.Values{ "operation": {"rename"}, "newname": {f.opt.Enc.FromStandardPath(newName)}, } opts := rest.Opts{ Method: "POST", Path: APIv20 + libraryID + "/dir/", Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(dirPath)}}, ContentType: "application/x-www-form-urlencoded", Body: bytes.NewBuffer([]byte(postParameters.Encode())), NoResponse: true, } var resp *http.Response var err error err = f.pacer.Call(func() 
(bool, error) { resp, err = f.srv.Call(ctx, &opts) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return fs.ErrorPermissionDenied } } return fmt.Errorf("failed to rename directory: %w", err) } return nil } func (f *Fs) moveDir(ctx context.Context, srcLibraryID, srcDir, srcName, dstLibraryID, dstPath string) error { // API Documentation // https://download.seafile.com/published/web-api/v2.1/files-directories-batch-op.md#user-content-Batch%20Move%20Items%20Synchronously if srcLibraryID == "" || dstLibraryID == "" || srcName == "" { return errors.New("libraryID and/or file path argument(s) missing") } srcDir = path.Join("/", srcDir) dstPath = path.Join("/", dstPath) opts := rest.Opts{ Method: "POST", Path: APIv21 + "sync-batch-move-item/", NoResponse: true, } request := &api.BatchSourceDestRequest{ SrcLibraryID: srcLibraryID, SrcParentDir: f.opt.Enc.FromStandardPath(srcDir), SrcItems: []string{f.opt.Enc.FromStandardPath(srcName)}, DstLibraryID: dstLibraryID, DstParentDir: f.opt.Enc.FromStandardPath(dstPath), } var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &request, nil) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return fs.ErrorPermissionDenied } if resp.StatusCode == 404 { return fs.ErrorObjectNotFound } } return fmt.Errorf("failed to move directory '%s' from '%s' to '%s': %w", srcName, srcDir, dstPath, err) } return nil } func (f *Fs) deleteDir(ctx context.Context, libraryID, filePath string) error { // API Documentation // https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-Delete%20Directory if libraryID == "" { return errors.New("cannot delete directory without a library") } filePath = path.Join("/", filePath) opts := rest.Opts{ Method: "DELETE", Path: APIv20 + libraryID + "/dir/", Parameters: 
url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}}, NoResponse: true, } var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, nil) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return fs.ErrorPermissionDenied } } return fmt.Errorf("failed to delete directory: %w", err) } return nil } func (f *Fs) getFileDetails(ctx context.Context, libraryID, filePath string) (*api.FileDetail, error) { // API Documentation // https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Get%20File%20Detail if libraryID == "" { return nil, errors.New("cannot open file without a library") } filePath = path.Join("/", filePath) opts := rest.Opts{ Method: "GET", Path: APIv20 + libraryID + "/file/detail/", Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}}, } result := &api.FileDetail{} var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 404 { return nil, fs.ErrorObjectNotFound } if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, fs.ErrorPermissionDenied } } return nil, fmt.Errorf("failed to get file details: %w", err) } result.Name = f.opt.Enc.ToStandardName(result.Name) result.Parent = f.opt.Enc.ToStandardPath(result.Parent) return result, nil } func (f *Fs) deleteFile(ctx context.Context, libraryID, filePath string) error { // API Documentation // https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Delete%20File if libraryID == "" { return errors.New("cannot delete file without a library") } filePath = path.Join("/", filePath) opts := rest.Opts{ Method: "DELETE", Path: APIv20 + libraryID + "/file/", Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}}, NoResponse: true, } err := 
f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, nil, nil) return f.shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("failed to delete file: %w", err) } return nil } func (f *Fs) getDownloadLink(ctx context.Context, libraryID, filePath string) (string, error) { // API Documentation // https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Download%20File if libraryID == "" { return "", errors.New("cannot download file without a library") } filePath = path.Join("/", filePath) opts := rest.Opts{ Method: "GET", Path: APIv20 + libraryID + "/file/", Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}}, } result := "" var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 404 { return "", fs.ErrorObjectNotFound } } return "", fmt.Errorf("failed to get download link: %w", err) } return result, nil } func (f *Fs) download(ctx context.Context, downloadLink string, size int64, options ...fs.OpenOption) (io.ReadCloser, error) { // Check if we need to download partial content var start, end int64 = 0, size partialContent := false for _, option := range options { switch x := option.(type) { case *fs.SeekOption: start = x.Offset partialContent = true case *fs.RangeOption: if x.Start >= 0 { start = x.Start if x.End > 0 && x.End < size { end = x.End + 1 } } else { // {-1, 20} should load the last 20 characters [len-20:len] start = size - x.End } partialContent = true default: if option.Mandatory() { fs.Logf(nil, "Unsupported mandatory option: %v", option) } } } // Build the http request opts := rest.Opts{ Method: "GET", Options: options, } parsedURL, err := url.Parse(downloadLink) if err != nil { return nil, fmt.Errorf("failed to parse download url: %w", err) } if parsedURL.IsAbs() { opts.RootURL = downloadLink } else { opts.Path = 
downloadLink } var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.Call(ctx, &opts) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 404 { return nil, fmt.Errorf("file not found '%s'", downloadLink) } } return nil, err } // Non-encrypted libraries are accepting the HTTP Range header, // BUT encrypted libraries are simply ignoring it if partialContent && resp.StatusCode == 200 { // Partial content was requested through a Range header, but a full content was sent instead rangeDownloadNotice.Do(func() { fs.Logf(nil, "%s ignored our request of partial content. This is probably because encrypted libraries are not accepting range requests. Loading this file might be slow!", f.String()) }) if start > 0 { // We need to read and discard the beginning of the data... _, err = io.CopyN(io.Discard, resp.Body, start) if err != nil { return nil, err } } // ... and return a limited reader for the remaining of the data return readers.NewLimitedReadCloser(resp.Body, end-start), nil } return resp.Body, nil } func (f *Fs) getUploadLink(ctx context.Context, libraryID string) (string, error) { // API Documentation // https://download.seafile.com/published/web-api/v2.1/file-upload.md if libraryID == "" { return "", errors.New("cannot upload file without a library") } opts := rest.Opts{ Method: "GET", Path: APIv20 + libraryID + "/upload-link/", } result := "" var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return "", fs.ErrorPermissionDenied } } return "", fmt.Errorf("failed to get upload link: %w", err) } return result, nil } func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath string) (*api.FileDetail, error) { // API Documentation // 
https://download.seafile.com/published/web-api/v2.1/file-upload.md fileDir, filename := path.Split(filePath) parameters := url.Values{ "parent_dir": {"/"}, "relative_path": {f.opt.Enc.FromStandardPath(fileDir)}, "need_idx_progress": {"true"}, "replace": {"1"}, } formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename)) if err != nil { return nil, fmt.Errorf("failed to make multipart upload: %w", err) } opts := rest.Opts{ Method: "POST", Body: formReader, ContentType: contentType, Parameters: url.Values{"ret-json": {"1"}}, // It needs to be on the url, not in the body parameters } parsedURL, err := url.Parse(uploadLink) if err != nil { return nil, fmt.Errorf("failed to parse upload url: %w", err) } if parsedURL.IsAbs() { opts.RootURL = uploadLink } else { opts.Path = uploadLink } result := make([]api.FileDetail, 1) var resp *http.Response // If an error occurs during the call, do not attempt to retry: The upload link is single use only err = f.pacer.CallNoRetry(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetryUpload(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, fs.ErrorPermissionDenied } if resp.StatusCode == 500 { // This is a temporary error - we will get a new upload link before retrying return nil, ErrorInternalDuringUpload } } return nil, fmt.Errorf("failed to upload file: %w", err) } if len(result) > 0 { result[0].Parent = f.opt.Enc.ToStandardPath(result[0].Parent) result[0].Name = f.opt.Enc.ToStandardName(result[0].Name) return &result[0], nil } return nil, nil } func (f *Fs) listShareLinks(ctx context.Context, libraryID, remote string) ([]api.SharedLink, error) { // API Documentation // https://download.seafile.com/published/web-api/v2.1/share-links.md#user-content-List%20Share%20Link%20of%20a%20Folder%20(File) if libraryID == "" { return nil, errors.New("cannot get share links 
without a library") } remote = path.Join("/", remote) opts := rest.Opts{ Method: "GET", Path: "api/v2.1/share-links/", Parameters: url.Values{"repo_id": {libraryID}, "path": {f.opt.Enc.FromStandardPath(remote)}}, } result := make([]api.SharedLink, 1) var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, fs.ErrorPermissionDenied } if resp.StatusCode == 404 { return nil, fs.ErrorObjectNotFound } } return nil, fmt.Errorf("failed to list shared links: %w", err) } return result, nil } // createShareLink will only work with non-encrypted libraries func (f *Fs) createShareLink(ctx context.Context, libraryID, remote string) (*api.SharedLink, error) { // API Documentation // https://download.seafile.com/published/web-api/v2.1/share-links.md#user-content-Create%20Share%20Link if libraryID == "" { return nil, errors.New("cannot create a shared link without a library") } remote = path.Join("/", remote) opts := rest.Opts{ Method: "POST", Path: "api/v2.1/share-links/", } request := &api.ShareLinkRequest{ LibraryID: libraryID, Path: f.opt.Enc.FromStandardPath(remote), } result := &api.SharedLink{} var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, fs.ErrorPermissionDenied } if resp.StatusCode == 404 { return nil, fs.ErrorObjectNotFound } } return nil, fmt.Errorf("failed to create a shared link: %w", err) } return result, nil } func (f *Fs) copyFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID, dstPath string) (*api.FileInfo, error) { // API Documentation // 
https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Copy%20File // It's using the api/v2.1 which is not in the documentation (as of Apr 2020) but works better than api2 if srcLibraryID == "" || dstLibraryID == "" { return nil, errors.New("libraryID and/or file path argument(s) missing") } srcPath = path.Join("/", srcPath) dstPath = path.Join("/", dstPath) opts := rest.Opts{ Method: "POST", Path: APIv21 + srcLibraryID + "/file/", Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(srcPath)}}, } request := &api.FileOperationRequest{ Operation: api.CopyFileOperation, DestinationLibraryID: dstLibraryID, DestinationPath: f.opt.Enc.FromStandardPath(dstPath), } result := &api.FileInfo{} var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, fs.ErrorPermissionDenied } if resp.StatusCode == 404 { fs.Debugf(nil, "Copy: %s", err) return nil, fs.ErrorObjectNotFound } } return nil, fmt.Errorf("failed to copy file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err) } return f.decodeFileInfo(result), nil } func (f *Fs) moveFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID, dstPath string) (*api.FileInfo, error) { // API Documentation // https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Move%20File // It's using the api/v2.1 which is not in the documentation (as of Apr 2020) but works better than api2 if srcLibraryID == "" || dstLibraryID == "" { return nil, errors.New("libraryID and/or file path argument(s) missing") } srcPath = path.Join("/", srcPath) dstPath = path.Join("/", dstPath) opts := rest.Opts{ Method: "POST", Path: APIv21 + srcLibraryID + "/file/", Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(srcPath)}}, } request := &api.FileOperationRequest{ Operation: 
api.MoveFileOperation, DestinationLibraryID: dstLibraryID, DestinationPath: f.opt.Enc.FromStandardPath(dstPath), } result := &api.FileInfo{} var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, fs.ErrorPermissionDenied } if resp.StatusCode == 404 { fs.Debugf(nil, "Move: %s", err) return nil, fs.ErrorObjectNotFound } } return nil, fmt.Errorf("failed to move file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err) } return f.decodeFileInfo(result), nil } func (f *Fs) renameFile(ctx context.Context, libraryID, filePath, newname string) (*api.FileInfo, error) { // API Documentation // https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Rename%20File // It's using the api/v2.1 which is not in the documentation (as of Apr 2020) but works better than api2 if libraryID == "" || newname == "" { return nil, errors.New("libraryID and/or file path argument(s) missing") } filePath = path.Join("/", filePath) opts := rest.Opts{ Method: "POST", Path: APIv21 + libraryID + "/file/", Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}}, } request := &api.FileOperationRequest{ Operation: api.RenameFileOperation, NewName: f.opt.Enc.FromStandardName(newname), } result := &api.FileInfo{} var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, fs.ErrorPermissionDenied } if resp.StatusCode == 404 { fs.Debugf(nil, "Rename: %s", err) return nil, fs.ErrorObjectNotFound } } return nil, fmt.Errorf("failed to rename file '%s' to '%s': %w", filePath, newname, err) } return f.decodeFileInfo(result), 
nil } func (f *Fs) decodeFileInfo(input *api.FileInfo) *api.FileInfo { input.Name = f.opt.Enc.ToStandardName(input.Name) input.Path = f.opt.Enc.ToStandardPath(input.Path) return input } func (f *Fs) emptyLibraryTrash(ctx context.Context, libraryID string) error { // API Documentation // https://download.seafile.com/published/web-api/v2.1/libraries.md#user-content-Clean%20Library%20Trash if libraryID == "" { return errors.New("cannot clean up trash without a library") } opts := rest.Opts{ Method: "DELETE", Path: APIv21 + libraryID + "/trash/", NoResponse: true, } var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, nil) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return fs.ErrorPermissionDenied } if resp.StatusCode == 404 { return fs.ErrorObjectNotFound } } return fmt.Errorf("failed empty the library trash: %w", err) } return nil } func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath string) ([]api.DirEntry, error) { // API v2 from the official documentation, but that have been replaced by the much better v2.1 (undocumented as of Apr 2020) // getDirectoryEntriesAPIv2 is needed to keep compatibility with seafile v6. 
// API Documentation // https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-List%20Items%20in%20Directory if libraryID == "" { return nil, errors.New("cannot list files without a library") } dirPath = path.Join("/", dirPath) opts := rest.Opts{ Method: "GET", Path: APIv20 + libraryID + "/dir/", Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(dirPath)}}, } result := make([]api.DirEntry, 1) var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { if resp != nil { if resp.StatusCode == 401 || resp.StatusCode == 403 { return nil, fs.ErrorPermissionDenied } if resp.StatusCode == 404 { return nil, fs.ErrorDirNotFound } if resp.StatusCode == 440 { // Encrypted library and password not provided return nil, fs.ErrorPermissionDenied } } return nil, fmt.Errorf("failed to get directory contents: %w", err) } // Clean up encoded names for index, fileInfo := range result { fileInfo.Name = f.opt.Enc.ToStandardName(fileInfo.Name) fileInfo.Path = f.opt.Enc.ToStandardPath(fileInfo.Path) result[index] = fileInfo } return result, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/seafile/pacer.go
backend/seafile/pacer.go
package seafile

import (
	"context"
	"fmt"
	"net/url"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/pacer"
)

const (
	minSleep      = 100 * time.Millisecond
	maxSleep      = 10 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential
)

// Use only one pacer per server URL: every remote pointing at the same
// "host:port" shares a single rate limiter, guarded by pacerMutex.
// Initialized at declaration rather than in an init() function.
var (
	pacers     = make(map[string]*fs.Pacer)
	pacerMutex sync.Mutex
)

// getPacer returns the unique pacer for that remote URL
func getPacer(ctx context.Context, remote string) *fs.Pacer {
	pacerMutex.Lock()
	defer pacerMutex.Unlock()

	remote = parseRemote(remote)
	if existing, found := pacers[remote]; found {
		return existing
	}
	// Not seen before: create a pacer with the default backoff settings
	// and remember it for subsequent remotes on the same server.
	created := fs.NewPacer(
		ctx,
		pacer.NewDefault(
			pacer.MinSleep(minSleep),
			pacer.MaxSleep(maxSleep),
			pacer.DecayConstant(decayConstant),
		),
	)
	pacers[remote] = created
	return created
}

// parseRemote formats a remote url into "hostname:port"
func parseRemote(remote string) string {
	remoteURL, err := url.Parse(remote)
	if err != nil {
		// Return a default value in the very unlikely event we're not going to parse remote
		fs.Infof(nil, "Cannot parse remote %s", remote)
		return "default"
	}
	host := remoteURL.Hostname()
	port := remoteURL.Port()
	if port == "" {
		// No explicit port: fall back to the scheme's well-known port
		if remoteURL.Scheme == "https" {
			port = "443"
		} else {
			port = "80"
		}
	}
	return fmt.Sprintf("%s:%s", host, port)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/seafile/seafile_test.go
backend/seafile/seafile_test.go
// Test Seafile filesystem interface package seafile_test import ( "testing" "github.com/rclone/rclone/backend/seafile" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestSeafile:", NilObject: (*seafile.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/seafile/renew.go
backend/seafile/renew.go
package seafile

import (
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
)

// Renew allows tokens to be renewed on expiry.
//
// Renew must only be used through the pointer returned by NewRenew
// (it embeds a sync.Once and must not be copied).
type Renew struct {
	ts       *time.Ticker  // timer indicating when it's time to renew the token
	run      func() error  // the callback to do the renewal
	done     chan struct{} // closed to end the background goroutine
	shutdown sync.Once     // ensures the shutdown sequence runs exactly once
}

// NewRenew creates a new Renew struct and starts a background process
// which renews the token whenever it expires. It uses the run() call
// to do the renewal.
func NewRenew(every time.Duration, run func() error) *Renew {
	r := &Renew{
		ts:   time.NewTicker(every),
		run:  run,
		done: make(chan struct{}),
	}
	go r.renewOnExpiry()
	return r
}

// renewOnExpiry runs the renewal callback on every tick until
// Shutdown closes the done channel.
func (r *Renew) renewOnExpiry() {
	for {
		select {
		case <-r.ts.C:
			if err := r.run(); err != nil {
				// Renewal failures are logged, not fatal: the next tick retries.
				fs.Errorf(nil, "error while refreshing decryption token: %s", err)
			}
		case <-r.done:
			return
		}
	}
}

// Shutdown stops the ticker and no more renewal will take place.
func (r *Renew) Shutdown() {
	// closing a channel can only be done once
	r.shutdown.Do(func() {
		r.ts.Stop()
		close(r.done)
	})
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/seafile/renew_test.go
backend/seafile/renew_test.go
package seafile

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// TestShouldAllowShutdownTwice verifies that Shutdown is idempotent:
// the sync.Once in Renew must protect the channel from a double close.
func TestShouldAllowShutdownTwice(t *testing.T) {
	renew := NewRenew(time.Hour, func() error {
		return nil
	})
	renew.Shutdown()
	renew.Shutdown()
}

// TestRenewalInTimeLimit checks that the renewal callback fires
// repeatedly (ticker at 100ms, observed over 1s) and that the count
// stays within deliberately loose bounds to tolerate slow CI agents.
func TestRenewalInTimeLimit(t *testing.T) {
	var count atomic.Int64

	renew := NewRenew(100*time.Millisecond, func() error {
		count.Add(1)
		return nil
	})
	time.Sleep(time.Second)
	renew.Shutdown()
	// there's no guarantee the CI agent can handle a simple goroutine
	renewCount := count.Load()
	t.Logf("renew count = %d", renewCount)
	assert.Greater(t, renewCount, int64(0))
	assert.Less(t, renewCount, int64(11))
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/seafile/object.go
backend/seafile/object.go
package seafile

import (
	"context"
	"io"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
)

// Object describes a seafile object (also commonly called a file)
type Object struct {
	fs            *Fs       // what this object is part of
	id            string    // internal ID of object
	remote        string    // The remote path (full path containing library name if target at root)
	pathInLibrary string    // Path of the object without the library name
	size          int64     // size of the object
	modTime       time.Time // modification time of the object
	libraryID     string    // Needed to download the file
}

// ==================== Interface fs.DirEntry ====================

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote string
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns last modified time
func (o *Object) ModTime(context.Context) time.Time {
	return o.modTime
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// ==================== Interface fs.ObjectInfo ====================

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
//
// No hash type is supported by this backend.
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	return true
}

// ==================== Interface fs.Object ====================

// SetModTime sets the metadata on the object to set the modification date
//
// Always returns fs.ErrorCantSetModTime for this backend.
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
	return fs.ErrorCantSetModTime
}

// Open opens the file for read.  Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	// Fetch a download link for the file, then stream from it
	downloadLink, err := o.fs.getDownloadLink(ctx, o.libraryID, o.pathInLibrary)
	if err != nil {
		return nil, err
	}
	reader, err := o.fs.download(ctx, downloadLink, o.Size(), options...)
	if err != nil {
		return nil, err
	}
	return reader, nil
}

// Update in to the object with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	// The upload sometimes return a temporary 500 error
	// We cannot use the pacer to retry uploading the file as the upload link is single use only
	for retry := 0; retry <= 3; retry++ {
		// Each attempt needs a fresh upload link
		uploadLink, err := o.fs.getUploadLink(ctx, o.libraryID)
		if err != nil {
			return err
		}
		uploaded, err := o.fs.upload(ctx, in, uploadLink, o.pathInLibrary)
		if err == ErrorInternalDuringUpload {
			// This is a temporary error, try again with a new upload link
			// NOTE(review): 'in' may have been partially consumed by the failed
			// attempt — confirm the server rejects before the body is read,
			// otherwise a retried upload could be truncated.
			continue
		}
		if err != nil {
			return err
		}
		// Set the properties from the upload back to the object
		o.size = uploaded.Size
		o.id = uploaded.ID
		return nil
	}
	// All retries exhausted on temporary server errors
	return ErrorInternalDuringUpload
}

// Remove this object
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.deleteFile(ctx, o.libraryID, o.pathInLibrary)
}

// ==================== Optional Interface fs.IDer ====================

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.id
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/seafile/seafile_internal_test.go
backend/seafile/seafile_internal_test.go
package seafile

import (
	"context"
	"path"
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// pathData is one test case for TestSplitPath: the configured
// library/root combined with a command-line path, and the expected split.
type pathData struct {
	configLibrary   string // Library specified in the config
	configRoot      string // Root directory specified in the config
	argumentPath    string // Path given as an argument in the command line
	expectedLibrary string
	expectedPath    string
}

// Test the method to split a library name and a path
// from a mix of configuration data and path command line argument
func TestSplitPath(t *testing.T) {
	testData := []pathData{
		{configLibrary: "", configRoot: "", argumentPath: "", expectedLibrary: "", expectedPath: ""},
		{configLibrary: "", configRoot: "", argumentPath: "Library", expectedLibrary: "Library", expectedPath: ""},
		{configLibrary: "", configRoot: "", argumentPath: path.Join("Library", "path", "to", "file"), expectedLibrary: "Library", expectedPath: path.Join("path", "to", "file")},
		{configLibrary: "Library", configRoot: "", argumentPath: "", expectedLibrary: "Library", expectedPath: ""},
		{configLibrary: "Library", configRoot: "", argumentPath: "path", expectedLibrary: "Library", expectedPath: "path"},
		{configLibrary: "Library", configRoot: "", argumentPath: path.Join("path", "to", "file"), expectedLibrary: "Library", expectedPath: path.Join("path", "to", "file")},
		{configLibrary: "Library", configRoot: "root", argumentPath: "", expectedLibrary: "Library", expectedPath: "root"},
		{configLibrary: "Library", configRoot: path.Join("root", "path"), argumentPath: "", expectedLibrary: "Library", expectedPath: path.Join("root", "path")},
		{configLibrary: "Library", configRoot: "root", argumentPath: "path", expectedLibrary: "Library", expectedPath: path.Join("root", "path")},
		{configLibrary: "Library", configRoot: "root", argumentPath: path.Join("path", "to", "file"), expectedLibrary: "Library", expectedPath: path.Join("root", "path", "to", "file")},
		{configLibrary: "Library", configRoot: path.Join("root", "path"), argumentPath: path.Join("subpath", "to", "file"), expectedLibrary: "Library", expectedPath: path.Join("root", "path", "subpath", "to", "file")},
	}
	for _, test := range testData {
		fs := &Fs{
			libraryName:   test.configLibrary,
			rootDirectory: test.configRoot,
		}
		libraryName, path := fs.splitPath(test.argumentPath)

		assert.Equal(t, test.expectedLibrary, libraryName)
		assert.Equal(t, test.expectedPath, path)
	}
}

// TestSplitPathIntoSlice checks that leading/trailing slashes are
// discarded when a path is split into its segments.
func TestSplitPathIntoSlice(t *testing.T) {
	testData := map[string][]string{
		"1":     {"1"},
		"/1":    {"1"},
		"/1/":   {"1"},
		"1/2/3": {"1", "2", "3"},
	}
	for input, expected := range testData {
		output := splitPath(input)
		assert.Equal(t, expected, output)
	}
}

// Test2FAStateMachine drives the interactive configuration state machine
// through its states (password entry, 2FA code entry, error/retry) and
// checks the next state, error message and result for each transition.
func Test2FAStateMachine(t *testing.T) {
	fixtures := []struct {
		name               string
		mapper             configmap.Mapper
		input              fs.ConfigIn
		expectState        string
		expectErrorMessage string
		expectResult       string
		expectFail         bool
		expectNil          bool
	}{
		{
			name:       "no url",
			mapper:     configmap.Simple{},
			input:      fs.ConfigIn{State: ""},
			expectFail: true,
		},
		{
			name:       "unknown state",
			mapper:     configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
			input:      fs.ConfigIn{State: "unknown"},
			expectFail: true,
		},
		{
			name:      "2fa not set",
			mapper:    configmap.Simple{"url": "http://localhost/"},
			input:     fs.ConfigIn{State: ""},
			expectNil: true,
		},
		{
			name:        "no password in config",
			mapper:      configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
			input:       fs.ConfigIn{State: ""},
			expectState: "password",
		},
		{
			name:        "config ready for 2fa token",
			mapper:      configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username", "pass": obscure.MustObscure("password")},
			input:       fs.ConfigIn{State: ""},
			expectState: "2fa",
		},
		{
			name:               "password not entered",
			mapper:             configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
			input:              fs.ConfigIn{State: "password"},
			expectState:        "",
			expectErrorMessage: "Password can't be blank",
		},
		{
			name:        "password entered",
			mapper:      configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
			input:       fs.ConfigIn{State: "password", Result: "password"},
			expectState: "2fa",
		},
		{
			name:        "ask for a 2fa code",
			mapper:      configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
			input:       fs.ConfigIn{State: "2fa"},
			expectState: "2fa_do",
		},
		{
			name:               "no 2fa code entered",
			mapper:             configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
			input:              fs.ConfigIn{State: "2fa_do"},
			expectState:        "2fa", // ask for a code again
			expectErrorMessage: "2FA codes can't be blank",
		},
		{
			name:        "2fa error and retry",
			mapper:      configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
			input:       fs.ConfigIn{State: "2fa_error", Result: "true"},
			expectState: "2fa", // ask for a code again
		},
		{
			name:       "2fa error and fail",
			mapper:     configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
			input:      fs.ConfigIn{State: "2fa_error"},
			expectFail: true,
		},
	}

	for _, fixture := range fixtures {
		t.Run(fixture.name, func(t *testing.T) {
			output, err := Config(context.Background(), "test", fixture.mapper, fixture.input)
			if fixture.expectFail {
				require.Error(t, err)
				t.Log(err)
				return
			}
			if fixture.expectNil {
				require.Nil(t, output)
				return
			}
			assert.Equal(t, fixture.expectState, output.State)
			assert.Equal(t, fixture.expectErrorMessage, output.Error)
			assert.Equal(t, fixture.expectResult, output.Result)
		})
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/seafile/api/types.go
backend/seafile/api/types.go
// Package api provides types used by the Seafile API.
package api

// Some api objects are duplicated with only small differences,
// it's because the returned JSON objects are very inconsistent between api calls

// AuthenticationRequest contains user credentials
type AuthenticationRequest struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

// AuthenticationResult is returned by a call to the authentication api
type AuthenticationResult struct {
	Token  string   `json:"token"`
	Errors []string `json:"non_field_errors"`
}

// AccountInfo contains simple user properties
type AccountInfo struct {
	Usage int64  `json:"usage"`
	Total int64  `json:"total"`
	Email string `json:"email"`
	Name  string `json:"name"`
}

// ServerInfo contains server information
type ServerInfo struct {
	Version string `json:"version"`
}

// DefaultLibrary when none specified
type DefaultLibrary struct {
	ID     string `json:"repo_id"`
	Exists bool   `json:"exists"`
}

// CreateLibraryRequest contains the information needed to create a library
type CreateLibraryRequest struct {
	Name        string `json:"name"`
	Description string `json:"desc"`
	Password    string `json:"passwd"`
}

// Library properties. Please note not all properties are going to be useful for rclone
type Library struct {
	Encrypted bool   `json:"encrypted"`
	Owner     string `json:"owner"`
	ID        string `json:"id"`
	Size      int64  `json:"size"`
	Name      string `json:"name"`
	Modified  int64  `json:"mtime"`
}

// CreateLibrary properties. Seafile is not consistent and returns different types for different API calls
type CreateLibrary struct {
	ID   string `json:"repo_id"`
	Name string `json:"repo_name"`
}

// FileType is either "dir" or "file"
type FileType string

// File types
var (
	FileTypeDir  FileType = "dir"
	FileTypeFile FileType = "file"
)

// FileDetail contains file properties (for older api v2.0)
type FileDetail struct {
	ID       string   `json:"id"`
	Type     FileType `json:"type"`
	Name     string   `json:"name"`
	Size     int64    `json:"size"`
	Parent   string   `json:"parent_dir"`
	Modified string   `json:"last_modified"`
}

// DirEntries contains a list of DirEntry
type DirEntries struct {
	Entries []DirEntry `json:"dirent_list"`
}

// DirEntry contains a directory entry
type DirEntry struct {
	ID       string   `json:"id"`
	Type     FileType `json:"type"`
	Name     string   `json:"name"`
	Size     int64    `json:"size"`
	Path     string   `json:"parent_dir"`
	Modified int64    `json:"mtime"`
}

// Operation is move, copy or rename
type Operation string

// Operations
var (
	CopyFileOperation   Operation = "copy"
	MoveFileOperation   Operation = "move"
	RenameFileOperation Operation = "rename"
)

// FileOperationRequest is sent to the api to copy, move or rename a file
type FileOperationRequest struct {
	Operation            Operation `json:"operation"`
	DestinationLibraryID string    `json:"dst_repo"` // For copy/move operation
	DestinationPath      string    `json:"dst_dir"`  // For copy/move operation
	NewName              string    `json:"newname"`  // Only to be used by the rename operation
}

// FileInfo is returned by a server file copy/move/rename (new api v2.1)
type FileInfo struct {
	Type      string `json:"type"`
	LibraryID string `json:"repo_id"`
	Path      string `json:"parent_dir"`
	Name      string `json:"obj_name"`
	ID        string `json:"obj_id"`
	Size      int64  `json:"size"`
}

// CreateDirRequest only contain an operation field
type CreateDirRequest struct {
	Operation string `json:"operation"`
}

// DirectoryDetail contains the directory details specific to the getDirectoryDetails call
type DirectoryDetail struct {
	ID   string `json:"repo_id"`
	Name string `json:"name"`
	Path string `json:"path"`
}

// ShareLinkRequest contains the information needed to create or list shared links
type ShareLinkRequest struct {
	LibraryID string `json:"repo_id"`
	Path      string `json:"path"`
}

// SharedLink contains the information returned by a call to shared link creation
type SharedLink struct {
	Link      string `json:"link"`
	IsExpired bool   `json:"is_expired"`
}

// BatchSourceDestRequest contains JSON parameters for sending a batch copy or move operation
type BatchSourceDestRequest struct {
	SrcLibraryID string   `json:"src_repo_id"`
	SrcParentDir string   `json:"src_parent_dir"`
	SrcItems     []string `json:"src_dirents"`
	DstLibraryID string   `json:"dst_repo_id"`
	DstParentDir string   `json:"dst_parent_dir"`
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/ulozto/ulozto_test.go
backend/ulozto/ulozto_test.go
package ulozto

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/rclone/rclone/backend/ulozto/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest"
	"github.com/stretchr/testify/require"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestUlozto:",
		NilObject:  (*Object)(nil),
	})
}

// TestListWithoutMetadata verifies that basic operations can be performed even if the remote file wasn't written by
// rclone, or the serialized metadata can't be read.
func TestListWithoutMetadata(t *testing.T) {
	const (
		remoteName = "TestUlozto:"
		payload    = "42foobar42"
		sha256     = "d41f400003e93eb0891977f525e73ecedfa04272d2036f6137106168ecb196ab"
		md5        = "8ad32cfeb3dc0f5092261268f335e0a5"
		filesize   = len(payload)
	)
	ctx := context.Background()
	fstest.Initialise()
	subRemoteName, subRemoteLeaf, err := fstest.RandomRemoteName(remoteName)
	require.NoError(t, err)
	f, err := fs.NewFs(ctx, subRemoteName)
	if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
		// The remote isn't configured in this environment: skip, don't fail
		t.Logf("Didn't find %q in config file - skipping tests", remoteName)
		return
	}
	require.NoError(t, err)

	file := fstest.Item{ModTime: time.UnixMilli(123456789), Path: subRemoteLeaf, Size: int64(filesize), Hashes: map[hash.Type]string{
		hash.SHA256: sha256,
		hash.MD5:    md5,
	}}

	// Create a file with the given content and metadata
	obj := fstests.PutTestContents(ctx, t, f, &file, payload, false)

	// Verify the file has been uploaded
	fstest.CheckListing(t, f, []fstest.Item{file})

	// Now delete the description metadata
	uloztoObj := obj.(*Object)
	err = uloztoObj.updateFileProperties(ctx, api.UpdateDescriptionRequest{
		Description: "",
	})
	require.NoError(t, err)

	// Listing the file should still succeed, although with estimated mtime and no hashes
	fileWithoutDetails := fstest.Item{Path: subRemoteLeaf, Size: int64(filesize), ModTime: uloztoObj.remoteFsMtime, Hashes: map[hash.Type]string{
		hash.SHA256: "",
		hash.MD5:    "",
	}}
	fstest.CheckListing(t, f, []fstest.Item{fileWithoutDetails})

	mtime := time.UnixMilli(987654321)

	// When we update the mtime it should be reflected but hashes should stay intact
	require.NoError(t, obj.SetModTime(ctx, mtime))
	updatedMtimeFile := fstest.Item{Path: subRemoteLeaf, Size: int64(filesize), ModTime: mtime, Hashes: map[hash.Type]string{
		hash.SHA256: "",
		hash.MD5:    "",
	}}
	fstest.CheckListing(t, f, []fstest.Item{updatedMtimeFile})

	// Tear down
	require.NoError(t, operations.Purge(ctx, f, ""))
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/ulozto/ulozto.go
backend/ulozto/ulozto.go
// Package ulozto provides an interface to the Uloz.to storage system. package ulozto import ( "bytes" "context" "encoding/base64" "encoding/gob" "encoding/hex" "errors" "fmt" "io" "net/http" "net/url" "path" "strconv" "strings" "time" "github.com/rclone/rclone/backend/ulozto/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" ) // TODO Uloz.to only supports file names of 255 characters or less and silently truncates names that are longer. const ( minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential rootURL = "https://apis.uloz.to" ) // Options defines the configuration for this backend type Options struct { AppToken string `config:"app_token"` Username string `config:"username"` Password string `config:"password"` RootFolderSlug string `config:"root_folder_slug"` Enc encoder.MultiEncoder `config:"encoding"` ListPageSize int `config:"list_page_size"` } func init() { fs.Register(&fs.RegInfo{ Name: "ulozto", Description: "Uloz.to", NewFs: NewFs, Options: []fs.Option{ { Name: "app_token", Default: "", Help: `The application token identifying the app. 
An app API key can be either found in the API doc https://uloz.to/upload-resumable-api-beta or obtained from customer service.`,
			Sensitive: true,
		}, {
			Name:      "username",
			Default:   "",
			Help:      "The username of the principal to operate as.",
			Sensitive: true,
		}, {
			Name:       "password",
			Default:    "",
			Help:       "The password for the user.",
			IsPassword: true,
		}, {
			Name:      "root_folder_slug",
			Help:      `If set, rclone will use this folder as the root folder for all operations. For example, if the slug identifies 'foo/bar/', 'ulozto:baz' is equivalent to 'ulozto:foo/bar/baz' without any root slug set.`,
			Default:   "",
			Advanced:  true,
			Sensitive: true,
		}, {
			Name:     "list_page_size",
			Default:  500,
			Help:     "The size of a single page for list commands. 1-500",
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			Default:  encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeBackSlash,
		},
	},
	})
}

// Fs represents a remote uloz.to storage
type Fs struct {
	name     string             // name of this remote
	root     string             // the path we are working on
	opt      Options            // parsed options
	features *fs.Features       // optional features
	rest     *rest.Client       // REST client with authentication headers set, used to communicate with API endpoints
	cdn      *rest.Client       // REST client without authentication headers set, used for CDN payload upload/download
	dirCache *dircache.DirCache // Map of directory path to directory id
	pacer    *fs.Pacer          // pacer for API calls
}

// NewFs constructs a Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	// Strip leading and trailing slashes, see https://github.com/rclone/rclone/issues/7796 for details.
	root = strings.Trim(root, "/")

	client := fshttp.NewClient(ctx)

	f := &Fs{
		name: name,
		root: root,
		opt:  *opt,
		// The CDN client deliberately carries no auth headers; upload/download URLs are pre-authorized.
		cdn:   rest.NewClient(client),
		rest:  rest.NewClient(client).SetRoot(rootURL),
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{
		DuplicateFiles:          true,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)

	f.rest.SetErrorHandler(errorHandler)
	f.rest.SetHeader("X-Auth-Token", f.opt.AppToken)

	auth, err := f.authenticate(ctx)
	if err != nil {
		return f, err
	}

	// Prefer an explicitly configured root folder slug, otherwise use the account's root.
	var rootSlug string
	if opt.RootFolderSlug == "" {
		rootSlug = auth.Session.User.RootFolderSlug
	} else {
		rootSlug = opt.RootFolderSlug
	}

	f.dirCache = dircache.New(root, rootSlug, f)

	err = f.dirCache.FindRoot(ctx, false)
	if errors.Is(err, fs.ErrorDirNotFound) {
		// All good, we'll create the folder later on.
		return f, nil
	}

	if errors.Is(err, fs.ErrorIsFile) {
		// The remote points at a file: re-root at its parent directory and
		// return fs.ErrorIsFile so the caller knows, per the fs.Fs contract.
		rootFolder, _ := dircache.SplitPath(root)
		f.root = rootFolder
		f.dirCache = dircache.New(rootFolder, rootSlug, f)
		err = f.dirCache.FindRoot(ctx, false)
		if err != nil {
			return f, err
		}
		return f, fs.ErrorIsFile
	}

	return f, err
}

// About implements the Abouter interface for Uloz.to.
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	used, err := f.getUsedSize(ctx)
	if err != nil {
		return nil, err
	}

	usage := fs.Usage{
		Used: &used,
	}
	return &usage, nil
}

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
	// Decode error response
	errResponse := new(api.Error)
	err := rest.DecodeJSON(resp, &errResponse)
	if err != nil {
		fs.Debugf(nil, "Couldn't decode error response: %v", err)
	}
	// Fall back to the HTTP status when the body carried no status code.
	if errResponse.StatusCode == 0 {
		errResponse.StatusCode = resp.StatusCode
	}
	return errResponse
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
}

// shouldRetry returns a boolean whether this resp and err should be retried.
// It also returns the err for convenience.
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, reauth bool) (bool, error) {
	if err == nil {
		return false, nil
	}
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	// API error 70001 on a 401 means the user session token expired; optionally
	// re-authenticate before signalling a retry.
	var apiErr *api.Error
	if resp != nil && resp.StatusCode == 401 && errors.As(err, &apiErr) && apiErr.ErrorCode == 70001 {
		fs.Debugf(nil, "Should retry: %v", err)
		if reauth {
			_, err = f.authenticate(ctx)
			if err != nil {
				return false, err
			}
		}
		return true, err
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// authenticate creates a fresh user session and installs its token on the REST
// client as the X-User-Token header.
func (f *Fs) authenticate(ctx context.Context) (response *api.AuthenticateResponse, err error) {
	// TODO only reauth once if the token expires

	// Remove the old user token
	f.rest.RemoveHeader("X-User-Token")

	opts := rest.Opts{
		Method: "PUT",
		Path:   "/v6/session",
	}
	clearPassword, err := obscure.Reveal(f.opt.Password)
	if err != nil {
		return nil, err
	}
	authRequest := api.AuthenticateRequest{
		Login:    f.opt.Username,
		Password: clearPassword,
	}
	// reauth is false here to avoid recursing back into authenticate on a 401.
	err = f.pacer.Call(func() (bool, error) {
		httpResp, err := f.rest.CallJSON(ctx, &opts, &authRequest, &response)
		return f.shouldRetry(ctx, httpResp, err, false)
	})
	if err != nil {
		return nil, err
	}
	f.rest.SetHeader("X-User-Token", response.TokenID)
	return response, nil
}

// getUsedSize returns the recursive size in bytes of all files under the root folder.
func (f *Fs) getUsedSize(ctx context.Context) (int64, error) {
	rootID, err := f.dirCache.RootID(ctx, false)
	if err != nil {
		return 0, err
	}
	opts := rest.Opts{
		Method: "GET",
		Path:   fmt.Sprintf("/v6/user/%s/folder/%s/folder-sizes", f.opt.Username, rootID),
		Parameters: url.Values{
			"recursive": []string{"true"},
		},
	}
	folderSizes := api.FolderSizesResponse{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, nil, &folderSizes)
		return f.shouldRetry(ctx, resp, err, true)
	})
	if err != nil {
		return 0, err
	}
	return folderSizes[rootID].Recursive.FilesSize, nil
}

// UploadSession represents a single Uloz.to upload session.
//
// Uloz.to supports uploading multiple files at once and committing them atomically. This functionality isn't being used
// by the backend implementation and for simplicity, each session corresponds to a single file being uploaded.
type UploadSession struct {
	Filesystem  *Fs
	URL         string    // CDN URL to POST the file payload to
	PrivateSlug string    // slug identifying this upload batch
	ValidUntil  time.Time // expiry of the upload URL as reported by the backend
}

// createUploadSession opens a new single-file upload session on the backend.
func (f *Fs) createUploadSession(ctx context.Context) (session *UploadSession, err error) {
	session = &UploadSession{
		Filesystem: f,
	}
	err = session.renewUploadSession(ctx)
	if err != nil {
		return nil, err
	}
	return session, nil
}

// renewUploadSession requests a (new) upload URL for this session, reusing the
// existing session slug when one is already set.
func (session *UploadSession) renewUploadSession(ctx context.Context) error {
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/v5/upload/link",
		Parameters: url.Values{},
	}
	createUploadURLReq := api.CreateUploadURLRequest{
		UserLogin: session.Filesystem.opt.Username,
		Realm:     "ulozto",
	}
	if session.PrivateSlug != "" {
		createUploadURLReq.ExistingSessionSlug = session.PrivateSlug
	}
	var err error
	var response api.CreateUploadURLResponse
	err = session.Filesystem.pacer.Call(func() (bool, error) {
		httpResp, err := session.Filesystem.rest.CallJSON(ctx, &opts, &createUploadURLReq, &response)
		return session.Filesystem.shouldRetry(ctx, httpResp, err, true)
	})
	if err != nil {
		return err
	}
	session.PrivateSlug = response.PrivateSlug
	session.URL = response.UploadURL
	session.ValidUntil = response.ValidUntil
	return nil
}

// uploadUnchecked uploads payload as a new file called name under parentSlug
// without checking for existing files of the same name, then moves and commits
// it. MD5/SHA256 digests and the mtime are stored in the file description.
func (f *Fs) uploadUnchecked(ctx context.Context, name, parentSlug string, info fs.ObjectInfo, payload io.Reader) (fs.Object, error) {
	session, err := f.createUploadSession(ctx)
	if err != nil {
		return nil, err
	}

	// Hash the payload as it streams through to the CDN.
	hashes := hash.NewHashSet(hash.MD5, hash.SHA256)
	hasher, err := hash.NewMultiHasherTypes(hashes)
	if err != nil {
		return nil, err
	}
	payload = io.TeeReader(payload, hasher)

	encodedName := f.opt.Enc.FromStandardName(name)

	opts := rest.Opts{
		Method: "POST",
		Body:   payload,
		// Not using Parameters as the session URL has parameters itself
		RootURL:              session.URL + "&batch_file_id=1&is_porn=false",
		MultipartContentName: "file",
		MultipartFileName:    encodedName,
		Parameters:           url.Values{},
	}
	if info.Size() > 0 {
		size := info.Size()
		opts.ContentLength = &size
	}
	var uploadResponse api.SendFilePayloadResponse
	// No retry here: the payload reader can't be rewound.
	err = f.pacer.CallNoRetry(func() (bool, error) {
		httpResp, err := f.cdn.CallJSON(ctx, &opts, nil, &uploadResponse)
		return f.shouldRetry(ctx, httpResp, err, true)
	})
	if err != nil {
		return nil, err
	}

	sha256digest, err := hasher.Sum(hash.SHA256)
	if err != nil {
		return nil, err
	}
	md5digest, err := hasher.Sum(hash.MD5)
	if err != nil {
		return nil, err
	}

	// Verify the upload against the MD5 reported by the server.
	if hex.EncodeToString(md5digest) != uploadResponse.Md5 {
		return nil, errors.New("MD5 digest mismatch")
	}

	metadata := DescriptionEncodedMetadata{
		Md5Hash:            md5digest,
		Sha256Hash:         sha256digest,
		ModTimeEpochMicros: info.ModTime(ctx).UnixMicro(),
	}
	encodedMetadata, err := metadata.encode()
	if err != nil {
		return nil, err
	}

	// Successfully uploaded, now move the file where it belongs and commit it
	updateReq := api.BatchUpdateFilePropertiesRequest{
		Name:         encodedName,
		FolderSlug:   parentSlug,
		Description:  encodedMetadata,
		Slugs:        []string{uploadResponse.Slug},
		UploadTokens: map[string]string{uploadResponse.Slug: session.PrivateSlug + ":1"},
	}
	var updateResponse []api.File
	opts = rest.Opts{
		Method:     "PATCH",
		Path:       "/v8/file-list/private",
		Parameters: url.Values{},
	}
	err = f.pacer.Call(func() (bool, error) {
		httpResp, err := session.Filesystem.rest.CallJSON(ctx, &opts, &updateReq, &updateResponse)
		return f.shouldRetry(ctx, httpResp, err, true)
	})
	if err != nil {
		return nil, err
	}
	if len(updateResponse) != 1 {
		return nil, errors.New("unexpected number of files in the response")
	}

	// Mark the upload batch as final.
	opts = rest.Opts{
		Method:     "PATCH",
		Path:       "/v8/upload-batch/private/" + session.PrivateSlug,
		Parameters: url.Values{},
	}
	commitRequest := api.CommitUploadBatchRequest{
		Status:     "confirmed",
		OwnerLogin: f.opt.Username,
	}
	var commitResponse api.CommitUploadBatchResponse
	err = f.pacer.Call(func() (bool, error) {
		httpResp, err := session.Filesystem.rest.CallJSON(ctx, &opts, &commitRequest, &commitResponse)
		return f.shouldRetry(ctx, httpResp, err, true)
	})
	if err != nil {
		return nil, err
	}

	file, err := f.newObjectWithInfo(ctx, info.Remote(), &updateResponse[0])
	return file, err
}

// Put implements the mandatory method fs.Fs.Put.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	existingObj, err := f.NewObject(ctx, src.Remote())
	switch {
	case err == nil:
		return existingObj, existingObj.Update(ctx, in, src, options...)
	case errors.Is(err, fs.ErrorObjectNotFound):
		// Not found so create it
		return f.PutUnchecked(ctx, in, src, options...)
	default:
		return nil, err
	}
}

// PutUnchecked implements the optional interface fs.PutUncheckeder.
//
// Uloz.to allows to have multiple files of the same name in the same folder.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	filename, folderSlug, err := f.dirCache.FindPath(ctx, src.Remote(), true)
	if err != nil {
		return nil, err
	}
	return f.uploadUnchecked(ctx, filename, folderSlug, src, in)
}

// Mkdir implements the mandatory method fs.Fs.Mkdir.
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	_, err = f.dirCache.FindDir(ctx, dir, true)
	return err
}

// isDirEmpty reports whether the folder identified by slug contains no
// subfolders and no files. Only one item of each kind is fetched.
func (f *Fs) isDirEmpty(ctx context.Context, slug string) (empty bool, err error) {
	folders, err := f.fetchListFolderPage(ctx, slug, "", 1, 0)
	if err != nil {
		return false, err
	}
	if len(folders) > 0 {
		return false, nil
	}

	files, err := f.fetchListFilePage(ctx, slug, "", 1, 0)
	if err != nil {
		return false, err
	}
	if len(files) > 0 {
		return false, nil
	}
	return true, nil
}

// Rmdir implements the mandatory method fs.Fs.Rmdir.
func (f *Fs) Rmdir(ctx context.Context, dir string) error { slug, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return err } empty, err := f.isDirEmpty(ctx, slug) if err != nil { return err } if !empty { return fs.ErrorDirectoryNotEmpty } opts := rest.Opts{ Method: "DELETE", Path: "/v5/user/" + f.opt.Username + "/folder-list", } req := api.DeleteFoldersRequest{Slugs: []string{slug}} err = f.pacer.Call(func() (bool, error) { httpResp, err := f.rest.CallJSON(ctx, &opts, req, nil) return f.shouldRetry(ctx, httpResp, err, true) }) if err != nil { return err } f.dirCache.FlushDir(dir) return nil } // Move implements the optional method fs.Mover.Move. func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { if remote == src.Remote() { // Already there, do nothing return src, nil } srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } filename, folderSlug, err := f.dirCache.FindPath(ctx, remote, true) if err != nil { return nil, err } newObj := &Object{} newObj.copyFrom(srcObj) newObj.remote = remote return newObj, newObj.updateFileProperties(ctx, api.MoveFileRequest{ ParentFolderSlug: folderSlug, NewFilename: filename, }) } // DirMove implements the optional method fs.DirMover.DirMove. 
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } srcSlug, _, srcName, dstParentSlug, dstName, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) if err != nil { return err } opts := rest.Opts{ Method: "PATCH", Path: "/v6/user/" + f.opt.Username + "/folder-list/parent-folder", } req := api.MoveFolderRequest{ FolderSlugs: []string{srcSlug}, NewParentFolderSlug: dstParentSlug, } err = f.pacer.Call(func() (bool, error) { httpResp, err := f.rest.CallJSON(ctx, &opts, &req, nil) return f.shouldRetry(ctx, httpResp, err, true) }) if err != nil { return err } // The old folder doesn't exist anymore so clear the cache now instead of after renaming srcFs.dirCache.FlushDir(srcRemote) if srcName != dstName { // There's no endpoint to rename the folder alongside moving it, so this has to happen separately. opts = rest.Opts{ Method: "PATCH", Path: "/v7/user/" + f.opt.Username + "/folder/" + srcSlug, } renameReq := api.RenameFolderRequest{ NewName: dstName, } err = f.pacer.Call(func() (bool, error) { httpResp, err := f.rest.CallJSON(ctx, &opts, &renameReq, nil) return f.shouldRetry(ctx, httpResp, err, true) }) return err } return nil } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("uloz.to root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return time.Microsecond } // Hashes implements fs.Fs.Hashes by returning the supported hash types of the filesystem. 
func (f *Fs) Hashes() hash.Set { return hash.NewHashSet(hash.SHA256, hash.MD5) } // DescriptionEncodedMetadata represents a set of metadata encoded as Uloz.to description. // // Uloz.to doesn't support setting metadata such as mtime but allows the user to set an arbitrary description field. // The content of this structure will be serialized and stored in the backend. // // The files themselves are immutable so there's no danger that the file changes, and we'll forget to update the hashes. // It is theoretically possible to rewrite the description to provide incorrect information for a file. However, in case // it's a real attack vector, a nefarious person already has write access to the repo, and the situation is above // rclone's pay grade already. type DescriptionEncodedMetadata struct { Md5Hash []byte // The MD5 hash of the file Sha256Hash []byte // The SHA256 hash of the file ModTimeEpochMicros int64 // The mtime of the file, as set by rclone } func (md *DescriptionEncodedMetadata) encode() (string, error) { b := bytes.Buffer{} e := gob.NewEncoder(&b) err := e.Encode(md) if err != nil { return "", err } // Version the encoded string from the beginning even though we don't need it yet. return "1;" + base64.StdEncoding.EncodeToString(b.Bytes()), nil } func decodeDescriptionMetadata(str string) (*DescriptionEncodedMetadata, error) { // The encoded data starts with a version number which is not a part iof the serialized object spl := strings.SplitN(str, ";", 2) if len(spl) < 2 || spl[0] != "1" { return nil, errors.New("can't decode, unknown encoded metadata version") } m := DescriptionEncodedMetadata{} by, err := base64.StdEncoding.DecodeString(spl[1]) if err != nil { return nil, err } b := bytes.Buffer{} b.Write(by) d := gob.NewDecoder(&b) err = d.Decode(&m) if err != nil { return nil, err } return &m, nil } // Object describes an uloz.to object. // // Valid objects will always have all fields but encodedMetadata set. 
type Object struct {
	fs            *Fs       // what this object is part of
	remote        string    // The remote path
	name          string    // The file name
	size          int64     // size of the object
	slug          string    // ID of the object
	remoteFsMtime time.Time // The time the object was last modified in the remote fs.

	// Metadata not available natively and encoded in the description field. May not be present if the encoded metadata
	// is not present (e.g. if file wasn't uploaded by rclone) or invalid.
	encodedMetadata *DescriptionEncodedMetadata
}

// Storable implements the mandatory method fs.ObjectInfo.Storable
func (o *Object) Storable() bool {
	return true
}

// updateFileProperties PATCHes this file on the backend with the given request
// body and refreshes the Object from the response.
func (o *Object) updateFileProperties(ctx context.Context, req any) (err error) {
	var resp *api.File

	opts := rest.Opts{
		Method: "PATCH",
		Path:   "/v8/file/" + o.slug + "/private",
	}

	err = o.fs.pacer.Call(func() (bool, error) {
		httpResp, err := o.fs.rest.CallJSON(ctx, &opts, &req, &resp)
		return o.fs.shouldRetry(ctx, httpResp, err, true)
	})
	if err != nil {
		return err
	}
	return o.setMetaData(resp)
}

// SetModTime implements the mandatory method fs.Object.SetModTime
func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
	// The mtime lives in the encoded description metadata; preserve any
	// existing hashes when rewriting it.
	var newMetadata DescriptionEncodedMetadata
	if o.encodedMetadata == nil {
		newMetadata = DescriptionEncodedMetadata{}
	} else {
		newMetadata = *o.encodedMetadata
	}

	newMetadata.ModTimeEpochMicros = t.UnixMicro()
	encoded, err := newMetadata.encode()
	if err != nil {
		return err
	}
	return o.updateFileProperties(ctx, api.UpdateDescriptionRequest{
		Description: encoded,
	})
}

// Open implements the mandatory method fs.Object.Open
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
	// First obtain a (possibly hash-protected) CDN download link...
	opts := rest.Opts{
		Method: "POST",
		Path:   "/v5/file/download-link/vipdata",
	}
	req := &api.GetDownloadLinkRequest{
		Slug:      o.slug,
		UserLogin: o.fs.opt.Username,
		DeviceID:  fmt.Sprintf("%d", time.Now().UnixNano()),
	}
	var resp *api.GetDownloadLinkResponse
	err = o.fs.pacer.Call(func() (bool, error) {
		httpResp, err := o.fs.rest.CallJSON(ctx, &opts, &req, &resp)
		return o.fs.shouldRetry(ctx, httpResp, err, true)
	})
	if err != nil {
		return nil, err
	}

	// ...append the hash as a query parameter when present...
	downloadURL := resp.Link
	if resp.Hash != "" {
		if strings.Contains(downloadURL, "?") {
			downloadURL += "&"
		} else {
			downloadURL += "?"
		}
		downloadURL += "hash=" + url.QueryEscape(resp.Hash)
	}

	// ...then stream the payload from the CDN.
	opts = rest.Opts{
		Method:  "GET",
		RootURL: downloadURL,
		Options: options,
	}

	var httpResp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		httpResp, err = o.fs.rest.Call(ctx, &opts)
		return o.fs.shouldRetry(ctx, httpResp, err, true)
	})
	if err != nil {
		return nil, err
	}
	return httpResp.Body, err
}

// copyFrom copies all fields of other into o.
func (o *Object) copyFrom(other *Object) {
	o.fs = other.fs
	o.remote = other.remote
	o.size = other.size
	o.slug = other.slug
	o.remoteFsMtime = other.remoteFsMtime
	o.encodedMetadata = other.encodedMetadata
}

// RenamingObjectInfoProxy is a delegating proxy for fs.ObjectInfo
// with the option of specifying a different remote path.
type RenamingObjectInfoProxy struct {
	delegate fs.ObjectInfo
	remote   string
}

// String implements fmt.Stringer by delegating to the wrapped instance.
func (s *RenamingObjectInfoProxy) String() string {
	return s.delegate.String()
}

// Remote implements fs.ObjectInfo.Remote by returning the specified remote path.
func (s *RenamingObjectInfoProxy) Remote() string {
	return s.remote
}

// ModTime implements fs.ObjectInfo.ModTime by delegating to the wrapped instance.
func (s *RenamingObjectInfoProxy) ModTime(ctx context.Context) time.Time {
	return s.delegate.ModTime(ctx)
}

// Size implements fs.ObjectInfo.Size by delegating to the wrapped instance.
func (s *RenamingObjectInfoProxy) Size() int64 {
	return s.delegate.Size()
}

// Fs implements fs.ObjectInfo.Fs by delegating to the wrapped instance.
func (s *RenamingObjectInfoProxy) Fs() fs.Info {
	return s.delegate.Fs()
}

// Hash implements fs.ObjectInfo.Hash by delegating to the wrapped instance.
func (s *RenamingObjectInfoProxy) Hash(ctx context.Context, ty hash.Type) (string, error) { return s.delegate.Hash(ctx, ty) } // Storable implements fs.ObjectInfo.Storable by delegating to the wrapped instance. func (s *RenamingObjectInfoProxy) Storable() bool { return s.delegate.Storable() } // Update implements the mandatory method fs.Object.Update func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { // The backend allows to store multiple files with the same name, so simply upload the new file and remove the old // one afterwards. info := &RenamingObjectInfoProxy{ delegate: src, remote: o.Remote(), } newo, err := o.fs.PutUnchecked(ctx, in, info, options...) if err != nil { return err } err = o.Remove(ctx) if err != nil { return err } o.copyFrom(newo.(*Object)) return nil } // Remove implements the mandatory method fs.Object.Remove func (o *Object) Remove(ctx context.Context) error { for range 2 { // First call moves the item to recycle bin, second deletes it for good var err error opts := rest.Opts{ Method: "DELETE", Path: "/v6/file/" + o.slug + "/private", } err = o.fs.pacer.Call(func() (bool, error) { httpResp, err := o.fs.rest.CallJSON(ctx, &opts, nil, nil) return o.fs.shouldRetry(ctx, httpResp, err, true) }) if err != nil { return err } } return nil } // ModTime implements the mandatory method fs.Object.ModTime func (o *Object) ModTime(ctx context.Context) time.Time { if o.encodedMetadata != nil { return time.UnixMicro(o.encodedMetadata.ModTimeEpochMicros) } // The time the object was last modified on the server - a handwavy guess, but we don't have any better return o.remoteFsMtime } // Fs implements the mandatory method fs.Object.Fs func (o *Object) Fs() fs.Info { return o.fs } // String returns the string representation of the remote object reference. 
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// Hash implements the mandatory method fs.Object.Hash.
//
// Supports SHA256 and MD5 hashes.
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 && t != hash.SHA256 {
		return "", hash.ErrUnsupported
	}
	// The hashes live in the encoded metadata, which is absent for files not
	// uploaded by rclone - return "" (unknown) in that case.
	if o.encodedMetadata == nil {
		return "", nil
	}
	switch t {
	case hash.MD5:
		return hex.EncodeToString(o.encodedMetadata.Md5Hash), nil
	case hash.SHA256:
		return hex.EncodeToString(o.encodedMetadata.Sha256Hash), nil
	}
	panic("Should never get here")
}

// FindLeaf implements dircache.DirCacher.FindLeaf by successively walking through the folder hierarchy until
// the desired folder is found, or there's nowhere to continue.
func (f *Fs) FindLeaf(ctx context.Context, folderSlug, leaf string) (leafSlug string, found bool, err error) {
	folders, err := f.listFolders(ctx, folderSlug, leaf)
	if err != nil {
		if errors.Is(err, fs.ErrorDirNotFound) {
			return "", false, nil
		}
		return "", false, err
	}
	for _, folder := range folders {
		if folder.Name == leaf {
			return folder.Slug, true, nil
		}
	}

	// Uloz.to allows creation of multiple files / folders with the same name in the same parent folder. rclone always
	// expects folder paths to be unique (no other file or folder with the same name should exist). As a result we also
	// need to look at the files to return the correct error if necessary.
	files, err := f.listFiles(ctx, folderSlug, leaf)
	if err != nil {
		return "", false, err
	}
	for _, file := range files {
		if file.Name == leaf {
			return "", false, fs.ErrorIsFile
		}
	}

	// The parent folder exists but no file or folder with the given name was found in it.
	return "", false, nil
}

// CreateDir implements dircache.DirCacher.CreateDir by creating a folder with the given name under a folder identified
// by parentSlug.
func (f *Fs) CreateDir(ctx context.Context, parentSlug, leaf string) (newID string, err error) {
	var folder *api.Folder
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/v6/user/" + f.opt.Username + "/folder",
		Parameters: url.Values{},
	}
	mkdir := api.CreateFolderRequest{
		Name:             f.opt.Enc.FromStandardName(leaf),
		ParentFolderSlug: parentSlug,
	}
	err = f.pacer.Call(func() (bool, error) {
		httpResp, err := f.rest.CallJSON(ctx, &opts, &mkdir, &folder)
		return f.shouldRetry(ctx, httpResp, err, true)
	})
	if err != nil {
		return "", err
	}
	return folder.Slug, nil
}

// newObjectWithInfo builds an Object for remote from info, reading the
// metadata from the backend when info is nil.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	var err error
	if info == nil {
		info, err = f.readMetaDataForPath(ctx, remote)
	}
	if err != nil {
		return nil, err
	}
	err = o.setMetaData(info)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.File, err error) {
	filename, folderSlug, err := f.dirCache.FindPath(ctx, path, false)
	if err != nil {
		if errors.Is(err, fs.ErrorDirNotFound) {
			return nil, fs.ErrorObjectNotFound
		}
		return nil, err
	}

	files, err := f.listFiles(ctx, folderSlug, filename)
	if err != nil {
		return nil, err
	}
	for _, file := range files {
		if file.Name == filename {
			return &file, nil
		}
	}

	// No file matched - check the folders to distinguish "is a dir" from "not found".
	folders, err := f.listFolders(ctx, folderSlug, filename)
	if err != nil {
		return nil, err
	}
	for _, file := range folders {
		if file.Name == filename {
			return nil, fs.ErrorIsDir
		}
	}

	return nil, fs.ErrorObjectNotFound
}

// setMetaData copies the backend file info into the Object.
func (o *Object) setMetaData(info *api.File) (err error) {
	o.name = info.Name
	o.size = info.Filesize
	o.remoteFsMtime = info.LastUserModified
	o.encodedMetadata, err = decodeDescriptionMetadata(info.Description)
	if err != nil {
		// Not fatal - the file may not have been uploaded by rclone.
		fs.Debugf(o, "Couldn't decode metadata: %v", err)
	}
	o.slug = info.Slug
	return nil
}

// NewObject implements fs.Fs.NewObject.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// List implements fs.Fs.List by listing all files and folders in the given folder.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	folderSlug, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}

	folders, err := f.listFolders(ctx, folderSlug, "")
	if err != nil {
		return nil, err
	}
	for _, folder := range folders {
		remote := path.Join(dir, folder.Name)
		f.dirCache.Put(remote, folder.Slug)
		entries = append(entries, fs.NewDir(remote, folder.LastUserModified))
	}

	files, err := f.listFiles(ctx, folderSlug, "")
	if err != nil {
		return nil, err
	}
	for _, file := range files {
		remote := path.Join(dir, file.Name)
		remoteFile, err := f.newObjectWithInfo(ctx, remote, &file)
		if err != nil {
			return nil, err
		}
		entries = append(entries, remoteFile)
	}

	return entries, nil
}

// fetchListFolderPage fetches a single page of subfolders of folderSlug,
// optionally filtered by searchQuery. Folder names are decoded to the
// standard rclone encoding before being returned.
func (f *Fs) fetchListFolderPage(
	ctx context.Context,
	folderSlug string,
	searchQuery string,
	limit int,
	offset int,
) (folders []api.Folder, err error) {
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/v9/user/" + f.opt.Username + "/folder/" + folderSlug + "/folder-list",
		Parameters: url.Values{},
	}

	opts.Parameters.Set("status", "ok")
	opts.Parameters.Set("limit", strconv.Itoa(limit))
	if offset > 0 {
		opts.Parameters.Set("offset", strconv.Itoa(offset))
	}
	if searchQuery != "" {
		opts.Parameters.Set("search_query", f.opt.Enc.FromStandardName(searchQuery))
	}

	var respBody *api.ListFoldersResponse
	err = f.pacer.Call(func() (bool, error) {
		httpResp, err := f.rest.CallJSON(ctx, &opts, nil, &respBody)
		return f.shouldRetry(ctx, httpResp, err, true)
	})
	if err != nil {
		return nil, err
	}

	for i := range respBody.Subfolders {
		respBody.Subfolders[i].Name = f.opt.Enc.ToStandardName(respBody.Subfolders[i].Name)
	}
	return respBody.Subfolders, nil
} func (f *Fs) listFolders( ctx context.Context, folderSlug string, searchQuery string, ) (folders []api.Folder, err error) { targetPageSize := f.opt.ListPageSize lastPageSize := targetPageSize offset := 0 for targetPageSize == lastPageSize { page, err := f.fetchListFolderPage(ctx, folderSlug, searchQuery, targetPageSize, offset) if err != nil { var apiErr *api.Error casted := errors.As(err, &apiErr) if casted && apiErr.ErrorCode == 30001 { return nil, fs.ErrorDirNotFound } return nil, err } lastPageSize = len(page) offset += lastPageSize folders = append(folders, page...) } return folders, nil } func (f *Fs) fetchListFilePage( ctx context.Context, folderSlug string, searchQuery string, limit int, offset int, ) (folders []api.File, err error) { opts := rest.Opts{ Method: "GET",
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/ulozto/api/types.go
backend/ulozto/api/types.go
// Package api has type definitions for uloz.to package api import ( "errors" "fmt" "time" ) // Error is a representation of the JSON structure returned by uloz.to for unsuccessful requests. type Error struct { ErrorCode int `json:"error"` StatusCode int `json:"code"` Message string `json:"message"` } // Error implements error.Error() and returns a string representation of the error. func (e *Error) Error() string { out := fmt.Sprintf("Error %d (%d)", e.ErrorCode, e.StatusCode) if e.Message != "" { out += ": " + e.Message } return out } // Is determines if the error is an instance of another error. It's required for the // errors package to search in causal chain. func (e *Error) Is(target error) bool { var err *Error ok := errors.As(target, &err) return ok } // ListResponseMetadata groups fields common for all API List calls, // and maps to the Metadata API JSON object. type ListResponseMetadata struct { Timestamp time.Time `json:"RunAt"` Offset int32 `json:"offset"` Limit int32 `json:"limit"` ItemsCount int32 `json:"items_count"` } // Folder represents a single folder, and maps to the AggregatePrivateViewFolder // JSON API object. type Folder struct { Discriminator string `json:"discriminator"` Name string `json:"name"` SanitizedName string `json:"name_sanitized"` Slug string `json:"slug"` Status string `json:"status"` PublicURL string `json:"public_url"` IsPasswordProtected bool `json:"is_password_protected"` Type string `json:"type"` FileManagerLink string `json:"file_manager_link"` ParentFolderSlug string `json:"parent_folder_slug"` Privacy string `json:"privacy"` Created time.Time `json:"created"` LastUserModified time.Time `json:"last_user_modified"` HasSubfolder bool `json:"has_subfolder"` HasTrashedSubfolders bool `json:"has_trashed_subfolders"` } // File represents a single file, and maps to the AggregatePrivateViewFileV3 // JSON API object. 
type File struct { Discriminator string `json:"discriminator"` Slug string `json:"slug"` URL string `json:"url"` Realm string `json:"realm"` Name string `json:"name"` NameSanitized string `json:"name_sanitized"` Extension string `json:"extension"` Filesize int64 `json:"filesize"` PasswordProtectedFile bool `json:"password_protected_file"` Description string `json:"description"` DescriptionSanitized string `json:"description_sanitized"` IsPorn bool `json:"is_porn"` Rating int `json:"rating"` PasswordProtectedArchive bool `json:"password_protected_archive"` MalwareStatus string `json:"malware_status"` ContentStatus string `json:"content_status"` ContentType string `json:"content_type"` Format struct { } `json:"format"` DownloadTypes []any `json:"download_types"` ThumbnailInfo []any `json:"thumbnail_info"` PreviewInfo struct { } `json:"preview_info"` Privacy string `json:"privacy"` IsPornByUploader bool `json:"is_porn_by_uploader"` ExpireDownload int `json:"expire_download"` ExpireTime time.Time `json:"expire_time"` UploadTime time.Time `json:"upload_time"` LastUserModified time.Time `json:"last_user_modified"` FolderSlug string `json:"folder_slug"` IsIncomplete bool `json:"is_incomplete"` IsInTrash bool `json:"is_in_trash"` Processing struct { Identify bool `json:"identify"` Thumbnails bool `json:"thumbnails"` LivePreview bool `json:"live_preview"` ArchiveContent bool `json:"archive_content"` Preview bool `json:"preview"` } `json:"processing"` } // FolderSize represents the API object describing the sizes of a files and subfolders of a folder. type FolderSize struct { FilesSize int64 `json:"files_size"` FilesCount int64 `json:"files_count"` FoldersCount int64 `json:"folders_count"` } // FolderSizes describes the subfolder sizes of a single folder. type FolderSizes struct { Direct FolderSize `json:"direct"` Recursive FolderSize `json:"recursive"` } // CreateFolderRequest represents the JSON API object // that's sent to the create folder API endpoint. 
type CreateFolderRequest struct { Name string `json:"name"` ParentFolderSlug string `json:"parent_folder_slug"` } // ListFoldersResponse represents the JSON API object // that's received from the list folders API endpoint. type ListFoldersResponse struct { Metadata ListResponseMetadata `json:"metadata"` Folder Folder `json:"folder"` Subfolders []Folder `json:"subfolders"` } // ListFilesResponse represents the JSON API object // that's received from the list files API endpoint. type ListFilesResponse struct { Metadata ListResponseMetadata `json:"metadata"` Items []File `json:"items"` } // FolderSizesResponse represents the response from the folder-sizes endpoint. type FolderSizesResponse map[string]FolderSizes // DeleteFoldersRequest represents the JSON API object // that's sent to the delete folders API endpoint. type DeleteFoldersRequest struct { Slugs []string `json:"slugs"` } // CreateUploadURLRequest represents the JSON API object that's // sent to the API endpoint generating URLs for new file uploads. type CreateUploadURLRequest struct { UserLogin string `json:"user_login"` Realm string `json:"realm"` ExistingSessionSlug string `json:"private_slug"` } // CreateUploadURLResponse represents the JSON API object that's // received from the API endpoint generating URLs for new file uploads. type CreateUploadURLResponse struct { UploadURL string `json:"upload_url"` PrivateSlug string `json:"private_slug"` ValidUntil time.Time `json:"valid_until"` ValidityInterval int64 `json:"validity_interval"` } // BatchUpdateFilePropertiesRequest represents the JSON API object that's // sent to the API endpoint moving the uploaded files from a scratch space // to their final destination. 
type BatchUpdateFilePropertiesRequest struct { Name string `json:"name"` FolderSlug string `json:"folder_slug"` Description string `json:"description"` Slugs []string `json:"slugs"` UploadTokens map[string]string `json:"upload_tokens"` } // SendFilePayloadResponse represents the JSON API object that's received // in response to uploading a file's body to the CDN URL. type SendFilePayloadResponse struct { Size int64 `json:"size"` ContentType string `json:"contentType"` Md5 string `json:"md5"` Message string `json:"message"` ReturnCode int `json:"return_code"` Slug string `json:"slug"` } // CommitUploadBatchRequest represents the JSON API object that's // sent to the API endpoint marking the upload batch as final. type CommitUploadBatchRequest struct { Status string `json:"status"` OwnerLogin string `json:"owner_login"` } // CommitUploadBatchResponse represents the JSON API object that's // received from the API endpoint marking the upload batch as final. type CommitUploadBatchResponse struct { PrivateSlug string `json:"private_slug"` PublicSlug string `json:"public_slug"` Status string `json:"status"` ConfirmedAt time.Time `json:"confirmed_at"` Discriminator string `json:"discriminator"` Privacy string `json:"privacy"` Name time.Time `json:"name"` PublicURL string `json:"public_url"` FilesCountOk int `json:"files_count_ok"` FilesCountTrash int `json:"files_count_trash"` FilesCountIncomplete int `json:"files_count_incomplete"` } // UpdateDescriptionRequest represents the JSON API object that's // sent to the file modification API endpoint marking the upload batch as final. type UpdateDescriptionRequest struct { Description string `json:"description"` } // MoveFolderRequest represents the JSON API object that's // sent to the folder moving API endpoint. 
type MoveFolderRequest struct { FolderSlugs []string `json:"slugs"` NewParentFolderSlug string `json:"parent_folder_slug"` } // RenameFolderRequest represents the JSON API object that's // sent to the folder moving API endpoint. type RenameFolderRequest struct { NewName string `json:"name"` } // MoveFileRequest represents the JSON API object that's // sent to the file moving API endpoint. type MoveFileRequest struct { ParentFolderSlug string `json:"folder_slug,omitempty"` NewFilename string `json:"name,omitempty"` } // GetDownloadLinkRequest represents the JSON API object that's // sent to the API endpoint that generates CDN download links for file payloads. type GetDownloadLinkRequest struct { Slug string `json:"file_slug"` UserLogin string `json:"user_login"` DeviceID string `json:"device_id"` } // GetDownloadLinkResponse represents the JSON API object that's // received from the API endpoint that generates CDN download links for file payloads. type GetDownloadLinkResponse struct { Link string `json:"link"` DownloadURLValidUntil time.Time `json:"download_url_valid_until"` DownloadURLValidityInterval int `json:"download_url_validity_interval"` Hash string `json:"hash"` } // AuthenticateRequest represents the JSON API object that's sent to the auth API endpoint. type AuthenticateRequest struct { Login string `json:"login"` Password string `json:"password"` } // AuthenticateResponse represents the JSON API object that's received from the auth API endpoint. 
type AuthenticateResponse struct { TokenID string `json:"token_id"` TokenValidityInterval int `json:"token_validity_interval"` Session struct { Country string `json:"country"` IsLimitedCountry bool `json:"is_limited_country"` User struct { Login string `json:"login"` UserID int64 `json:"user_id"` Credit int64 `json:"credit"` AvatarURL string `json:"avatar_url"` FavoritesLink string `json:"favorites_link"` RootFolderSlug string `json:"root_folder_slug"` FavoritesFolderSlug string `json:"favorites_folder_slug"` HasCloud bool `json:"has_cloud"` } `json:"user"` } `json:"session"` }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/googlecloudstorage/googlecloudstorage.go
backend/googlecloudstorage/googlecloudstorage.go
// Package googlecloudstorage provides an interface to Google Cloud Storage package googlecloudstorage /* Notes Can't set Updated but can set Metadata on object creation Patch needs full_control not just read_write FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 error - https://code.google.com/p/google-api-go-client/issues/detail?id=64 */ import ( "context" "encoding/base64" "encoding/hex" "errors" "fmt" "io" "net/http" "os" "path" "strconv" "strings" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" option "google.golang.org/api/option" // NOTE: This API is deprecated storage "google.golang.org/api/storage/v1" ) const ( rcloneClientID = "202264815644.apps.googleusercontent.com" rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw" timeFormat = time.RFC3339Nano metaMtime = "mtime" // key to store mtime in metadata metaMtimeGsutil = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata listChunks = 1000 // chunk size to read directory listings minSleep = 10 * time.Millisecond ) var ( // Description of how to auth for this app storageConfig = &oauthutil.Config{ Scopes: []string{storage.DevstorageReadWriteScope}, AuthURL: google.Endpoint.AuthURL, TokenURL: google.Endpoint.TokenURL, ClientID: rcloneClientID, ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), RedirectURL: oauthutil.RedirectURL, 
} ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "google cloud storage", Prefix: "gcs", Description: "Google Cloud Storage (this is not Google Drive)", NewFs: NewFs, Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { saFile, _ := m.Get("service_account_file") saCreds, _ := m.Get("service_account_credentials") anonymous, _ := m.Get("anonymous") envAuth, _ := m.Get("env_auth") if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" { return nil, nil } return oauthutil.ConfigOut("", &oauthutil.Options{ OAuth2Config: storageConfig, }) }, Options: append(oauthutil.SharedOptions, []fs.Option{{ Name: "project_number", Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.", Sensitive: true, }, { Name: "user_project", Help: "User project.\n\nOptional - needed only for requester pays.", Sensitive: true, }, { Name: "service_account_file", Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." 
+ env.ShellExpandHelp, }, { Name: "service_account_credentials", Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", Hide: fs.OptionHideBoth, Sensitive: true, }, { Name: "access_token", Help: "Short-lived access token.\n\nLeave blank normally.\nNeeded only if you want use short-lived access token instead of interactive login.", Hide: fs.OptionHideConfigurator, Sensitive: true, Advanced: true, }, { Name: "anonymous", Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.", Default: false, }, { Name: "object_acl", Help: "Access Control List for new objects.", Examples: []fs.OptionExample{{ Value: "authenticatedRead", Help: "Object owner gets OWNER access.\nAll Authenticated Users get READER access.", }, { Value: "bucketOwnerFullControl", Help: "Object owner gets OWNER access.\nProject team owners get OWNER access.", }, { Value: "bucketOwnerRead", Help: "Object owner gets OWNER access.\nProject team owners get READER access.", }, { Value: "private", Help: "Object owner gets OWNER access.\nDefault if left blank.", }, { Value: "projectPrivate", Help: "Object owner gets OWNER access.\nProject team members get access according to their roles.", }, { Value: "publicRead", Help: "Object owner gets OWNER access.\nAll Users get READER access.", }}, }, { Name: "bucket_acl", Help: "Access Control List for new buckets.", Examples: []fs.OptionExample{{ Value: "authenticatedRead", Help: "Project team owners get OWNER access.\nAll Authenticated Users get READER access.", }, { Value: "private", Help: "Project team owners get OWNER access.\nDefault if left blank.", }, { Value: "projectPrivate", Help: "Project team members get access according to their roles.", }, { Value: "publicRead", Help: "Project team owners get OWNER access.\nAll Users get READER access.", }, { Value: "publicReadWrite", Help: "Project team 
owners get OWNER access.\nAll Users get WRITER access.", }}, }, { Name: "bucket_policy_only", Help: `Access checks should use bucket-level IAM policies. If you want to upload objects to a bucket with Bucket Policy Only set then you will need to set this. When it is set, rclone: - ignores ACLs set on buckets - ignores ACLs set on objects - creates buckets with Bucket Policy Only set Docs: https://cloud.google.com/storage/docs/bucket-policy-only `, Default: false, }, { Name: "location", Help: "Location for the newly created buckets.", Examples: []fs.OptionExample{{ Value: "", Help: "Empty for default location (US)", }, { Value: "asia", Help: "Multi-regional location for Asia", }, { Value: "eu", Help: "Multi-regional location for Europe", }, { Value: "us", Help: "Multi-regional location for United States", }, { Value: "asia-east1", Help: "Taiwan", }, { Value: "asia-east2", Help: "Hong Kong", }, { Value: "asia-northeast1", Help: "Tokyo", }, { Value: "asia-northeast2", Help: "Osaka", }, { Value: "asia-northeast3", Help: "Seoul", }, { Value: "asia-south1", Help: "Mumbai", }, { Value: "asia-south2", Help: "Delhi", }, { Value: "asia-southeast1", Help: "Singapore", }, { Value: "asia-southeast2", Help: "Jakarta", }, { Value: "australia-southeast1", Help: "Sydney", }, { Value: "australia-southeast2", Help: "Melbourne", }, { Value: "europe-north1", Help: "Finland", }, { Value: "europe-west1", Help: "Belgium", }, { Value: "europe-west2", Help: "London", }, { Value: "europe-west3", Help: "Frankfurt", }, { Value: "europe-west4", Help: "Netherlands", }, { Value: "europe-west6", Help: "Zürich", }, { Value: "europe-central2", Help: "Warsaw", }, { Value: "us-central1", Help: "Iowa", }, { Value: "us-east1", Help: "South Carolina", }, { Value: "us-east4", Help: "Northern Virginia", }, { Value: "us-east5", Help: "Ohio", }, { Value: "us-west1", Help: "Oregon", }, { Value: "us-west2", Help: "California", }, { Value: "us-west3", Help: "Salt Lake City", }, { Value: "us-west4", Help: "Las 
Vegas", }, { Value: "northamerica-northeast1", Help: "Montréal", }, { Value: "northamerica-northeast2", Help: "Toronto", }, { Value: "southamerica-east1", Help: "São Paulo", }, { Value: "southamerica-west1", Help: "Santiago", }, { Value: "asia1", Help: "Dual region: asia-northeast1 and asia-northeast2.", }, { Value: "eur4", Help: "Dual region: europe-north1 and europe-west4.", }, { Value: "nam4", Help: "Dual region: us-central1 and us-east1.", }}, }, { Name: "storage_class", Help: "The storage class to use when storing objects in Google Cloud Storage.", Examples: []fs.OptionExample{{ Value: "", Help: "Default", }, { Value: "MULTI_REGIONAL", Help: "Multi-regional storage class", }, { Value: "REGIONAL", Help: "Regional storage class", }, { Value: "NEARLINE", Help: "Nearline storage class", }, { Value: "COLDLINE", Help: "Coldline storage class", }, { Value: "ARCHIVE", Help: "Archive storage class", }, { Value: "DURABLE_REDUCED_AVAILABILITY", Help: "Durable reduced availability storage class", }}, }, { Name: "directory_markers", Default: false, Advanced: true, Help: `Upload an empty object with a trailing slash when a new directory is created Empty folders are unsupported for bucket based remotes, this option creates an empty object ending with "/", to persist the folder. `, }, { Name: "no_check_bucket", Help: `If set, don't attempt to check the bucket exists or create it. This can be useful when trying to minimise the number of transactions rclone does if you know the bucket exists already. `, Default: false, Advanced: true, }, { Name: "decompress", Help: `If set this will decompress gzip encoded objects. It is possible to upload objects to GCS with "Content-Encoding: gzip" set. Normally rclone will download these files as compressed objects. If this flag is set then rclone will decompress these files with "Content-Encoding: gzip" as they are received. This means that rclone can't check the size and hash but the file contents will be decompressed. 
`, Advanced: true, Default: false, }, { Name: "endpoint", Help: `Custom endpoint for the storage API. Leave blank to use the provider default. When using a custom endpoint that includes a subpath (e.g. example.org/custom/endpoint), the subpath will be ignored during upload operations due to a limitation in the underlying Google API Go client library. Download and listing operations will work correctly with the full endpoint path. If you require subpath support for uploads, avoid using subpaths in your custom endpoint configuration.`, Advanced: true, Examples: []fs.OptionExample{{ Value: "storage.example.org", Help: "Specify a custom endpoint", }, { Value: "storage.example.org:4443", Help: "Specifying a custom endpoint with port", }, { Value: "storage.example.org:4443/gcs/api", Help: "Specifying a subpath, see the note, uploads won't use the custom path!", }}, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.Base | encoder.EncodeCrLf | encoder.EncodeInvalidUtf8), }, { Name: "env_auth", Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.", Default: false, Examples: []fs.OptionExample{{ Value: "false", Help: "Enter credentials in the next step.", }, { Value: "true", Help: "Get GCP IAM credentials from the environment (env vars or IAM).", }}, }}...), }) } // Options defines the configuration for this backend type Options struct { ProjectNumber string `config:"project_number"` UserProject string `config:"user_project"` ServiceAccountFile string `config:"service_account_file"` ServiceAccountCredentials string `config:"service_account_credentials"` Anonymous bool `config:"anonymous"` ObjectACL string `config:"object_acl"` BucketACL string `config:"bucket_acl"` BucketPolicyOnly bool `config:"bucket_policy_only"` Location string `config:"location"` StorageClass string 
`config:"storage_class"` NoCheckBucket bool `config:"no_check_bucket"` Decompress bool `config:"decompress"` Endpoint string `config:"endpoint"` Enc encoder.MultiEncoder `config:"encoding"` EnvAuth bool `config:"env_auth"` DirectoryMarkers bool `config:"directory_markers"` AccessToken string `config:"access_token"` } // Fs represents a remote storage server type Fs struct { name string // name of this remote root string // the path we are working on if any opt Options // parsed options features *fs.Features // optional features svc *storage.Service // the connection to the storage server client *http.Client // authorized client rootBucket string // bucket part of root (if any) rootDirectory string // directory part of root (if any) cache *bucket.Cache // cache of bucket status pacer *fs.Pacer // To pace the API calls warnCompressed sync.Once // warn once about compressed files } // Object describes a storage object // // Will definitely have info but maybe not meta type Object struct { fs *Fs // what this object is part of remote string // The remote path url string // download path md5sum string // The MD5Sum of the object bytes int64 // Bytes in the object modTime time.Time // Modified time of the object mimeType string gzipped bool // set if object has Content-Encoding: gzip } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { if f.rootBucket == "" { return "GCS root" } if f.rootDirectory == "" { return fmt.Sprintf("GCS bucket %s", f.rootBucket) } return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // shouldRetry determines whether a given err rates being retried func 
shouldRetry(ctx context.Context, err error) (again bool, errOut error) { if fserrors.ContextError(ctx, &err) { return false, err } again = false if err != nil { if fserrors.ShouldRetry(err) { again = true } else { switch gerr := err.(type) { case *googleapi.Error: if gerr.Code >= 500 && gerr.Code < 600 { // All 5xx errors should be retried again = true } else if len(gerr.Errors) > 0 { reason := gerr.Errors[0].Reason if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" { again = true } } } } } return again, err } // parsePath parses a remote 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // split returns bucket and bucketPath from the rootRelativePath // relative to f.root func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) { bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath)) if f.opt.DirectoryMarkers && strings.HasSuffix(bucketPath, "//") { bucketPath = bucketPath[:len(bucketPath)-1] } return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath) } // split returns bucket and bucketPath from the object func (o *Object) split() (bucket, bucketPath string) { return o.fs.split(o.remote) } func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) { conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...) 
if err != nil { return nil, fmt.Errorf("error processing credentials: %w", err) } ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx)) return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil } // setRoot changes the root of the Fs func (f *Fs) setRoot(root string) { f.root = parsePath(root) f.rootBucket, f.rootDirectory = bucket.Split(f.root) } // NewFs constructs an Fs from the path, bucket:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { var oAuthClient *http.Client // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } if opt.ObjectACL == "" { opt.ObjectACL = "private" } if opt.BucketACL == "" { opt.BucketACL = "private" } // try loading service account credentials from env variable, then from a file if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" { loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile)) if err != nil { return nil, fmt.Errorf("error opening service account credentials file: %w", err) } opt.ServiceAccountCredentials = string(loadedCreds) } if opt.Anonymous { oAuthClient = fshttp.NewClient(ctx) } else if opt.ServiceAccountCredentials != "" { oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials)) if err != nil { return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err) } } else if opt.EnvAuth { oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope) if err != nil { return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err) } } else if opt.AccessToken != "" { ts := oauth2.Token{AccessToken: opt.AccessToken} oAuthClient = oauth2.NewClient(ctx, oauth2.StaticTokenSource(&ts)) } else { oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig) if err != nil { ctx := context.Background() oAuthClient, err = google.DefaultClient(ctx, 
storage.DevstorageFullControlScope) if err != nil { return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err) } } } f := &Fs{ name: name, root: root, opt: *opt, pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))), cache: bucket.NewCache(), } f.setRoot(root) f.features = (&fs.Features{ ReadMimeType: true, WriteMimeType: true, BucketBased: true, BucketBasedRootOK: true, }).Fill(ctx, f) if opt.DirectoryMarkers { f.features.CanHaveEmptyDirectories = true } // Create a new authorized Drive client. f.client = oAuthClient gcsOpts := []option.ClientOption{option.WithHTTPClient(f.client)} if opt.Endpoint != "" { gcsOpts = append(gcsOpts, option.WithEndpoint(opt.Endpoint)) } f.svc, err = storage.NewService(context.Background(), gcsOpts...) if err != nil { return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err) } if f.rootBucket != "" && f.rootDirectory != "" { // Check to see if the object exists encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory) err = f.pacer.Call(func() (bool, error) { get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx) if f.opt.UserProject != "" { get = get.UserProject(f.opt.UserProject) } _, err = get.Do() return shouldRetry(ctx, err) }) if err == nil { newRoot := path.Dir(f.root) if newRoot == "." { newRoot = "" } f.setRoot(newRoot) // return an error with an fs which points to the parent return f, fs.ErrorIsFile } } return f, nil } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } if info != nil { o.setMetaData(info) } else { err := o.readMetaData(ctx) // reads info and meta, returning an error if err != nil { return nil, err } } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(ctx, remote, nil) } // listFn is called from list to handle an object. type listFn func(remote string, object *storage.Object, isDirectory bool) error // list the objects into the function supplied // // dir is the starting directory, "" for root // // Set recurse to read sub directories. // // The remote has prefix removed from it and if addBucket is set // then it adds the bucket to the start. func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) { if prefix != "" { prefix += "/" } if directory != "" { directory += "/" } list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks) if f.opt.UserProject != "" { list = list.UserProject(f.opt.UserProject) } if !recurse { list = list.Delimiter("/") } foundItems := 0 for { var objects *storage.Objects err = f.pacer.Call(func() (bool, error) { objects, err = list.Context(ctx).Do() return shouldRetry(ctx, err) }) if err != nil { if gErr, ok := err.(*googleapi.Error); ok { if gErr.Code == http.StatusNotFound { err = fs.ErrorDirNotFound } } return err } if !recurse { foundItems += len(objects.Prefixes) var object storage.Object for _, remote := range objects.Prefixes { if !strings.HasSuffix(remote, "/") { continue } remote = f.opt.Enc.ToStandardPath(remote) if !strings.HasPrefix(remote, prefix) { fs.Logf(f, "Odd name received %q", remote) continue } remote = remote[len(prefix) : len(remote)-1] if addBucket { remote = path.Join(bucket, remote) } err = fn(remote, &object, true) if err != nil { return err } } } foundItems += len(objects.Items) for _, object := range objects.Items { remote := f.opt.Enc.ToStandardPath(object.Name) if !strings.HasPrefix(remote, prefix) { fs.Logf(f, "Odd name received %q", object.Name) continue } isDirectory := remote == "" || strings.HasSuffix(remote, "/") // is this a directory marker? 
if isDirectory { // Don't insert the root directory if remote == f.opt.Enc.ToStandardPath(directory) { continue } // process directory markers as directories remote, _ = strings.CutSuffix(remote, "/") } remote = remote[len(prefix):] if addBucket { remote = path.Join(bucket, remote) } err = fn(remote, object, isDirectory) if err != nil { return err } } if objects.NextPageToken == "" { break } list.PageToken(objects.NextPageToken) } if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" { // Determine whether the directory exists or not by whether it has a marker _, err := f.readObjectInfo(ctx, bucket, directory) if err != nil { if err == fs.ErrorObjectNotFound { return fs.ErrorDirNotFound } return err } } return nil } // Convert a list item into a DirEntry func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) { if isDirectory { d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size)) return d, nil } o, err := f.newObjectWithInfo(ctx, remote, object) if err != nil { return nil, err } return o, nil } // listDir lists a single directory func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) { // List the objects err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error { entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory) if err != nil { return err } if entry != nil { return callback(entry) } return nil }) if err != nil { return err } // bucket must be present if listing succeeded f.cache.MarkOK(bucket) return err } // listBuckets lists the buckets func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) { if f.opt.ProjectNumber == "" { return nil, errors.New("can't list buckets without project number") } listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks) if 
f.opt.UserProject != "" { listBuckets = listBuckets.UserProject(f.opt.UserProject) } for { var buckets *storage.Buckets err = f.pacer.Call(func() (bool, error) { buckets, err = listBuckets.Context(ctx).Do() return shouldRetry(ctx, err) }) if err != nil { return nil, err } for _, bucket := range buckets.Items { d := fs.NewDir(f.opt.Enc.ToStandardName(bucket.Name), time.Time{}) entries = append(entries, d) } if buckets.NextPageToken == "" { break } listBuckets.PageToken(buckets.NextPageToken) } return entries, nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { return list.WithListP(ctx, dir, f) } // ListP lists the objects and directories of the Fs starting // from dir non recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. 
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error { list := list.NewHelper(callback) bucket, directory := f.split(dir) if bucket == "" { if directory != "" { return fs.ErrorListBucketRequired } entries, err := f.listBuckets(ctx) if err != nil { return err } for _, entry := range entries { err = list.Add(entry) if err != nil { return err } } } else { err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add) if err != nil { return err } } return list.Flush() } // ListR lists the objects and directories of the Fs starting // from dir recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. 
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { bucket, directory := f.split(dir) list := list.NewHelper(callback) listR := func(bucket, directory, prefix string, addBucket bool) error { return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error { entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory) if err != nil { return err } return list.Add(entry) }) } if bucket == "" { entries, err := f.listBuckets(ctx) if err != nil { return err } for _, entry := range entries { err = list.Add(entry) if err != nil { return err } bucket := entry.Remote() err = listR(bucket, "", f.rootDirectory, true) if err != nil { return err } // bucket must be present if listing succeeded f.cache.MarkOK(bucket) } } else { err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "") if err != nil { return err } // bucket must be present if listing succeeded f.cache.MarkOK(bucket) } return list.Flush() } // Put the object into the bucket // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // Temporary Object under construction o := &Object{ fs: f, remote: src.Remote(), } return o, o.Update(ctx, in, src, options...) } // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) 
} // Create directory marker file and parents func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error { if !f.opt.DirectoryMarkers || bucket == "" { return nil } // Object to be uploaded o := &Object{ fs: f, modTime: time.Now(), } for { _, bucketPath := f.split(dir) // Don't create the directory marker if it is the bucket or at the very root if bucketPath == "" { break } o.remote = dir + "/" // Check to see if object already exists _, err := o.readObjectInfo(ctx) if err == nil { return nil } // Upload it if not fs.Debugf(o, "Creating directory marker") content := io.Reader(strings.NewReader("")) err = o.Update(ctx, content, o) if err != nil { return fmt.Errorf("creating directory marker failed: %w", err) } // Now check parent directory exists dir = path.Dir(dir) if dir == "/" || dir == "." { break } } return nil } // Mkdir creates the bucket if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { bucket, _ := f.split(dir) e := f.checkBucket(ctx, bucket) if e != nil { return e } return f.createDirectoryMarker(ctx, bucket, dir) } // mkdirParent creates the parent bucket/directory if it doesn't exist func (f *Fs) mkdirParent(ctx context.Context, remote string) error { remote, _ = strings.CutSuffix(remote, "/") dir := path.Dir(remote) if dir == "/" || dir == "." { dir = "" } return f.Mkdir(ctx, dir) } // makeBucket creates the bucket if it doesn't exist func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) { return f.cache.Create(bucket, func() error { // List something from the bucket to see if it exists. Doing it like this enables the use of a // service account that only has the "Storage Object Admin" role. See #2193 for details. 
err = f.pacer.Call(func() (bool, error) { list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx) if f.opt.UserProject != "" { list = list.UserProject(f.opt.UserProject) } _, err = list.Do() return shouldRetry(ctx, err) }) if err == nil { // Bucket already exists return nil } else if gErr, ok := err.(*googleapi.Error); ok { if gErr.Code != http.StatusNotFound { return fmt.Errorf("failed to get bucket: %w", err) } } else { return fmt.Errorf("failed to get bucket: %w", err) } if f.opt.ProjectNumber == "" { return errors.New("can't make bucket without project number") } bucket := storage.Bucket{ Name: bucket, Location: f.opt.Location, StorageClass: f.opt.StorageClass, } if f.opt.BucketPolicyOnly { bucket.IamConfiguration = &storage.BucketIamConfiguration{ BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{ Enabled: true, }, } } return f.pacer.Call(func() (bool, error) { insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket) if !f.opt.BucketPolicyOnly { insertBucket.PredefinedAcl(f.opt.BucketACL) } insertBucket = insertBucket.Context(ctx) if f.opt.UserProject != "" { insertBucket = insertBucket.UserProject(f.opt.UserProject) } _, err = insertBucket.Do() return shouldRetry(ctx, err) }) }, nil) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/googlecloudstorage/googlecloudstorage_test.go
backend/googlecloudstorage/googlecloudstorage_test.go
// Test GoogleCloudStorage filesystem interface package googlecloudstorage_test import ( "testing" "github.com/rclone/rclone/backend/googlecloudstorage" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestGoogleCloudStorage:", NilObject: (*googlecloudstorage.Object)(nil), }) } func TestIntegration2(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("Skipping as -remote set") } name := "TestGoogleCloudStorage" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":", NilObject: (*googlecloudstorage.Object)(nil), ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "directory_markers", Value: "true"}, }, }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/linkbox/linkbox_test.go
backend/linkbox/linkbox_test.go
// Test Linkbox filesystem interface package linkbox_test import ( "testing" "github.com/rclone/rclone/backend/linkbox" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestLinkbox:", NilObject: (*linkbox.Object)(nil), // Linkbox doesn't support leading dots for files SkipLeadingDot: true, }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/linkbox/linkbox.go
backend/linkbox/linkbox.go
// Package linkbox provides an interface to the linkbox.to Cloud storage system. // // API docs: https://www.linkbox.to/api-docs package linkbox /* Extras - PublicLink - NO - sharing doesn't share the actual file, only a page with it on - Move - YES - have Move and Rename file APIs so is possible - MoveDir - NO - probably not possible - have Move but no Rename */ import ( "bytes" "context" "crypto/md5" "fmt" "io" "net/http" "net/url" "path" "regexp" "strconv" "strings" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" ) const ( maxEntitiesPerPage = 1000 minSleep = 200 * time.Millisecond maxSleep = 2 * time.Second pacerBurst = 1 linkboxAPIURL = "https://www.linkbox.to/api/open/" rootID = "0" // ID of root directory ) func init() { fsi := &fs.RegInfo{ Name: "linkbox", Description: "Linkbox", NewFs: NewFs, Options: []fs.Option{{ Name: "token", Help: "Token from https://www.linkbox.to/admin/account", Sensitive: true, Required: true, }}, } fs.Register(fsi) } // Options defines the configuration for this backend type Options struct { Token string `config:"token"` } // Fs stores the interface to the remote Linkbox files type Fs struct { name string root string opt Options // options for this backend features *fs.Features // optional features ci *fs.ConfigInfo // global config srv *rest.Client // the connection to the server dirCache *dircache.DirCache // Map of directory path to directory id pacer *fs.Pacer } // Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading) type Object struct { fs *Fs remote string size int64 modTime time.Time contentType string fullURL string dirID int64 itemID string // and these IDs are for 
files id int64 // these IDs appear to apply to directories isDir bool } // NewFs creates a new Fs object from the name and root. It connects to // the host specified in the config file. func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { root = strings.Trim(root, "/") // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } ci := fs.GetConfig(ctx) f := &Fs{ name: name, opt: *opt, root: root, ci: ci, srv: rest.NewClient(fshttp.NewClient(ctx)), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep))), } f.dirCache = dircache.New(root, rootID, f) f.features = (&fs.Features{ CanHaveEmptyDirectories: true, CaseInsensitive: true, }).Fill(ctx, f) // Find the current root err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) tempF := *f tempF.dirCache = dircache.New(newRoot, rootID, &tempF) tempF.root = newRoot // Make new Fs which is the parent err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } _, err := tempF.NewObject(ctx, remote) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f return f, nil } return nil, err } f.features.Fill(ctx, &tempF) // XXX: update the old f here instead of returning tempF, since // `features` were already filled with functions having *f as a receiver. 
// See https://github.com/rclone/rclone/issues/2182 f.dirCache = tempF.dirCache f.root = tempF.root // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } type entity struct { Type string `json:"type"` Name string `json:"name"` URL string `json:"url"` Ctime int64 `json:"ctime"` Size int64 `json:"size"` ID int64 `json:"id"` Pid int64 `json:"pid"` ItemID string `json:"item_id"` } // Return true if the entity is a directory func (e *entity) isDir() bool { return e.Type == "dir" || e.Type == "sdir" } type data struct { Entities []entity `json:"list"` } type fileSearchRes struct { response SearchData data `json:"data"` } // Set an object info from an entity func (o *Object) set(e *entity) { o.modTime = time.Unix(e.Ctime, 0) o.contentType = e.Type o.size = e.Size o.fullURL = e.URL o.isDir = e.isDir() o.id = e.ID o.itemID = e.ItemID o.dirID = e.Pid } // Call linkbox with the query in opts and return result // // This will be checked for error and an error will be returned if Status != 1 func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result any) error { err := f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, opts, nil, &result) return f.shouldRetry(ctx, resp, err) }) if err != nil { return err } responser := result.(responser) if responser.IsError() { return responser } return nil } // list the objects into the function supplied // // If directories is set it only sends directories // User function to process a File item from listAll // // Should return true to finish processing type listAllFn func(*entity) bool // Search is a bit fussy about which characters match // // If the name doesn't match this then do an dir list instead // N.B.: Linkbox doesn't support search by name that is longer than 50 chars var searchOK = regexp.MustCompile(`^[a-zA-Z0-9_ -.]{1,50}$`) // Lists the directory required calling the user function on each item found // // If the user fn ever returns true then 
it early exits with found = true // // If you set name then search ignores dirID. name is a substring // search also so name="dir" matches "sub dir" also. This filters it // down so it only returns items in dirID func (f *Fs) listAll(ctx context.Context, dirID string, name string, fn listAllFn) (found bool, err error) { var ( pageNumber = 0 numberOfEntities = maxEntitiesPerPage ) name = strings.TrimSpace(name) // search doesn't like spaces if !searchOK.MatchString(name) { // If name isn't good then do an unbounded search name = "" } OUTER: for numberOfEntities == maxEntitiesPerPage { pageNumber++ opts := &rest.Opts{ Method: "GET", RootURL: linkboxAPIURL, Path: "file_search", Parameters: url.Values{ "token": {f.opt.Token}, "name": {name}, "pid": {dirID}, "pageNo": {itoa(pageNumber)}, "pageSize": {itoa64(maxEntitiesPerPage)}, }, } var responseResult fileSearchRes err = getUnmarshaledResponse(ctx, f, opts, &responseResult) if err != nil { return false, fmt.Errorf("getting files failed: %w", err) } numberOfEntities = len(responseResult.SearchData.Entities) for _, entity := range responseResult.SearchData.Entities { if itoa64(entity.Pid) != dirID { // when name != "" this returns from all directories, so ignore not this one continue } if fn(&entity) { found = true break OUTER } } if pageNumber > 100000 { return false, fmt.Errorf("too many results") } } return found, nil } // Turn 64 bit int to string func itoa64(i int64) string { return strconv.FormatInt(i, 10) } // Turn int to string func itoa(i int) string { return itoa64(int64(i)) } func splitDirAndName(remote string) (dir string, name string) { lastSlashPosition := strings.LastIndex(remote, "/") if lastSlashPosition == -1 { dir = "" name = remote } else { dir = remote[:lastSlashPosition] name = remote[lastSlashPosition+1:] } // fs.Debugf(nil, "splitDirAndName remote = {%s}, dir = {%s}, name = {%s}", remote, dir, name) return dir, name } // FindLeaf finds a directory of name leaf in the folder with ID directoryID 
func (f *Fs) FindLeaf(ctx context.Context, directoryID, leaf string) (directoryIDOut string, found bool, err error) { // Find the leaf in directoryID found, err = f.listAll(ctx, directoryID, leaf, func(entity *entity) bool { if entity.isDir() && strings.EqualFold(entity.Name, leaf) { directoryIDOut = itoa64(entity.ID) return true } return false }) return directoryIDOut, found, err } // Returned from "folder_create" type folderCreateRes struct { response Data struct { DirID int64 `json:"dirId"` } `json:"data"` } // CreateDir makes a directory with dirID as parent and name leaf func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, err error) { // fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf) opts := &rest.Opts{ Method: "GET", RootURL: linkboxAPIURL, Path: "folder_create", Parameters: url.Values{ "token": {f.opt.Token}, "name": {leaf}, "pid": {dirID}, "isShare": {"0"}, "canInvite": {"1"}, "canShare": {"1"}, "withBodyImg": {"1"}, "desc": {""}, }, } response := folderCreateRes{} err = getUnmarshaledResponse(ctx, f, opts, &response) if err != nil { // response status 1501 means that directory already exists if response.Status == 1501 { return newID, fmt.Errorf("couldn't find already created directory: %w", fs.ErrorDirNotFound) } return newID, fmt.Errorf("CreateDir failed: %w", err) } if response.Data.DirID == 0 { return newID, fmt.Errorf("API returned 0 for ID of newly created directory") } return itoa64(response.Data.DirID), nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. 
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { // fs.Debugf(f, "List method dir = {%s}", dir) directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } _, err = f.listAll(ctx, directoryID, "", func(entity *entity) bool { remote := path.Join(dir, entity.Name) if entity.isDir() { id := itoa64(entity.ID) modTime := time.Unix(entity.Ctime, 0) d := fs.NewDir(remote, modTime).SetID(id).SetParentID(itoa64(entity.Pid)) entries = append(entries, d) // cache the directory ID for later lookups f.dirCache.Put(remote, id) } else { o := &Object{ fs: f, remote: remote, } o.set(entity) entries = append(entries, o) } return false }) if err != nil { return nil, err } return entries, nil } // get an entity with leaf from dirID func getEntity(ctx context.Context, f *Fs, leaf string, directoryID string, token string) (*entity, error) { var result *entity var resultErr = fs.ErrorObjectNotFound _, err := f.listAll(ctx, directoryID, leaf, func(entity *entity) bool { if strings.EqualFold(entity.Name, leaf) { // fs.Debugf(f, "getObject found entity.Name {%s} name {%s}", entity.Name, name) if entity.isDir() { result = nil resultErr = fs.ErrorIsDir } else { result = entity resultErr = nil } return true } return false }) if err != nil { return nil, err } return result, resultErr } // NewObject finds the Object at remote. If it can't be found // it returns the error ErrorObjectNotFound. // // If remote points to a directory then it should return // ErrorIsDir if possible without doing any extra work, // otherwise ErrorObjectNotFound. 
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { leaf, dirID, err := f.dirCache.FindPath(ctx, remote, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound } return nil, err } entity, err := getEntity(ctx, f, leaf, dirID, f.opt.Token) if err != nil { return nil, err } o := &Object{ fs: f, remote: remote, } o.set(entity) return o, nil } // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, err := f.dirCache.FindDir(ctx, dir, true) return err } func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { if check { entries, err := f.List(ctx, dir) if err != nil { return err } if len(entries) != 0 { return fs.ErrorDirectoryNotEmpty } } directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return err } opts := &rest.Opts{ Method: "GET", RootURL: linkboxAPIURL, Path: "folder_del", Parameters: url.Values{ "token": {f.opt.Token}, "dirIds": {directoryID}, }, } response := response{} err = getUnmarshaledResponse(ctx, f, opts, &response) if err != nil { // Linkbox has some odd error returns here if response.Status == 403 || response.Status == 500 { return fs.ErrorDirNotFound } return fmt.Errorf("purge error: %w", err) } f.dirCache.FlushDir(dir) return nil } // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) } // SetModTime sets modTime on a particular file func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { return fs.ErrorCantSetModTime } // Open opens the file for read. 
Call Close() on the returned io.ReadCloser func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { var res *http.Response downloadURL := o.fullURL if downloadURL == "" { _, name := splitDirAndName(o.Remote()) newObject, err := getEntity(ctx, o.fs, name, itoa64(o.dirID), o.fs.opt.Token) if err != nil { return nil, err } if newObject == nil { // fs.Debugf(o.fs, "Open entity is empty: name = {%s}", name) return nil, fs.ErrorObjectNotFound } downloadURL = newObject.URL } opts := &rest.Opts{ Method: "GET", RootURL: downloadURL, Options: options, } err := o.fs.pacer.Call(func() (bool, error) { var err error res, err = o.fs.srv.Call(ctx, opts) return o.fs.shouldRetry(ctx, res, err) }) if err != nil { return nil, fmt.Errorf("Open failed: %w", err) } return res.Body, nil } // Update in to the object with the modTime given of the given size // // When called from outside an Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either // return an error or update the object properly (rather than e.g. calling panic). 
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { size := src.Size() if size == 0 { return fs.ErrorCantUploadEmptyFiles } else if size < 0 { return fmt.Errorf("can't upload files of unknown length") } remote := o.Remote() // remove the file if it exists if o.itemID != "" { fs.Debugf(o, "Update: removing old file") err = o.Remove(ctx) if err != nil { fs.Errorf(o, "Update: failed to remove existing file: %v", err) } o.itemID = "" } else { tmpObject, err := o.fs.NewObject(ctx, remote) if err == nil { fs.Debugf(o, "Update: removing old file") err = tmpObject.Remove(ctx) if err != nil { fs.Errorf(o, "Update: failed to remove existing file: %v", err) } } } first10m := io.LimitReader(in, 10_485_760) first10mBytes, err := io.ReadAll(first10m) if err != nil { return fmt.Errorf("Update err in reading file: %w", err) } // get upload authorization (step 1) opts := &rest.Opts{ Method: "GET", RootURL: linkboxAPIURL, Path: "get_upload_url", Options: options, Parameters: url.Values{ "token": {o.fs.opt.Token}, "fileMd5ofPre10m": {fmt.Sprintf("%x", md5.Sum(first10mBytes))}, "fileSize": {itoa64(size)}, }, } getFirstStepResult := getUploadURLResponse{} err = getUnmarshaledResponse(ctx, o.fs, opts, &getFirstStepResult) if err != nil { if getFirstStepResult.Status != 600 { return fmt.Errorf("Update err in unmarshaling response: %w", err) } } switch getFirstStepResult.Status { case 1: // upload file using link from first step var res *http.Response var location string // Check to see if we are being redirected opts := &rest.Opts{ Method: "HEAD", RootURL: getFirstStepResult.Data.SignURL, Options: options, NoRedirect: true, } err = o.fs.pacer.CallNoRetry(func() (bool, error) { res, err = o.fs.srv.Call(ctx, opts) return o.fs.shouldRetry(ctx, res, err) }) if res != nil { location = res.Header.Get("Location") if location != "" { // set the URL to the new Location opts.RootURL = location err = nil } } if err != nil { return 
fmt.Errorf("head upload URL: %w", err) } file := io.MultiReader(bytes.NewReader(first10mBytes), in) opts.Method = "PUT" opts.Body = file opts.ContentLength = &size err = o.fs.pacer.CallNoRetry(func() (bool, error) { res, err = o.fs.srv.Call(ctx, opts) return o.fs.shouldRetry(ctx, res, err) }) if err != nil { return fmt.Errorf("update err in uploading file: %w", err) } _, err = io.ReadAll(res.Body) if err != nil { return fmt.Errorf("update err in reading response: %w", err) } case 600: // Status means that we don't need to upload file // We need only to make second step default: return fmt.Errorf("got unexpected message from Linkbox: %s", getFirstStepResult.Message) } leaf, dirID, err := o.fs.dirCache.FindPath(ctx, remote, false) if err != nil { return err } // create file item at Linkbox (second step) opts = &rest.Opts{ Method: "GET", RootURL: linkboxAPIURL, Path: "folder_upload_file", Options: options, Parameters: url.Values{ "token": {o.fs.opt.Token}, "fileMd5ofPre10m": {fmt.Sprintf("%x", md5.Sum(first10mBytes))}, "fileSize": {itoa64(size)}, "pid": {dirID}, "diyName": {leaf}, }, } getSecondStepResult := getUploadURLResponse{} err = getUnmarshaledResponse(ctx, o.fs, opts, &getSecondStepResult) if err != nil { return fmt.Errorf("Update second step failed: %w", err) } // Try a few times to read the object after upload for eventual consistency const maxTries = 10 var sleepTime = 100 * time.Millisecond var entity *entity for try := 1; try <= maxTries; try++ { entity, err = getEntity(ctx, o.fs, leaf, dirID, o.fs.opt.Token) if err == nil { break } if err != fs.ErrorObjectNotFound { return fmt.Errorf("Update failed to read object: %w", err) } fs.Debugf(o, "Trying to read object after upload: try again in %v (%d/%d)", sleepTime, try, maxTries) time.Sleep(sleepTime) sleepTime *= 2 } if err != nil { return err } o.set(entity) return nil } // Remove this object func (o *Object) Remove(ctx context.Context) error { opts := &rest.Opts{ Method: "GET", RootURL: linkboxAPIURL, 
Path: "file_del", Parameters: url.Values{ "token": {o.fs.opt.Token}, "itemIds": {o.itemID}, }, } requestResult := getUploadURLResponse{} err := getUnmarshaledResponse(ctx, o.fs, opts, &requestResult) if err != nil { return fmt.Errorf("could not Remove: %w", err) } return nil } // ModTime returns the modification time of the remote http file func (o *Object) ModTime(ctx context.Context) time.Time { return o.modTime } // Remote the name of the remote HTTP file, relative to the fs root func (o *Object) Remote() string { return o.remote } // Size returns the size in bytes of the remote http file func (o *Object) Size() int64 { return o.size } // String returns the URL to the remote HTTP file func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Fs is the filesystem this remote http file object is located within func (o *Object) Fs() fs.Info { return o.fs } // Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) { return "", hash.ErrUnsupported } // Storable returns whether the remote http file is a regular file // (not a directory, symbolic link, block device, character device, named pipe, etc.) func (o *Object) Storable() bool { return true } // Features returns the optional features of this Fs // Info provides a read only interface to information about a filesystem. 
func (f *Fs) Features() *fs.Features { return f.features } // Name of the remote (as passed into NewFs) // Name returns the configured name of the file system func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String returns a description of the FS func (f *Fs) String() string { return fmt.Sprintf("Linkbox root '%s'", f.root) } // Precision of the ModTimes in this Fs func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } // Hashes returns hash.HashNone to indicate remote hashing is unavailable // Returns the supported hash types of the filesystem func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } /* { "data": { "signUrl": "http://xx -- Then CURL PUT your file with sign url " }, "msg": "please use this url to upload (PUT method)", "status": 1 } */ // All messages have these items type response struct { Message string `json:"msg"` Status int `json:"status"` } // IsError returns whether response represents an error func (r *response) IsError() bool { return r.Status != 1 } // Error returns the error state of this response func (r *response) Error() string { return fmt.Sprintf("Linkbox error %d: %s", r.Status, r.Message) } // responser is interface covering the response so we can use it when it is embedded. type responser interface { IsError() bool Error() string } type getUploadURLData struct { SignURL string `json:"signUrl"` } type getUploadURLResponse struct { response Data getUploadURLData `json:"data"` } // Put in to the remote path with the modTime given of the given size // // When called from outside an Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Put should either // return an error or upload it properly (rather than e.g. calling panic). 
// // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { o := &Object{ fs: f, remote: src.Remote(), size: src.Size(), } dir, _ := splitDirAndName(src.Remote()) err := f.Mkdir(ctx, dir) if err != nil { return nil, err } err = o.Update(ctx, in, src, options...) return o, err } // Purge all files in the directory specified // // Implement this if you have a way of deleting all the files // quicker than just running Remove() on the result of List() // // Return an error if it doesn't exist func (f *Fs) Purge(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, false) } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry determines whether a given err rates being retried func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // DirCacheFlush resets the directory cache - used in testing as an // optional interface func (f *Fs) DirCacheFlush() { f.dirCache.ResetRoot() } // Check the interfaces are satisfied var ( _ fs.Fs = &Fs{} _ fs.Purger = &Fs{} _ fs.DirCacheFlusher = &Fs{} _ fs.Object = &Object{} )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/box/box_test.go
backend/box/box_test.go
// Test Box filesystem interface package box_test import ( "testing" "github.com/rclone/rclone/backend/box" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestBox:", NilObject: (*box.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/box/box.go
backend/box/box.go
// Package box provides an interface to the Box // object storage system. package box // FIXME Box only supports file names of 255 characters or less. Names // that will not be supported are those that contain non-printable // ascii, / or \, names with trailing spaces, and the special names // “.” and “..”. // FIXME box can copy a directory import ( "context" "crypto/rsa" "encoding/json" "encoding/pem" "errors" "fmt" "io" "net/http" "net/url" "os" "path" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/golang-jwt/jwt/v4" "github.com/rclone/rclone/backend/box/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/jwtutil" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/rest" "github.com/youmark/pkcs8" ) const ( rcloneClientID = "d0374ba6pgmaguie02ge15sv1mllndho" rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL" minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential rootURL = "https://api.box.com/2.0" uploadURL = "https://upload.box.com/api/2.0" minUploadCutoff = 50000000 // upload cutoff can be no lower than this defaultUploadCutoff = 50 * 1024 * 1024 tokenURL = "https://api.box.com/oauth2/token" ) // Globals var ( // Description of how to auth for this app oauthConfig = &oauthutil.Config{ Scopes: nil, AuthURL: "https://app.box.com/api/oauth2/authorize", TokenURL: "https://app.box.com/api/oauth2/token", 
ClientID: rcloneClientID, ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), RedirectURL: oauthutil.RedirectURL, } ) type boxCustomClaims struct { jwt.StandardClaims BoxSubType string `json:"box_sub_type,omitempty"` } // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "box", Description: "Box", NewFs: NewFs, Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { boxAccessToken, boxAccessTokenOk := m.Get("access_token") var err error // If using box config.json, use JWT auth if usesJWTAuth(m) { err = refreshJWTToken(ctx, name, m) if err != nil { return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err) } // Else, if not using an access token, use oauth2 } else if boxAccessToken == "" || !boxAccessTokenOk { return oauthutil.ConfigOut("", &oauthutil.Options{ OAuth2Config: oauthConfig, }) } return nil, nil }, Options: append(oauthutil.SharedOptions, []fs.Option{{ Name: "root_folder_id", Help: "Fill in for rclone to use a non root folder as its starting point.", Default: "0", Advanced: true, Sensitive: true, }, { Name: "box_config_file", Help: "Box App config.json location\n\nLeave blank normally." 
+ env.ShellExpandHelp, }, { Name: "config_credentials", Help: "Box App config.json contents.\n\nLeave blank normally.", Hide: fs.OptionHideBoth, Sensitive: true, }, { Name: "access_token", Help: "Box App Primary Access Token\n\nLeave blank normally.", Sensitive: true, }, { Name: "box_sub_type", Default: "user", Examples: []fs.OptionExample{{ Value: "user", Help: "Rclone should act on behalf of a user.", }, { Value: "enterprise", Help: "Rclone should act on behalf of a service account.", }}, }, { Name: "upload_cutoff", Help: "Cutoff for switching to multipart upload (>= 50 MiB).", Default: fs.SizeSuffix(defaultUploadCutoff), Advanced: true, }, { Name: "commit_retries", Help: "Max number of times to try committing a multipart file.", Default: 100, Advanced: true, }, { Name: "list_chunk", Default: 1000, Help: "Size of listing chunk 1-1000.", Advanced: true, }, { Name: "owned_by", Default: "", Help: "Only show items owned by the login (email address) passed in.", Advanced: true, }, { Name: "impersonate", Default: "", Help: `Impersonate this user ID when using a service account. Setting this flag allows rclone, when using a JWT service account, to act on behalf of another user by setting the as-user header. The user ID is the Box identifier for a user. User IDs can found for any user via the GET /users endpoint, which is only available to admins, or by calling the GET /users/me endpoint with an authenticated user session. See: https://developer.box.com/guides/authentication/jwt/as-user/ `, Advanced: true, Sensitive: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // From https://developer.box.com/docs/error-codes#section-400-bad-request : // > Box only supports file or folder names that are 255 characters or less. // > File names containing non-printable ascii, "/" or "\", names with leading // > or trailing spaces, and the special names “.” and “..” are also unsupported. 
// // Testing revealed names with leading spaces work fine. // Also encode invalid UTF-8 bytes as json doesn't handle them properly. Default: (encoder.Display | encoder.EncodeBackSlash | encoder.EncodeRightSpace | encoder.EncodeInvalidUtf8), }}...), }) } func usesJWTAuth(m configmap.Mapper) bool { jsonFile, okFile := m.Get("box_config_file") jsonFileCredentials, okCredentials := m.Get("config_credentials") boxSubType, boxSubTypeOk := m.Get("box_sub_type") return (okFile || okCredentials) && boxSubTypeOk && (jsonFile != "" || jsonFileCredentials != "") && boxSubType != "" } func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error { boxSubType, _ := m.Get("box_sub_type") boxConfig, err := getBoxConfig(m) if err != nil { return fmt.Errorf("get box config: %w", err) } privateKey, err := getDecryptedPrivateKey(boxConfig) if err != nil { return fmt.Errorf("get decrypted private key: %w", err) } claims, err := getClaims(boxConfig, boxSubType) if err != nil { return fmt.Errorf("get claims: %w", err) } signingHeaders := getSigningHeaders(boxConfig) queryParams := getQueryParams(boxConfig) client := fshttp.NewClient(ctx) err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client) return err } func getBoxConfig(m configmap.Mapper) (boxConfig *api.ConfigJSON, err error) { configFileCredentials, _ := m.Get("config_credentials") configFileBytes := []byte(configFileCredentials) if configFileCredentials == "" { configFile, _ := m.Get("box_config_file") configFileBytes, err = os.ReadFile(configFile) if err != nil { return nil, fmt.Errorf("box: failed to read Box config: %w", err) } } err = json.Unmarshal(configFileBytes, &boxConfig) if err != nil { return nil, fmt.Errorf("box: failed to parse Box config: %w", err) } return boxConfig, nil } func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) { val, err := jwtutil.RandomHex(20) if err != nil { return nil, 
fmt.Errorf("box: failed to generate random string for jti: %w", err) } claims = &boxCustomClaims{ //lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely //nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019 StandardClaims: jwt.StandardClaims{ Id: val, Issuer: boxConfig.BoxAppSettings.ClientID, Subject: boxConfig.EnterpriseID, Audience: tokenURL, ExpiresAt: time.Now().Add(time.Second * 45).Unix(), }, BoxSubType: boxSubType, } return claims, nil } func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any { signingHeaders := map[string]any{ "kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID, } return signingHeaders } func getQueryParams(boxConfig *api.ConfigJSON) map[string]string { queryParams := map[string]string{ "client_id": boxConfig.BoxAppSettings.ClientID, "client_secret": boxConfig.BoxAppSettings.ClientSecret, } return queryParams } func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) { block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey)) if block == nil { return nil, errors.New("box: failed to PEM decode private key") } if len(rest) > 0 { return nil, fmt.Errorf("box: extra data included in private key: %w", err) } rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase)) if err != nil { return nil, fmt.Errorf("box: failed to decrypt private key: %w", err) } return rsaKey.(*rsa.PrivateKey), nil } // Options defines the configuration for this backend type Options struct { UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` CommitRetries int `config:"commit_retries"` Enc encoder.MultiEncoder `config:"encoding"` RootFolderID string `config:"root_folder_id"` AccessToken string `config:"access_token"` ListChunk int `config:"list_chunk"` OwnedBy string `config:"owned_by"` 
Impersonate string `config:"impersonate"` } // ItemMeta defines metadata we cache for each Item ID type ItemMeta struct { SequenceID int64 // the most recent event processed for this item ParentID string // ID of the parent directory of this item Name string // leaf name of this item } // Fs represents a remote box type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options features *fs.Features // optional features srv *rest.Client // the connection to the server dirCache *dircache.DirCache // Map of directory path to directory id pacer *fs.Pacer // pacer for API calls tokenRenewer *oauthutil.Renew // renew the token on expiry uploadToken *pacer.TokenDispenser // control concurrency itemMetaCacheMu *sync.Mutex // protects itemMetaCache itemMetaCache map[string]ItemMeta // map of Item ID to selected metadata } // Object describes a box object // // Will definitely have info but maybe not meta type Object struct { fs *Fs // what this object is part of remote string // The remote path hasMetaData bool // whether info below has been set size int64 // size of the object modTime time.Time // modification time of the object id string // ID of the object publicLink string // Public Link for the object sha1 string // SHA-1 of the object content } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("box root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // parsePath parses a box 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = 
[]int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } authRetry := false if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token") { authRetry = true fs.Debugf(nil, "Should retry: %v", err) } // Box API errors which should be retries if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "operation_blocked_temporary" { fs.Debugf(nil, "Retrying API error %v", err) return true, err } return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // readMetaDataForPath reads the metadata from the path func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) { // defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound } return nil, err } // Use preupload to find the ID itemMini, err := f.preUploadCheck(ctx, leaf, directoryID, -1) if err != nil { return nil, err } if itemMini == nil { return nil, fs.ErrorObjectNotFound } // Now we have the ID we can look up the object proper opts := rest.Opts{ Method: "GET", Path: "/files/" + itemMini.ID, Parameters: fieldsValue(), } var item api.Item err = f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, nil, &item) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } return &item, nil } // errorHandler parses a non 2xx error response into an error func errorHandler(resp *http.Response) error { // Decode error response 
errResponse := new(api.Error) err := rest.DecodeJSON(resp, &errResponse) if err != nil { fs.Debugf(nil, "Couldn't decode error response: %v", err) } if errResponse.Code == "" { errResponse.Code = resp.Status } if errResponse.Status == 0 { errResponse.Status = resp.StatusCode } return errResponse } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } if opt.UploadCutoff < minUploadCutoff { return nil, fmt.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff)) } root = parsePath(root) client := fshttp.NewClient(ctx) var ts *oauthutil.TokenSource // If not using an accessToken, create an oauth client and tokensource if opt.AccessToken == "" { client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig) if err != nil { return nil, fmt.Errorf("failed to configure Box: %w", err) } } ci := fs.GetConfig(ctx) f := &Fs{ name: name, root: root, opt: *opt, srv: rest.NewClient(client).SetRoot(rootURL), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), uploadToken: pacer.NewTokenDispenser(ci.Transfers), itemMetaCacheMu: new(sync.Mutex), itemMetaCache: make(map[string]ItemMeta), } f.features = (&fs.Features{ CaseInsensitive: true, CanHaveEmptyDirectories: true, }).Fill(ctx, f) f.srv.SetErrorHandler(errorHandler) // If using an accessToken, set the Authorization header if f.opt.AccessToken != "" { f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken) } // If using impersonate set an as-user header if f.opt.Impersonate != "" { f.srv.SetHeader("as-user", f.opt.Impersonate) } if ts != nil { // If using box config.json and JWT, renewing should just refresh the token and // should do so whether there are uploads pending or 
not. if usesJWTAuth(m) { f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { err := refreshJWTToken(ctx, name, m) return err }) f.tokenRenewer.Start() } else { // Renew the token in the background f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { _, err := f.readMetaDataForPath(ctx, "") return err }) } } // Get rootFolderID rootID := f.opt.RootFolderID f.dirCache = dircache.New(root, rootID, f) // Find the current root err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) tempF := *f tempF.dirCache = dircache.New(newRoot, rootID, &tempF) tempF.root = newRoot // Make new Fs which is the parent err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } _, err := tempF.newObjectWithInfo(ctx, remote, nil) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f return f, nil } return nil, err } f.features.Fill(ctx, &tempF) // XXX: update the old f here instead of returning tempF, since // `features` were already filled with functions having *f as a receiver. // See https://github.com/rclone/rclone/issues/2182 f.dirCache = tempF.dirCache f.root = tempF.root // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // rootSlash returns root with a slash on if it is empty, otherwise empty string func (f *Fs) rootSlash() string { if f.root == "" { return f.root } return f.root + "/" } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } var err error if info != nil { // Set info err = o.setMetaData(info) } else { err = o.readMetaData(ctx) // reads info and meta, returning an error } if err != nil { return nil, err } return o, nil } // NewObject finds the Object at remote. 
If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(ctx, remote, nil) } // FindLeaf finds a directory of name leaf in the folder with ID pathID func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { // Find the leaf in pathID found, err = f.listAll(ctx, pathID, true, false, true, func(item *api.Item) bool { if strings.EqualFold(item.Name, leaf) { pathIDOut = item.ID return true } return false }) return pathIDOut, found, err } // fieldsValue creates a url.Values with fields set to those in api.Item func fieldsValue() url.Values { values := url.Values{} values.Set("fields", api.ItemFields) return values } // CreateDir makes a directory with pathID as parent and name leaf func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) var resp *http.Response var info *api.Item opts := rest.Opts{ Method: "POST", Path: "/folders", Parameters: fieldsValue(), } mkdir := api.CreateFolder{ Name: f.opt.Enc.FromStandardName(leaf), Parent: api.Parent{ ID: pathID, }, } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info) return shouldRetry(ctx, resp, err) }) if err != nil { // fmt.Printf("...Error %v\n", err) return "", err } // fmt.Printf("...Id %q\n", *info.Id) return info.ID, nil } // list the objects into the function supplied // // If directories is set it only sends directories // User function to process a File item from listAll // // Should return true to finish processing type listAllFn func(*api.Item) bool // Lists the directory required calling the user function on each item found // // If the user fn ever returns true then it early exits with found = true func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, activeOnly bool, fn 
listAllFn) (found bool, err error) { opts := rest.Opts{ Method: "GET", Path: "/folders/" + dirID + "/items", Parameters: fieldsValue(), } opts.Parameters.Set("limit", strconv.Itoa(f.opt.ListChunk)) opts.Parameters.Set("usemarker", "true") var marker *string OUTER: for { if marker != nil { opts.Parameters.Set("marker", *marker) } var result api.FolderItems var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return found, fmt.Errorf("couldn't list files: %w", err) } for i := range result.Entries { item := &result.Entries[i] if item.Type == api.ItemTypeFolder { if filesOnly { continue } } else if item.Type == api.ItemTypeFile { if directoriesOnly { continue } } else { fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type) continue } if activeOnly && item.ItemStatus != api.ItemStatusActive { continue } if f.opt.OwnedBy != "" && f.opt.OwnedBy != item.OwnedBy.Login { continue } item.Name = f.opt.Enc.ToStandardName(item.Name) if fn(item) { found = true break OUTER } } marker = result.NextMarker if marker == nil { break } } return } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { return list.WithListP(ctx, dir, f) } // ListP lists the objects and directories of the Fs starting // from dir non recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. 
If // callback returns an error then the listing will stop // immediately. func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error { list := list.NewHelper(callback) directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return err } var iErr error _, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool { remote := path.Join(dir, info.Name) if info.Type == api.ItemTypeFolder { // cache the directory ID for later lookups f.dirCache.Put(remote, info.ID) d := fs.NewDir(remote, info.ModTime()).SetID(info.ID) // FIXME more info from dir? err = list.Add(d) if err != nil { iErr = err return true } } else if info.Type == api.ItemTypeFile { o, err := f.newObjectWithInfo(ctx, remote, info) if err != nil { iErr = err return true } err = list.Add(o) if err != nil { iErr = err return true } } // Cache some metadata for this Item to help us process events later // on. In particular, the box event API does not provide the old path // of the Item when it is renamed/deleted/moved/etc. f.itemMetaCacheMu.Lock() cachedItemMeta, found := f.itemMetaCache[info.ID] if !found || cachedItemMeta.SequenceID < info.SequenceID { f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name} } f.itemMetaCacheMu.Unlock() return false }) if err != nil { return err } if iErr != nil { return iErr } return list.Flush() } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Returns the object, leaf, directoryID and error. 
// // Used to create new objects func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { // Create the directory for the object if it doesn't exist leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true) if err != nil { return } // Temporary Object under construction o = &Object{ fs: f, remote: remote, } return o, leaf, directoryID, nil } // preUploadCheck checks to see if a file can be uploaded // // It returns "", nil if the file is good to go // It returns "ID", nil if the file must be updated func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (item *api.ItemMini, err error) { check := api.PreUploadCheck{ Name: f.opt.Enc.FromStandardName(leaf), Parent: api.Parent{ ID: directoryID, }, } if size >= 0 { check.Size = &size } opts := rest.Opts{ Method: "OPTIONS", Path: "/files/content/", } var result api.PreUploadCheckResponse var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &check, &result) return shouldRetry(ctx, resp, err) }) if err != nil { if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "item_name_in_use" { var conflict api.PreUploadCheckConflict err = json.Unmarshal(apiErr.ContextInfo, &conflict) if err != nil { return nil, fmt.Errorf("pre-upload check: JSON decode failed: %w", err) } if conflict.Conflicts.Type != api.ItemTypeFile { return nil, fs.ErrorIsDir } return &conflict.Conflicts, nil } return nil, fmt.Errorf("pre-upload check: %w", err) } return nil, nil } // Put the object // // Copy the reader in to the new object which is returned. 
// // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // If directory doesn't exist, file doesn't exist so can upload remote := src.Remote() leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false) if err != nil { if err == fs.ErrorDirNotFound { return f.PutUnchecked(ctx, in, src, options...) } return nil, err } // Preflight check the upload, which returns the ID if the // object already exists item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size()) if err != nil { return nil, err } if item == nil { return f.PutUnchecked(ctx, in, src, options...) } // If object exists then create a skeleton one with just id o := &Object{ fs: f, remote: remote, id: item.ID, } return o, o.Update(ctx, in, src, options...) } // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) } // PutUnchecked the object into the container // // This will produce an error if the object already exists. // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() modTime := src.ModTime(ctx) o, _, _, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } return o, o.Update(ctx, in, src, options...) 
} // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, err := f.dirCache.FindDir(ctx, dir, true) return err } // deleteObject removes an object by ID func (f *Fs) deleteObject(ctx context.Context, id string) error { opts := rest.Opts{ Method: "DELETE", Path: "/files/" + id, NoResponse: true, } return f.pacer.Call(func() (bool, error) { resp, err := f.srv.Call(ctx, &opts) return shouldRetry(ctx, resp, err) }) } // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } dc := f.dirCache rootID, err := dc.FindDir(ctx, dir, false) if err != nil { return err } opts := rest.Opts{ Method: "DELETE", Path: "/folders/" + rootID, Parameters: url.Values{}, NoResponse: true, } opts.Parameters.Set("recursive", strconv.FormatBool(!check)) var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.Call(ctx, &opts) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("rmdir failed: %w", err) } f.dirCache.FlushDir(dir) if err != nil { return err } return nil } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return time.Second } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } err := srcObj.readMetaData(ctx) if err != nil { return nil, err } srcPath := srcObj.fs.rootSlash() + srcObj.remote dstPath := f.rootSlash() + remote if strings.EqualFold(srcPath, dstPath) { return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath) } // Create temporary object dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } // check if dest already exists item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size()) if err != nil { return nil, err } if item != nil { // dest already exists, need to copy to temp name and then move tempSuffix := "-rclone-copy-" + random.String(8) fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix) tempObj, err := f.Copy(ctx, src, remote+tempSuffix) if err != nil { return nil, err } fs.Debugf(remote+tempSuffix, "moving to real name %v", remote) err = f.deleteObject(ctx, item.ID) if err != nil { return nil, err } return f.Move(ctx, tempObj, remote) } // Copy the object opts := rest.Opts{ Method: "POST", Path: "/files/" + srcObj.id + "/copy", Parameters: fieldsValue(), } copyFile := api.CopyFile{ Name: f.opt.Enc.FromStandardName(leaf), Parent: api.Parent{ ID: directoryID, }, } var resp *http.Response var info *api.Item err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } err = dstObj.setMetaData(info) if err != nil { return nil, err } return dstObj, nil } // Purge deletes all the files and the container // // Optional interface: Only implement this if you 
have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, false) } // move a file or folder func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) { // Move the object opts := rest.Opts{ Method: "PUT", Path: endpoint + id, Parameters: fieldsValue(), } move := api.UpdateFileMove{ Name: f.opt.Enc.FromStandardName(leaf), Parent: api.Parent{ ID: directoryID, }, } var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/box/upload.go
backend/box/upload.go
// multipart upload for box package box import ( "bytes" "context" "crypto/sha1" "encoding/base64" "encoding/json" "errors" "fmt" "io" "net/http" "strconv" "sync" "time" "github.com/rclone/rclone/backend/box/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/rest" ) // createUploadSession creates an upload session for the object func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) { opts := rest.Opts{ Method: "POST", Path: "/files/upload_sessions", RootURL: uploadURL, } request := api.UploadSessionRequest{ FileSize: size, } // If object has an ID then it is existing so create a new version if o.id != "" { opts.Path = "/files/" + o.id + "/upload_sessions" } else { opts.Path = "/files/upload_sessions" request.FolderID = directoryID request.FileName = o.fs.opt.Enc.FromStandardName(leaf) } var resp *http.Response err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response) return shouldRetry(ctx, resp, err) }) return } // sha1Digest produces a digest using sha1 as per RFC3230 func sha1Digest(digest []byte) string { return "sha=" + base64.StdEncoding.EncodeToString(digest) } // uploadPart uploads a part in an upload session func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn, options ...fs.OpenOption) (response *api.UploadPartResponse, err error) { chunkSize := int64(len(chunk)) sha1sum := sha1.Sum(chunk) opts := rest.Opts{ Method: "PUT", Path: "/files/upload_sessions/" + SessionID, RootURL: uploadURL, ContentType: "application/octet-stream", ContentLength: &chunkSize, ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize), Options: options, ExtraHeaders: map[string]string{ "Digest": sha1Digest(sha1sum[:]), }, } var resp *http.Response err 
= o.fs.pacer.Call(func() (bool, error) { opts.Body = wrap(bytes.NewReader(chunk)) resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } return response, nil } // commitUpload finishes an upload session func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) { opts := rest.Opts{ Method: "POST", Path: "/files/upload_sessions/" + SessionID + "/commit", RootURL: uploadURL, ExtraHeaders: map[string]string{ "Digest": sha1Digest(sha1sum), }, } request := api.CommitUpload{ Parts: parts, } request.Attributes.ContentModifiedAt = api.Time(modTime) request.Attributes.ContentCreatedAt = api.Time(modTime) var body []byte var resp *http.Response // For discussion of this value see: // https://github.com/rclone/rclone/issues/2054 maxTries := o.fs.opt.CommitRetries const defaultDelay = 10 var tries int outer: for tries = range maxTries { err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil) if err != nil { return shouldRetry(ctx, resp, err) } body, err = rest.ReadBody(resp) return shouldRetry(ctx, resp, err) }) delay := defaultDelay var why string if err != nil { // Sometimes we get 400 Error with // parts_mismatch immediately after uploading // the last part. Ignore this error and wait. 
if boxErr, ok := err.(*api.Error); ok && boxErr.Code == "parts_mismatch" { why = err.Error() } else { return nil, err } } else { switch resp.StatusCode { case http.StatusOK, http.StatusCreated: break outer case http.StatusAccepted: why = "not ready yet" delayString := resp.Header.Get("Retry-After") if delayString != "" { delay, err = strconv.Atoi(delayString) if err != nil { fs.Debugf(o, "Couldn't decode Retry-After header %q: %v", delayString, err) delay = defaultDelay } } default: return nil, fmt.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode) } } fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why) time.Sleep(time.Duration(delay) * time.Second) } if tries >= maxTries { return nil, errors.New("too many tries to commit multipart upload - increase --low-level-retries") } err = json.Unmarshal(body, &result) if err != nil { return nil, fmt.Errorf("couldn't decode commit response: %q: %w", body, err) } return result, nil } // abortUpload cancels an upload session func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error) { opts := rest.Opts{ Method: "DELETE", Path: "/files/upload_sessions/" + SessionID, RootURL: uploadURL, NoResponse: true, } var resp *http.Response err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) return shouldRetry(ctx, resp, err) }) return err } // uploadMultipart uploads a file using multipart upload func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time, options ...fs.OpenOption) (err error) { // Create upload session session, err := o.createUploadSession(ctx, leaf, directoryID, size) if err != nil { return fmt.Errorf("multipart upload create session failed: %w", err) } chunkSize := session.PartSize fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize)) // Cancel the 
session if something went wrong defer atexit.OnError(&err, func() { fs.Debugf(o, "Cancelling multipart upload: %v", err) cancelErr := o.abortUpload(ctx, session.ID) if cancelErr != nil { fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr) } })() // unwrap the accounting from the input, we use wrap to put it // back on after the buffering in, wrap := accounting.UnWrap(in) // Upload the chunks remaining := size position := int64(0) parts := make([]api.Part, session.TotalParts) hash := sha1.New() errs := make(chan error, 1) var wg sync.WaitGroup outer: for part := range session.TotalParts { // Check any errors select { case err = <-errs: break outer default: } reqSize := min(remaining, chunkSize) // Make a block of memory buf := make([]byte, reqSize) // Read the chunk _, err = io.ReadFull(in, buf) if err != nil { err = fmt.Errorf("multipart upload failed to read source: %w", err) break outer } // Make the global hash (must be done sequentially) _, _ = hash.Write(buf) // Transfer the chunk wg.Add(1) o.fs.uploadToken.Get() go func(part int, position int64) { defer wg.Done() defer o.fs.uploadToken.Put() fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize)) partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...) 
if err != nil { err = fmt.Errorf("multipart upload failed to upload part: %w", err) select { case errs <- err: default: } return } parts[part] = partResponse.Part }(part, position) // ready for next block remaining -= chunkSize position += chunkSize } wg.Wait() if err == nil { select { case err = <-errs: default: } } if err != nil { return err } // Finalise the upload session result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil)) if err != nil { return fmt.Errorf("multipart upload failed to finalize: %w", err) } if result.TotalCount != 1 || len(result.Entries) != 1 { return fmt.Errorf("multipart upload failed %v - not sure why", o) } return o.setMetaData(&result.Entries[0]) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/box/api/types.go
backend/box/api/types.go
// Package api has type definitions for box // // Converted from the API docs with help from https://mholt.github.io/json-to-go/ package api import ( "encoding/json" "fmt" "time" ) const ( // 2017-05-03T07:26:10-07:00 timeFormat = `"` + time.RFC3339 + `"` ) // Time represents date and time information for the // box API, by using RFC3339 type Time time.Time // MarshalJSON turns a Time into JSON (in UTC) func (t *Time) MarshalJSON() (out []byte, err error) { timeString := (*time.Time)(t).Format(timeFormat) return []byte(timeString), nil } // UnmarshalJSON turns JSON into a Time func (t *Time) UnmarshalJSON(data []byte) error { newT, err := time.Parse(timeFormat, string(data)) if err != nil { return err } *t = Time(newT) return nil } // Error is returned from box when things go wrong type Error struct { Type string `json:"type"` Status int `json:"status"` Code string `json:"code"` ContextInfo json.RawMessage `json:"context_info"` HelpURL string `json:"help_url"` Message string `json:"message"` RequestID string `json:"request_id"` } // Error returns a string for the error and satisfies the error interface func (e *Error) Error() string { out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status) if e.Message != "" { out += ": " + e.Message } if e.ContextInfo != nil { out += fmt.Sprintf(" (%s)", string(e.ContextInfo)) } return out } // Check Error satisfies the error interface var _ error = (*Error)(nil) // ItemFields are the fields needed for FileInfo var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by" // Types of things in Item/ItemMini const ( ItemTypeFolder = "folder" ItemTypeFile = "file" ItemStatusActive = "active" ItemStatusTrashed = "trashed" ItemStatusDeleted = "deleted" ) // ItemMini is a subset of the elements in a full Item returned by some API calls type ItemMini struct { Type string `json:"type"` ID string `json:"id"` SequenceID int64 
`json:"sequence_id,string"` Etag string `json:"etag"` SHA1 string `json:"sha1"` Name string `json:"name"` } // Item describes a folder or a file as returned by Get Folder Items and others type Item struct { Type string `json:"type"` ID string `json:"id"` SequenceID int64 `json:"sequence_id,string"` Etag string `json:"etag"` SHA1 string `json:"sha1"` Name string `json:"name"` Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261 CreatedAt Time `json:"created_at"` ModifiedAt Time `json:"modified_at"` ContentCreatedAt Time `json:"content_created_at"` ContentModifiedAt Time `json:"content_modified_at"` ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted Parent ItemMini `json:"parent"` SharedLink struct { URL string `json:"url,omitempty"` Access string `json:"access,omitempty"` } `json:"shared_link"` OwnedBy struct { Type string `json:"type"` ID string `json:"id"` Name string `json:"name"` Login string `json:"login"` } `json:"owned_by"` } // ModTime returns the modification time of the item func (i *Item) ModTime() (t time.Time) { t = time.Time(i.ContentModifiedAt) if t.IsZero() { t = time.Time(i.ModifiedAt) } return t } // FolderItems is returned from the GetFolderItems call type FolderItems struct { TotalCount int `json:"total_count"` Entries []Item `json:"entries"` Offset int `json:"offset"` Limit int `json:"limit"` NextMarker *string `json:"next_marker,omitempty"` // There is some confusion about how this is actually // returned. The []struct has worked for many years, but in // https://github.com/rclone/rclone/issues/8776 box was // returning it returned not as a list. We don't actually use // this so comment it out. 
// // Order struct { // By string `json:"by"` // Direction string `json:"direction"` // } `json:"order"` // // Order []struct { // By string `json:"by"` // Direction string `json:"direction"` // } `json:"order"` } // Parent defined the ID of the parent directory type Parent struct { ID string `json:"id"` } // CreateFolder is the request for Create Folder type CreateFolder struct { Name string `json:"name"` Parent Parent `json:"parent"` } // UploadFile is the request for Upload File type UploadFile struct { Name string `json:"name"` Parent Parent `json:"parent"` ContentCreatedAt Time `json:"content_created_at"` ContentModifiedAt Time `json:"content_modified_at"` } // PreUploadCheck is the request for upload preflight check type PreUploadCheck struct { Name string `json:"name"` Parent Parent `json:"parent"` Size *int64 `json:"size,omitempty"` } // PreUploadCheckResponse is the response from upload preflight check // if successful type PreUploadCheckResponse struct { UploadToken string `json:"upload_token"` UploadURL string `json:"upload_url"` } // PreUploadCheckConflict is returned in the ContextInfo error field // from PreUploadCheck when the error code is "item_name_in_use" type PreUploadCheckConflict struct { Conflicts ItemMini `json:"conflicts"` } // UpdateFileModTime is used in Update File Info type UpdateFileModTime struct { ContentModifiedAt Time `json:"content_modified_at"` } // UpdateFileMove is the request for Upload File to change name and parent type UpdateFileMove struct { Name string `json:"name"` Parent Parent `json:"parent"` } // CopyFile is the request for Copy File type CopyFile struct { Name string `json:"name"` Parent Parent `json:"parent"` } // CreateSharedLink is the request for Public Link type CreateSharedLink struct { SharedLink struct { URL string `json:"url,omitempty"` Access string `json:"access,omitempty"` } `json:"shared_link"` } // UploadSessionRequest is uses in Create Upload Session type UploadSessionRequest struct { FolderID string 
`json:"folder_id,omitempty"` // don't pass for update FileSize int64 `json:"file_size"` FileName string `json:"file_name,omitempty"` // optional for update } // UploadSessionResponse is returned from Create Upload Session type UploadSessionResponse struct { TotalParts int `json:"total_parts"` PartSize int64 `json:"part_size"` SessionEndpoints struct { ListParts string `json:"list_parts"` Commit string `json:"commit"` UploadPart string `json:"upload_part"` Status string `json:"status"` Abort string `json:"abort"` } `json:"session_endpoints"` SessionExpiresAt Time `json:"session_expires_at"` ID string `json:"id"` Type string `json:"type"` NumPartsProcessed int `json:"num_parts_processed"` } // Part defines the return from upload part call which are passed to commit upload also type Part struct { PartID string `json:"part_id"` Offset int64 `json:"offset"` Size int64 `json:"size"` Sha1 string `json:"sha1"` } // UploadPartResponse is returned from the upload part call type UploadPartResponse struct { Part Part `json:"part"` } // CommitUpload is used in the Commit Upload call type CommitUpload struct { Parts []Part `json:"parts"` Attributes struct { ContentCreatedAt Time `json:"content_created_at"` ContentModifiedAt Time `json:"content_modified_at"` } `json:"attributes"` } // ConfigJSON defines the shape of a box config.json type ConfigJSON struct { BoxAppSettings AppSettings `json:"boxAppSettings"` EnterpriseID string `json:"enterpriseID"` } // AppSettings defines the shape of the boxAppSettings within box config.json type AppSettings struct { ClientID string `json:"clientID"` ClientSecret string `json:"clientSecret"` AppAuth AppAuth `json:"appAuth"` } // AppAuth defines the shape of the appAuth within boxAppSettings in config.json type AppAuth struct { PublicKeyID string `json:"publicKeyID"` PrivateKey string `json:"privateKey"` Passphrase string `json:"passphrase"` } // User is returned from /users/me type User struct { Type string `json:"type"` ID string `json:"id"` 
Name string `json:"name"` Login string `json:"login"` CreatedAt time.Time `json:"created_at"` ModifiedAt time.Time `json:"modified_at"` Language string `json:"language"` Timezone string `json:"timezone"` SpaceAmount float64 `json:"space_amount"` SpaceUsed float64 `json:"space_used"` MaxUploadSize float64 `json:"max_upload_size"` Status string `json:"status"` JobTitle string `json:"job_title"` Phone string `json:"phone"` Address string `json:"address"` AvatarURL string `json:"avatar_url"` } // FileTreeChangeEventTypes are the events that can require cache invalidation var FileTreeChangeEventTypes = map[string]struct{}{ "ITEM_COPY": {}, "ITEM_CREATE": {}, "ITEM_MAKE_CURRENT_VERSION": {}, "ITEM_MODIFY": {}, "ITEM_MOVE": {}, "ITEM_RENAME": {}, "ITEM_TRASH": {}, "ITEM_UNDELETE_VIA_TRASH": {}, "ITEM_UPLOAD": {}, } // Event is an array element in the response returned from /events type Event struct { EventType string `json:"event_type"` EventID string `json:"event_id"` Source Item `json:"source"` } // Events is returned from /events type Events struct { ChunkSize int64 `json:"chunk_size"` Entries []Event `json:"entries"` NextStreamPosition int64 `json:"next_stream_position"` }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/fadvise_unix.go
backend/local/fadvise_unix.go
//go:build linux package local import ( "io" "os" "github.com/rclone/rclone/fs" "golang.org/x/sys/unix" ) // fadvise provides means to automate freeing pages in kernel page cache for // a given file descriptor as the file is sequentially processed (read or // written). // // When copying a file to a remote backend all the file content is read by // kernel and put to page cache to make future reads faster. // This causes memory pressure visible in both memory usage and CPU consumption // and can even cause OOM errors in applications consuming large amounts memory. // // In case of an upload to a remote backend, there is no benefits from caching. // // fadvise would orchestrate calling POSIX_FADV_DONTNEED // // POSIX_FADV_DONTNEED attempts to free cached pages associated // with the specified region. This is useful, for example, while // streaming large files. A program may periodically request the // kernel to free cached data that has already been used, so that // more useful cached pages are not discarded instead. // // Requests to discard partial pages are ignored. It is // preferable to preserve needed data than discard unneeded data. // If the application requires that data be considered for // discarding, then offset and len must be page-aligned. // // The implementation may attempt to write back dirty pages in // the specified region, but this is not guaranteed. Any // unwritten dirty pages will not be freed. If the application // wishes to ensure that dirty pages will be released, it should // call fsync(2) or fdatasync(2) first. 
type fadvise struct { o *Object fd int lastPos int64 curPos int64 windowSize int64 freePagesCh chan offsetLength doneCh chan struct{} } type offsetLength struct { offset int64 length int64 } const ( defaultAllowPages = 32 defaultWorkerQueueSize = 64 ) func newFadvise(o *Object, fd int, offset int64) *fadvise { f := &fadvise{ o: o, fd: fd, lastPos: offset, curPos: offset, windowSize: int64(os.Getpagesize()) * defaultAllowPages, freePagesCh: make(chan offsetLength, defaultWorkerQueueSize), doneCh: make(chan struct{}), } go f.worker() return f } // sequential configures readahead strategy in Linux kernel. // // Under Linux, POSIX_FADV_NORMAL sets the readahead window to the // default size for the backing device; POSIX_FADV_SEQUENTIAL doubles // this size, and POSIX_FADV_RANDOM disables file readahead entirely. func (f *fadvise) sequential(limit int64) bool { l := int64(0) if limit > 0 { l = limit } if err := unix.Fadvise(f.fd, f.curPos, l, unix.FADV_SEQUENTIAL); err != nil { fs.Debugf(f.o, "fadvise sequential failed on file descriptor %d: %s", f.fd, err) return false } return true } func (f *fadvise) next(n int) { f.curPos += int64(n) f.freePagesIfNeeded() } func (f *fadvise) freePagesIfNeeded() { if f.curPos >= f.lastPos+f.windowSize { f.freePages() } } func (f *fadvise) freePages() { f.freePagesCh <- offsetLength{f.lastPos, f.curPos - f.lastPos} f.lastPos = f.curPos } func (f *fadvise) worker() { for p := range f.freePagesCh { if err := unix.Fadvise(f.fd, p.offset, p.length, unix.FADV_DONTNEED); err != nil { fs.Debugf(f.o, "fadvise dontneed failed on file descriptor %d: %s", f.fd, err) } } close(f.doneCh) } func (f *fadvise) wait() { close(f.freePagesCh) <-f.doneCh } type fadviseReadCloser struct { *fadvise inner io.ReadCloser } // newFadviseReadCloser wraps os.File so that reading from that file would // remove already consumed pages from kernel page cache. 
// In addition to that it instructs kernel to double the readahead window to // make sequential reads faster. // See also fadvise. func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser { r := fadviseReadCloser{ fadvise: newFadvise(o, int(f.Fd()), offset), inner: f, } // If syscall failed it's likely that the subsequent syscalls to that // file descriptor would also fail. In that case return the provided os.File // pointer. if !r.sequential(limit) { r.wait() return f } return r } func (f fadviseReadCloser) Read(p []byte) (n int, err error) { n, err = f.inner.Read(p) f.next(n) return } func (f fadviseReadCloser) Close() error { f.freePages() f.wait() return f.inner.Close() }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/xattr.go
backend/local/xattr.go
//go:build !openbsd && !plan9 package local import ( "fmt" "strings" "syscall" "github.com/pkg/xattr" "github.com/rclone/rclone/fs" ) const ( xattrPrefix = "user." // FIXME is this correct for all unixes? xattrSupported = xattr.XATTR_SUPPORTED ) // Check to see if the error supplied is a not supported error, and if // so, disable xattrs func (f *Fs) xattrIsNotSupported(err error) bool { xattrErr, ok := err.(*xattr.Error) if !ok { return false } // Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris) if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR { // Show xattrs not supported if f.xattrSupported.CompareAndSwap(1, 0) { fs.Errorf(f, "xattrs not supported - disabling: %v", err) } return true } return false } // getXattr returns the extended attributes for an object // // It doesn't return any attributes owned by this backend in // metadataKeys func (o *Object) getXattr() (metadata fs.Metadata, err error) { if !xattrSupported || o.fs.xattrSupported.Load() == 0 { return nil, nil } var list []string if o.fs.opt.FollowSymlinks { list, err = xattr.List(o.path) } else { list, err = xattr.LList(o.path) } if err != nil { if o.fs.xattrIsNotSupported(err) { return nil, nil } return nil, fmt.Errorf("failed to read xattr: %w", err) } if len(list) == 0 { return nil, nil } metadata = make(fs.Metadata, len(list)) for _, k := range list { var v []byte if o.fs.opt.FollowSymlinks { v, err = xattr.Get(o.path, k) } else { v, err = xattr.LGet(o.path, k) } if err != nil { if o.fs.xattrIsNotSupported(err) { return nil, nil } return nil, fmt.Errorf("failed to read xattr key %q: %w", k, err) } k = strings.ToLower(k) if !strings.HasPrefix(k, xattrPrefix) { continue } k = k[len(xattrPrefix):] if _, found := systemMetadataInfo[k]; found { continue } metadata[k] = string(v) } return metadata, nil } // setXattr sets the metadata on the file Xattrs // // It doesn't set any attributes owned by this backend in metadataKeys func 
(o *Object) setXattr(metadata fs.Metadata) (err error) { if !xattrSupported || o.fs.xattrSupported.Load() == 0 { return nil } for k, value := range metadata { k = strings.ToLower(k) if _, found := systemMetadataInfo[k]; found { continue } k = xattrPrefix + k v := []byte(value) if o.fs.opt.FollowSymlinks { err = xattr.Set(o.path, k, v) } else { err = xattr.LSet(o.path, k, v) } if err != nil { if o.fs.xattrIsNotSupported(err) { return nil } return fmt.Errorf("failed to set xattr key %q: %w", k, err) } } return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/local.go
backend/local/local.go
// Package local provides a filesystem interface package local import ( "bytes" "context" "errors" "fmt" "io" iofs "io/fs" "os" "path" "path/filepath" "runtime" "strings" "sync" "sync/atomic" "time" "unicode/utf8" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/file" "github.com/rclone/rclone/lib/readers" "golang.org/x/text/unicode/norm" ) // Constants const ( devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly ) // timeType allows the user to choose what exactly ModTime() returns type timeType = fs.Enum[timeTypeChoices] const ( mTime timeType = iota aTime bTime cTime ) type timeTypeChoices struct{} func (timeTypeChoices) Choices() []string { return []string{ mTime: "mtime", aTime: "atime", bTime: "btime", cTime: "ctime", } } // Register with Fs func init() { fsi := &fs.RegInfo{ Name: "local", Description: "Local Disk", NewFs: NewFs, CommandHelp: commandHelp, MetadataInfo: &fs.MetadataInfo{ System: systemMetadataInfo, Help: `Depending on which OS is in use the local backend may return only some of the system metadata. Setting system metadata is supported on all OSes but setting user metadata is only supported on linux, freebsd, netbsd, macOS and Solaris. It is **not** supported on Windows yet ([see pkg/attrs#47](https://github.com/pkg/xattr/issues/47)). User metadata is stored as extended attributes (which may not be supported by all file systems) under the "user.*" prefix. Metadata is supported on files and directories. 
`, }, Options: []fs.Option{ { Name: "nounc", Help: "Disable UNC (long path names) conversion on Windows.", Default: false, Advanced: runtime.GOOS != "windows", Examples: []fs.OptionExample{{ Value: "true", Help: "Disables long file names.", }}, }, { Name: "copy_links", Help: "Follow symlinks and copy the pointed to item.", Default: false, NoPrefix: true, ShortOpt: "L", Advanced: true, }, { Name: "links", Help: "Translate symlinks to/from regular files with a '" + fs.LinkSuffix + "' extension for the local backend.", Default: false, Advanced: true, }, { Name: "skip_links", Help: `Don't warn about skipped symlinks. This flag disables warning messages on skipped symlinks or junction points, as you explicitly acknowledge that they should be skipped.`, Default: false, NoPrefix: true, Advanced: true, }, { Name: "skip_specials", Help: `Don't warn about skipped pipes, sockets and device objects. This flag disables warning messages on skipped pipes, sockets and device objects, as you explicitly acknowledge that they should be skipped.`, Default: false, NoPrefix: true, Advanced: true, }, { Name: "zero_size_links", Help: `Assume the Stat size of links is zero (and read them instead) (deprecated). Rclone used to use the Stat size of links as the link size, but this fails in quite a few places: - Windows - On some virtual filesystems (such ash LucidLink) - Android So rclone now always reads the link. `, Default: false, Advanced: true, }, { Name: "unicode_normalization", Help: `Apply unicode NFC normalization to paths and filenames. This flag can be used to normalize file names into unicode NFC form that are read from the local filesystem. Rclone does not normally touch the encoding of file names it reads from the file system. This can be useful when using macOS as it normally provides decomposed (NFD) unicode which in some language (eg Korean) doesn't display properly on some OSes. 
Note that rclone compares filenames with unicode normalization in the sync routine so this flag shouldn't normally be used.`, Default: false, Advanced: true, }, { Name: "no_check_updated", Help: `Don't check to see if the files change during upload. Normally rclone checks the size and modification time of files as they are being uploaded and aborts with a message which starts "can't copy - source file is being updated" if the file changes during upload. However on some file systems this modification time check may fail (e.g. [Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this check can be disabled with this flag. If this flag is set, rclone will use its best efforts to transfer a file which is being updated. If the file is only having things appended to it (e.g. a log) then rclone will transfer the log file with the size it had the first time rclone saw it. If the file is being modified throughout (not just appended to) then the transfer may fail with a hash check failure. In detail, once the file has had stat() called on it for the first time we: - Only transfer the size that stat gave - Only checksum the size that stat gave - Don't update the stat info for the file **NB** do not use this flag on a Windows Volume Shadow (VSS). For some unknown reason, files in a VSS sometimes show different sizes from the directory listing (where the initial stat value comes from on Windows) and when stat is called on them directly. Other copy tools always use the direct stat value and setting this flag will disable that. `, Default: false, Advanced: true, }, { Name: "one_file_system", Help: "Don't cross filesystem boundaries (unix/macOS only).", Default: false, NoPrefix: true, ShortOpt: "x", Advanced: true, }, { Name: "case_sensitive", Help: `Force the filesystem to report itself as case sensitive. Normally the local backend declares itself as case insensitive on Windows/macOS and case sensitive for everything else. 
Use this flag to override the default choice.`, Default: false, Advanced: true, }, { Name: "case_insensitive", Help: `Force the filesystem to report itself as case insensitive. Normally the local backend declares itself as case insensitive on Windows/macOS and case sensitive for everything else. Use this flag to override the default choice.`, Default: false, Advanced: true, }, { Name: "no_clone", Help: `Disable reflink cloning for server-side copies. Normally, for local-to-local transfers, rclone will "clone" the file when possible, and fall back to "copying" only when cloning is not supported. Cloning creates a shallow copy (or "reflink") which initially shares blocks with the original file. Unlike a "hardlink", the two files are independent and neither will affect the other if subsequently modified. Cloning is usually preferable to copying, as it is much faster and is deduplicated by default (i.e. having two identical files does not consume more storage than having just one.) However, for use cases where data redundancy is preferable, --local-no-clone can be used to disable cloning and force "deep" copies. Currently, cloning is only supported when using APFS on macOS (support for other platforms may be added in the future.)`, Default: false, Advanced: true, }, { Name: "no_preallocate", Help: `Disable preallocation of disk space for transferred files. Preallocation of disk space helps prevent filesystem fragmentation. However, some virtual filesystem layers (such as Google Drive File Stream) may incorrectly set the actual file size equal to the preallocated space, causing checksum and file size checks to fail. Use this flag to disable preallocation.`, Default: false, Advanced: true, }, { Name: "no_sparse", Help: `Disable sparse files for multi-thread downloads. On Windows platforms rclone will make sparse files when doing multi-thread downloads. This avoids long pauses on large files where the OS zeros the file. 
However sparse files may be undesirable as they cause disk fragmentation and can be slow to work with.`, Default: false, Advanced: true, }, { Name: "no_set_modtime", Help: `Disable setting modtime. Normally rclone updates modification time of files after they are done uploading. This can cause permissions issues on Linux platforms when the user rclone is running as does not own the file uploaded, such as when copying to a CIFS mount owned by another user. If this option is enabled, rclone will no longer update the modtime after copying a file.`, Default: false, Advanced: true, }, { Name: "time_type", Help: `Set what kind of time is returned. Normally rclone does all operations on the mtime or Modification time. If you set this flag then rclone will return the Modified time as whatever you set here. So if you use "rclone lsl --local-time-type ctime" then you will see ctimes in the listing. If the OS doesn't support returning the time_type specified then rclone will silently replace it with the modification time which all OSes support. - mtime is supported by all OSes - atime is supported on all OSes except: plan9, js - btime is only supported on: Windows, macOS, freebsd, netbsd - ctime is supported on all Oses except: Windows, plan9, js Note that setting the time will still set the modified time so this is only useful for reading. 
`, Default: mTime, Advanced: true, Examples: []fs.OptionExample{{ Value: mTime.String(), Help: "The last modification time.", }, { Value: aTime.String(), Help: "The last access time.", }, { Value: bTime.String(), Help: "The creation time.", }, { Value: cTime.String(), Help: "The last status change time.", }}, }, { Name: "hashes", Help: `Comma separated list of supported checksum types.`, Default: fs.CommaSepList{}, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: encoder.OS, }, }, } fs.Register(fsi) } // Options defines the configuration for this backend type Options struct { FollowSymlinks bool `config:"copy_links"` TranslateSymlinks bool `config:"links"` SkipSymlinks bool `config:"skip_links"` SkipSpecials bool `config:"skip_specials"` UTFNorm bool `config:"unicode_normalization"` NoCheckUpdated bool `config:"no_check_updated"` NoUNC bool `config:"nounc"` OneFileSystem bool `config:"one_file_system"` CaseSensitive bool `config:"case_sensitive"` CaseInsensitive bool `config:"case_insensitive"` NoPreAllocate bool `config:"no_preallocate"` NoSparse bool `config:"no_sparse"` NoSetModTime bool `config:"no_set_modtime"` TimeType timeType `config:"time_type"` Hashes fs.CommaSepList `config:"hashes"` Enc encoder.MultiEncoder `config:"encoding"` NoClone bool `config:"no_clone"` } // Fs represents a local filesystem rooted at root type Fs struct { name string // the name of the remote root string // The root directory (OS path) opt Options // parsed config options features *fs.Features // optional features dev uint64 // device number of root node precisionOk sync.Once // Whether we need to read the precision precision time.Duration // precision of local filesystem warnedMu sync.Mutex // used for locking access to 'warned'. 
warned map[string]struct{} // whether we have warned about this string xattrSupported atomic.Int32 // whether xattrs are supported // do os.Lstat or os.Stat lstat func(name string) (os.FileInfo, error) objectMetaMu sync.RWMutex // global lock for Object metadata } // Object represents a local filesystem object type Object struct { fs *Fs // The Fs this object is part of remote string // The remote path (encoded path) path string // The local path (OS path) // When using these items the fs.objectMetaMu must be held size int64 // file metadata - always present mode os.FileMode modTime time.Time hashes map[hash.Type]string // Hashes // these are read only and don't need the mutex held translatedLink bool // Is this object a translated link } // Directory represents a local filesystem directory type Directory struct { Object } // ------------------------------------------------------------ var ( errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links") errLinksNeedsSuffix = errors.New("need \"" + fs.LinkSuffix + "\" suffix to refer to symlink when using -l/--links") ) // NewFs constructs an Fs from the path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { ci := fs.GetConfig(ctx) // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } // Override --local-links with --links if set if ci.Links { opt.TranslateSymlinks = true } if opt.TranslateSymlinks && opt.FollowSymlinks { return nil, errLinksAndCopyLinks } f := &Fs{ name: name, opt: *opt, warned: make(map[string]struct{}), dev: devUnset, lstat: os.Lstat, } if xattrSupported { f.xattrSupported.Store(1) } f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc) f.features = (&fs.Features{ CaseInsensitive: f.caseInsensitive(), CanHaveEmptyDirectories: true, IsLocal: true, SlowHash: true, ReadMetadata: true, WriteMetadata: true, ReadDirMetadata: true, WriteDirMetadata: true, WriteDirSetModTime: true, 
UserDirMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported DirModTimeUpdatesOnWrite: true, UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported FilterAware: true, PartialUploads: true, }).Fill(ctx, f) if opt.FollowSymlinks { f.lstat = os.Stat } if opt.NoClone { // Disable server-side copy when --local-no-clone is set f.features.Copy = nil } // Check to see if this points to a file fi, err := f.lstat(f.root) if err == nil { f.dev = readDevice(fi, f.opt.OneFileSystem) } // Check to see if this is a .rclonelink if not found hasLinkSuffix := strings.HasSuffix(f.root, fs.LinkSuffix) if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) { fi, err = f.lstat(strings.TrimSuffix(f.root, fs.LinkSuffix)) } if err == nil && f.isRegular(fi.Mode()) { // Handle the odd case, that a symlink was specified by name without the link suffix if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 { return nil, errLinksNeedsSuffix } // It is a file, so use the parent as the root f.root = filepath.Dir(f.root) // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // Determine whether a file is a 'regular' file, // Symlinks are regular files, only if the TranslateSymlink // option is in-effect func (f *Fs) isRegular(mode os.FileMode) bool { if !f.opt.TranslateSymlinks { return mode.IsRegular() } // fi.Mode().IsRegular() tests that all mode bits are zero // Since symlinks are accepted, test that all other bits are zero, // except the symlink bit return mode&os.ModeType&^os.ModeSymlink == 0 } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.opt.Enc.ToStandardPath(filepath.ToSlash(f.root)) } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("Local file system at %s", f.Root()) } 
// Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // caseInsensitive returns whether the remote is case insensitive or not func (f *Fs) caseInsensitive() bool { if f.opt.CaseSensitive { return false } if f.opt.CaseInsensitive { return true } // FIXME not entirely accurate since you can have case // sensitive Fses on darwin and case insensitive Fses on linux. // Should probably check but that would involve creating a // file in the remote to be most accurate which probably isn't // desirable. return runtime.GOOS == "windows" || runtime.GOOS == "darwin" } // translateLink checks whether the remote is a translated link // and returns a new path, removing the suffix as needed, // It also returns whether this is a translated link at all // // for regular files, localPath is returned unchanged func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) { isTranslatedLink = strings.HasSuffix(remote, fs.LinkSuffix) newLocalPath = strings.TrimSuffix(localPath, fs.LinkSuffix) return newLocalPath, isTranslatedLink } // newObject makes a half completed Object func (f *Fs) newObject(remote string) *Object { translatedLink := false localPath := f.localPath(remote) if f.opt.TranslateSymlinks { // Possibly receive a new name for localPath localPath, translatedLink = translateLink(remote, localPath) } return &Object{ fs: f, remote: remote, path: localPath, translatedLink: translatedLink, } } // Return an Object from a path // // May return nil if an error occurred func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, error) { o := f.newObject(remote) if info != nil { o.setMetadata(info) } else { err := o.lstat() if err != nil { if os.IsNotExist(err) { return nil, fs.ErrorObjectNotFound } if os.IsPermission(err) { return nil, fs.ErrorPermissionDenied } return nil, err } // Handle the odd case, that a symlink was specified by name without the link suffix if 
o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink { return nil, fs.ErrorObjectNotFound } } if o.mode.IsDir() { return nil, fs.ErrorIsDir } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(remote, nil) } // Create new directory object from the info passed in func (f *Fs) newDirectory(dir string, fi os.FileInfo) *Directory { o := f.newObject(dir) o.setMetadata(fi) return &Directory{ Object: *o, } } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { filter, useFilter := filter.GetConfig(ctx), filter.GetUseFilter(ctx) fsDirPath := f.localPath(dir) _, err = os.Stat(fsDirPath) if err != nil { return nil, fs.ErrorDirNotFound } fd, err := os.Open(fsDirPath) if err != nil { isPerm := os.IsPermission(err) err = fmt.Errorf("failed to open directory %q: %w", dir, err) fs.Errorf(dir, "%v", err) if isPerm { _ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err)) err = nil // ignore error but fail sync } return nil, err } defer func() { cerr := fd.Close() if cerr != nil && err == nil { err = fmt.Errorf("failed to close directory %q:: %w", dir, cerr) } }() for { var fis []os.FileInfo if useReadDir { // Windows and Plan9 read the directory entries with the stat information in which // shouldn't fail because of unreadable entries. 
fis, err = fd.Readdir(1024) if err == io.EOF && len(fis) == 0 { break } } else { // For other OSes we read the names only (which shouldn't fail) then stat the // individual ourselves so we can log errors but not fail the directory read. var names []string names, err = fd.Readdirnames(1024) if err == io.EOF && len(names) == 0 { break } if err == nil { for _, name := range names { namepath := filepath.Join(fsDirPath, name) fi, fierr := os.Lstat(namepath) if os.IsNotExist(fierr) { // skip entry removed by a concurrent goroutine continue } if fierr != nil { // Don't report errors on any file names that are excluded if useFilter { newRemote := f.cleanRemote(dir, name) if !filter.IncludeRemote(newRemote) { continue } } fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr) fs.Errorf(dir, "%v", fierr) _ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync continue } fis = append(fis, fi) } } } if err != nil { return nil, fmt.Errorf("failed to read directory entry: %w", err) } for _, fi := range fis { name := fi.Name() mode := fi.Mode() newRemote := f.cleanRemote(dir, name) symlinkFlag := os.ModeSymlink if runtime.GOOS == "windows" { symlinkFlag |= os.ModeIrregular } // Follow symlinks if required if f.opt.FollowSymlinks && (mode&symlinkFlag) != 0 { localPath := filepath.Join(fsDirPath, name) fi, err = os.Stat(localPath) // Quietly skip errors on excluded files and directories if err != nil && useFilter && !filter.IncludeRemote(newRemote) { continue } if os.IsNotExist(err) || isCircularSymlinkError(err) { // Skip bad symlinks and circular symlinks err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err)) fs.Errorf(newRemote, "Listing error: %v", err) err = accounting.Stats(ctx).Error(err) continue } if err != nil { return nil, err } mode = fi.Mode() } if fi.IsDir() { // Ignore directories which are symlinks. These are junction points under windows which // are kind of a souped up symlink. 
Unix doesn't have directories which are symlinks. if (mode&symlinkFlag) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) { d := f.newDirectory(newRemote, fi) entries = append(entries, d) } } else { // Check whether this link should be translated if f.opt.TranslateSymlinks && fi.Mode()&symlinkFlag != 0 { newRemote += fs.LinkSuffix } // Don't include non directory if not included // we leave directory filtering to the layer above if useFilter && !filter.IncludeRemote(newRemote) { continue } fso, err := f.newObjectWithInfo(newRemote, fi) if err != nil { return nil, err } if fso.Storable() { entries = append(entries, fso) } } } } return entries, nil } func (f *Fs) cleanRemote(dir, filename string) (remote string) { if f.opt.UTFNorm { filename = norm.NFC.String(filename) } remote = path.Join(dir, f.opt.Enc.ToStandardName(filename)) if !utf8.ValidString(filename) { f.warnedMu.Lock() if _, ok := f.warned[remote]; !ok { fs.Logf(f, "Replacing invalid UTF-8 characters in %q", remote) f.warned[remote] = struct{}{} } f.warnedMu.Unlock() } return } func (f *Fs) localPath(name string) string { return filepath.Join(f.root, filepath.FromSlash(f.opt.Enc.FromStandardPath(name))) } // Put the Object to the local filesystem func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // Temporary Object under construction - info filled in by Update() o := f.newObject(src.Remote()) err := o.Update(ctx, in, src, options...) if err != nil { return nil, err } return o, nil } // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) 
} // Mkdir creates the directory if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { localPath := f.localPath(dir) err := file.MkdirAll(localPath, 0777) if err != nil { return err } if dir == "" { fi, err := f.lstat(localPath) if err != nil { return err } f.dev = readDevice(fi, f.opt.OneFileSystem) } return nil } // DirSetModTime sets the directory modtime for dir func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error { o := Object{ fs: f, remote: dir, path: f.localPath(dir), } return o.SetModTime(ctx, modTime) } // MkdirMetadata makes the directory passed in as dir. // // It shouldn't return an error if it already exists. // // If the metadata is not nil it is set. // // It returns the directory that was created. func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) { // Find and or create the directory localPath := f.localPath(dir) fi, err := f.lstat(localPath) if errors.Is(err, os.ErrNotExist) { err := f.Mkdir(ctx, dir) if err != nil { return nil, fmt.Errorf("mkdir metadata: failed make directory: %w", err) } fi, err = f.lstat(localPath) if err != nil { return nil, fmt.Errorf("mkdir metadata: failed to read info: %w", err) } } else if err != nil { return nil, err } // Create directory object d := f.newDirectory(dir, fi) // Set metadata on the directory object if provided if metadata != nil { err = d.writeMetadata(metadata) if err != nil { return nil, fmt.Errorf("failed to set metadata on directory: %w", err) } // Re-read info now we have finished setting stuff err = d.lstat() if err != nil { return nil, fmt.Errorf("mkdir metadata: failed to re-read info: %w", err) } } return d, nil } // Rmdir removes the directory // // If it isn't empty it will return an error func (f *Fs) Rmdir(ctx context.Context, dir string) error { localPath := f.localPath(dir) if fi, err := os.Stat(localPath); err != nil { return err } else if !fi.IsDir() { return fs.ErrorIsFile } 
err := os.Remove(localPath) if runtime.GOOS == "windows" && errors.Is(err, iofs.ErrPermission) { // https://github.com/golang/go/issues/26295 if os.Chmod(localPath, 0o600) == nil { err = os.Remove(localPath) } } return err } // Precision of the file system func (f *Fs) Precision() (precision time.Duration) { if f.opt.NoSetModTime { return fs.ModTimeNotSupported } f.precisionOk.Do(func() { f.precision = f.readPrecision() }) return f.precision } // Read the precision func (f *Fs) readPrecision() (precision time.Duration) { // Default precision of 1s precision = time.Second // Create temporary file and test it fd, err := os.CreateTemp("", "rclone") if err != nil { // If failed return 1s // fmt.Println("Failed to create temp file", err) return time.Second } path := fd.Name() // fmt.Println("Created temp file", path) err = fd.Close() if err != nil { return time.Second } // Delete it on return defer func() { // fmt.Println("Remove temp file") _ = os.Remove(path) // ignore error }() // Find the minimum duration we can detect for duration := time.Duration(1); duration < time.Second; duration *= 10 { // Current time with delta t := time.Unix(time.Now().Unix(), int64(duration)) err := os.Chtimes(path, t, t) if err != nil { // fmt.Println("Failed to Chtimes", err) break } // Read the actual time back fi, err := os.Stat(path) if err != nil { // fmt.Println("Failed to Stat", err) break } // If it matches - have found the precision // fmt.Println("compare", fi.ModTime(ctx), t) if fi.ModTime().Equal(t) { // fmt.Println("Precision detected as", duration) return duration } } return } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } // Temporary Object under construction dstObj := f.newObject(remote) dstObj.fs.objectMetaMu.RLock() dstObjMode := dstObj.mode dstObj.fs.objectMetaMu.RUnlock() // Check it is a file if it exists err := dstObj.lstat() if os.IsNotExist(err) { // OK } else if err != nil { return nil, err } else if !dstObj.fs.isRegular(dstObjMode) { // It isn't a file return nil, errors.New("can't move file onto non-file") } // Create destination err = dstObj.mkdirAll() if err != nil { return nil, err } // Fetch metadata if --metadata is in use meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx)) if err != nil { return nil, fmt.Errorf("move: failed to read metadata: %w", err) } // Do the move err = os.Rename(srcObj.path, dstObj.path) if os.IsNotExist(err) { // race condition, source was deleted in the meantime return nil, err } else if os.IsPermission(err) { // not enough rights to write to dst return nil, err } else if err != nil { // not quite clear, but probably trying to move a file across file system // boundaries. Copying might still work. fs.Debugf(src, "Can't move: %v: trying copy", err) return nil, fs.ErrorCantMove } // Set metadata if --metadata is in use err = dstObj.writeMetadata(meta) if err != nil { return nil, fmt.Errorf("move: failed to set metadata: %w", err) } // Update the info err = dstObj.lstat() if err != nil { return nil, err } return dstObj, nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } srcPath := srcFs.localPath(srcRemote) dstPath := f.localPath(dstRemote) // Check if destination exists _, err := os.Lstat(dstPath) if !os.IsNotExist(err) { return fs.ErrorDirExists } // Create parent of destination dstParentPath := filepath.Dir(dstPath) err = file.MkdirAll(dstParentPath, 0777) if err != nil { return err } // Do the move err = os.Rename(srcPath, dstPath) if os.IsNotExist(err) { // race condition, source was deleted in the meantime return err } else if os.IsPermission(err) { // not enough rights to write to dst return err } else if err != nil { // not quite clear, but probably trying to move directory across file system // boundaries. Copying might still work. fs.Debugf(src, "Can't move dir: %v: trying copy", err) return fs.ErrorCantDirMove } return nil } // Hashes returns the supported hash sets. func (f *Fs) Hashes() hash.Set { if len(f.opt.Hashes) > 0 { // Return only configured hashes. // Note: Could have used hash.SupportOnly to limit supported hashes for all hash related features. 
var supported hash.Set for _, hashName := range f.opt.Hashes { var ht hash.Type if err := ht.Set(hashName); err != nil { fs.Infof(nil, "Invalid token %q in hash string %q", hashName, f.opt.Hashes.String()) } supported.Add(ht) } return supported } return hash.Supported() } var commandHelp = []fs.CommandHelp{ { Name: "noop", Short: "A null operation for testing backend commands.", Long: `This is a test command which has some options you can try to change the output.`, Opts: map[string]string{ "echo": "Echo the input arguments.", "error": "Return an error based on option value.", }, }, } // Command the backend to run a named command // // The command run is name // args may be used to read arguments from // opts may be used to read optional arguments from //
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/clone_darwin.go
backend/local/clone_darwin.go
//go:build darwin && cgo // Package local provides a filesystem interface package local import ( "context" "fmt" "path/filepath" "runtime" "github.com/go-darwin/apfs" "github.com/rclone/rclone/fs" ) // Copy src to this remote using server-side copy operations. // // # This is stored with the remote path given // // # It returns the destination Object and a possible error // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { if runtime.GOOS != "darwin" || f.opt.NoClone { return nil, fs.ErrorCantCopy } srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't clone - not same remote type") return nil, fs.ErrorCantCopy } if f.opt.TranslateSymlinks && srcObj.translatedLink { // in --links mode, use cloning only for regular files return nil, fs.ErrorCantCopy } // Fetch metadata if --metadata is in use meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx)) if err != nil { return nil, fmt.Errorf("copy: failed to read metadata: %w", err) } // Create destination dstObj := f.newObject(remote) err = dstObj.mkdirAll() if err != nil { return nil, err } srcPath := srcObj.path if f.opt.FollowSymlinks { // in --copy-links mode, find the real file being pointed to and pass that in instead srcPath, err = filepath.EvalSymlinks(srcPath) if err != nil { return nil, err } } err = Clone(srcPath, f.localPath(remote)) if err != nil { return nil, err } // Set metadata if --metadata is in use if meta != nil { err = dstObj.writeMetadata(meta) if err != nil { return nil, fmt.Errorf("copy: failed to set metadata: %w", err) } } return f.NewObject(ctx, remote) } // Clone uses APFS cloning if possible, otherwise falls back to copying (with full metadata preservation) // note that this is closely related to unix.Clonefile(src, dst, unix.CLONE_NOFOLLOW) but not 100% identical // 
https://opensource.apple.com/source/copyfile/copyfile-173.40.2/copyfile.c.auto.html func Clone(src, dst string) error { state := apfs.CopyFileStateAlloc() defer func() { if err := apfs.CopyFileStateFree(state); err != nil { fs.Errorf(dst, "free state error: %v", err) } }() cloned, err := apfs.CopyFile(src, dst, state, apfs.COPYFILE_CLONE) fs.Debugf(dst, "isCloned: %v, error: %v", cloned, err) return err } // Check the interfaces are satisfied var ( _ fs.Copier = &Fs{} )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/lchtimes_windows.go
backend/local/lchtimes_windows.go
//go:build windows package local import ( "time" ) const haveLChtimes = true // lChtimes changes the access and modification times of the named // link, similar to the Unix utime() or utimes() functions. // // The underlying filesystem may truncate or round the values to a // less precise time unit. // If there is an error, it will be of type *PathError. func lChtimes(name string, atime time.Time, mtime time.Time) error { return setTimes(name, atime, mtime, time.Time{}, true) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/local_internal_windows_test.go
backend/local/local_internal_windows_test.go
//go:build windows package local import ( "context" "path/filepath" "runtime" "syscall" "testing" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fstest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // TestRmdirWindows tests that FILE_ATTRIBUTE_READONLY does not block Rmdir on windows. // Microsoft docs indicate that "This attribute is not honored on directories." // See https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants#file_attribute_readonly // and https://github.com/golang/go/issues/26295 func TestRmdirWindows(t *testing.T) { if runtime.GOOS != "windows" { t.Skipf("windows only") } r := fstest.NewRun(t) defer r.Finalise() err := operations.Mkdir(context.Background(), r.Flocal, "testdir") require.NoError(t, err) ptr, err := syscall.UTF16PtrFromString(filepath.Join(r.Flocal.Root(), "testdir")) require.NoError(t, err) err = syscall.SetFileAttributes(ptr, uint32(syscall.FILE_ATTRIBUTE_DIRECTORY+syscall.FILE_ATTRIBUTE_READONLY)) require.NoError(t, err) err = operations.Rmdir(context.Background(), r.Flocal, "testdir") assert.NoError(t, err) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/lchtimes.go
backend/local/lchtimes.go
//go:build plan9 || js package local import ( "time" ) const haveLChtimes = false // lChtimes changes the access and modification times of the named // link, similar to the Unix utime() or utimes() functions. // // The underlying filesystem may truncate or round the values to a // less precise time unit. // If there is an error, it will be of type *PathError. func lChtimes(name string, atime time.Time, mtime time.Time) error { // Does nothing return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/fadvise_other.go
backend/local/fadvise_other.go
//go:build !linux

package local

import (
	"io"
	"os"
)

// newFadviseReadCloser returns f unchanged. The fadvise read-ahead
// optimisation is Linux-only, so on other platforms there is nothing
// to wrap and the o, offset and limit arguments are ignored.
func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser {
	return f
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/metadata.go
backend/local/metadata.go
package local

import (
	"fmt"
	"math"
	"os"
	"runtime"
	"strconv"
	"time"

	"github.com/rclone/rclone/fs"
)

// metadataTimeFormat is the format used for time values in metadata.
const metadataTimeFormat = time.RFC3339Nano

// system metadata keys which this backend owns
//
// not all values supported on all OSes
var systemMetadataInfo = map[string]fs.MetadataHelp{
	"mode": {
		Help:    "File type and mode",
		Type:    "octal, unix style",
		Example: "0100664",
	},
	"uid": {
		Help:    "User ID of owner",
		Type:    "decimal number",
		Example: "500",
	},
	"gid": {
		Help:    "Group ID of owner",
		Type:    "decimal number",
		Example: "500",
	},
	"rdev": {
		Help:    "Device ID (if special file)",
		Type:    "hexadecimal",
		Example: "1abc",
	},
	"atime": {
		Help:    "Time of last access",
		Type:    "RFC 3339",
		Example: "2006-01-02T15:04:05.999999999Z07:00",
	},
	"mtime": {
		Help:    "Time of last modification",
		Type:    "RFC 3339",
		Example: "2006-01-02T15:04:05.999999999Z07:00",
	},
	"btime": {
		Help:    "Time of file birth (creation)",
		Type:    "RFC 3339",
		Example: "2006-01-02T15:04:05.999999999Z07:00",
	},
}

// parseMetadataTime parses a time string from metadata m with the given
// key, returning ok=false (with a debug log) if the key is absent or
// the value does not parse as RFC 3339.
func (o *Object) parseMetadataTime(m fs.Metadata, key string) (t time.Time, ok bool) {
	value, ok := m[key]
	if ok {
		var err error
		t, err = time.Parse(metadataTimeFormat, value)
		if err != nil {
			fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
			ok = false
		}
	}
	return t, ok
}

// parseMetadataInt parses an int from metadata m with the given key and
// base, returning ok=false (with a debug log) if the key is absent or
// the value does not parse.
func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result int, ok bool) {
	value, ok := m[key]
	if ok {
		var err error
		parsed, err := strconv.ParseInt(value, base, 0)
		if err != nil {
			fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
			ok = false
		}
		result = int(parsed)
	}
	return result, ok
}

// Write the metadata into the file
//
// It isn't possible to set the ctime and btime under Unix
//
// Note that if several attributes fail to apply, only the last error
// is returned in outErr (earlier ones are overwritten).
func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
	var err error
	atime, atimeOK := o.parseMetadataTime(m, "atime")
	mtime, mtimeOK := o.parseMetadataTime(m, "mtime")
	btime, btimeOK := o.parseMetadataTime(m, "btime")
	if atimeOK || mtimeOK {
		// If only one of atime/mtime was supplied, mirror it into the other
		// as both must be passed to setTimes.
		if atimeOK && !mtimeOK {
			mtime = atime
		}
		if !atimeOK && mtimeOK {
			atime = mtime
		}
		err = o.setTimes(atime, mtime)
		if err != nil {
			outErr = fmt.Errorf("failed to set times: %w", err)
		}
	}
	if haveSetBTime {
		if btimeOK {
			if o.translatedLink {
				err = lsetBTime(o.path, btime)
			} else {
				err = setBTime(o.path, btime)
			}
			if err != nil {
				outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
			}
		}
	}
	uid, hasUID := o.parseMetadataInt(m, "uid", 10)
	gid, hasGID := o.parseMetadataInt(m, "gid", 10)
	// NOTE(review): ownership is only applied when "uid" is present; a
	// "gid" supplied on its own is silently ignored - confirm intended.
	if hasUID {
		// FIXME should read UID and GID of current user and only attempt to set it if different
		if !hasGID {
			gid = uid
		}
		if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
			// uid/gid are decimal and in uid.gid order (was %o.%o with
			// the arguments swapped, which printed gid.uid in octal).
			fs.Debugf(o, "Ignoring request to set ownership %d.%d on this OS", uid, gid)
		} else {
			if o.translatedLink {
				err = os.Lchown(o.path, uid, gid)
			} else {
				err = os.Chown(o.path, uid, gid)
			}
			if err != nil {
				outErr = fmt.Errorf("failed to change ownership: %w", err)
			}
		}
	}
	mode, hasMode := o.parseMetadataInt(m, "mode", 8)
	if hasMode {
		if mode >= 0 {
			umode := uint(mode)
			if umode <= math.MaxUint32 {
				if o.translatedLink {
					if haveLChmod {
						err = lChmod(o.path, os.FileMode(umode))
					} else {
						fs.Debugf(o, "Unable to set mode %v on a symlink on this OS", os.FileMode(umode))
						err = nil
					}
				} else {
					err = os.Chmod(o.path, os.FileMode(umode))
				}
				if err != nil {
					outErr = fmt.Errorf("failed to change permissions: %w", err)
				}
			}
		}
	}
	// FIXME not parsing rdev yet
	return outErr
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/remove_other.go
backend/local/remove_other.go
//go:build !windows package local import "os" // Removes name, retrying on a sharing violation func remove(name string) error { return os.Remove(name) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/symlink_other.go
backend/local/symlink_other.go
//go:build windows || plan9 || js package local import ( "strings" ) // isCircularSymlinkError checks if the current error code is because of a circular symlink func isCircularSymlinkError(err error) bool { if err != nil { if strings.Contains(err.Error(), "The name of the file cannot be resolved by the system") { return true } } return false }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/remove_test.go
backend/local/remove_test.go
package local import ( "os" "sync" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // Check we can remove an open file func TestRemove(t *testing.T) { fd, err := os.CreateTemp("", "rclone-remove-test") require.NoError(t, err) name := fd.Name() defer func() { _ = os.Remove(name) }() exists := func() bool { _, err := os.Stat(name) if err == nil { return true } else if os.IsNotExist(err) { return false } require.NoError(t, err) return false } assert.True(t, exists()) // close the file in the background var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() time.Sleep(250 * time.Millisecond) require.NoError(t, fd.Close()) }() // delete the open file err = remove(name) require.NoError(t, err) // check it no longer exists assert.False(t, exists()) // wait for background close wg.Wait() }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/lchmod_unix.go
backend/local/lchmod_unix.go
//go:build !windows && !plan9 && !js && !linux package local import ( "os" "syscall" "golang.org/x/sys/unix" ) const haveLChmod = true // syscallMode returns the syscall-specific mode bits from Go's portable mode bits. // // Borrowed from the syscall source since it isn't public. func syscallMode(i os.FileMode) (o uint32) { o |= uint32(i.Perm()) if i&os.ModeSetuid != 0 { o |= syscall.S_ISUID } if i&os.ModeSetgid != 0 { o |= syscall.S_ISGID } if i&os.ModeSticky != 0 { o |= syscall.S_ISVTX } return o } // lChmod changes the mode of the named file to mode. If the file is a symbolic // link, it changes the link, not the target. If there is an error, // it will be of type *PathError. func lChmod(name string, mode os.FileMode) error { // NB linux does not support AT_SYMLINK_NOFOLLOW as a parameter to fchmodat // and returns ENOTSUP if you try, so we don't support this on linux if e := unix.Fchmodat(unix.AT_FDCWD, name, syscallMode(mode), unix.AT_SYMLINK_NOFOLLOW); e != nil { return &os.PathError{Op: "lChmod", Path: name, Err: e} } return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/local_internal_test.go
backend/local/local_internal_test.go
package local import ( "bytes" "context" "fmt" "io" "os" "path" "path/filepath" "runtime" "sort" "testing" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/lib/file" "github.com/rclone/rclone/lib/readers" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // TestMain drives the tests func TestMain(m *testing.M) { fstest.TestMain(m) } // Test copy with source file that's updating func TestUpdatingCheck(t *testing.T) { r := fstest.NewRun(t) filePath := "sub dir/local test" r.WriteFile(filePath, "content", time.Now()) fd, err := file.Open(path.Join(r.LocalName, filePath)) if err != nil { t.Fatalf("failed opening file %q: %v", filePath, err) } defer func() { require.NoError(t, fd.Close()) }() fi, err := fd.Stat() require.NoError(t, err) o := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}} wrappedFd := readers.NewLimitedReadCloser(fd, -1) hash, err := hash.NewMultiHasherTypes(hash.Supported()) require.NoError(t, err) in := localOpenFile{ o: o, in: wrappedFd, hash: hash, fd: fd, } buf := make([]byte, 1) _, err = in.Read(buf) require.NoError(t, err) r.WriteFile(filePath, "content updated", time.Now()) _, err = in.Read(buf) require.Errorf(t, err, "can't copy - source file is being updated") // turn the checking off and try again in.o.fs.opt.NoCheckUpdated = true r.WriteFile(filePath, "content updated", time.Now()) _, err = in.Read(buf) require.NoError(t, err) } // Test corrupted on transfer // should error due to size/hash mismatch func TestVerifyCopy(t *testing.T) { t.Skip("FIXME this test is unreliable") r := fstest.NewRun(t) filePath := "sub dir/local test" r.WriteFile(filePath, "some content", time.Now()) src, err := 
r.Flocal.NewObject(context.Background(), filePath) require.NoError(t, err) src.(*Object).fs.opt.NoCheckUpdated = true for i := range 100 { go r.WriteFile(src.Remote(), fmt.Sprintf("some new content %d", i), src.ModTime(context.Background())) } _, err = operations.Copy(context.Background(), r.Fremote, nil, filePath+"2", src) assert.Error(t, err) } func TestSymlink(t *testing.T) { ctx := context.Background() r := fstest.NewRun(t) f := r.Flocal.(*Fs) dir := f.root // Write a file modTime1 := fstest.Time("2001-02-03T04:05:10.123123123Z") file1 := r.WriteFile("file.txt", "hello", modTime1) // Write a symlink modTime2 := fstest.Time("2002-02-03T04:05:10.123123123Z") symlinkPath := filepath.Join(dir, "symlink.txt") require.NoError(t, os.Symlink("file.txt", symlinkPath)) require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2)) // Object viewed as symlink file2 := fstest.NewItem("symlink.txt"+fs.LinkSuffix, "file.txt", modTime2) // Object viewed as destination file2d := fstest.NewItem("symlink.txt", "hello", modTime1) // Check with no symlink flags r.CheckLocalItems(t, file1) r.CheckRemoteItems(t) // Set fs into "-L" mode f.opt.FollowSymlinks = true f.opt.TranslateSymlinks = false f.lstat = os.Stat r.CheckLocalItems(t, file1, file2d) r.CheckRemoteItems(t) // Set fs into "-l" mode f.opt.FollowSymlinks = false f.opt.TranslateSymlinks = true f.lstat = os.Lstat fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported) if haveLChtimes { r.CheckLocalItems(t, file1, file2) } // Create a symlink modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z") file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+fs.LinkSuffix, "file.txt", modTime3, false) fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported) if haveLChtimes { r.CheckLocalItems(t, file1, file2, file3) } // Check it got the correct contents symlinkPath = filepath.Join(dir, "symlink2.txt") fi, err := 
os.Lstat(symlinkPath) require.NoError(t, err) assert.False(t, fi.Mode().IsRegular()) linkText, err := os.Readlink(symlinkPath) require.NoError(t, err) assert.Equal(t, "file.txt", linkText) // Check that NewObject gets the correct object o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+fs.LinkSuffix) require.NoError(t, err) assert.Equal(t, "symlink2.txt"+fs.LinkSuffix, o.Remote()) assert.Equal(t, int64(8), o.Size()) // Check that NewObject doesn't see the non suffixed version _, err = r.Flocal.NewObject(ctx, "symlink2.txt") require.Equal(t, fs.ErrorObjectNotFound, err) // Check that NewFs works with the suffixed version and --links f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+fs.LinkSuffix), configmap.Simple{ "links": "true", }) require.Equal(t, fs.ErrorIsFile, err) require.Equal(t, dir, f2.(*Fs).root) // Check that NewFs doesn't see the non suffixed version with --links f2, err = NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"), configmap.Simple{ "links": "true", }) require.Equal(t, errLinksNeedsSuffix, err) require.Nil(t, f2) // Check reading the object in, err := o.Open(ctx) require.NoError(t, err) contents, err := io.ReadAll(in) require.NoError(t, err) require.Equal(t, "file.txt", string(contents)) require.NoError(t, in.Close()) // Check reading the object with range in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5}) require.NoError(t, err) contents, err = io.ReadAll(in) require.NoError(t, err) require.Equal(t, "file.txt"[2:5+1], string(contents)) require.NoError(t, in.Close()) } func TestSymlinkError(t *testing.T) { m := configmap.Simple{ "links": "true", "copy_links": "true", } _, err := NewFs(context.Background(), "local", "/", m) assert.Equal(t, errLinksAndCopyLinks, err) } func TestHashWithTypeNone(t *testing.T) { ctx := context.Background() r := fstest.NewRun(t) const filePath = "file.txt" r.WriteFile(filePath, "content", time.Now()) f := r.Flocal.(*Fs) // Get the object o, err := f.NewObject(ctx, filePath) 
require.NoError(t, err) // Test the hash is as we expect h, err := o.Hash(ctx, hash.None) require.Empty(t, h) require.NoError(t, err) } // Test hashes on updating an object func TestHashOnUpdate(t *testing.T) { ctx := context.Background() r := fstest.NewRun(t) const filePath = "file.txt" when := time.Now() r.WriteFile(filePath, "content", when) f := r.Flocal.(*Fs) // Get the object o, err := f.NewObject(ctx, filePath) require.NoError(t, err) // Test the hash is as we expect md5, err := o.Hash(ctx, hash.MD5) require.NoError(t, err) assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5) // Reupload it with different contents but same size and timestamp b := bytes.NewBufferString("CONTENT") src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f) err = o.Update(ctx, b, src) require.NoError(t, err) // Check the hash is as expected md5, err = o.Hash(ctx, hash.MD5) require.NoError(t, err) assert.Equal(t, "45685e95985e20822fb2538a522a5ccf", md5) } // Test hashes on deleting an object func TestHashOnDelete(t *testing.T) { ctx := context.Background() r := fstest.NewRun(t) const filePath = "file.txt" when := time.Now() r.WriteFile(filePath, "content", when) f := r.Flocal.(*Fs) // Get the object o, err := f.NewObject(ctx, filePath) require.NoError(t, err) // Test the hash is as we expect md5, err := o.Hash(ctx, hash.MD5) require.NoError(t, err) assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5) // Delete the object require.NoError(t, o.Remove(ctx)) // Test the hash cache is empty require.Nil(t, o.(*Object).hashes) // Test the hash returns an error _, err = o.Hash(ctx, hash.MD5) require.Error(t, err) } func TestMetadata(t *testing.T) { ctx := context.Background() r := fstest.NewRun(t) const filePath = "metafile.txt" when := time.Now() r.WriteFile(filePath, "metadata file contents", when) f := r.Flocal.(*Fs) // Set fs into "-l" / "--links" mode f.opt.TranslateSymlinks = true // Write a symlink to the file symlinkPath := "metafile-link.txt" 
osSymlinkPath := filepath.Join(f.root, symlinkPath) symlinkPath += fs.LinkSuffix require.NoError(t, os.Symlink(filePath, osSymlinkPath)) symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z") require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime)) // Get the object obj, err := f.NewObject(ctx, filePath) require.NoError(t, err) o := obj.(*Object) // Get the symlink object symlinkObj, err := f.NewObject(ctx, symlinkPath) require.NoError(t, err) symlinkO := symlinkObj.(*Object) // Record metadata for o oMeta, err := o.Metadata(ctx) require.NoError(t, err) // Test symlink first to check it doesn't mess up file t.Run("Symlink", func(t *testing.T) { testMetadata(t, r, symlinkO, symlinkModTime) }) // Read it again oMetaNew, err := o.Metadata(ctx) require.NoError(t, err) // Check that operating on the symlink didn't change the file it was pointing to // See: https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv assert.Equal(t, oMeta, oMetaNew, "metadata setting on symlink messed up file") // Now run the same tests on the file t.Run("File", func(t *testing.T) { testMetadata(t, r, o, when) }) } func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) { ctx := context.Background() whenRFC := when.Local().Format(time.RFC3339Nano) const dayLength = len("2001-01-01") f := r.Flocal.(*Fs) features := f.Features() var hasXID, hasAtime, hasBtime, canSetXattrOnLinks bool switch runtime.GOOS { case "darwin", "freebsd", "netbsd", "linux": hasXID, hasAtime, hasBtime = true, true, true canSetXattrOnLinks = runtime.GOOS != "linux" case "openbsd", "solaris": hasXID, hasAtime = true, true case "windows": hasAtime, hasBtime = true, true case "plan9", "js": // nada default: t.Errorf("No test cases for OS %q", runtime.GOOS) } assert.True(t, features.ReadMetadata) assert.True(t, features.WriteMetadata) assert.Equal(t, xattrSupported, features.UserMetadata) t.Run("Xattr", func(t *testing.T) { if !xattrSupported { t.Skip() } m, err 
:= o.getXattr() require.NoError(t, err) assert.Nil(t, m) if !canSetXattrOnLinks && o.translatedLink { t.Skip("Skip remainder of test as can't set xattr on symlinks on this OS") } inM := fs.Metadata{ "potato": "chips", "cabbage": "soup", } err = o.setXattr(inM) require.NoError(t, err) m, err = o.getXattr() require.NoError(t, err) assert.NotNil(t, m) assert.Equal(t, inM, m) }) checkTime := func(m fs.Metadata, key string, when time.Time) { t.Helper() mt, ok := o.parseMetadataTime(m, key) assert.True(t, ok) dt := mt.Sub(when) precision := time.Second assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v want %v got %v", key, dt, precision, mt, when)) } checkInt := func(m fs.Metadata, key string, base int) int { t.Helper() value, ok := o.parseMetadataInt(m, key, base) assert.True(t, ok) return value } t.Run("Read", func(t *testing.T) { m, err := o.Metadata(ctx) require.NoError(t, err) assert.NotNil(t, m) // All OSes have these checkInt(m, "mode", 8) checkTime(m, "mtime", when) assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength]) if hasAtime && !o.translatedLink { // symlinks generally don't record atime checkTime(m, "atime", when) } if hasBtime && !o.translatedLink { // symlinks generally don't record btime checkTime(m, "btime", when) } if hasXID { checkInt(m, "uid", 10) checkInt(m, "gid", 10) } }) t.Run("Write", func(t *testing.T) { newAtimeString := "2011-12-13T14:15:16.999999999Z" newAtime := fstest.Time(newAtimeString) newMtimeString := "2011-12-12T14:15:16.999999999Z" newMtime := fstest.Time(newMtimeString) newBtimeString := "2011-12-11T14:15:16.999999999Z" newBtime := fstest.Time(newBtimeString) newM := fs.Metadata{ "mtime": newMtimeString, "atime": newAtimeString, "btime": newBtimeString, // Can't test uid, gid without being root "mode": "0767", "potato": "wedges", } if !canSetXattrOnLinks && o.translatedLink { // Don't change xattr if not supported on symlinks delete(newM, "potato") } err := 
o.writeMetadata(newM) require.NoError(t, err) m, err := o.Metadata(ctx) require.NoError(t, err) assert.NotNil(t, m) mode := checkInt(m, "mode", 8) if runtime.GOOS != "windows" { expectedMode := 0767 if o.translatedLink && runtime.GOOS == "linux" { expectedMode = 0777 // perms of symlinks always read as 0777 on linux } assert.Equal(t, expectedMode, mode&0777, fmt.Sprintf("mode wrong - expecting 0%o got 0%o", expectedMode, mode&0777)) } checkTime(m, "mtime", newMtime) if hasAtime { checkTime(m, "atime", newAtime) } if haveSetBTime { checkTime(m, "btime", newBtime) } if xattrSupported && (canSetXattrOnLinks || !o.translatedLink) { assert.Equal(t, "wedges", m["potato"]) } }) } func TestFilter(t *testing.T) { ctx := context.Background() r := fstest.NewRun(t) when := time.Now() r.WriteFile("included", "included file", when) r.WriteFile("excluded", "excluded file", when) f := r.Flocal.(*Fs) // Check set up for filtering assert.True(t, f.Features().FilterAware) // Add a filter ctx, fi := filter.AddConfig(ctx) require.NoError(t, fi.AddRule("+ included")) require.NoError(t, fi.AddRule("- *")) // Check listing without use filter flag entries, err := f.List(ctx, "") require.NoError(t, err) sort.Sort(entries) require.Equal(t, "[excluded included]", fmt.Sprint(entries)) // Add user filter flag ctx = filter.SetUseFilter(ctx, true) // Check listing with use filter flag entries, err = f.List(ctx, "") require.NoError(t, err) sort.Sort(entries) require.Equal(t, "[included]", fmt.Sprint(entries)) } func testFilterSymlink(t *testing.T, copyLinks bool) { ctx := context.Background() r := fstest.NewRun(t) defer r.Finalise() when := time.Now() f := r.Flocal.(*Fs) // Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink r.WriteFile("included.file", "included file", when) r.WriteFile("included.dir/included.sub.file", "included sub file", when) require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link"))) 
require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link"))) require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link"))) defer func() { // Reset -L/-l mode f.opt.FollowSymlinks = false f.opt.TranslateSymlinks = false f.lstat = os.Lstat }() if copyLinks { // Set fs into "-L" mode f.opt.FollowSymlinks = true f.opt.TranslateSymlinks = false f.lstat = os.Stat } else { // Set fs into "-l" mode f.opt.FollowSymlinks = false f.opt.TranslateSymlinks = true f.lstat = os.Lstat } // Check set up for filtering assert.True(t, f.Features().FilterAware) // Reset global error count accounting.Stats(ctx).ResetErrors() assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found") // Add a filter ctx, fi := filter.AddConfig(ctx) require.NoError(t, fi.AddRule("+ included.file")) require.NoError(t, fi.AddRule("+ included.dir/**")) if copyLinks { require.NoError(t, fi.AddRule("+ included.file.link")) require.NoError(t, fi.AddRule("+ included.dir.link/**")) } else { require.NoError(t, fi.AddRule("+ included.file.link.rclonelink")) require.NoError(t, fi.AddRule("+ included.dir.link.rclonelink")) } require.NoError(t, fi.AddRule("- *")) // Check listing without use filter flag entries, err := f.List(ctx, "") require.NoError(t, err) if copyLinks { // Check 1 global errors one for each dangling symlink assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found") } else { // Check 0 global errors as dangling symlink copied properly assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found") } accounting.Stats(ctx).ResetErrors() sort.Sort(entries) if copyLinks { require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries)) } else { require.Equal(t, "[dangling.link.rclonelink included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries)) } // Add user filter flag ctx = 
filter.SetUseFilter(ctx, true) // Check listing with use filter flag entries, err = f.List(ctx, "") require.NoError(t, err) assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found") sort.Sort(entries) if copyLinks { require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries)) } else { require.Equal(t, "[included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries)) } // Check listing through a symlink still works entries, err = f.List(ctx, "included.dir") require.NoError(t, err) assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found") sort.Sort(entries) require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries)) } func TestFilterSymlinkCopyLinks(t *testing.T) { testFilterSymlink(t, true) } func TestFilterSymlinkLinks(t *testing.T) { testFilterSymlink(t, false) } func TestCopySymlink(t *testing.T) { ctx := context.Background() r := fstest.NewRun(t) defer r.Finalise() when := time.Now() f := r.Flocal.(*Fs) // Create a file and a symlink to it r.WriteFile("src/file.txt", "hello world", when) require.NoError(t, os.Symlink("file.txt", filepath.Join(r.LocalName, "src", "link.txt"))) defer func() { // Reset -L/-l mode f.opt.FollowSymlinks = false f.opt.TranslateSymlinks = false f.lstat = os.Lstat }() // Set fs into "-l/--links" mode f.opt.FollowSymlinks = false f.opt.TranslateSymlinks = true f.lstat = os.Lstat // Create dst require.NoError(t, f.Mkdir(ctx, "dst")) // Do copy from src into dst src, err := f.NewObject(ctx, "src/link.txt.rclonelink") require.NoError(t, err) require.NotNil(t, src) dst, err := operations.Copy(ctx, f, nil, "dst/link.txt.rclonelink", src) require.NoError(t, err) require.NotNil(t, dst) // Test that we made a symlink and it has the right contents dstPath := filepath.Join(r.LocalName, "dst", "link.txt") linkContents, err := os.Readlink(dstPath) require.NoError(t, err) assert.Equal(t, 
"file.txt", linkContents) // Set fs into "-L/--copy-links" mode f.opt.FollowSymlinks = true f.opt.TranslateSymlinks = false f.lstat = os.Stat // Create dst require.NoError(t, f.Mkdir(ctx, "dst2")) // Do copy from src into dst src, err = f.NewObject(ctx, "src/link.txt") require.NoError(t, err) require.NotNil(t, src) dst, err = operations.Copy(ctx, f, nil, "dst2/link.txt", src) require.NoError(t, err) require.NotNil(t, dst) // Test that we made a NON-symlink and it has the right contents dstPath = filepath.Join(r.LocalName, "dst2", "link.txt") fi, err := os.Lstat(dstPath) require.NoError(t, err) assert.True(t, fi.Mode()&os.ModeSymlink == 0) want := fstest.NewItem("dst2/link.txt", "hello world", when) fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "") // Test that copying a normal file also works dst, err = operations.Copy(ctx, f, nil, "dst2/file.txt", dst) require.NoError(t, err) require.NotNil(t, dst) want = fstest.NewItem("dst2/file.txt", "hello world", when) fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "") }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/xattr_unsupported.go
backend/local/xattr_unsupported.go
// The pkg/xattr module doesn't compile for openbsd or plan9 //go:build openbsd || plan9 package local import "github.com/rclone/rclone/fs" const ( xattrSupported = false ) // getXattr returns the extended attributes for an object func (o *Object) getXattr() (metadata fs.Metadata, err error) { return nil, nil } // setXattr sets the metadata on the file Xattrs func (o *Object) setXattr(metadata fs.Metadata) (err error) { return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/tests_test.go
backend/local/tests_test.go
package local import ( "runtime" "testing" "github.com/rclone/rclone/lib/encoder" ) // Test Windows character replacements var testsWindows = [][2]string{ {`c:\temp`, `c:\temp`}, {`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`}, {`//?/UNC/theserver/dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`}, {`c:/temp`, `c:\temp`}, {`C:/temp/file.txt`, `C:\temp\file.txt`}, {`c:\!\"#¤%&/()=;:*^?+-`, `c:\!\"#¤%&\()=;:*^?+-`}, {`c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`, `c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`}, } func TestCleanWindows(t *testing.T) { if runtime.GOOS != "windows" { t.Skipf("windows only") } for _, test := range testsWindows { got := cleanRootPath(test[0], true, encoder.OS) expect := test[1] if got != expect { t.Fatalf("got %q, expected %q", got, expect) } } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/setbtime.go
backend/local/setbtime.go
//go:build !windows package local import ( "time" ) const haveSetBTime = false // setBTime changes the birth time of the file passed in func setBTime(name string, btime time.Time) error { // Does nothing return nil } // lsetBTime changes the birth time of the link passed in func lsetBTime(name string, btime time.Time) error { // Does nothing return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/read_device_other.go
backend/local/read_device_other.go
// Device reading functions //go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris package local import "os" // readDevice turns a valid os.FileInfo into a device number, // returning devUnset if it fails. func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 { return devUnset }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/metadata_bsd.go
backend/local/metadata_bsd.go
//go:build darwin || freebsd || netbsd package local import ( "fmt" "os" "syscall" "time" "github.com/rclone/rclone/fs" ) // Read the time specified from the os.FileInfo func readTime(t timeType, fi os.FileInfo) time.Time { stat, ok := fi.Sys().(*syscall.Stat_t) if !ok { fs.Debugf(nil, "didn't return Stat_t as expected") return fi.ModTime() } switch t { case aTime: return time.Unix(stat.Atimespec.Unix()) case bTime: return time.Unix(stat.Birthtimespec.Unix()) case cTime: return time.Unix(stat.Ctimespec.Unix()) } return fi.ModTime() } // Read the metadata from the file into metadata where possible func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) { info, err := o.fs.lstat(o.path) if err != nil { return err } stat, ok := info.Sys().(*syscall.Stat_t) if !ok { fs.Debugf(o, "didn't return Stat_t as expected") return nil } m.Set("mode", fmt.Sprintf("%0o", stat.Mode)) m.Set("uid", fmt.Sprintf("%d", stat.Uid)) m.Set("gid", fmt.Sprintf("%d", stat.Gid)) if stat.Rdev != 0 { m.Set("rdev", fmt.Sprintf("%x", stat.Rdev)) } setTime := func(key string, t syscall.Timespec) { m.Set(key, time.Unix(t.Unix()).Format(metadataTimeFormat)) } setTime("atime", stat.Atimespec) setTime("mtime", stat.Mtimespec) setTime("btime", stat.Birthtimespec) return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/local/read_device_unix.go
backend/local/read_device_unix.go
// Device reading functions //go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris package local import ( "os" "syscall" "github.com/rclone/rclone/fs" ) // readDevice turns a valid os.FileInfo into a device number, // returning devUnset if it fails. func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 { if !oneFileSystem { return devUnset } statT, ok := fi.Sys().(*syscall.Stat_t) if !ok { fs.Debugf(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys()) return devUnset } return uint64(statT.Dev) // nolint: unconvert }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false