repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/webdav/odrvcookie/fetch.go
backend/webdav/odrvcookie/fetch.go
// Package odrvcookie can fetch authentication cookies for a sharepoint webdav endpoint package odrvcookie import ( "bytes" "context" "encoding/xml" "fmt" "html/template" "net/http" "net/http/cookiejar" "net/url" "strings" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fshttp" "golang.org/x/net/publicsuffix" ) // CookieAuth hold the authentication information // These are username and password as well as the authentication endpoint type CookieAuth struct { user string pass string endpoint string } // CookieResponse contains the requested cookies type CookieResponse struct { RtFa http.Cookie FedAuth http.Cookie } // SharepointSuccessResponse holds a response from a successful microsoft login type SharepointSuccessResponse struct { XMLName xml.Name `xml:"Envelope"` Body SuccessResponseBody `xml:"Body"` } // SuccessResponseBody is the body of a successful response, it holds the token type SuccessResponseBody struct { XMLName xml.Name Type string `xml:"RequestSecurityTokenResponse>TokenType"` Created time.Time `xml:"RequestSecurityTokenResponse>Lifetime>Created"` Expires time.Time `xml:"RequestSecurityTokenResponse>Lifetime>Expires"` Token string `xml:"RequestSecurityTokenResponse>RequestedSecurityToken>BinarySecurityToken"` } // SharepointError holds an error response microsoft login type SharepointError struct { XMLName xml.Name `xml:"Envelope"` Body ErrorResponseBody `xml:"Body"` } func (e *SharepointError) Error() string { return fmt.Sprintf("%s: %s (%s)", e.Body.FaultCode, e.Body.Reason, e.Body.Detail) } // ErrorResponseBody contains the body of an erroneous response type ErrorResponseBody struct { XMLName xml.Name FaultCode string `xml:"Fault>Code>Subcode>Value"` Reason string `xml:"Fault>Reason>Text"` Detail string `xml:"Fault>Detail>error>internalerror>text"` } // reqString is a template that gets populated with the user data in order to retrieve a "BinarySecurityToken" const reqString = `<s:Envelope 
xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:a="http://www.w3.org/2005/08/addressing" xmlns:u="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd"> <s:Header> <a:Action s:mustUnderstand="1">http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue</a:Action> <a:ReplyTo> <a:Address>http://www.w3.org/2005/08/addressing/anonymous</a:Address> </a:ReplyTo> <a:To s:mustUnderstand="1">{{ .SPTokenURL }}</a:To> <o:Security s:mustUnderstand="1" xmlns:o="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd"> <o:UsernameToken> <o:Username>{{ .Username }}</o:Username> <o:Password>{{ .Password }}</o:Password> </o:UsernameToken> </o:Security> </s:Header> <s:Body> <t:RequestSecurityToken xmlns:t="http://schemas.xmlsoap.org/ws/2005/02/trust"> <wsp:AppliesTo xmlns:wsp="http://schemas.xmlsoap.org/ws/2004/09/policy"> <a:EndpointReference> <a:Address>{{ .Address }}</a:Address> </a:EndpointReference> </wsp:AppliesTo> <t:KeyType>http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey</t:KeyType> <t:RequestType>http://schemas.xmlsoap.org/ws/2005/02/trust/Issue</t:RequestType> <t:TokenType>urn:oasis:names:tc:SAML:1.0:assertion</t:TokenType> </t:RequestSecurityToken> </s:Body> </s:Envelope>` // New creates a new CookieAuth struct func New(pUser, pPass, pEndpoint string) CookieAuth { retStruct := CookieAuth{ user: pUser, pass: pPass, endpoint: pEndpoint, } return retStruct } // Cookies creates a CookieResponse. 
It fetches the auth token and then // retrieves the Cookies func (ca *CookieAuth) Cookies(ctx context.Context) (*CookieResponse, error) { tokenResp, err := ca.getSPToken(ctx) if err != nil { return nil, err } return ca.getSPCookie(tokenResp) } func (ca *CookieAuth) getSPCookie(conf *SharepointSuccessResponse) (*CookieResponse, error) { spRoot, err := url.Parse(ca.endpoint) if err != nil { return nil, fmt.Errorf("error while constructing endpoint URL: %w", err) } u, err := url.Parse(spRoot.Scheme + "://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0") if err != nil { return nil, fmt.Errorf("error while constructing login URL: %w", err) } // To authenticate with davfs or anything else we need two cookies (rtFa and FedAuth) // In order to get them we use the token we got earlier and a cookieJar jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) if err != nil { return nil, err } client := &http.Client{ Jar: jar, } // Send the previously acquired Token as a Post parameter if _, err = client.Post(u.String(), "text/xml", strings.NewReader(conf.Body.Token)); err != nil { return nil, fmt.Errorf("error while grabbing cookies from endpoint: %w", err) } cookieResponse := CookieResponse{} for _, cookie := range jar.Cookies(u) { if (cookie.Name == "rtFa") || (cookie.Name == "FedAuth") { switch cookie.Name { case "rtFa": cookieResponse.RtFa = *cookie case "FedAuth": cookieResponse.FedAuth = *cookie } } } return &cookieResponse, nil } var spTokenURLMap = map[string]string{ "com": "https://login.microsoftonline.com", "cn": "https://login.chinacloudapi.cn", "us": "https://login.microsoftonline.us", "de": "https://login.microsoftonline.de", } func getSPTokenURL(endpoint string) (string, error) { spRoot, err := url.Parse(endpoint) if err != nil { return "", fmt.Errorf("error while parse endpoint: %w", err) } domains := strings.Split(spRoot.Host, ".") tld := domains[len(domains)-1] spTokenURL, ok := spTokenURLMap[tld] if !ok { return "", 
fmt.Errorf("error while get SPToken url, unsupported tld: %s", tld) } return spTokenURL + "/extSTS.srf", nil } func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessResponse, err error) { spTokenURL, err := getSPTokenURL(ca.endpoint) if err != nil { return nil, err } reqData := map[string]any{ "Username": ca.user, "Password": ca.pass, "Address": ca.endpoint, "SPTokenURL": spTokenURL, } t := template.Must(template.New("authXML").Parse(reqString)) buf := &bytes.Buffer{} if err := t.Execute(buf, reqData); err != nil { return nil, fmt.Errorf("error while filling auth token template: %w", err) } // Create and execute the first request which returns an auth token for the sharepoint service // With this token we can authenticate on the login page and save the returned cookies req, err := http.NewRequestWithContext(ctx, "POST", spTokenURL, buf) if err != nil { return nil, err } client := fshttp.NewClient(ctx) resp, err := client.Do(req) if err != nil { return nil, fmt.Errorf("error while logging in to endpoint: %w", err) } defer fs.CheckClose(resp.Body, &err) respBuf := bytes.Buffer{} _, err = respBuf.ReadFrom(resp.Body) if err != nil { return nil, err } s := respBuf.Bytes() conf = &SharepointSuccessResponse{} err = xml.Unmarshal(s, conf) if conf.Body.Token == "" { // xml Unmarshal won't fail if the response doesn't contain a token // However, the token will be empty sErr := &SharepointError{} errSErr := xml.Unmarshal(s, sErr) if errSErr == nil { return nil, sErr } } if err != nil { return nil, fmt.Errorf("error while reading endpoint response: %w", err) } return }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/webdav/odrvcookie/renew.go
backend/webdav/odrvcookie/renew.go
package odrvcookie import ( "time" ) // CookieRenew holds information for the renew type CookieRenew struct { timer *time.Ticker renewFn func() } // NewRenew returns and starts a CookieRenew func NewRenew(interval time.Duration, renewFn func()) *CookieRenew { renew := CookieRenew{ timer: time.NewTicker(interval), renewFn: renewFn, } go renew.Renew() return &renew } // Renew calls the renewFn for every tick func (c *CookieRenew) Renew() { for { <-c.timer.C // wait for tick c.renewFn() } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/webdav/api/types.go
backend/webdav/api/types.go
// Package api has type definitions for webdav package api import ( "encoding/xml" "regexp" "strconv" "strings" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" ) const ( // Wed, 27 Sep 2017 14:28:34 GMT timeFormat = time.RFC1123 // The same as time.RFC1123 with optional leading zeros on the date // see https://github.com/rclone/rclone/issues/2574 noZerosRFC1123 = "Mon, _2 Jan 2006 15:04:05 MST" ) // Multistatus contains responses returned from an HTTP 207 return code type Multistatus struct { Responses []Response `xml:"response"` } // Response contains an Href the response it about and its properties type Response struct { Href string `xml:"href"` Props Prop `xml:"propstat"` } // Prop is the properties of a response // // This is a lazy way of decoding the multiple <s:propstat> in the // response. // // The response might look like this. // // <d:response> // // <d:href>/remote.php/webdav/Nextcloud%20Manual.pdf</d:href> // <d:propstat> // <d:prop> // <d:getlastmodified>Tue, 19 Dec 2017 22:02:36 GMT</d:getlastmodified> // <d:getcontentlength>4143665</d:getcontentlength> // <d:resourcetype/> // <d:getetag>"048d7be4437ff7deeae94db50ff3e209"</d:getetag> // <d:getcontenttype>application/pdf</d:getcontenttype> // </d:prop> // <d:status>HTTP/1.1 200 OK</d:status> // </d:propstat> // <d:propstat> // <d:prop> // <d:quota-used-bytes/> // <d:quota-available-bytes/> // </d:prop> // <d:status>HTTP/1.1 404 Not Found</d:status> // </d:propstat> // // </d:response> // // So we elide the array of <d:propstat> and within that the array of // <d:prop> into one struct. // // Note that status collects all the status values for which we just // check the first is OK. 
type Prop struct { Status []string `xml:"DAV: status"` Name string `xml:"DAV: prop>displayname,omitempty"` Type *xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"` IsCollection *string `xml:"DAV: prop>iscollection,omitempty"` // this is a Microsoft extension see #2716 Size int64 `xml:"DAV: prop>getcontentlength,omitempty"` Modified Time `xml:"DAV: prop>getlastmodified,omitempty"` Checksums []string `xml:"prop>checksums>checksum,omitempty"` Permissions string `xml:"prop>permissions,omitempty"` MESha1Hex *string `xml:"ME: prop>sha1hex,omitempty"` // Fastmail-specific sha1 checksum } // Parse a status of the form "HTTP/1.1 200 OK" or "HTTP/1.1 200" var parseStatus = regexp.MustCompile(`^HTTP/[0-9.]+\s+(\d+)`) // Code extracts the status code from the first status func (p *Prop) Code() int { if len(p.Status) == 0 { return -1 } match := parseStatus.FindStringSubmatch(p.Status[0]) if len(match) < 2 { return 0 } code, err := strconv.Atoi(match[1]) if err != nil { return 0 } return code } // StatusOK examines the Status and returns an OK flag func (p *Prop) StatusOK() bool { // Fetch status code as int c := p.Code() // Assume OK if no statuses received if c == -1 { return true } if c == 0 { return false } if c >= 200 && c < 300 { return true } return false } // Hashes returns a map of all checksums - may be nil func (p *Prop) Hashes() (hashes map[hash.Type]string) { if len(p.Checksums) > 0 { hashes = make(map[hash.Type]string) for _, checksums := range p.Checksums { checksums = strings.ToLower(checksums) for checksum := range strings.SplitSeq(checksums, " ") { switch { case strings.HasPrefix(checksum, "sha1:"): hashes[hash.SHA1] = checksum[5:] case strings.HasPrefix(checksum, "md5:"): hashes[hash.MD5] = checksum[4:] } } } return hashes } else if p.MESha1Hex != nil { hashes = make(map[hash.Type]string) hashes[hash.SHA1] = *p.MESha1Hex return hashes } return nil } // PropValue is a tagged name and value type PropValue struct { XMLName xml.Name `xml:""` Value string 
`xml:",chardata"` } // Error is used to describe webdav errors // // <d:error xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns"> // // <s:exception>Sabre\DAV\Exception\NotFound</s:exception> // <s:message>File with name Photo could not be located</s:message> // // </d:error> type Error struct { Exception string `xml:"exception,omitempty"` Message string `xml:"message,omitempty"` Status string StatusCode int } // Error returns a string for the error and satisfies the error interface func (e *Error) Error() string { var out []string if e.Message != "" { out = append(out, e.Message) } if e.Exception != "" { out = append(out, e.Exception) } if e.Status != "" { out = append(out, e.Status) } if len(out) == 0 { return "Webdav Error" } return strings.Join(out, ": ") } // Time represents date and time information for the // webdav API marshalling to and from timeFormat type Time time.Time // MarshalXML turns a Time into XML func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error { timeString := (*time.Time)(t).Format(timeFormat) return e.EncodeElement(timeString, start) } // Possible time formats to parse the time with var timeFormats = []string{ timeFormat, // Wed, 27 Sep 2017 14:28:34 GMT (as per RFC) time.RFC1123Z, // Fri, 05 Jan 2018 14:14:38 +0000 (as used by mydrive.ch) time.UnixDate, // Wed May 17 15:31:58 UTC 2017 (as used in an internal server) noZerosRFC1123, // Fri, 7 Sep 2018 08:49:58 GMT (as used by server in #2574) time.RFC3339, // Wed, 31 Oct 2018 13:57:11 CET (as used by komfortcloud.de) } var oneTimeError sync.Once // UnmarshalXML turns XML into a Time func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { var v string err := d.DecodeElement(&v, &start) if err != nil { return err } // If time is missing then return the epoch if v == "" { *t = Time(time.Unix(0, 0)) return nil } // Parse the time format in multiple possible ways var newT time.Time for _, timeFormat := range timeFormats { newT, err = time.Parse(timeFormat, v) 
if err == nil { *t = Time(newT) break } } if err != nil { oneTimeError.Do(func() { fs.Errorf(nil, "Failed to parse time %q - using the epoch", v) }) // Return the epoch instead *t = Time(time.Unix(0, 0)) // ignore error err = nil } return err } // Quota is used to read the bytes used and available // // <d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns"> // // <d:response> // <d:href>/remote.php/webdav/</d:href> // <d:propstat> // <d:prop> // <d:quota-available-bytes>-3</d:quota-available-bytes> // <d:quota-used-bytes>376461895</d:quota-used-bytes> // </d:prop> // <d:status>HTTP/1.1 200 OK</d:status> // </d:propstat> // </d:response> // // </d:multistatus> type Quota struct { Available string `xml:"DAV: response>propstat>prop>quota-available-bytes"` Used string `xml:"DAV: response>propstat>prop>quota-used-bytes"` }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/mega/mega_test.go
backend/mega/mega_test.go
// Test Mega filesystem interface package mega_test import ( "testing" "github.com/rclone/rclone/backend/mega" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestMega:", NilObject: (*mega.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/mega/mega.go
backend/mega/mega.go
// Package mega provides an interface to the Mega // object storage system. package mega /* Open questions * Does mega support a content hash - what exactly are the mega hashes? * Can mega support setting modification times? Improvements: * Uploads could be done in parallel * Downloads would be more efficient done in one go * Uploads would be more efficient with bigger chunks * Looks like mega can support server-side copy, but it isn't implemented in go-mega * Upload can set modtime... - set as int64_t - can set ctime and mtime? */ import ( "context" "crypto/tls" "encoding/base64" "errors" "fmt" "io" "net/http" "path" "slices" "strings" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/readers" mega "github.com/t3rm1n4l/go-mega" ) const ( minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second eventWaitTime = 500 * time.Millisecond decayConstant = 2 // bigger for slower decay, exponential sessionIDConfigKey = "session_id" masterKeyConfigKey = "master_key" ) var ( megaCacheMu sync.Mutex // mutex for the below megaCache = map[string]*mega.Mega{} // cache logged in Mega's by user ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "mega", Description: "Mega", NewFs: NewFs, Options: []fs.Option{{ Name: "user", Help: "User name.", Required: true, Sensitive: true, }, { Name: "pass", Help: "Password.", Required: true, IsPassword: true, }, { Name: "2fa", Help: `The 2FA code of your MEGA account if the account is set up with one`, Required: false, }, { Name: sessionIDConfigKey, Help: "Session (internal use only)", Required: false, Advanced: true, Sensitive: true, Hide: 
fs.OptionHideBoth, }, { Name: masterKeyConfigKey, Help: "Master key (internal use only)", Required: false, Advanced: true, Sensitive: true, Hide: fs.OptionHideBoth, }, { Name: "debug", Help: `Output more debug from Mega. If this flag is set (along with -vv) it will print further debugging information from the mega backend.`, Default: false, Advanced: true, }, { Name: "hard_delete", Help: `Delete files permanently rather than putting them into the trash. Normally the mega backend will put all deletions into the trash rather than permanently deleting them. If you specify this then rclone will permanently delete objects instead.`, Default: false, Advanced: true, }, { Name: "use_https", Help: `Use HTTPS for transfers. MEGA uses plain text HTTP connections by default. Some ISPs throttle HTTP connections, this causes transfers to become very slow. Enabling this will force MEGA to use HTTPS for all transfers. HTTPS is normally not necessary since all data is already encrypted anyway. Enabling it will increase CPU usage and add network overhead.`, Default: false, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // Encode invalid UTF-8 bytes as json doesn't handle them properly. 
Default: (encoder.Base | encoder.EncodeInvalidUtf8), }}, }) } // Options defines the configuration for this backend type Options struct { User string `config:"user"` Pass string `config:"pass"` TwoFA string `config:"2fa"` SessionID string `config:"session_id"` MasterKey string `config:"master_key"` Debug bool `config:"debug"` HardDelete bool `config:"hard_delete"` UseHTTPS bool `config:"use_https"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote mega type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed config options features *fs.Features // optional features srv *mega.Mega // the connection to the server pacer *fs.Pacer // pacer for API calls rootNodeMu sync.Mutex // mutex for _rootNode _rootNode *mega.Node // root node - call findRoot to use this mkdirMu sync.Mutex // used to serialize calls to mkdir / rmdir } // Object describes a mega object // // Will definitely have info but maybe not meta. // // Normally rclone would just store an ID here but go-mega and mega.nz // expect you to build an entire tree of all the objects in memory. // In this case we just store a pointer to the object. 
type Object struct { fs *Fs // what this object is part of remote string // The remote path info *mega.Node // pointer to the mega node } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("mega root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // parsePath parses a mega 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // shouldRetry returns a boolean as to whether this err deserves to be // retried. It returns the err as a convenience func shouldRetry(ctx context.Context, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } // Let the mega library handle the low level retries return false, err } // readMetaDataForPath reads the metadata from the path func (f *Fs) readMetaDataForPath(ctx context.Context, remote string) (info *mega.Node, err error) { rootNode, err := f.findRoot(ctx, false) if err != nil { return nil, err } return f.findObject(rootNode, remote) } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } if opt.Pass != "" { var err error opt.Pass, err = obscure.Reveal(opt.Pass) if err != nil { return nil, fmt.Errorf("couldn't decrypt password: %w", err) } } ci := fs.GetConfig(ctx) // Create Fs root = parsePath(root) f := &Fs{ name: name, root: root, opt: *opt, pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } f.features = 
(&fs.Features{ DuplicateFiles: true, CanHaveEmptyDirectories: true, }).Fill(ctx, f) // cache *mega.Mega on username so we can reuse and share // them between remotes. They are expensive to make as they // contain all the objects and sharing the objects makes the // move code easier as we don't have to worry about mixing // them up between different remotes. megaCacheMu.Lock() defer megaCacheMu.Unlock() srv := megaCache[opt.User] if srv == nil { // srv = mega.New().SetClient(fshttp.NewClient(ctx)) // Workaround for Mega's use of insecure cipher suites which are no longer supported by default since Go 1.22. // Relevant issues: // https://github.com/rclone/rclone/issues/8565 // https://github.com/meganz/webclient/issues/103 clt := fshttp.NewClient(ctx) clt.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) { var ids []uint16 // Read default ciphers for _, cs := range tls.CipherSuites() { ids = append(ids, cs.ID) } // Insecure but Mega uses TLS_RSA_WITH_AES_128_GCM_SHA256 for storage endpoints // (e.g. https://gfs302n114.userstorage.mega.co.nz) as of June 18, 2025. t.TLSClientConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256) }) srv = mega.New().SetClient(clt) srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries srv.SetHTTPS(opt.UseHTTPS) srv.SetLogger(func(format string, v ...any) { fs.Infof("*go-mega*", format, v...) }) if opt.Debug { srv.SetDebugger(func(format string, v ...any) { fs.Debugf("*go-mega*", format, v...) 
}) } if opt.SessionID == "" { fs.Debugf(f, "Using username and password to initialize the Mega API") err := srv.MultiFactorLogin(opt.User, opt.Pass, opt.TwoFA) if err != nil { return nil, fmt.Errorf("couldn't login: %w", err) } megaCache[opt.User] = srv m.Set(sessionIDConfigKey, srv.GetSessionID()) encodedMasterKey := base64.StdEncoding.EncodeToString(srv.GetMasterKey()) m.Set(masterKeyConfigKey, encodedMasterKey) } else { fs.Debugf(f, "Using previously stored session ID and master key to initialize the Mega API") decodedMasterKey, err := base64.StdEncoding.DecodeString(opt.MasterKey) if err != nil { return nil, fmt.Errorf("couldn't decode master key: %w", err) } err = srv.LoginWithKeys(opt.SessionID, decodedMasterKey) if err != nil { fs.Debugf(f, "login with previous auth keys failed: %v", err) } } } f.srv = srv // Find the root node and check if it is a file or not _, err = f.findRoot(ctx, false) switch err { case nil: // root node found and is a directory case fs.ErrorDirNotFound: // root node not found, so can't be a file case fs.ErrorIsFile: // root node is a file so point to parent directory root = path.Dir(root) if root == "." { root = "" } f.root = root return f, err } return f, nil } // splitNodePath splits nodePath into / separated parts, returning nil if it // should refer to the root. // It also encodes the parts into backend-specific encoding func (f *Fs) splitNodePath(nodePath string) (parts []string) { nodePath = path.Clean(nodePath) if nodePath == "." 
|| nodePath == "/" { return nil } nodePath = f.opt.Enc.FromStandardPath(nodePath) return strings.Split(nodePath, "/") } // findNode looks up the node for the path of the name given from the root given // // It returns mega.ENOENT if it wasn't found func (f *Fs) findNode(rootNode *mega.Node, nodePath string) (*mega.Node, error) { parts := f.splitNodePath(nodePath) if parts == nil { return rootNode, nil } nodes, err := f.srv.FS.PathLookup(rootNode, parts) if err != nil { return nil, err } return nodes[len(nodes)-1], nil } // findDir finds the directory rooted from the node passed in func (f *Fs) findDir(rootNode *mega.Node, dir string) (node *mega.Node, err error) { node, err = f.findNode(rootNode, dir) if err == mega.ENOENT { return nil, fs.ErrorDirNotFound } else if err == nil && node.GetType() == mega.FILE { return nil, fs.ErrorIsFile } return node, err } // findObject looks up the node for the object of the name given func (f *Fs) findObject(rootNode *mega.Node, file string) (node *mega.Node, err error) { node, err = f.findNode(rootNode, file) if err == mega.ENOENT { return nil, fs.ErrorObjectNotFound } else if err == nil && node.GetType() != mega.FILE { return nil, fs.ErrorIsDir // all other node types are directories } return node, err } // lookupDir looks up the node for the directory of the name given // // if create is true it tries to create the root directory if not found func (f *Fs) lookupDir(ctx context.Context, dir string) (*mega.Node, error) { rootNode, err := f.findRoot(ctx, false) if err != nil { return nil, err } return f.findDir(rootNode, dir) } // lookupParentDir finds the parent node for the remote passed in func (f *Fs) lookupParentDir(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) { parent, leaf := path.Split(remote) dirNode, err = f.lookupDir(ctx, parent) return dirNode, leaf, err } // mkdir makes the directory and any parent directories for the // directory of the name given func (f *Fs) mkdir(ctx 
context.Context, rootNode *mega.Node, dir string) (node *mega.Node, err error) { f.mkdirMu.Lock() defer f.mkdirMu.Unlock() parts := f.splitNodePath(dir) if parts == nil { return rootNode, nil } var i int // look up until we find a directory which exists for i = 0; i <= len(parts); i++ { var nodes []*mega.Node nodes, err = f.srv.FS.PathLookup(rootNode, parts[:len(parts)-i]) if err == nil { if len(nodes) == 0 { node = rootNode } else { node = nodes[len(nodes)-1] } break } if err != mega.ENOENT { return nil, fmt.Errorf("mkdir lookup failed: %w", err) } } if err != nil { return nil, fmt.Errorf("internal error: mkdir called with nonexistent root node: %w", err) } // i is number of directories to create (may be 0) // node is directory to create them from for _, name := range parts[len(parts)-i:] { // create directory called name in node err = f.pacer.Call(func() (bool, error) { node, err = f.srv.CreateDir(name, node) return shouldRetry(ctx, err) }) if err != nil { return nil, fmt.Errorf("mkdir create node failed: %w", err) } } return node, nil } // mkdirParent creates the parent directory of remote func (f *Fs) mkdirParent(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) { rootNode, err := f.findRoot(ctx, true) if err != nil { return nil, "", err } parent, leaf := path.Split(remote) dirNode, err = f.mkdir(ctx, rootNode, parent) return dirNode, leaf, err } // findRoot looks up the root directory node and returns it. 
// // if create is true it tries to create the root directory if not found func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) { f.rootNodeMu.Lock() defer f.rootNodeMu.Unlock() // Check if we haven't found it already if f._rootNode != nil { return f._rootNode, nil } // Check for preexisting root absRoot := f.srv.FS.GetRoot() node, err := f.findDir(absRoot, f.root) //log.Printf("findRoot findDir %p %v", node, err) if err == nil { f._rootNode = node return node, nil } if !create || err != fs.ErrorDirNotFound { return nil, err } //..not found so create the root directory f._rootNode, err = f.mkdir(ctx, absRoot, f.root) return f._rootNode, err } // clearRoot unsets the root directory func (f *Fs) clearRoot() { f.rootNodeMu.Lock() f._rootNode = nil f.rootNodeMu.Unlock() //log.Printf("cleared root directory") } // CleanUp deletes all files currently in trash func (f *Fs) CleanUp(ctx context.Context) (err error) { trash := f.srv.FS.GetTrash() items := []*mega.Node{} _, err = f.list(ctx, trash, func(item *mega.Node) bool { items = append(items, item) return false }) if err != nil { return fmt.Errorf("CleanUp failed to list items in trash: %w", err) } fs.Infof(f, "Deleting %d items from the trash", len(items)) errors := 0 // similar to f.deleteNode(trash) but with HardDelete as true for _, item := range items { fs.Debugf(f, "Deleting trash %q", f.opt.Enc.ToStandardName(item.GetName())) deleteErr := f.pacer.Call(func() (bool, error) { err := f.srv.Delete(item, true) return shouldRetry(ctx, err) }) if deleteErr != nil { err = deleteErr errors++ } } fs.Infof(f, "Deleted %d items from the trash with %d errors", len(items), errors) return err } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *mega.Node) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } var err error if info != nil { // Set info err = o.setMetaData(info) } else { err = o.readMetaData(ctx) // reads info and meta, returning an error } if err != nil { return nil, err } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(ctx, remote, nil) } // list the objects into the function supplied // // If directories is set it only sends directories // User function to process a File item from listAll // // Should return true to finish processing type listFn func(*mega.Node) bool // Lists the directory required calling the user function on each item found // // If the user fn ever returns true then it early exits with found = true func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, err error) { nodes, err := f.srv.FS.GetChildren(dir) if err != nil { return false, fmt.Errorf("list failed: %w", err) } if slices.ContainsFunc(nodes, fn) { found = true } return } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. 
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { dirNode, err := f.lookupDir(ctx, dir) if err != nil { return nil, err } var iErr error _, err = f.list(ctx, dirNode, func(info *mega.Node) bool { remote := path.Join(dir, f.opt.Enc.ToStandardName(info.GetName())) switch info.GetType() { case mega.FOLDER, mega.ROOT, mega.INBOX, mega.TRASH: d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash()) entries = append(entries, d) case mega.FILE: o, err := f.newObjectWithInfo(ctx, remote, info) if err != nil { iErr = err return true } entries = append(entries, o) } return false }) if err != nil { return nil, err } if iErr != nil { return nil, iErr } return entries, nil } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Returns the dirNode, object, leaf and error. // // Used to create new objects func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) { dirNode, leaf, err = f.mkdirParent(ctx, remote) if err != nil { return nil, nil, leaf, err } // Temporary Object under construction o = &Object{ fs: f, remote: remote, } return o, dirNode, leaf, nil } // Put the object // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned // PutUnchecked uploads the object // // This will create a duplicate if we upload a new file without // checking to see if there is one already - use Put() for that. func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil) switch err { case nil: return existingObj, existingObj.Update(ctx, in, src, options...) 
case fs.ErrorObjectNotFound: // Not found so create it return f.PutUnchecked(ctx, in, src) default: return nil, err } } // PutUnchecked the object // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned // PutUnchecked uploads the object // // This will create a duplicate if we upload a new file without // checking to see if there is one already - use Put() for that. func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() modTime := src.ModTime(ctx) o, _, _, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } return o, o.Update(ctx, in, src, options...) } // Mkdir creates the directory if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { rootNode, err := f.findRoot(ctx, true) if err != nil { return err } _, err = f.mkdir(ctx, rootNode, dir) if err != nil { return fmt.Errorf("Mkdir failed: %w", err) } return nil } // deleteNode removes a file or directory, observing useTrash func (f *Fs) deleteNode(ctx context.Context, node *mega.Node) (err error) { err = f.pacer.Call(func() (bool, error) { err = f.srv.Delete(node, f.opt.HardDelete) return shouldRetry(ctx, err) }) return err } // purgeCheck removes the directory dir, if check is set then it // refuses to do so if it has anything in func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { f.mkdirMu.Lock() defer f.mkdirMu.Unlock() rootNode, err := f.findRoot(ctx, false) if err != nil { return err } dirNode, err := f.findDir(rootNode, dir) if err != nil { return err } if check { children, err := f.srv.FS.GetChildren(dirNode) if err != nil { return fmt.Errorf("purgeCheck GetChildren failed: %w", err) } if len(children) > 0 { return fs.ErrorDirectoryNotEmpty } } waitEvent := f.srv.WaitEventsStart() err = f.deleteNode(ctx, dirNode) if err != nil { return 
fmt.Errorf("delete directory node failed: %w", err) } // Remove the root node if we just deleted it if dirNode == rootNode { f.clearRoot() } f.srv.WaitEvents(waitEvent, eventWaitTime) return nil } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } // Purge deletes all the files in the directory // // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, false) } // move a file or folder (srcFs, srcRemote, info) to (f, dstRemote) // // info will be updates func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) { var ( dstFs = f srcDirNode, dstDirNode *mega.Node srcParent, dstParent string srcLeaf, dstLeaf string ) if dstRemote != "" { // lookup or create the destination parent directory dstDirNode, dstLeaf, err = dstFs.mkdirParent(ctx, dstRemote) } else { // find or create the parent of the root directory absRoot := dstFs.srv.FS.GetRoot() dstParent, dstLeaf = path.Split(dstFs.root) dstDirNode, err = dstFs.mkdir(ctx, absRoot, dstParent) } if err != nil { return fmt.Errorf("server-side move failed to make dst parent dir: %w", err) } if srcRemote != "" { // lookup the existing parent directory srcDirNode, srcLeaf, err = srcFs.lookupParentDir(ctx, srcRemote) } else { // lookup the existing root parent absRoot := srcFs.srv.FS.GetRoot() srcParent, srcLeaf = path.Split(srcFs.root) srcDirNode, err = f.findDir(absRoot, srcParent) } if err != nil { return fmt.Errorf("server-side move failed to lookup src parent dir: %w", err) } // move the object into its new directory if required if srcDirNode != 
dstDirNode && srcDirNode.GetHash() != dstDirNode.GetHash() { //log.Printf("move src %p %q dst %p %q", srcDirNode, srcDirNode.GetName(), dstDirNode, dstDirNode.GetName()) err = f.pacer.Call(func() (bool, error) { err = f.srv.Move(info, dstDirNode) return shouldRetry(ctx, err) }) if err != nil { return fmt.Errorf("server-side move failed: %w", err) } } waitEvent := f.srv.WaitEventsStart() // rename the object if required if srcLeaf != dstLeaf { //log.Printf("rename %q to %q", srcLeaf, dstLeaf) err = f.pacer.Call(func() (bool, error) { err = f.srv.Rename(info, f.opt.Enc.FromStandardName(dstLeaf)) return shouldRetry(ctx, err) }) if err != nil { return fmt.Errorf("server-side rename failed: %w", err) } } f.srv.WaitEvents(waitEvent, eventWaitTime) return nil } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { dstFs := f //log.Printf("Move %q -> %q", src.Remote(), remote) srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } // Do the move err := f.move(ctx, remote, srcObj.fs, srcObj.remote, srcObj.info) if err != nil { return nil, err } // Create a destination object dstObj := &Object{ fs: dstFs, remote: remote, info: srcObj.info, } return dstObj, nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { dstFs := f srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } // find the source info, err := srcFs.lookupDir(ctx, srcRemote) if err != nil { return err } // check the destination doesn't exist _, err = dstFs.lookupDir(ctx, dstRemote) if err == nil { return fs.ErrorDirExists } else if err != fs.ErrorDirNotFound { return fmt.Errorf("DirMove error while checking dest directory: %w", err) } // Do the move err = f.move(ctx, dstRemote, srcFs, srcRemote, info) if err != nil { return err } // Clear src if it was the root if srcRemote == "" { srcFs.clearRoot() } return nil } // DirCacheFlush an optional interface to flush internal directory cache func (f *Fs) DirCacheFlush() { // f.dirCache.ResetRoot() // FIXME Flush the mega somehow? } // Hashes returns the supported hash sets. func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } // PublicLink generates a public link to the remote path (usually readable by anyone) func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) { root, err := f.findRoot(ctx, false) if err != nil { return "", fmt.Errorf("PublicLink failed to find root node: %w", err) } node, err := f.findNode(root, remote) if err != nil { return "", fmt.Errorf("PublicLink failed to find path: %w", err) } link, err = f.srv.Link(node, true) if err != nil { return "", fmt.Errorf("PublicLink failed to create link: %w", err) } return link, nil } // MergeDirs merges the contents of all the directories passed // in into the first one and rmdirs the other directories. 
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { if len(dirs) < 2 { return nil } // find dst directory dstDir := dirs[0] dstDirNode := f.srv.FS.HashLookup(dstDir.ID()) if dstDirNode == nil { return fmt.Errorf("MergeDirs failed to find node for: %v", dstDir) } for _, srcDir := range dirs[1:] { // find src directory srcDirNode := f.srv.FS.HashLookup(srcDir.ID()) if srcDirNode == nil { return fmt.Errorf("MergeDirs failed to find node for: %v", srcDir) } // list the objects infos := []*mega.Node{} _, err := f.list(ctx, srcDirNode, func(info *mega.Node) bool { infos = append(infos, info) return false }) if err != nil { return fmt.Errorf("MergeDirs list failed on %v: %w", srcDir, err) } // move them into place for _, info := range infos { fs.Infof(srcDir, "merging %q", f.opt.Enc.ToStandardName(info.GetName())) err = f.pacer.Call(func() (bool, error) { err = f.srv.Move(info, dstDirNode) return shouldRetry(ctx, err) }) if err != nil { return fmt.Errorf("MergeDirs move failed on %q in %v: %w", f.opt.Enc.ToStandardName(info.GetName()), srcDir, err) } } // rmdir (into trash) the now empty source directory fs.Infof(srcDir, "removing empty directory") err = f.deleteNode(ctx, srcDirNode) if err != nil { return fmt.Errorf("MergeDirs move failed to rmdir %q: %w", srcDir, err) } } return nil } // About gets quota information func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { var q mega.QuotaResp var err error err = f.pacer.Call(func() (bool, error) { q, err = f.srv.GetQuota() return shouldRetry(ctx, err) }) if err != nil { return nil, fmt.Errorf("failed to get Mega Quota: %w", err) } usage := &fs.Usage{ Total: fs.NewUsageValue(q.Mstrg), // quota of bytes that can be used Used: fs.NewUsageValue(q.Cstrg), // bytes in use Free: fs.NewUsageValue(q.Mstrg - q.Cstrg), // bytes which can be uploaded before reaching the quota } return usage, nil } // ------------------------------------------------------------ // Fs returns the parent Fs func (o *Object) 
Fs() fs.Info { return o.fs } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Hash returns the hashes of an object func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { return "", hash.ErrUnsupported } // Size returns the size of an object in bytes func (o *Object) Size() int64 { return o.info.GetSize() } // setMetaData sets the metadata from info func (o *Object) setMetaData(info *mega.Node) (err error) { if info.GetType() != mega.FILE { return fs.ErrorIsDir // all other node types are directories } o.info = info return nil } // readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info func (o *Object) readMetaData(ctx context.Context) (err error) { if o.info != nil { return nil } info, err := o.fs.readMetaDataForPath(ctx, o.remote) if err != nil { if err == fs.ErrorDirNotFound { err = fs.ErrorObjectNotFound } return err } return o.setMetaData(info) } // ModTime returns the modification time of the object // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) ModTime(ctx context.Context) time.Time { return o.info.GetTimeStamp() } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { return fs.ErrorCantSetModTime } // Storable returns a boolean showing whether this object storable func (o *Object) Storable() bool { return true } // openObject represents a download in progress type openObject struct { ctx context.Context mu sync.Mutex o *Object d *mega.Download id int skip int64 chunk []byte closed bool } // get the next chunk func (oo *openObject) getChunk(ctx context.Context) (err error) { if oo.id >= oo.d.Chunks() { return io.EOF } var chunk []byte err = oo.o.fs.pacer.Call(func() (bool, error) { 
chunk, err = oo.d.DownloadChunk(oo.id) return shouldRetry(ctx, err) }) if err != nil { return err } oo.id++ oo.chunk = chunk return nil } // Read reads up to len(p) bytes into p. func (oo *openObject) Read(p []byte) (n int, err error) { oo.mu.Lock() defer oo.mu.Unlock() if oo.closed { return 0, errors.New("read on closed file") } // Skip data at the start if requested for oo.skip > 0 { _, size, err := oo.d.ChunkLocation(oo.id) if err != nil { return 0, err } if oo.skip < int64(size) { break } oo.id++ oo.skip -= int64(size) } if len(oo.chunk) == 0 { err = oo.getChunk(oo.ctx) if err != nil { return 0, err } if oo.skip > 0 { oo.chunk = oo.chunk[oo.skip:] oo.skip = 0 } } n = copy(p, oo.chunk) oo.chunk = oo.chunk[n:] return n, nil } // Close closed the file - MAC errors are reported here func (oo *openObject) Close() (err error) { oo.mu.Lock() defer oo.mu.Unlock() if oo.closed { return nil } err = oo.o.fs.pacer.Call(func() (bool, error) { err = oo.d.Finish() return shouldRetry(oo.ctx, err) }) if err != nil { return fmt.Errorf("failed to finish download: %w", err) } oo.closed = true return nil } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { var offset, limit int64 = 0, -1 for _, option := range options { switch x := option.(type) { case *fs.SeekOption: offset = x.Offset case *fs.RangeOption:
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/jottacloud/jottacloud_internal_test.go
backend/jottacloud/jottacloud_internal_test.go
package jottacloud import ( "context" "crypto/md5" "fmt" "io" "testing" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/readers" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestReadMD5(t *testing.T) { // Check readMD5 for different size and threshold for _, size := range []int64{0, 1024, 10 * 1024, 100 * 1024} { t.Run(fmt.Sprintf("%d", size), func(t *testing.T) { hasher := md5.New() n, err := io.Copy(hasher, readers.NewPatternReader(size)) require.NoError(t, err) assert.Equal(t, n, size) wantMD5 := fmt.Sprintf("%x", hasher.Sum(nil)) for _, threshold := range []int64{512, 1024, 10 * 1024, 20 * 1024} { t.Run(fmt.Sprintf("%d", threshold), func(t *testing.T) { in := readers.NewPatternReader(size) gotMD5, out, cleanup, err := readMD5(in, size, threshold) defer cleanup() require.NoError(t, err) assert.Equal(t, wantMD5, gotMD5) // check md5hash of out hasher := md5.New() n, err := io.Copy(hasher, out) require.NoError(t, err) assert.Equal(t, n, size) outMD5 := fmt.Sprintf("%x", hasher.Sum(nil)) assert.Equal(t, wantMD5, outMD5) }) } }) } } func (f *Fs) InternalTestMetadata(t *testing.T) { ctx := context.Background() contents := random.String(1000) item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) utime := time.Now() metadata := fs.Metadata{ "btime": "2009-05-06T04:05:06.499999999Z", "mtime": "2010-06-07T08:09:07.599999999Z", //"utime" - read-only //"content-type" - read-only } obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, false, contents, true, "text/html", metadata) defer func() { assert.NoError(t, obj.Remove(ctx)) }() o := obj.(*Object) gotMetadata, err := o.Metadata(ctx) require.NoError(t, err) for k, v := range metadata { got := gotMetadata[k] switch k { case "btime": assert.True(t, fstest.Time(v).Truncate(f.Precision()).Equal(fstest.Time(got)), 
fmt.Sprintf("btime not equal want %v got %v", v, got)) case "mtime": assert.True(t, fstest.Time(v).Truncate(f.Precision()).Equal(fstest.Time(got)), fmt.Sprintf("btime not equal want %v got %v", v, got)) case "utime": gotUtime := fstest.Time(got) dt := gotUtime.Sub(utime) assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("utime more than 1 minute out want %v got %v delta %v", utime, gotUtime, dt)) assert.True(t, fstest.Time(v).Equal(fstest.Time(got))) case "content-type": assert.True(t, o.MimeType(ctx) == got) default: assert.Equal(t, v, got, k) } } } func (f *Fs) InternalTest(t *testing.T) { t.Run("Metadata", f.InternalTestMetadata) } var _ fstests.InternalTester = (*Fs)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/jottacloud/jottacloud_test.go
backend/jottacloud/jottacloud_test.go
// Test Box filesystem interface package jottacloud_test import ( "testing" "github.com/rclone/rclone/backend/jottacloud" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestJottacloud:", NilObject: (*jottacloud.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/jottacloud/jottacloud.go
backend/jottacloud/jottacloud.go
// Package jottacloud provides an interface to the Jottacloud storage system. package jottacloud import ( "bytes" "context" "crypto/md5" "encoding/base64" "encoding/hex" "encoding/json" "encoding/xml" "errors" "fmt" "io" "math/rand" "net/http" "net/url" "os" "path" "slices" "strconv" "strings" "time" "github.com/rclone/rclone/backend/jottacloud/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" "golang.org/x/oauth2" ) // Globals const ( minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential defaultDevice = "Jotta" defaultMountpoint = "Archive" jfsURL = "https://jfs.jottacloud.com/jfs/" apiURL = "https://api.jottacloud.com/" wwwURL = "https://www.jottacloud.com/" cachePrefix = "rclone-jcmd5-" configDevice = "device" configMountpoint = "mountpoint" configTokenURL = "tokenURL" configClientID = "client_id" configClientSecret = "client_secret" configUsername = "username" configVersion = 1 defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token" defaultClientID = "jottacli" // Identified as "Jottacloud CLI" in "My logged in devices" legacyTokenURL = "https://api.jottacloud.com/auth/v1/token" legacyRegisterURL = "https://api.jottacloud.com/auth/v1/register" legacyClientID = "nibfk8biu12ju7hpqomr8b1e40" legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2" legacyConfigVersion = 0 ) type service struct { key string name string domain string 
realm string clientID string scopes []string } // The list of services and their settings for supporting traditional OAuth. // Please keep these in alphabetical order, but with jottacloud first. func getServices() []service { return []service{ {"jottacloud", "Jottacloud", "id.jottacloud.com", "jottacloud", "desktop", []string{"openid", "jotta-default", "offline_access"}}, // Chose client id "desktop" here, will be identified as "Jottacloud for Desktop" in "My logged in devices", but could have used "jottacli" here as well. {"elgiganten_dk", "Elgiganten Cloud (Denmark)", "cloud.elgiganten.dk", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}}, {"elgiganten_se", "Elgiganten Cloud (Sweden)", "cloud.elgiganten.se", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}}, {"elkjop", "Elkjøp Cloud (Norway)", "cloud.elkjop.no", "elkjop", "desktop", []string{"openid", "jotta-default", "offline_access"}}, {"elko", "ELKO Cloud (Iceland)", "cloud.elko.is", "elko", "desktop", []string{"openid", "jotta-default", "offline_access"}}, {"gigantti", "Gigantti Cloud (Finland)", "cloud.gigantti.fi", "gigantti", "desktop", []string{"openid", "jotta-default", "offline_access"}}, {"letsgo", "Let's Go Cloud (Germany)", "letsgo.jotta.cloud", "letsgo", "desktop-win", []string{"openid", "offline_access"}}, {"mediamarkt", "MediaMarkt Cloud (Multiregional)", "mediamarkt.jottacloud.com", "mediamarkt", "desktop", []string{"openid", "jotta-default", "offline_access"}}, {"onlime", "Onlime (Denmark)", "cloud-auth.onlime.dk", "onlime_wl", "desktop", []string{"openid", "jotta-default", "offline_access"}}, {"tele2", "Tele2 Cloud (Sweden)", "mittcloud-auth.tele2.se", "comhem", "desktop", []string{"openid", "jotta-default", "offline_access"}}, {"telia_no", "Telia Sky (Norway)", "sky-auth.telia.no", "get", "desktop", []string{"openid", "jotta-default", "offline_access"}}, {"telia_se", "Telia Cloud (Sweden)", "cloud-auth.telia.se", "telia_se", 
"desktop", []string{"openid", "jotta-default", "offline_access"}}, } } // Register with Fs func init() { // needs to be done early so we can use oauth during config fs.Register(&fs.RegInfo{ Name: "jottacloud", Description: "Jottacloud", NewFs: NewFs, Config: Config, MetadataInfo: &fs.MetadataInfo{ Help: `Jottacloud has limited support for metadata, currently an extended set of timestamps.`, System: map[string]fs.MetadataHelp{ "btime": { Help: "Time of file birth (creation), read from rclone metadata", Type: "RFC 3339", Example: "2006-01-02T15:04:05.999999999Z07:00", }, "mtime": { Help: "Time of last modification, read from rclone metadata", Type: "RFC 3339", Example: "2006-01-02T15:04:05.999999999Z07:00", }, "utime": { Help: "Time of last upload, when current revision was created, generated by backend", Type: "RFC 3339", Example: "2006-01-02T15:04:05.999999999Z07:00", ReadOnly: true, }, "content-type": { Help: "MIME type, also known as media type", Type: "string", Example: "text/plain", ReadOnly: true, }, }, }, Options: append(oauthutil.SharedOptions, []fs.Option{{ Name: "md5_memory_limit", Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.", Default: fs.SizeSuffix(10 * 1024 * 1024), Advanced: true, }, { Name: "trashed_only", Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.", Default: false, Advanced: true, }, { Name: "hard_delete", Help: "Delete files permanently rather than putting them into the trash.", Default: false, Advanced: true, }, { Name: "upload_resume_limit", Help: "Files bigger than this can be resumed if the upload fail's.", Default: fs.SizeSuffix(10 * 1024 * 1024), Advanced: true, }, { Name: "no_versions", Help: "Avoid server side versioning by deleting files and recreating files instead of overwriting them.", Default: false, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // Encode invalid UTF-8 
bytes as xml doesn't handle them properly. // // Also: '*', '/', ':', '<', '>', '?', '\"', '\x00', '|' Default: (encoder.Display | encoder.EncodeWin | // :?"*<>| encoder.EncodeInvalidUtf8), }}...), }) } // Config runs the backend configuration protocol func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) { switch conf.State { case "": if isAuthorize, _ := m.Get(config.ConfigAuthorize); isAuthorize == "true" { return nil, errors.New("not supported by this backend") } return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Type of authentication.`, []fs.OptionExample{{ Value: "standard", Help: `Standard authentication. This is primarily supported by the official service, but may also be supported by some white-label services. It is designed for command-line applications, and you will be asked to enter a single-use personal login token which you must manually generate from the account security settings in the web interface of your service.`, }, { Value: "traditional", Help: `Traditional authentication. This is supported by the official service and all white-label services that rclone knows about. You will be asked which service to connect to. It has a limitation of only a single active authentication at a time. You need to be on, or have access to, a machine with an internet-connected web browser.`, }, { Value: "legacy", Help: `Legacy authentication. This is no longer supported by any known services and not recommended used. You will be asked for your account's username and password.`, }}) case "auth_type_done": // Jump to next state according to config chosen return fs.ConfigGoto(conf.Result) case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication m.Set("configVersion", fmt.Sprint(configVersion)) return fs.ConfigInput("standard_token", "config_login_token", `Personal login token. 
Generate it from the account security settings in the web interface of your service, for the official service on https://www.jottacloud.com/web/secure.`) case "standard_token": loginToken := conf.Result m.Set(configClientID, defaultClientID) m.Set(configClientSecret, "") srv := rest.NewClient(fshttp.NewClient(ctx)) token, tokenEndpoint, err := doTokenAuth(ctx, srv, loginToken) if err != nil { return nil, fmt.Errorf("failed to get oauth token: %w", err) } m.Set(configTokenURL, tokenEndpoint) err = oauthutil.PutToken(name, m, &token, true) if err != nil { return nil, fmt.Errorf("error while saving token: %w", err) } return fs.ConfigGoto("choose_device") case "traditional": services := getServices() options := make([]fs.OptionExample, 0, len(services)) for _, service := range services { options = append(options, fs.OptionExample{ Value: service.key, Help: service.name, }) } return fs.ConfigChooseExclusiveFixed("traditional_type", "config_traditional", "White-label service. This decides the domain name to connect to and\nthe authentication configuration to use.", options) case "traditional_type": services := getServices() i := slices.IndexFunc(services, func(s service) bool { return s.key == conf.Result }) if i == -1 { return nil, fmt.Errorf("unexpected service %q", conf.Result) } service := services[i] opts := rest.Opts{ Method: "GET", RootURL: "https://" + service.domain + "/auth/realms/" + service.realm + "/.well-known/openid-configuration", } var wellKnown api.WellKnown srv := rest.NewClient(fshttp.NewClient(ctx)) _, err := srv.CallJSON(ctx, &opts, nil, &wellKnown) if err != nil { return nil, fmt.Errorf("failed to get authentication provider configuration: %w", err) } m.Set("configVersion", fmt.Sprint(configVersion)) m.Set(configClientID, service.clientID) m.Set(configTokenURL, wellKnown.TokenEndpoint) return oauthutil.ConfigOut("choose_device", &oauthutil.Options{ OAuth2Config: &oauthutil.Config{ AuthURL: wellKnown.AuthorizationEndpoint, TokenURL: 
wellKnown.TokenEndpoint, ClientID: service.clientID, Scopes: service.scopes, RedirectURL: oauthutil.RedirectLocalhostURL, }, }) case "legacy": // configure a jottacloud backend using legacy authentication m.Set("configVersion", fmt.Sprint(legacyConfigVersion)) return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key? Rclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.`) case "legacy_api": srv := rest.NewClient(fshttp.NewClient(ctx)) if conf.Result == "true" { deviceRegistration, err := registerDevice(ctx, srv) if err != nil { return nil, fmt.Errorf("failed to register device: %w", err) } m.Set(configClientID, deviceRegistration.ClientID) m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret)) fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret) } return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address) of your account.") case "legacy_username": m.Set(configUsername, conf.Result) return fs.ConfigPassword("legacy_password", "config_password", "Password of your account. 
This is only used in setup, it will not be stored.") case "legacy_password": m.Set("password", conf.Result) m.Set("auth_code", "") return fs.ConfigGoto("legacy_do_auth") case "legacy_auth_code": authCode := strings.ReplaceAll(conf.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number m.Set("auth_code", authCode) return fs.ConfigGoto("legacy_do_auth") case "legacy_do_auth": username, _ := m.Get(configUsername) password, _ := m.Get("password") password = obscure.MustReveal(password) authCode, _ := m.Get("auth_code") srv := rest.NewClient(fshttp.NewClient(ctx)) clientID, _ := m.Get(configClientID) if clientID == "" { clientID = legacyClientID } clientSecret, _ := m.Get(configClientSecret) if clientSecret == "" { clientSecret = legacyEncryptedClientSecret } oauthConfig := &oauth2.Config{ Endpoint: oauth2.Endpoint{ AuthURL: legacyTokenURL, }, ClientID: clientID, ClientSecret: obscure.MustReveal(clientSecret), } token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode) if err == errAuthCodeRequired { return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification code.\nThis account uses 2 factor authentication you will receive a verification code via SMS.") } m.Set("password", "") m.Set("auth_code", "") if err != nil { return nil, fmt.Errorf("failed to get oauth token: %w", err) } err = oauthutil.PutToken(name, m, &token, true) if err != nil { return nil, fmt.Errorf("error while saving token: %w", err) } return fs.ConfigGoto("choose_device") case "choose_device": return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint? Choosing no, the default, will let you access the storage used for the archive section of the official Jottacloud client. 
If you instead want to access the sync or the backup section, for example, you must choose yes.`) case "choose_device_query": if conf.Result != "true" { m.Set(configDevice, "") m.Set(configMountpoint, "") return fs.ConfigGoto("end") } oAuthClient, _, err := getOAuthClient(ctx, name, m) if err != nil { return nil, err } jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL) apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL) cust, err := getCustomerInfo(ctx, apiSrv) if err != nil { return nil, err } acc, err := getDriveInfo(ctx, jfsSrv, cust.Username) if err != nil { return nil, err } deviceNames := make([]string, len(acc.Devices)) for i, dev := range acc.Devices { if i > 0 && dev.Name == defaultDevice { // Insert the special Jotta device as first entry, making it the default choice. copy(deviceNames[1:i+1], deviceNames[0:i]) deviceNames[0] = dev.Name } else { deviceNames[i] = dev.Name } } help := fmt.Sprintf(`The device to use. In standard setup the built-in %s device is used, which contains predefined mountpoints for archive, sync etc. All other devices are treated as backup devices by the official Jottacloud client. 
You may create a new by entering a unique name.`, defaultDevice) return fs.ConfigChoose("choose_device_result", "config_device", help, len(deviceNames), func(i int) (string, string) { return deviceNames[i], "" }) case "choose_device_result": device := conf.Result oAuthClient, _, err := getOAuthClient(ctx, name, m) if err != nil { return nil, err } jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL) apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL) cust, err := getCustomerInfo(ctx, apiSrv) if err != nil { return nil, err } acc, err := getDriveInfo(ctx, jfsSrv, cust.Username) if err != nil { return nil, err } isNew := true for _, dev := range acc.Devices { if strings.EqualFold(dev.Name, device) { // If device name exists with different casing we prefer the existing (not sure if and how the api handles the opposite) device = dev.Name // Prefer same casing as existing, e.g. if user entered "jotta" we use the standard casing "Jotta" instead isNew = false break } } var dev *api.JottaDevice if isNew { fs.Debugf(nil, "Creating new device: %s", device) dev, err = createDevice(ctx, jfsSrv, path.Join(cust.Username, device)) if err != nil { return nil, err } } m.Set(configDevice, device) if !isNew { dev, err = getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device)) if err != nil { return nil, err } } var help string if device == defaultDevice { // With built-in Jotta device the mountpoint choice is exclusive, // we do not want to risk any problems by creating new mountpoints on it. help = fmt.Sprintf(`The mountpoint to use on the built-in device %s. The standard setup is to use the %s mountpoint. 
Most other mountpoints have very limited support in rclone and should generally be avoided.`, defaultDevice, defaultMountpoint) return fs.ConfigChooseExclusive("choose_device_mountpoint", "config_mountpoint", help, len(dev.MountPoints), func(i int) (string, string) { return dev.MountPoints[i].Name, "" }) } help = fmt.Sprintf(`The mountpoint to use on the non-standard device %s. You may create a new by entering a unique name.`, device) return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", help, len(dev.MountPoints), func(i int) (string, string) { return dev.MountPoints[i].Name, "" }) case "choose_device_mountpoint": mountpoint := conf.Result oAuthClient, _, err := getOAuthClient(ctx, name, m) if err != nil { return nil, err } jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL) apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL) cust, err := getCustomerInfo(ctx, apiSrv) if err != nil { return nil, err } device, _ := m.Get(configDevice) dev, err := getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device)) if err != nil { return nil, err } isNew := true for _, mnt := range dev.MountPoints { if strings.EqualFold(mnt.Name, mountpoint) { mountpoint = mnt.Name isNew = false break } } if isNew { if device == defaultDevice { return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device", defaultDevice) } fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint) _, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint)) if err != nil { return nil, err } } m.Set(configMountpoint, mountpoint) return fs.ConfigGoto("end") case "end": // All the config flows end up here in case we need to carry on with something return nil, nil } return nil, fmt.Errorf("unknown state %q", conf.State) } // Options defines the configuration for this backend type Options struct { Device string `config:"device"` Mountpoint string `config:"mountpoint"` MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"` TrashedOnly bool 
`config:"trashed_only"` HardDelete bool `config:"hard_delete"` NoVersions bool `config:"no_versions"` UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote jottacloud type Fs struct { name string root string user string opt Options features *fs.Features fileEndpoint string allocateEndpoint string jfsSrv *rest.Client apiSrv *rest.Client pacer *fs.Pacer tokenRenewer *oauthutil.Renew // renew the token on expiry } // Object describes a jottacloud object // // Will definitely have info but maybe not meta type Object struct { fs *Fs remote string hasMetaData bool size int64 createTime time.Time modTime time.Time updateTime time.Time md5 string mimeType string } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("jottacloud root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // joinPath joins two path/url elements // // Does not perform clean on the result like path.Join does, // which breaks urls by changing prefix "https://" into "https:/". func joinPath(base string, rel string) string { if rel == "" { return base } if strings.HasSuffix(base, "/") { return base + strings.TrimPrefix(rel, "/") } if strings.HasPrefix(rel, "/") { return strings.TrimSuffix(base, "/") + rel } return base + "/" + rel } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 
500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // registerDevice register a new device for use with the jottacloud API func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) { // random generator to generate random device names seededRand := rand.New(rand.NewSource(time.Now().UnixNano())) randonDeviceNamePartLength := 21 randomDeviceNamePart := make([]byte, randonDeviceNamePartLength) charset := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" for i := range randomDeviceNamePart { randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))] } randomDeviceName := "rclone-" + string(randomDeviceNamePart) fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName) values := url.Values{} values.Set("device_id", randomDeviceName) opts := rest.Opts{ Method: "POST", RootURL: legacyRegisterURL, ContentType: "application/x-www-form-urlencoded", ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"}, Parameters: values, } var deviceRegistration *api.DeviceRegistrationResponse _, err = srv.CallJSON(ctx, &opts, nil, &deviceRegistration) return deviceRegistration, err } var errAuthCodeRequired = errors.New("auth code required") // doLegacyAuth runs the actual token request for V1 authentication // // Call this first with blank authCode. 
If errAuthCodeRequired is // returned then call it again with an authCode func doLegacyAuth(ctx context.Context, srv *rest.Client, oauthConfig *oauth2.Config, username, password, authCode string) (token oauth2.Token, err error) { // prepare out token request with username and password values := url.Values{} values.Set("grant_type", "PASSWORD") values.Set("password", password) values.Set("username", username) values.Set("client_id", oauthConfig.ClientID) values.Set("client_secret", oauthConfig.ClientSecret) opts := rest.Opts{ Method: "POST", RootURL: oauthConfig.Endpoint.AuthURL, ContentType: "application/x-www-form-urlencoded", Parameters: values, } if authCode != "" { opts.ExtraHeaders = make(map[string]string) opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode } // do the first request var jsonToken api.TokenJSON resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken) if err != nil && authCode == "" { // if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header if resp != nil { if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" { return token, errAuthCodeRequired } } } token.AccessToken = jsonToken.AccessToken token.RefreshToken = jsonToken.RefreshToken token.TokenType = jsonToken.TokenType token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second) return token, err } // doTokenAuth runs the actual token request for V2 authentication func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string) (token oauth2.Token, tokenEndpoint string, err error) { loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64) if err != nil { return token, "", err } // decode login token var loginToken api.LoginToken decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes)) err = decoder.Decode(&loginToken) if err != nil { return token, "", err } // retrieve endpoint urls opts := rest.Opts{ Method: "GET", RootURL: loginToken.WellKnownLink, } 
var wellKnown api.WellKnown _, err = apiSrv.CallJSON(ctx, &opts, nil, &wellKnown) if err != nil { return token, "", err } // prepare out token request with username and password values := url.Values{} values.Set("client_id", defaultClientID) values.Set("grant_type", "password") values.Set("password", loginToken.AuthToken) values.Set("scope", "openid offline_access") values.Set("username", loginToken.Username) values.Encode() opts = rest.Opts{ Method: "POST", RootURL: wellKnown.TokenEndpoint, ContentType: "application/x-www-form-urlencoded", Body: strings.NewReader(values.Encode()), } // do the first request var jsonToken api.TokenJSON _, err = apiSrv.CallJSON(ctx, &opts, nil, &jsonToken) if err != nil { return token, "", err } token.AccessToken = jsonToken.AccessToken token.RefreshToken = jsonToken.RefreshToken token.TokenType = jsonToken.TokenType token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second) return token, wellKnown.TokenEndpoint, err } // getCustomerInfo queries general information about the account func getCustomerInfo(ctx context.Context, apiSrv *rest.Client) (info *api.CustomerInfo, err error) { opts := rest.Opts{ Method: "GET", Path: "account/v1/customer", } _, err = apiSrv.CallJSON(ctx, &opts, nil, &info) if err != nil { return nil, fmt.Errorf("couldn't get customer info: %w", err) } return info, nil } // getDriveInfo queries general information about the account and the available devices and mountpoints. 
func getDriveInfo(ctx context.Context, srv *rest.Client, username string) (info *api.DriveInfo, err error) {
	// The JFS root for an account is simply the internal username,
	// i.e. GET <jfsURL>/<username>.
	opts := rest.Opts{
		Method: "GET",
		Path:   username,
	}
	_, err = srv.CallXML(ctx, &opts, nil, &info)
	if err != nil {
		return nil, fmt.Errorf("couldn't get drive info: %w", err)
	}
	return info, nil
}

// getDeviceInfo queries information about a single jottacloud device,
// including its mountpoints.
//
// path is the unescaped "<username>/<device>" path; it is escaped here.
func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *api.JottaDevice, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   urlPathEscape(path),
	}
	_, err = srv.CallXML(ctx, &opts, nil, &info)
	if err != nil {
		return nil, fmt.Errorf("couldn't get device info: %w", err)
	}
	return info, nil
}

// createDevice makes a device
//
// path is the unescaped "<username>/<device>" path of the device to create.
func createDevice(ctx context.Context, srv *rest.Client, path string) (info *api.JottaDevice, err error) {
	opts := rest.Opts{
		Method:     "POST",
		Path:       urlPathEscape(path),
		Parameters: url.Values{},
	}
	// All rclone-created devices are registered as type WORKSTATION, which is
	// how the official clients mark backup devices.
	opts.Parameters.Set("type", "WORKSTATION")

	_, err = srv.CallXML(ctx, &opts, nil, &info)
	if err != nil {
		return nil, fmt.Errorf("couldn't create device: %w", err)
	}
	return info, nil
}

// createMountPoint makes a mount point
//
// path is the unescaped "<username>/<device>/<mountpoint>" path to create.
func createMountPoint(ctx context.Context, srv *rest.Client, path string) (info *api.JottaMountPoint, err error) {
	opts := rest.Opts{
		Method: "POST",
		Path:   urlPathEscape(path),
	}
	_, err = srv.CallXML(ctx, &opts, nil, &info)
	if err != nil {
		return nil, fmt.Errorf("couldn't create mountpoint: %w", err)
	}
	return info, nil
}

// setEndpoints generates the API endpoints from the configured (or default)
// device and mountpoint.
//
// fileEndpoint is relative to the JFS root, allocateEndpoint to the allocate
// API root. Mutates f.opt to fill in defaults, so must run before the
// endpoints are first used.
func (f *Fs) setEndpoints() {
	if f.opt.Device == "" {
		f.opt.Device = defaultDevice
	}
	if f.opt.Mountpoint == "" {
		f.opt.Mountpoint = defaultMountpoint
	}
	f.fileEndpoint = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
	f.allocateEndpoint = path.Join("/jfs", f.opt.Device, f.opt.Mountpoint)
}

// readMetaDataForPath reads the metadata from the path
//
// Returns fs.ErrorObjectNotFound if the path does not exist, fs.ErrorIsDir if
// it resolves to a folder, and fs.ErrorNotAFile for any other non-file entity.
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.JottaFile, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   f.filePath(path),
	}
	var result api.JottaFile
	var resp *http.Response
	// Wrap the call in the pacer so rate-limit/transient errors are retried.
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(ctx, resp, err)
	})

	if apiErr, ok := err.(*api.Error); ok {
		// does not exist
		if apiErr.StatusCode == http.StatusNotFound {
			return nil, fs.ErrorObjectNotFound
		}
	}

	if err != nil {
		return nil, fmt.Errorf("read metadata failed: %w", err)
	}
	// The same endpoint serves both files and folders; distinguish by the
	// root element name of the XML response.
	if result.XMLName.Local == "folder" {
		return nil, fs.ErrorIsDir
	} else if result.XMLName.Local != "file" {
		return nil, fs.ErrorNotAFile
	}
	return &result, nil
}

// errorHandler parses a non 2xx error response into an error
//
// Falls back to the HTTP status line/code when the body cannot be decoded
// into an api.Error, so the returned error is never empty.
func errorHandler(resp *http.Response) error {
	// Decode error response
	errResponse := new(api.Error)
	err := rest.DecodeXML(resp, &errResponse)
	if err != nil {
		fs.Debugf(nil, "Couldn't decode error response: %v", err)
	}
	if errResponse.Message == "" {
		errResponse.Message = resp.Status
	}
	if errResponse.StatusCode == 0 {
		errResponse.StatusCode = resp.StatusCode
	}
	return errResponse
}

// Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved
func urlPathEscape(in string) string {
	return strings.ReplaceAll(rest.URLPathEscape(in), "+", "%2B")
}

// filePathRaw returns an unescaped file path (f.root, file)
// Optionally made absolute by prefixing with "/", typically required when used
// as request parameter instead of the path (which is relative to some root url).
func (f *Fs) filePathRaw(file string, absolute bool) string { prefix := "" if absolute { prefix = "/" } return path.Join(prefix, f.fileEndpoint, f.opt.Enc.FromStandardPath(path.Join(f.root, file))) } // filePath returns an escaped file path (f.root, file) func (f *Fs) filePath(file string) string { return urlPathEscape(f.filePathRaw(file, false)) } // allocatePathRaw returns an unescaped allocate file path (f.root, file) // Optionally made absolute by prefixing with "/", typically required when used // as request parameter instead of the path (which is relative to some root url). func (f *Fs) allocatePathRaw(file string, absolute bool) string { prefix := "" if absolute { prefix = "/" } return path.Join(prefix, f.allocateEndpoint, f.opt.Enc.FromStandardPath(path.Join(f.root, file))) } // Jottacloud requires the grant_type 'refresh_token' string // to be uppercase and throws a 400 Bad Request if we use the // lower case used by the oauth2 module // // This filter catches all refresh requests, reads the body, // changes the case and then sends it on func grantTypeFilter(req *http.Request) { if legacyTokenURL == req.URL.String() { // read the entire body refreshBody, err := io.ReadAll(req.Body) if err != nil { return } _ = req.Body.Close() // make the refresh token upper case refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1)) // set the new ReadCloser (with a dummy Close()) req.Body = io.NopCloser(bytes.NewReader(refreshBody)) } } func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuthClient *http.Client, ts *oauthutil.TokenSource, err error) { // Check config version var ver int version, ok := m.Get("configVersion") if ok { ver, err = strconv.Atoi(version) if err != nil { return nil, nil, errors.New("failed to parse config version") } ok = (ver == configVersion) || (ver == legacyConfigVersion) } if !ok { return nil, nil, errors.New("outdated config - please reconfigure this 
backend") } baseClient := fshttp.NewClient(ctx) oauthConfig := &oauthutil.Config{ AuthURL: defaultTokenURL, TokenURL: defaultTokenURL, } if ver == configVersion { oauthConfig.ClientID = defaultClientID // if custom endpoints are set use them else stick with defaults if tokenURL, ok := m.Get(configTokenURL); ok { oauthConfig.TokenURL = tokenURL
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/jottacloud/api/types_test.go
backend/jottacloud/api/types_test.go
package api

import (
	"encoding/xml"
	"testing"
	"time"
)

// TestMountpointEmptyModificationTime checks that an empty <modified>
// element in a mountpoint listing unmarshals into the zero time.Time
// instead of producing a parse error.
func TestMountpointEmptyModificationTime(t *testing.T) {
	mountpoint := `
<mountPoint time="2018-08-12-T09:58:24Z" host="dn-157">
  <name xml:space="preserve">Sync</name>
  <path xml:space="preserve">/foo/Jotta</path>
  <abspath xml:space="preserve">/foo/Jotta</abspath>
  <size>0</size>
  <modified></modified>
  <device>Jotta</device>
  <user>foo</user>
  <metadata first="" max="" total="0" num_folders="0" num_files="0"/>
</mountPoint>
`
	var folder JottaFolder
	err := xml.Unmarshal([]byte(mountpoint), &folder)
	if err != nil {
		t.Fatal(err)
	}
	got := time.Time(folder.ModifiedAt)
	if !got.IsZero() {
		t.Errorf("got non-zero time, want zero")
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/jottacloud/api/types.go
backend/jottacloud/api/types.go
// Package api provides types used by the Jottacloud API. package api import ( "encoding/xml" "errors" "fmt" "time" ) const ( // default time format historically used for all request and responses. // Similar to time.RFC3339, but with an extra '-' in front of 'T', // and no ':' separator in timezone offset. Some newer endpoints have // moved to proper time.RFC3339 conformant format instead. jottaTimeFormat = "2006-01-02-T15:04:05Z0700" ) // unmarshalXML turns XML into a Time func unmarshalXMLTime(d *xml.Decoder, start xml.StartElement, timeFormat string) (time.Time, error) { var v string if err := d.DecodeElement(&v, &start); err != nil { return time.Time{}, err } if v == "" { return time.Time{}, nil } newTime, err := time.Parse(timeFormat, v) if err == nil { return newTime, nil } return time.Time{}, err } // JottaTime represents time values in the classic API using a custom RFC3339 like format type JottaTime time.Time // String returns JottaTime string in Jottacloud classic format func (t JottaTime) String() string { return time.Time(t).Format(jottaTimeFormat) } // UnmarshalXML turns XML into a JottaTime func (t *JottaTime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { tm, err := unmarshalXMLTime(d, start, jottaTimeFormat) *t = JottaTime(tm) return err } // MarshalXML turns a JottaTime into XML func (t *JottaTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error { return e.EncodeElement(t.String(), start) } // Rfc3339Time represents time values in the newer APIs using standard RFC3339 format type Rfc3339Time time.Time // String returns Rfc3339Time string in Jottacloud RFC3339 format func (t Rfc3339Time) String() string { return time.Time(t).Format(time.RFC3339) } // UnmarshalXML turns XML into a Rfc3339Time func (t *Rfc3339Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { tm, err := unmarshalXMLTime(d, start, time.RFC3339) *t = Rfc3339Time(tm) return err } // MarshalXML turns a Rfc3339Time into XML func (t *Rfc3339Time) 
MarshalXML(e *xml.Encoder, start xml.StartElement) error { return e.EncodeElement(t.String(), start) } // MarshalJSON turns a Rfc3339Time into JSON func (t *Rfc3339Time) MarshalJSON() ([]byte, error) { return fmt.Appendf(nil, "\"%s\"", t.String()), nil } // LoginToken is struct representing the login token generated in the WebUI type LoginToken struct { Username string `json:"username"` Realm string `json:"realm"` WellKnownLink string `json:"well_known_link"` AuthToken string `json:"auth_token"` } // WellKnown contains some configuration parameters for setting up endpoints type WellKnown struct { Issuer string `json:"issuer"` AuthorizationEndpoint string `json:"authorization_endpoint"` TokenEndpoint string `json:"token_endpoint"` TokenIntrospectionEndpoint string `json:"token_introspection_endpoint"` UserinfoEndpoint string `json:"userinfo_endpoint"` EndSessionEndpoint string `json:"end_session_endpoint"` JwksURI string `json:"jwks_uri"` CheckSessionIframe string `json:"check_session_iframe"` GrantTypesSupported []string `json:"grant_types_supported"` ResponseTypesSupported []string `json:"response_types_supported"` SubjectTypesSupported []string `json:"subject_types_supported"` IDTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported"` UserinfoSigningAlgValuesSupported []string `json:"userinfo_signing_alg_values_supported"` RequestObjectSigningAlgValuesSupported []string `json:"request_object_signing_alg_values_supported"` ResponseNodesSupported []string `json:"response_modes_supported"` RegistrationEndpoint string `json:"registration_endpoint"` TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported"` TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported"` ClaimsSupported []string `json:"claims_supported"` ClaimTypesSupported []string `json:"claim_types_supported"` ClaimsParameterSupported bool `json:"claims_parameter_supported"` ScopesSupported 
[]string `json:"scopes_supported"` RequestParameterSupported bool `json:"request_parameter_supported"` RequestURIParameterSupported bool `json:"request_uri_parameter_supported"` CodeChallengeMethodsSupported []string `json:"code_challenge_methods_supported"` TLSClientCertificateBoundAccessTokens bool `json:"tls_client_certificate_bound_access_tokens"` IntrospectionEndpoint string `json:"introspection_endpoint"` } // TokenJSON is the struct representing the HTTP response from OAuth2 // providers returning a token in JSON form. type TokenJSON struct { AccessToken string `json:"access_token"` ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number RefreshExpiresIn int32 `json:"refresh_expires_in"` RefreshToken string `json:"refresh_token"` TokenType string `json:"token_type"` IDToken string `json:"id_token"` NotBeforePolicy int32 `json:"not-before-policy"` SessionState string `json:"session_state"` Scope string `json:"scope"` } // JSON structures returned by new API // AllocateFileRequest to prepare an upload to Jottacloud type AllocateFileRequest struct { Bytes int64 `json:"bytes"` Created string `json:"created"` Md5 string `json:"md5"` Modified string `json:"modified"` Path string `json:"path"` } // AllocateFileResponse for upload requests type AllocateFileResponse struct { Name string `json:"name"` Path string `json:"path"` State string `json:"state"` UploadID string `json:"upload_id"` UploadURL string `json:"upload_url"` Bytes int64 `json:"bytes"` ResumePos int64 `json:"resume_pos"` } // UploadResponse after an upload type UploadResponse struct { Path string `json:"path"` ContentID string `json:"content_id"` Bytes int64 `json:"bytes"` Md5 string `json:"md5"` Modified int64 `json:"modified"` } // DeviceRegistrationResponse is the response to registering a device type DeviceRegistrationResponse struct { ClientID string `json:"client_id"` ClientSecret string `json:"client_secret"` } // CustomerInfo provides general information 
about the account. Required for finding the correct internal username. type CustomerInfo struct { Username string `json:"username"` Email string `json:"email"` Name string `json:"name"` CountryCode string `json:"country_code"` LanguageCode string `json:"language_code"` CustomerGroupCode string `json:"customer_group_code"` BrandCode string `json:"brand_code"` AccountType string `json:"account_type"` SubscriptionType string `json:"subscription_type"` Usage int64 `json:"usage"` Quota int64 `json:"quota"` BusinessUsage int64 `json:"business_usage"` BusinessQuota int64 `json:"business_quota"` WriteLocked bool `json:"write_locked"` ReadLocked bool `json:"read_locked"` LockedCause any `json:"locked_cause"` WebHash string `json:"web_hash"` AndroidHash string `json:"android_hash"` IOSHash string `json:"ios_hash"` } // TrashResponse is returned when emptying the Trash type TrashResponse struct { Folders int64 `json:"folders"` Files int64 `json:"files"` } // XML structures returned by the old API // Flag is a hacky type for checking if an attribute is present type Flag bool // UnmarshalXMLAttr sets Flag to true if the attribute is present func (f *Flag) UnmarshalXMLAttr(attr xml.Attr) error { *f = true return nil } // MarshalXMLAttr : Do not use func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) { attr := xml.Attr{ Name: name, Value: "false", } return attr, errors.New("unimplemented") } /* GET http://www.jottacloud.com/JFS/<account> <user time="2018-07-18-T21:39:10Z" host="dn-132"> <username>12qh1wsht8cssxdtwl15rqh9</username> <account-type>free</account-type> <locked>false</locked> <capacity>5368709120</capacity> <max-devices>-1</max-devices> <max-mobile-devices>-1</max-mobile-devices> <usage>0</usage> <read-locked>false</read-locked> <write-locked>false</write-locked> <quota-write-locked>false</quota-write-locked> <enable-sync>true</enable-sync> <enable-foldershare>true</enable-foldershare> <devices> <device> <name xml:space="preserve">Jotta</name> <display_name 
xml:space="preserve">Jotta</display_name> <type>JOTTA</type> <sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid> <size>0</size> <modified>2018-07-15-T22:04:59Z</modified> </device> </devices> </user> */ // DriveInfo represents a Jottacloud account type DriveInfo struct { Username string `xml:"username"` AccountType string `xml:"account-type"` Locked bool `xml:"locked"` Capacity int64 `xml:"capacity"` MaxDevices int `xml:"max-devices"` MaxMobileDevices int `xml:"max-mobile-devices"` Usage int64 `xml:"usage"` ReadLocked bool `xml:"read-locked"` WriteLocked bool `xml:"write-locked"` QuotaWriteLocked bool `xml:"quota-write-locked"` EnableSync bool `xml:"enable-sync"` EnableFolderShare bool `xml:"enable-foldershare"` Devices []JottaDevice `xml:"devices>device"` } /* GET http://www.jottacloud.com/JFS/<account>/<device> <device time="2018-07-23-T20:21:50Z" host="dn-158"> <name xml:space="preserve">Jotta</name> <display_name xml:space="preserve">Jotta</display_name> <type>JOTTA</type> <sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid> <size>0</size> <modified>2018-07-15-T22:04:59Z</modified> <user>12qh1wsht8cssxdtwl15rqh9</user> <mountPoints> <mountPoint> <name xml:space="preserve">Archive</name> <size>0</size> <modified>2018-07-15-T22:04:59Z</modified> </mountPoint> <mountPoint> <name xml:space="preserve">Shared</name> <size>0</size> <modified></modified> </mountPoint> <mountPoint> <name xml:space="preserve">Sync</name> <size>0</size> <modified></modified> </mountPoint> </mountPoints> <metadata first="" max="" total="3" num_mountpoints="3"/> </device> */ // JottaDevice represents a Jottacloud Device type JottaDevice struct { Name string `xml:"name"` DisplayName string `xml:"display_name"` Type string `xml:"type"` Sid string `xml:"sid"` Size int64 `xml:"size"` User string `xml:"user"` MountPoints []JottaMountPoint `xml:"mountPoints>mountPoint"` } /* GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint> <mountPoint time="2018-07-24-T20:35:02Z" host="dn-157"> <name 
xml:space="preserve">Sync</name> <path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</path> <abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</abspath> <size>0</size> <modified></modified> <device>Jotta</device> <user>12qh1wsht8cssxdtwl15rqh9</user> <folders> <folder name="test"/> </folders> <metadata first="" max="" total="1" num_folders="1" num_files="0"/> </mountPoint> */ // JottaMountPoint represents a Jottacloud mountpoint type JottaMountPoint struct { Name string `xml:"name"` Size int64 `xml:"size"` Device string `xml:"device"` Folders []JottaFolder `xml:"folders>folder"` Files []JottaFile `xml:"files>file"` } /* GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/<folder> <folder name="test" time="2018-07-24-T20:41:37Z" host="dn-158"> <path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</path> <abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</abspath> <folders> <folder name="t2"/>c </folders> <files> <file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2"> <currentRevision> <number>1</number> <state>COMPLETED</state> <created>2018-07-05-T15:08:02Z</created> <modified>2018-07-05-T15:08:02Z</modified> <mime>application/octet-stream</mime> <size>30827730</size> <md5>1e8a7b728ab678048df00075c9507158</md5> <updated>2018-07-24-T20:41:10Z</updated> </currentRevision> </file> </files> <metadata first="" max="" total="2" num_folders="1" num_files="1"/> </folder> */ // JottaFolder represents a JottacloudFolder type JottaFolder struct { XMLName xml.Name Name string `xml:"name,attr"` Deleted Flag `xml:"deleted,attr"` Path string `xml:"path"` CreatedAt JottaTime `xml:"created"` ModifiedAt JottaTime `xml:"modified"` Updated JottaTime `xml:"updated"` Folders []JottaFolder `xml:"folders>folder"` Files []JottaFile `xml:"files>file"` } /* GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file> <file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2"> <currentRevision> 
<number>1</number> <state>COMPLETED</state> <created>2018-07-05-T15:08:02Z</created> <modified>2018-07-05-T15:08:02Z</modified> <mime>application/octet-stream</mime> <size>30827730</size> <md5>1e8a7b728ab678048df00075c9507158</md5> <updated>2018-07-24-T20:41:10Z</updated> </currentRevision> </file> */ // JottaFile represents a Jottacloud file type JottaFile struct { XMLName xml.Name Name string `xml:"name,attr"` Deleted Flag `xml:"deleted,attr"` PublicURI string `xml:"publicURI"` PublicSharePath string `xml:"publicSharePath"` State string `xml:"currentRevision>state"` CreatedAt JottaTime `xml:"currentRevision>created"` ModifiedAt JottaTime `xml:"currentRevision>modified"` UpdatedAt JottaTime `xml:"currentRevision>updated"` Size int64 `xml:"currentRevision>size"` MimeType string `xml:"currentRevision>mime"` MD5 string `xml:"currentRevision>md5"` } // Error is a custom Error for wrapping Jottacloud error responses type Error struct { StatusCode int `xml:"code"` Message string `xml:"message"` Reason string `xml:"reason"` Cause string `xml:"cause"` } // Error returns a string for the error and satisfies the error interface func (e *Error) Error() string { out := fmt.Sprintf("error %d", e.StatusCode) if e.Message != "" { out += ": " + e.Message } if e.Reason != "" { out += fmt.Sprintf(" (%+v)", e.Reason) } return out }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hdfs/hdfs_test.go
backend/hdfs/hdfs_test.go
// Test HDFS filesystem interface //go:build !plan9 package hdfs_test import ( "testing" "github.com/rclone/rclone/backend/hdfs" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestHdfs:", NilObject: (*hdfs.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hdfs/hdfs.go
backend/hdfs/hdfs.go
//go:build !plan9 // Package hdfs provides an interface to the HDFS storage system. package hdfs import ( "path" "strings" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/lib/encoder" ) func init() { fsi := &fs.RegInfo{ Name: "hdfs", Description: "Hadoop distributed file system", NewFs: NewFs, Options: []fs.Option{{ Name: "namenode", Help: "Hadoop name nodes and ports.\n\nE.g. \"namenode-1:8020,namenode-2:8020,...\" to connect to host namenodes at port 8020.", Required: true, Sensitive: true, Default: fs.CommaSepList{}, }, { Name: "username", Help: "Hadoop user name.", Examples: []fs.OptionExample{{ Value: "root", Help: "Connect to hdfs as root.", }}, Sensitive: true, }, { Name: "service_principal_name", Help: `Kerberos service principal name for the namenode. Enables KERBEROS authentication. Specifies the Service Principal Name (SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\" for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`, Advanced: true, Sensitive: true, }, { Name: "data_transfer_protection", Help: `Kerberos data transfer protection: authentication|integrity|privacy. Specifies whether or not authentication, data signature integrity checks, and wire encryption are required when communicating with the datanodes. Possible values are 'authentication', 'integrity' and 'privacy'. 
Used only with KERBEROS enabled.`, Examples: []fs.OptionExample{{ Value: "privacy", Help: "Ensure authentication, integrity and encryption enabled.", }}, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeColon), }}, } fs.Register(fsi) } // Options for this backend type Options struct { Namenode fs.CommaSepList `config:"namenode"` Username string `config:"username"` ServicePrincipalName string `config:"service_principal_name"` DataTransferProtection string `config:"data_transfer_protection"` Enc encoder.MultiEncoder `config:"encoding"` } // xPath make correct file path with leading '/' func xPath(root string, tail string) string { if !strings.HasPrefix(root, "/") { root = "/" + root } return path.Join(root, tail) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hdfs/object.go
backend/hdfs/object.go
//go:build !plan9 package hdfs import ( "context" "errors" "io" "path" "time" "github.com/colinmarc/hdfs/v2" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/readers" ) // Object describes an HDFS file type Object struct { fs *Fs remote string size int64 modTime time.Time } // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Size returns the size of an object in bytes func (o *Object) Size() int64 { return o.size } // ModTime returns the modification time of the object func (o *Object) ModTime(ctx context.Context) time.Time { return o.modTime } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { realpath := o.fs.realpath(o.Remote()) err := o.fs.client.Chtimes(realpath, modTime, modTime) if err != nil { return err } o.modTime = modTime return nil } // Storable returns whether this object is storable func (o *Object) Storable() bool { return true } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.Remote() } // Hash is not supported func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) { return "", hash.ErrUnsupported } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { realpath := o.realpath() fs.Debugf(o.fs, "open [%s]", realpath) f, err := o.fs.client.Open(realpath) if err != nil { return nil, err } var offset, limit int64 = 0, -1 for _, option := range options { switch x := option.(type) { case *fs.SeekOption: offset = x.Offset case *fs.RangeOption: offset, limit = x.Decode(o.Size()) } } _, err = f.Seek(offset, io.SeekStart) if err != nil { return nil, err } if limit != -1 { in = readers.NewLimitedReadCloser(f, limit) } else { in = f } return in, err } // Update object func (o 
*Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { realpath := o.fs.realpath(o.remote) dirname := path.Dir(realpath) fs.Debugf(o.fs, "update [%s]", realpath) err := o.fs.client.MkdirAll(dirname, 0755) if err != nil { return err } _, err = o.fs.client.Stat(realpath) if err == nil { err = o.fs.client.Remove(realpath) if err != nil { return err } } out, err := o.fs.client.Create(realpath) if err != nil { return err } cleanup := func() { rerr := o.fs.client.Remove(realpath) if rerr != nil { fs.Errorf(o.fs, "failed to remove [%v]: %v", realpath, rerr) } } _, err = io.Copy(out, in) if err != nil { cleanup() return err } // If the datanodes have acknowledged all writes but not yet // to the namenode, FileWriter.Close can return ErrReplicating // (wrapped in an os.PathError). This indicates that all data // has been written, but the lease is still open for the file. // // It is safe in this case to either ignore the error (and let // the lease expire on its own) or to call Close multiple // times until it completes without an error. The Java client, // for context, always chooses to retry, with exponential // backoff. err = o.fs.pacer.Call(func() (bool, error) { err := out.Close() if err == nil { return false, nil } return errors.Is(err, hdfs.ErrReplicating), err }) if err != nil { cleanup() return err } info, err := o.fs.client.Stat(realpath) if err != nil { return err } err = o.SetModTime(ctx, src.ModTime(ctx)) if err != nil { return err } o.size = info.Size() return nil } // Remove an object func (o *Object) Remove(ctx context.Context) error { realpath := o.fs.realpath(o.remote) fs.Debugf(o.fs, "remove [%s]", realpath) return o.fs.client.Remove(realpath) } func (o *Object) realpath() string { return o.fs.opt.Enc.FromStandardPath(xPath(o.Fs().Root(), o.remote)) } // Check the interfaces are satisfied var ( _ fs.Object = (*Object)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hdfs/fs.go
backend/hdfs/fs.go
//go:build !plan9 package hdfs import ( "context" "fmt" "io" "os" "os/user" "path" "strings" "time" "github.com/colinmarc/hdfs/v2" krb "github.com/jcmturner/gokrb5/v8/client" "github.com/jcmturner/gokrb5/v8/config" "github.com/jcmturner/gokrb5/v8/credentials" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/pacer" ) // Fs represents a HDFS server type Fs struct { name string root string features *fs.Features // optional features opt Options // options for this backend ci *fs.ConfigInfo // global config client *hdfs.Client pacer *fs.Pacer // pacer for API calls } const ( minSleep = 20 * time.Millisecond maxSleep = 10 * time.Second decayConstant = 2 // bigger for slower decay, exponential ) // copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go func getKerberosClient() (*krb.Client, error) { configPath := os.Getenv("KRB5_CONFIG") if configPath == "" { configPath = "/etc/krb5.conf" } cfg, err := config.Load(configPath) if err != nil { return nil, err } // Determine the ccache location from the environment, falling back to the // default location. 
ccachePath := os.Getenv("KRB5CCNAME") if strings.Contains(ccachePath, ":") { if strings.HasPrefix(ccachePath, "FILE:") { ccachePath = strings.SplitN(ccachePath, ":", 2)[1] } else { return nil, fmt.Errorf("unusable ccache: %s", ccachePath) } } else if ccachePath == "" { u, err := user.Current() if err != nil { return nil, err } ccachePath = fmt.Sprintf("/tmp/krb5cc_%s", u.Uid) } ccache, err := credentials.LoadCCache(ccachePath) if err != nil { return nil, err } client, err := krb.NewFromCCache(ccache, cfg) if err != nil { return nil, err } return client, nil } // NewFs constructs an Fs from the path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } options := hdfs.ClientOptions{ Addresses: opt.Namenode, UseDatanodeHostname: false, } if opt.ServicePrincipalName != "" { options.KerberosClient, err = getKerberosClient() if err != nil { return nil, fmt.Errorf("problem with kerberos authentication: %w", err) } options.KerberosServicePrincipleName = opt.ServicePrincipalName if opt.DataTransferProtection != "" { options.DataTransferProtection = opt.DataTransferProtection } } else { options.User = opt.Username } client, err := hdfs.NewClient(options) if err != nil { return nil, err } f := &Fs{ name: name, root: root, opt: *opt, ci: fs.GetConfig(ctx), client: client, pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } f.features = (&fs.Features{ CanHaveEmptyDirectories: true, }).Fill(ctx, f) info, err := f.client.Stat(f.realpath("")) if err == nil && !info.IsDir() { f.root = path.Dir(f.root) return f, fs.ErrorIsFile } return f, nil } // Name of this fs func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String returns a description of the FS func (f *Fs) String() string { return 
fmt.Sprintf("hdfs://%s/%s", f.opt.Namenode, f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return time.Second } // Hashes are not supported func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } // NewObject finds file at remote or return fs.ErrorObjectNotFound func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { realpath := f.realpath(remote) fs.Debugf(f, "new [%s]", realpath) info, err := f.ensureFile(realpath) if err != nil { return nil, err } return &Object{ fs: f, remote: remote, size: info.Size(), modTime: info.ModTime(), }, nil } // List the objects and directories in dir into entries. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { realpath := f.realpath(dir) fs.Debugf(f, "list [%s]", realpath) err = f.ensureDirectory(realpath) if err != nil { return nil, err } list, err := f.client.ReadDir(realpath) if err != nil { return nil, err } for _, x := range list { stdName := f.opt.Enc.ToStandardName(x.Name()) remote := path.Join(dir, stdName) if x.IsDir() { entries = append(entries, fs.NewDir(remote, x.ModTime())) } else { entries = append(entries, &Object{ fs: f, remote: remote, size: x.Size(), modTime: x.ModTime(), }) } } return entries, nil } // Put the object func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { o := &Object{ fs: f, remote: src.Remote(), } err := o.Update(ctx, in, src, options...) return o, err } // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) 
} // Mkdir makes a directory func (f *Fs) Mkdir(ctx context.Context, dir string) error { fs.Debugf(f, "mkdir [%s]", f.realpath(dir)) return f.client.MkdirAll(f.realpath(dir), 0755) } // Rmdir deletes the directory func (f *Fs) Rmdir(ctx context.Context, dir string) error { realpath := f.realpath(dir) fs.Debugf(f, "rmdir [%s]", realpath) err := f.ensureDirectory(realpath) if err != nil { return err } // do not remove empty directory list, err := f.client.ReadDir(realpath) if err != nil { return err } if len(list) > 0 { return fs.ErrorDirectoryNotEmpty } return f.client.Remove(realpath) } // Purge deletes all the files in the directory func (f *Fs) Purge(ctx context.Context, dir string) error { realpath := f.realpath(dir) fs.Debugf(f, "purge [%s]", realpath) err := f.ensureDirectory(realpath) if err != nil { return err } return f.client.RemoveAll(realpath) } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } // Get the real paths from the remote specs: sourcePath := srcObj.fs.realpath(srcObj.remote) targetPath := f.realpath(remote) fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath) // Make sure the target folder exists: dirname := path.Dir(targetPath) err := f.client.MkdirAll(dirname, 0755) if err != nil { return nil, err } // Do the move // Note that the underlying HDFS library hard-codes Overwrite=True, but this is expected rclone behaviour. 
err = f.client.Rename(sourcePath, targetPath) if err != nil { return nil, err } // Look up the resulting object info, err := f.client.Stat(targetPath) if err != nil { return nil, err } // And return it: return &Object{ fs: f, remote: remote, size: info.Size(), modTime: info.ModTime(), }, nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) { srcFs, ok := src.(*Fs) if !ok { return fs.ErrorCantDirMove } // Get the real paths from the remote specs: sourcePath := srcFs.realpath(srcRemote) targetPath := f.realpath(dstRemote) fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath) // Check if the destination exists: info, err := f.client.Stat(targetPath) if err == nil { fs.Debugf(f, "target directory already exits, IsDir = [%t]", info.IsDir()) return fs.ErrorDirExists } // Make sure the targets parent folder exists: dirname := path.Dir(targetPath) err = f.client.MkdirAll(dirname, 0755) if err != nil { return err } // Do the move err = f.client.Rename(sourcePath, targetPath) if err != nil { return err } return nil } // About gets quota information from the Fs func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { info, err := f.client.StatFs() if err != nil { return nil, err } return &fs.Usage{ Total: fs.NewUsageValue(info.Capacity), Used: fs.NewUsageValue(info.Used), Free: fs.NewUsageValue(info.Remaining), }, nil } func (f *Fs) ensureDirectory(realpath string) error { info, err := f.client.Stat(realpath) if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist { return fs.ErrorDirNotFound } if err != nil { return err } if !info.IsDir() { return fs.ErrorDirNotFound } return nil } func (f *Fs) ensureFile(realpath string) (os.FileInfo, error) { 
info, err := f.client.Stat(realpath) if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist { return nil, fs.ErrorObjectNotFound } if err != nil { return nil, err } if info.IsDir() { return nil, fs.ErrorObjectNotFound } return info, nil } func (f *Fs) realpath(dir string) string { return f.opt.Enc.FromStandardPath(xPath(f.Root(), dir)) } // Check the interfaces are satisfied var ( _ fs.Fs = (*Fs)(nil) _ fs.Purger = (*Fs)(nil) _ fs.PutStreamer = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil) _ fs.Mover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hdfs/hdfs_unsupported.go
backend/hdfs/hdfs_unsupported.go
// Build for hdfs for unsupported platforms to stop go complaining // about "no buildable Go source files " //go:build plan9 // Package hdfs provides an interface to the HDFS storage system. package hdfs
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/archive/archive_unsupported.go
backend/archive/archive_unsupported.go
// Build for archive for unsupported platforms to stop go complaining // about "no buildable Go source files " //go:build plan9 // Package archive implements a backend to access archive files in a remote package archive
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/archive/archive.go
backend/archive/archive.go
//go:build !plan9 // Package archive implements a backend to access archive files in a remote package archive // FIXME factor common code between backends out - eg VFS initialization // FIXME can we generalize the VFS handle caching and use it in zip backend // Factor more stuff out if possible // Odd stats which are probably coming from the VFS // * tensorflow.sqfs: 0% /3.074Gi, 204.426Ki/s, 4h22m46s // FIXME this will perform poorly for unpacking as the VFS Reader is bad // at multiple streams - need cache mode setting? import ( "context" "errors" "fmt" "io" "path" "strings" "sync" "time" // Import all the required archivers here _ "github.com/rclone/rclone/backend/archive/squashfs" _ "github.com/rclone/rclone/backend/archive/zip" "github.com/rclone/rclone/backend/archive/archiver" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/hash" ) // Register with Fs func init() { fsi := &fs.RegInfo{ Name: "archive", Description: "Read archives", NewFs: NewFs, MetadataInfo: &fs.MetadataInfo{ Help: `Any metadata supported by the underlying remote is read and written.`, }, Options: []fs.Option{{ Name: "remote", Help: `Remote to wrap to read archives from. Normally should contain a ':' and a path, e.g. "myremote:path/to/dir", "myremote:bucket" or "myremote:". If this is left empty, then the archive backend will use the root as the remote. This means that you can use :archive:remote:path and it will be equivalent to setting remote="remote:path". 
`, Required: false, }}, } fs.Register(fsi) } // Options defines the configuration for this backend type Options struct { Remote string `config:"remote"` } // Fs represents a archive of upstreams type Fs struct { name string // name of this remote features *fs.Features // optional features opt Options // options for this Fs root string // the path we are working on f fs.Fs // remote we are wrapping wrapper fs.Fs // fs that wraps us mu sync.Mutex // protects the below archives map[string]*archive // the archives we have, by path } // A single open archive type archive struct { archiver archiver.Archiver // archiver responsible remote string // path to the archive prefix string // prefix to add on to listings root string // root of the archive to remove from listings mu sync.Mutex // protects the following variables f fs.Fs // the archive Fs, may be nil } // If remote is an archive then return it otherwise return nil func findArchive(remote string) *archive { // FIXME use something faster than linear search? for _, archiver := range archiver.Archivers { if strings.HasSuffix(remote, archiver.Extension) { return &archive{ archiver: archiver, remote: remote, prefix: remote, root: "", } } } return nil } // Find an archive buried in remote func subArchive(remote string) *archive { archive := findArchive(remote) if archive != nil { return archive } parent := path.Dir(remote) if parent == "/" || parent == "." { return nil } return subArchive(parent) } // If remote is an archive then return it otherwise return nil func (f *Fs) findArchive(remote string) (archive *archive) { archive = findArchive(remote) if archive != nil { f.mu.Lock() f.archives[remote] = archive f.mu.Unlock() } return archive } // Instantiate archive if it hasn't been instantiated yet // // This is done lazily so that we can list a directory full of // archives without opening them all. 
func (a *archive) init(ctx context.Context, f fs.Fs) (fs.Fs, error) { a.mu.Lock() defer a.mu.Unlock() if a.f != nil { return a.f, nil } newFs, err := a.archiver.New(ctx, f, a.remote, a.prefix, a.root) if err != nil && err != fs.ErrorIsFile { return nil, fmt.Errorf("failed to create archive %q: %w", a.remote, err) } a.f = newFs return a.f, nil } // NewFs constructs an Fs from the path. // // The returned Fs is the actual Fs, referenced by remote in the config func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) { // defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err) // Parse config into Options struct opt := new(Options) err = configstruct.Set(m, opt) if err != nil { return nil, err } remote := opt.Remote origRoot := root // If remote is empty, use the root instead if remote == "" { remote = root root = "" } isDirectory := strings.HasSuffix(remote, "/") remote = strings.TrimRight(remote, "/") if remote == "" { remote = "/" } if strings.HasPrefix(remote, name+":") { return nil, errors.New("can't point archive remote at itself - check the value of the upstreams setting") } _ = isDirectory foundArchive := subArchive(remote) if foundArchive != nil { fs.Debugf(nil, "Found archiver for %q remote %q", foundArchive.archiver.Extension, foundArchive.remote) // Archive path foundArchive.root = strings.Trim(remote[len(foundArchive.remote):], "/") // Path to the archive archiveRemote := remote[:len(foundArchive.remote)] // Remote is archive leaf name foundArchive.remote = path.Base(archiveRemote) foundArchive.prefix = "" // Point remote to archive file remote = archiveRemote } // Make sure to remove trailing . referring to the current dir if path.Base(root) == "." 
{ root = strings.TrimSuffix(root, ".") } remotePath := fspath.JoinRootPath(remote, root) wrappedFs, err := cache.Get(ctx, remotePath) if err != fs.ErrorIsFile && err != nil { return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err) } f := &Fs{ name: name, //root: path.Join(remotePath, root), root: origRoot, opt: *opt, f: wrappedFs, archives: make(map[string]*archive), } cache.PinUntilFinalized(f.f, f) // the features here are ones we could support, and they are // ANDed with the ones from wrappedFs f.features = (&fs.Features{ CaseInsensitive: true, DuplicateFiles: false, ReadMimeType: true, WriteMimeType: true, CanHaveEmptyDirectories: true, BucketBased: true, SetTier: true, GetTier: true, ReadMetadata: true, WriteMetadata: true, UserMetadata: true, PartialUploads: true, }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) if foundArchive != nil { fs.Debugf(f, "Root is an archive") if err != fs.ErrorIsFile { return nil, fmt.Errorf("expecting to find a file at %q", remote) } return foundArchive.init(ctx, f.f) } // Correct root if definitely pointing to a file if err == fs.ErrorIsFile { f.root = path.Dir(f.root) if f.root == "." 
|| f.root == "/" { f.root = "" } } return f, err } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("archive root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // Rmdir removes the root directory of the Fs object func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.f.Rmdir(ctx, dir) } // Hashes returns hash.HashNone to indicate remote hashing is unavailable func (f *Fs) Hashes() hash.Set { return f.f.Hashes() } // Mkdir makes the root directory of the Fs object func (f *Fs) Mkdir(ctx context.Context, dir string) error { return f.f.Mkdir(ctx, dir) } // Purge all files in the directory // // Implement this if you have a way of deleting all the files // quicker than just running Remove() on the result of List() // // Return an error if it doesn't exist func (f *Fs) Purge(ctx context.Context, dir string) error { do := f.f.Features().Purge if do == nil { return fs.ErrorCantPurge } return do(ctx, dir) } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { do := f.f.Features().Copy if do == nil { return nil, fs.ErrorCantCopy } // FIXME // o, ok := src.(*Object) // if !ok { // return nil, fs.ErrorCantCopy // } return do(ctx, src, remote) } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { do := f.f.Features().Move if do == nil { return nil, fs.ErrorCantMove } // FIXME // o, ok := src.(*Object) // if !ok { // return nil, fs.ErrorCantMove // } return do(ctx, src, remote) } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) { do := f.f.Features().DirMove if do == nil { return fs.ErrorCantDirMove } srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } return do(ctx, srcFs.f, srcRemote, dstRemote) } // ChangeNotify calls the passed function with a path // that has had changes. If the implementation // uses polling, it should adhere to the given interval. // At least one value will be written to the channel, // specifying the initial value and updated values might // follow. A 0 Duration should pause the polling. // The ChangeNotify implementation must empty the channel // regularly. When the channel gets closed, the implementation // should stop polling and release resources. 
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) { do := f.f.Features().ChangeNotify if do == nil { return } wrappedNotifyFunc := func(path string, entryType fs.EntryType) { // fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType) notifyFunc(path, entryType) } do(ctx, wrappedNotifyFunc, ch) } // DirCacheFlush resets the directory cache - used in testing // as an optional interface func (f *Fs) DirCacheFlush() { do := f.f.Features().DirCacheFlush if do != nil { do() } } func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) { var o fs.Object var err error if stream { o, err = f.f.Features().PutStream(ctx, in, src, options...) } else { o, err = f.f.Put(ctx, in, src, options...) } if err != nil { return nil, err } return o, nil } // Put in to the remote path with the modTime given of the given size // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { o, err := f.NewObject(ctx, src.Remote()) switch err { case nil: return o, o.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: return f.put(ctx, in, src, false, options...) default: return nil, err } } // PutStream uploads to the remote path with the modTime given of indeterminate size // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { o, err := f.NewObject(ctx, src.Remote()) switch err { case nil: return o, o.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: return f.put(ctx, in, src, true, options...) 
default: return nil, err } } // About gets quota information from the Fs func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { do := f.f.Features().About if do == nil { return nil, errors.New("not supported by underlying remote") } return do(ctx) } // Find the Fs for the directory func (f *Fs) findFs(ctx context.Context, dir string) (subFs fs.Fs, err error) { f.mu.Lock() defer f.mu.Unlock() subFs = f.f // FIXME should do this with a better datastructure like a prefix tree // FIXME want to find the longest first otherwise nesting won't work dirSlash := dir + "/" for archiverRemote, archive := range f.archives { subRemote := archiverRemote + "/" if strings.HasPrefix(dirSlash, subRemote) { subFs, err = archive.init(ctx, f.f) if err != nil { return nil, err } break } } return subFs, nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { // defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err) subFs, err := f.findFs(ctx, dir) if err != nil { return nil, err } entries, err = subFs.List(ctx, dir) if err != nil { return nil, err } for i, entry := range entries { // Can only unarchive files if o, ok := entry.(fs.Object); ok { remote := o.Remote() archive := f.findArchive(remote) if archive != nil { // Overwrite entry with directory entries[i] = fs.NewDir(remote, o.ModTime(ctx)) } } } return entries, nil } // NewObject creates a new remote archive file object func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { dir := path.Dir(remote) if dir == "/" || dir == "." 
{ dir = "" } subFs, err := f.findFs(ctx, dir) if err != nil { return nil, err } o, err := subFs.NewObject(ctx, remote) if err != nil { return nil, err } return o, nil } // Precision is the greatest precision of all the archivers func (f *Fs) Precision() time.Duration { return time.Second } // Shutdown the backend, closing any background tasks and any // cached connections. func (f *Fs) Shutdown(ctx context.Context) error { if do := f.f.Features().Shutdown; do != nil { return do(ctx) } return nil } // PublicLink generates a public link to the remote path (usually readable by anyone) func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) { do := f.f.Features().PublicLink if do == nil { return "", errors.New("PublicLink not supported") } return do(ctx, remote, expire, unlink) } // PutUnchecked in to the remote path with the modTime given of the given size // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error // // May create duplicates or return errors if src already // exists. func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { do := f.f.Features().PutUnchecked if do == nil { return nil, errors.New("can't PutUnchecked") } o, err := do(ctx, in, src, options...) if err != nil { return nil, err } return o, nil } // MergeDirs merges the contents of all the directories passed // in into the first one and rmdirs the other directories. func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { if len(dirs) == 0 { return nil } do := f.f.Features().MergeDirs if do == nil { return errors.New("MergeDirs not supported") } return do(ctx, dirs) } // CleanUp the trash in the Fs // // Implement this if you have a way of emptying the trash or // otherwise cleaning up old versions of files. 
func (f *Fs) CleanUp(ctx context.Context) error { do := f.f.Features().CleanUp if do == nil { return errors.New("not supported by underlying remote") } return do(ctx) } // OpenWriterAt opens with a handle for random access writes // // Pass in the remote desired and the size if known. // // It truncates any existing object func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) { do := f.f.Features().OpenWriterAt if do == nil { return nil, fs.ErrorNotImplemented } return do(ctx, remote, size) } // UnWrap returns the Fs that this Fs is wrapping func (f *Fs) UnWrap() fs.Fs { return f.f } // WrapFs returns the Fs that is wrapping this Fs func (f *Fs) WrapFs() fs.Fs { return f.wrapper } // SetWrapper sets the Fs that is wrapping this Fs func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper } // OpenChunkWriter returns the chunk size and a ChunkWriter // // Pass in the remote and the src object // You can also use options to hint at the desired chunk size func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) { do := f.f.Features().OpenChunkWriter if do == nil { return info, nil, fs.ErrorNotImplemented } return do(ctx, remote, src, options...) 
} // UserInfo returns info about the connected user func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) { do := f.f.Features().UserInfo if do == nil { return nil, fs.ErrorNotImplemented } return do(ctx) } // Disconnect the current user func (f *Fs) Disconnect(ctx context.Context) error { do := f.f.Features().Disconnect if do == nil { return fs.ErrorNotImplemented } return do(ctx) } // Check the interfaces are satisfied var ( _ fs.Fs = (*Fs)(nil) _ fs.Purger = (*Fs)(nil) _ fs.PutStreamer = (*Fs)(nil) _ fs.Copier = (*Fs)(nil) _ fs.Mover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil) _ fs.DirCacheFlusher = (*Fs)(nil) _ fs.ChangeNotifier = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil) _ fs.Shutdowner = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil) _ fs.PutUncheckeder = (*Fs)(nil) _ fs.MergeDirser = (*Fs)(nil) _ fs.CleanUpper = (*Fs)(nil) _ fs.OpenWriterAter = (*Fs)(nil) _ fs.OpenChunkWriter = (*Fs)(nil) _ fs.UserInfoer = (*Fs)(nil) _ fs.Disconnecter = (*Fs)(nil) // FIXME _ fs.FullObject = (*Object)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/archive/archive_test.go
backend/archive/archive_test.go
//go:build !plan9 // Test Archive filesystem interface package archive_test import ( "testing" _ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/memory" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" ) var ( unimplementableFsMethods = []string{"ListR", "ListP", "MkdirMetadata", "DirSetModTime"} // In these tests we receive objects from the underlying remote which don't implement these methods unimplementableObjectMethods = []string{"GetTier", "ID", "Metadata", "MimeType", "SetTier", "UnWrap", "SetMetadata"} ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { if *fstest.RemoteName == "" { t.Skip("Skipping as -remote not set") } fstests.Run(t, &fstests.Opt{ RemoteName: *fstest.RemoteName, UnimplementableFsMethods: unimplementableFsMethods, UnimplementableObjectMethods: unimplementableObjectMethods, }) } func TestLocal(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("Skipping as -remote set") } remote := t.TempDir() name := "TestArchiveLocal" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":", ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "type", Value: "archive"}, {Name: name, Key: "remote", Value: remote}, }, QuickTestOK: true, UnimplementableFsMethods: unimplementableFsMethods, UnimplementableObjectMethods: unimplementableObjectMethods, }) } func TestMemory(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("Skipping as -remote set") } remote := ":memory:" name := "TestArchiveMemory" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":", ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "type", Value: "archive"}, {Name: name, Key: "remote", Value: remote}, }, QuickTestOK: true, UnimplementableFsMethods: unimplementableFsMethods, UnimplementableObjectMethods: unimplementableObjectMethods, }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/archive/archive_internal_test.go
backend/archive/archive_internal_test.go
//go:build !plan9 package archive import ( "bytes" "context" "fmt" "os" "os/exec" "path" "path/filepath" "strconv" "strings" "testing" _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // FIXME need to test Open with seek // run - run a shell command func run(t *testing.T, args ...string) { cmd := exec.Command(args[0], args[1:]...) fs.Debugf(nil, "run args = %v", args) out, err := cmd.CombinedOutput() if err != nil { t.Fatalf(` ---------------------------- Failed to run %v: %v Command output was: %s ---------------------------- `, args, err, out) } } // check the dst and src are identical func checkTree(ctx context.Context, name string, t *testing.T, dstArchive, src string, expectedCount int) { t.Run(name, func(t *testing.T) { fs.Debugf(nil, "check %q vs %q", dstArchive, src) Farchive, err := cache.Get(ctx, dstArchive) if err != fs.ErrorIsFile { require.NoError(t, err) } Fsrc, err := cache.Get(ctx, src) if err != fs.ErrorIsFile { require.NoError(t, err) } var matches bytes.Buffer opt := operations.CheckOpt{ Fdst: Farchive, Fsrc: Fsrc, Match: &matches, } for _, action := range []string{"Check", "Download"} { t.Run(action, func(t *testing.T) { matches.Reset() if action == "Download" { assert.NoError(t, operations.CheckDownload(ctx, &opt)) } else { assert.NoError(t, operations.Check(ctx, &opt)) } if expectedCount > 0 { assert.Equal(t, expectedCount, strings.Count(matches.String(), "\n")) } }) } t.Run("NewObject", func(t *testing.T) { // Check we can run NewObject on all files and read them assert.NoError(t, operations.ListFn(ctx, Fsrc, func(srcObj fs.Object) { if t.Failed() { return } remote := srcObj.Remote() archiveObj, err := Farchive.NewObject(ctx, remote) require.NoError(t, err, 
remote) assert.Equal(t, remote, archiveObj.Remote(), remote) // Test that the contents are the same archiveBuf := fstests.ReadObject(ctx, t, archiveObj, -1) srcBuf := fstests.ReadObject(ctx, t, srcObj, -1) assert.Equal(t, srcBuf, archiveBuf) if len(srcBuf) < 81 { return } // Tests that Open works with SeekOption assert.Equal(t, srcBuf[50:], fstests.ReadObject(ctx, t, archiveObj, -1, &fs.SeekOption{Offset: 50}), "contents differ after seek") // Tests that Open works with RangeOption for _, test := range []struct { ro fs.RangeOption wantStart, wantEnd int }{ {fs.RangeOption{Start: 5, End: 15}, 5, 16}, {fs.RangeOption{Start: 80, End: -1}, 80, len(srcBuf)}, {fs.RangeOption{Start: 81, End: 100000}, 81, len(srcBuf)}, {fs.RangeOption{Start: -1, End: 20}, len(srcBuf) - 20, len(srcBuf)}, // if start is omitted this means get the final bytes // {fs.RangeOption{Start: -1, End: -1}, 0, len(srcBuf)}, - this seems to work but the RFC doesn't define it } { got := fstests.ReadObject(ctx, t, archiveObj, -1, &test.ro) foundAt := strings.Index(srcBuf, got) help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got)) assert.Equal(t, srcBuf[test.wantStart:test.wantEnd], got, help) } // Test that the modtimes are correct fstest.AssertTimeEqualWithPrecision(t, remote, srcObj.ModTime(ctx), archiveObj.ModTime(ctx), Farchive.Precision()) // Test that the sizes are correct assert.Equal(t, srcObj.Size(), archiveObj.Size()) // Test that Strings are OK assert.Equal(t, srcObj.String(), archiveObj.String()) })) }) // t.Logf("Fdst ------------- %v", Fdst) // operations.List(ctx, Fdst, os.Stdout) // t.Logf("Fsrc ------------- %v", Fsrc) // operations.List(ctx, Fsrc, os.Stdout) }) } // test creating and reading back some archives // // Note that this uses rclone and zip as external binaries. 
func testArchive(t *testing.T, archiveName string, archiveFn func(t *testing.T, output, input string)) { ctx := context.Background() checkFiles := 1000 // create random test input files inputRoot := t.TempDir() input := filepath.Join(inputRoot, archiveName) require.NoError(t, os.Mkdir(input, 0777)) run(t, "rclone", "test", "makefiles", "--files", strconv.Itoa(checkFiles), "--ascii", input) // Create the archive output := t.TempDir() zipFile := path.Join(output, archiveName) archiveFn(t, zipFile, input) // Check the archive itself checkTree(ctx, "Archive", t, ":archive:"+zipFile, input, checkFiles) // Now check a subdirectory fis, err := os.ReadDir(input) require.NoError(t, err) subDir := "NOT FOUND" aFile := "NOT FOUND" for _, fi := range fis { if fi.IsDir() { subDir = fi.Name() } else { aFile = fi.Name() } } checkTree(ctx, "SubDir", t, ":archive:"+zipFile+"/"+subDir, filepath.Join(input, subDir), 0) // Now check a single file fiCtx, fi := filter.AddConfig(ctx) require.NoError(t, fi.AddRule("+ "+aFile)) require.NoError(t, fi.AddRule("- *")) checkTree(fiCtx, "SingleFile", t, ":archive:"+zipFile+"/"+aFile, filepath.Join(input, aFile), 0) // Now check the level above checkTree(ctx, "Root", t, ":archive:"+output, inputRoot, checkFiles) // run(t, "cp", "-a", inputRoot, output, "/tmp/test-"+archiveName) } // Make sure we have the executable named func skipIfNoExe(t *testing.T, exeName string) { _, err := exec.LookPath(exeName) if err != nil { t.Skipf("%s executable not installed", exeName) } } // Test creating and reading back some archives // // Note that this uses rclone and zip as external binaries. 
func TestArchiveZip(t *testing.T) { fstest.Initialise() skipIfNoExe(t, "zip") skipIfNoExe(t, "rclone") testArchive(t, "test.zip", func(t *testing.T, output, input string) { oldcwd, err := os.Getwd() require.NoError(t, err) require.NoError(t, os.Chdir(input)) defer func() { require.NoError(t, os.Chdir(oldcwd)) }() run(t, "zip", "-9r", output, ".") }) } // Test creating and reading back some archives // // Note that this uses rclone and squashfs as external binaries. func TestArchiveSquashfs(t *testing.T) { fstest.Initialise() skipIfNoExe(t, "mksquashfs") skipIfNoExe(t, "rclone") testArchive(t, "test.sqfs", func(t *testing.T, output, input string) { run(t, "mksquashfs", input, output) }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/archive/base/base.go
backend/archive/base/base.go
// Package base is a base archive Fs package base import ( "context" "errors" "fmt" "io" "path" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/vfs" ) // Fs represents a wrapped fs.Fs type Fs struct { f fs.Fs wrapper fs.Fs name string features *fs.Features // optional features vfs *vfs.VFS node vfs.Node // archive object remote string // remote of the archive object prefix string // position for objects prefixSlash string // position for objects with a slash on root string // position to read from within the archive } var errNotImplemented = errors.New("internal error: method not implemented in archiver") // New constructs an Fs from the (wrappedFs, remote) with the objects // prefix with prefix and rooted at root func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (*Fs, error) { // FIXME vfs cache? // FIXME could factor out ReadFileHandle and just use that rather than the full VFS fs.Debugf(nil, "New: remote=%q, prefix=%q, root=%q", remote, prefix, root) VFS := vfs.New(wrappedFs, nil) node, err := VFS.Stat(remote) if err != nil { return nil, fmt.Errorf("failed to find %q archive: %w", remote, err) } f := &Fs{ f: wrappedFs, name: path.Join(fs.ConfigString(wrappedFs), remote), vfs: VFS, node: node, remote: remote, root: root, prefix: prefix, prefixSlash: prefix + "/", } // FIXME // the features here are ones we could support, and they are // ANDed with the ones from wrappedFs // // FIXME some of these need to be forced on - CanHaveEmptyDirectories f.features = (&fs.Features{ CaseInsensitive: false, DuplicateFiles: false, ReadMimeType: false, // MimeTypes not supported with gzip WriteMimeType: false, BucketBased: false, CanHaveEmptyDirectories: true, }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) return f, nil } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return 
f.root } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // String returns a description of the FS func (f *Fs) String() string { return f.name } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { return nil, errNotImplemented } // NewObject finds the Object at remote. func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) { return nil, errNotImplemented } // Precision of the ModTimes in this Fs func (f *Fs) Precision() time.Duration { return time.Second } // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists func (f *Fs) Mkdir(ctx context.Context, dir string) error { return vfs.EROFS } // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return vfs.EROFS } // Put in to the remote path with the modTime given of the given size // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) { return nil, vfs.EROFS } // Hashes returns the supported hash sets. 
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } // UnWrap returns the Fs that this Fs is wrapping func (f *Fs) UnWrap() fs.Fs { return f.f } // WrapFs returns the Fs that is wrapping this Fs func (f *Fs) WrapFs() fs.Fs { return f.wrapper } // SetWrapper sets the Fs that is wrapping this Fs func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper } // Object describes an object to be read from the raw zip file type Object struct { f *Fs remote string } // Fs returns read only access to the Fs that this object is part of func (o *Object) Fs() fs.Info { return o.f } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.Remote() } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Size returns the size of the file func (o *Object) Size() int64 { return -1 } // ModTime returns the modification time of the object // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) ModTime(ctx context.Context) time.Time { return time.Now() } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { return vfs.EROFS } // Storable raturns a boolean indicating if this object is storable func (o *Object) Storable() bool { return true } // Hash returns the selected checksum of the file // If no checksum is available it returns "" func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { return "", hash.ErrUnsupported } // Open opens the file for read. 
Call Close() on the returned io.ReadCloser func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) { return nil, errNotImplemented } // Update in to the object with the modTime given of the given size func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { return vfs.EROFS } // Remove an object func (o *Object) Remove(ctx context.Context) error { return vfs.EROFS } // Check the interfaces are satisfied var ( _ fs.Fs = (*Fs)(nil) _ fs.UnWrapper = (*Fs)(nil) _ fs.Wrapper = (*Fs)(nil) _ fs.Object = (*Object)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/archive/squashfs/cache.go
backend/archive/squashfs/cache.go
package squashfs // Could just be using bare object Open with RangeRequest which // would transfer the minimum amount of data but may be slower. import ( "errors" "fmt" "io/fs" "os" "sync" "github.com/diskfs/go-diskfs/backend" "github.com/rclone/rclone/vfs" ) // Cache file handles for accessing the file type cache struct { node vfs.Node fhsMu sync.Mutex fhs []cacheHandle } // A cached file handle type cacheHandle struct { offset int64 fh vfs.Handle } // Make a new cache func newCache(node vfs.Node) *cache { return &cache{ node: node, } } // Get a vfs.Handle from the pool or open one // // This tries to find an open file handle which doesn't require seeking. func (c *cache) open(off int64) (fh vfs.Handle, err error) { c.fhsMu.Lock() defer c.fhsMu.Unlock() if len(c.fhs) > 0 { // Look for exact match first for i, cfh := range c.fhs { if cfh.offset == off { // fs.Debugf(nil, "CACHE MATCH") c.fhs = append(c.fhs[:i], c.fhs[i+1:]...) return cfh.fh, nil } } // fs.Debugf(nil, "CACHE MISS") // Just take the first one if not found cfh := c.fhs[0] c.fhs = c.fhs[1:] return cfh.fh, nil } fh, err = c.node.Open(os.O_RDONLY) if err != nil { return nil, fmt.Errorf("failed to open squashfs archive: %w", err) } return fh, nil } // Close a vfs.Handle or return it to the pool // // off should be the offset the file handle would read from without seeking func (c *cache) close(fh vfs.Handle, off int64) { c.fhsMu.Lock() defer c.fhsMu.Unlock() c.fhs = append(c.fhs, cacheHandle{ offset: off, fh: fh, }) } // ReadAt reads len(p) bytes into p starting at offset off in the underlying // input source. It returns the number of bytes read (0 <= n <= len(p)) and any // error encountered. // // When ReadAt returns n < len(p), it returns a non-nil error explaining why // more bytes were not returned. In this respect, ReadAt is stricter than Read. // // Even if ReadAt returns n < len(p), it may use all of p as scratch // space during the call. 
If some data is available but not len(p) bytes, // ReadAt blocks until either all the data is available or an error occurs. // In this respect ReadAt is different from Read. // // If the n = len(p) bytes returned by ReadAt are at the end of the input // source, ReadAt may return either err == EOF or err == nil. // // If ReadAt is reading from an input source with a seek offset, ReadAt should // not affect nor be affected by the underlying seek offset. // // Clients of ReadAt can execute parallel ReadAt calls on the same input // source. // // Implementations must not retain p. func (c *cache) ReadAt(p []byte, off int64) (n int, err error) { fh, err := c.open(off) if err != nil { return n, err } defer func() { c.close(fh, off+int64(len(p))) }() // fs.Debugf(nil, "ReadAt(p[%d], off=%d, fh=%p)", len(p), off, fh) return fh.ReadAt(p, off) } var errCacheNotImplemented = errors.New("internal error: squashfs cache doesn't implement method") // WriteAt method dummy stub to satisfy interface func (c *cache) WriteAt(p []byte, off int64) (n int, err error) { return 0, errCacheNotImplemented } // Seek method dummy stub to satisfy interface func (c *cache) Seek(offset int64, whence int) (int64, error) { return 0, errCacheNotImplemented } // Read method dummy stub to satisfy interface func (c *cache) Read(p []byte) (n int, err error) { return 0, errCacheNotImplemented } func (c *cache) Stat() (fs.FileInfo, error) { return nil, errCacheNotImplemented } // Close the file func (c *cache) Close() (err error) { c.fhsMu.Lock() defer c.fhsMu.Unlock() // Close any open file handles for i := range c.fhs { fh := &c.fhs[i] newErr := fh.fh.Close() if err == nil { err = newErr } } c.fhs = nil return err } // Sys returns OS-specific file for ioctl calls via fd func (c *cache) Sys() (*os.File, error) { return nil, errCacheNotImplemented } // Writable returns file for read-write operations func (c *cache) Writable() (backend.WritableFile, error) { return nil, errCacheNotImplemented } // check 
interfaces var _ backend.Storage = (*cache)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/archive/squashfs/squashfs.go
backend/archive/squashfs/squashfs.go
// Package squashfs implements a squashfs archiver for the archive backend package squashfs import ( "context" "fmt" "io" "path" "strings" "time" "github.com/diskfs/go-diskfs/filesystem/squashfs" "github.com/rclone/rclone/backend/archive/archiver" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" ) func init() { archiver.Register(archiver.Archiver{ New: New, Extension: ".sqfs", }) } // Fs represents a wrapped fs.Fs type Fs struct { f fs.Fs wrapper fs.Fs name string features *fs.Features // optional features vfs *vfs.VFS sqfs *squashfs.FileSystem // interface to the squashfs c *cache node vfs.Node // squashfs file object - set if reading remote string // remote of the squashfs file object prefix string // position for objects prefixSlash string // position for objects with a slash on root string // position to read from within the archive } // New constructs an Fs from the (wrappedFs, remote) with the objects // prefix with prefix and rooted at root func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) { // FIXME vfs cache? 
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS fs.Debugf(nil, "Squashfs: New: remote=%q, prefix=%q, root=%q", remote, prefix, root) vfsOpt := vfscommon.Opt vfsOpt.ReadWait = 0 VFS := vfs.New(wrappedFs, &vfsOpt) node, err := VFS.Stat(remote) if err != nil { return nil, fmt.Errorf("failed to find %q archive: %w", remote, err) } c := newCache(node) // FIXME blocksize sqfs, err := squashfs.Read(c, node.Size(), 0, 1024*1024) if err != nil { return nil, fmt.Errorf("failed to read squashfs: %w", err) } f := &Fs{ f: wrappedFs, name: path.Join(fs.ConfigString(wrappedFs), remote), vfs: VFS, node: node, sqfs: sqfs, c: c, remote: remote, root: strings.Trim(root, "/"), prefix: prefix, prefixSlash: prefix + "/", } if prefix == "" { f.prefixSlash = "" } singleObject := false // Find the directory the root points to if f.root != "" && !strings.HasSuffix(root, "/") { native, err := f.toNative("") if err == nil { native = strings.TrimRight(native, "/") _, err := f.newObjectNative(native) if err == nil { // If it pointed to a file, find the directory above f.root = path.Dir(f.root) if f.root == "." 
|| f.root == "/" { f.root = "" } } } } // FIXME // the features here are ones we could support, and they are // ANDed with the ones from wrappedFs // // FIXME some of these need to be forced on - CanHaveEmptyDirectories f.features = (&fs.Features{ CaseInsensitive: false, DuplicateFiles: false, ReadMimeType: false, // MimeTypes not supported with gsquashfs WriteMimeType: false, BucketBased: false, CanHaveEmptyDirectories: true, }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) if singleObject { return f, fs.ErrorIsFile } return f, nil } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // String returns a description of the FS func (f *Fs) String() string { return fmt.Sprintf("Squashfs %q", f.name) } // This turns a remote into a native path in the squashfs starting with a / func (f *Fs) toNative(remote string) (string, error) { native := strings.Trim(remote, "/") if f.prefix == "" { native = "/" + native } else if native == f.prefix { native = "/" } else if !strings.HasPrefix(native, f.prefixSlash) { return "", fmt.Errorf("internal error: %q doesn't start with prefix %q", native, f.prefixSlash) } else { native = native[len(f.prefix):] } if f.root != "" { native = "/" + f.root + native } return native, nil } // Turn a (nativeDir, leaf) into a remote func (f *Fs) fromNative(nativeDir string, leaf string) string { // fs.Debugf(nil, "nativeDir = %q, leaf = %q, root=%q", nativeDir, leaf, f.root) dir := nativeDir if f.root != "" { dir = strings.TrimPrefix(dir, "/"+f.root) } remote := f.prefixSlash + strings.Trim(path.Join(dir, leaf), "/") // fs.Debugf(nil, "dir = %q, remote=%q", dir, remote) return remote } // Convert a FileInfo into an Object from native dir func (f *Fs) objectFromFileInfo(nativeDir string, item 
squashfs.FileStat) *Object { return &Object{ fs: f, remote: f.fromNative(nativeDir, item.Name()), size: item.Size(), modTime: item.ModTime(), item: item, } } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err) nativeDir, err := f.toNative(dir) if err != nil { return nil, err } items, err := f.sqfs.ReadDir(nativeDir) if err != nil { return nil, fmt.Errorf("read squashfs: couldn't read directory: %w", err) } entries = make(fs.DirEntries, 0, len(items)) for _, fi := range items { item, ok := fi.(squashfs.FileStat) if !ok { return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi) } // fs.Debugf(item.Name(), "entry = %#v", item) var entry fs.DirEntry if err != nil { return nil, fmt.Errorf("error reading item %q: %q", item.Name(), err) } if item.IsDir() { var remote = f.fromNative(nativeDir, item.Name()) entry = fs.NewDir(remote, item.ModTime()) } else { if item.Mode().IsRegular() { entry = f.objectFromFileInfo(nativeDir, item) } else { fs.Debugf(item.Name(), "FIXME Not regular file - skipping") continue } } entries = append(entries, entry) } // fs.Debugf(f, "dir=%q, entries=%v", dir, entries) return entries, nil } // newObjectNative finds the object at the native path passed in func (f *Fs) newObjectNative(nativePath string) (o fs.Object, err error) { // get the path and filename dir, leaf := path.Split(nativePath) dir = strings.TrimRight(dir, "/") leaf = strings.Trim(leaf, "/") // FIXME need to detect directory not found fis, err := f.sqfs.ReadDir(dir) if err != nil { return nil, fs.ErrorObjectNotFound } for _, fi := range fis { if fi.Name() 
== leaf { if fi.IsDir() { return nil, fs.ErrorNotAFile } item, ok := fi.(squashfs.FileStat) if !ok { return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi) } o = f.objectFromFileInfo(dir, item) break } } if o == nil { return nil, fs.ErrorObjectNotFound } return o, nil } // NewObject finds the Object at remote. func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) { defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err) nativePath, err := f.toNative(remote) if err != nil { return nil, err } return f.newObjectNative(nativePath) } // Precision of the ModTimes in this Fs func (f *Fs) Precision() time.Duration { return time.Second } // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists func (f *Fs) Mkdir(ctx context.Context, dir string) error { return vfs.EROFS } // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return vfs.EROFS } // Put in to the remote path with the modTime given of the given size // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) { return nil, vfs.EROFS } // Hashes returns the supported hash sets. 
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } // UnWrap returns the Fs that this Fs is wrapping func (f *Fs) UnWrap() fs.Fs { return f.f } // WrapFs returns the Fs that is wrapping this Fs func (f *Fs) WrapFs() fs.Fs { return f.wrapper } // SetWrapper sets the Fs that is wrapping this Fs func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper } // Object describes an object to be read from the raw squashfs file type Object struct { fs *Fs remote string size int64 modTime time.Time item squashfs.FileStat } // Fs returns read only access to the Fs that this object is part of func (o *Object) Fs() fs.Info { return o.fs } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.Remote() } // Turn a squashfs path into a full path for the parent Fs // func (o *Object) path(remote string) string { // return path.Join(o.fs.prefix, remote) // } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Size returns the size of the file func (o *Object) Size() int64 { return o.size } // ModTime returns the modification time of the object // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) ModTime(ctx context.Context) time.Time { return o.modTime } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { return vfs.EROFS } // Storable raturns a boolean indicating if this object is storable func (o *Object) Storable() bool { return true } // Hash returns the selected checksum of the file // If no checksum is available it returns "" func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { return "", hash.ErrUnsupported } // Open opens the file for read. 
Call Close() on the returned io.ReadCloser func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) { var offset, limit int64 = 0, -1 for _, option := range options { switch x := option.(type) { case *fs.SeekOption: offset = x.Offset case *fs.RangeOption: offset, limit = x.Decode(o.Size()) default: if option.Mandatory() { fs.Logf(o, "Unsupported mandatory option: %v", option) } } } remote, err := o.fs.toNative(o.remote) if err != nil { return nil, err } fs.Debugf(o, "Opening %q", remote) //fh, err := o.fs.sqfs.OpenFile(remote, os.O_RDONLY) fh, err := o.item.Open() if err != nil { return nil, err } // discard data from start as necessary if offset > 0 { _, err = fh.Seek(offset, io.SeekStart) if err != nil { return nil, err } } // If limited then don't return everything if limit >= 0 { fs.Debugf(nil, "limit=%d, offset=%d, options=%v", limit, offset, options) return readers.NewLimitedReadCloser(fh, limit), nil } return fh, nil } // Update in to the object with the modTime given of the given size func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { return vfs.EROFS } // Remove an object func (o *Object) Remove(ctx context.Context) error { return vfs.EROFS } // Check the interfaces are satisfied var ( _ fs.Fs = (*Fs)(nil) _ fs.UnWrapper = (*Fs)(nil) _ fs.Wrapper = (*Fs)(nil) _ fs.Object = (*Object)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/archive/archiver/archiver.go
backend/archive/archiver/archiver.go
// Package archiver registers all the archivers
package archiver

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// Archiver describes an archive package
type Archiver struct {
	// New constructs an Fs from the (wrappedFs, remote) with the objects
	// prefix with prefix and rooted at root
	New func(ctx context.Context, f fs.Fs, remote, prefix, root string) (fs.Fs, error)
	// Extension is the archive file extension including the leading
	// dot, e.g. ".zip" — presumably used by callers to match archive
	// file names against registered archivers; confirm with caller.
	Extension string
}

// Archivers is a slice of all registered archivers
var Archivers []Archiver

// Register adds the archivers provided to the list of known archivers
//
// Not safe for concurrent use; expected to be called from package
// init functions only (as the zip backend does).
func Register(as ...Archiver) {
	Archivers = append(Archivers, as...)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/archive/zip/zip.go
backend/archive/zip/zip.go
// Package zip implements a zip archiver for the archive backend package zip import ( "archive/zip" "context" "errors" "fmt" "io" "os" "path" "strings" "time" "github.com/rclone/rclone/backend/archive/archiver" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/dirtree" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" ) func init() { archiver.Register(archiver.Archiver{ New: New, Extension: ".zip", }) } // Fs represents a wrapped fs.Fs type Fs struct { f fs.Fs wrapper fs.Fs name string features *fs.Features // optional features vfs *vfs.VFS node vfs.Node // zip file object - set if reading remote string // remote of the zip file object prefix string // position for objects prefixSlash string // position for objects with a slash on root string // position to read from within the archive dt dirtree.DirTree // read from zipfile } // New constructs an Fs from the (wrappedFs, remote) with the objects // prefix with prefix and rooted at root func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) { // FIXME vfs cache? 
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS fs.Debugf(nil, "Zip: New: remote=%q, prefix=%q, root=%q", remote, prefix, root) vfsOpt := vfscommon.Opt vfsOpt.ReadWait = 0 VFS := vfs.New(wrappedFs, &vfsOpt) node, err := VFS.Stat(remote) if err != nil { return nil, fmt.Errorf("failed to find %q archive: %w", remote, err) } f := &Fs{ f: wrappedFs, name: path.Join(fs.ConfigString(wrappedFs), remote), vfs: VFS, node: node, remote: remote, root: root, prefix: prefix, prefixSlash: prefix + "/", } // Read the contents of the zip file singleObject, err := f.readZip() if err != nil { return nil, fmt.Errorf("failed to open zip file: %w", err) } // FIXME // the features here are ones we could support, and they are // ANDed with the ones from wrappedFs // // FIXME some of these need to be forced on - CanHaveEmptyDirectories f.features = (&fs.Features{ CaseInsensitive: false, DuplicateFiles: false, ReadMimeType: false, // MimeTypes not supported with gzip WriteMimeType: false, BucketBased: false, CanHaveEmptyDirectories: true, }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) if singleObject { return f, fs.ErrorIsFile } return f, nil } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // String returns a description of the FS func (f *Fs) String() string { return fmt.Sprintf("Zip %q", f.name) } // readZip the zip file into f // // Returns singleObject=true if f.root points to a file func (f *Fs) readZip() (singleObject bool, err error) { if f.node == nil { return singleObject, fs.ErrorDirNotFound } size := f.node.Size() if size < 0 { return singleObject, errors.New("can't read from zip file with unknown size") } r, err := f.node.Open(os.O_RDONLY) if err != nil { return singleObject, 
fmt.Errorf("failed to open zip file: %w", err) } zr, err := zip.NewReader(r, size) if err != nil { return singleObject, fmt.Errorf("failed to read zip file: %w", err) } dt := dirtree.New() for _, file := range zr.File { remote := strings.Trim(path.Clean(file.Name), "/") if remote == "." { remote = "" } remote = path.Join(f.prefix, remote) if f.root != "" { // Ignore all files outside the root if !strings.HasPrefix(remote, f.root) { continue } if remote == f.root { remote = "" } else { remote = strings.TrimPrefix(remote, f.root+"/") } } if strings.HasSuffix(file.Name, "/") { dir := fs.NewDir(remote, file.Modified) dt.AddDir(dir) } else { if remote == "" { remote = path.Base(f.root) singleObject = true dt = dirtree.New() } o := &Object{ f: f, remote: remote, fh: &file.FileHeader, file: file, } dt.Add(o) if singleObject { break } } } dt.CheckParents("") dt.Sort() f.dt = dt //fs.Debugf(nil, "dt = %v", dt) return singleObject, nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err) // _, err = f.strip(dir) // if err != nil { // return nil, err // } entries, ok := f.dt[dir] if !ok { return nil, fs.ErrorDirNotFound } fs.Debugf(f, "dir=%q, entries=%v", dir, entries) return entries, nil } // NewObject finds the Object at remote. 
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) { defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err) if f.dt == nil { return nil, fs.ErrorObjectNotFound } _, entry := f.dt.Find(remote) if entry == nil { return nil, fs.ErrorObjectNotFound } o, ok := entry.(*Object) if !ok { return nil, fs.ErrorNotAFile } return o, nil } // Precision of the ModTimes in this Fs func (f *Fs) Precision() time.Duration { return time.Second } // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists func (f *Fs) Mkdir(ctx context.Context, dir string) error { return vfs.EROFS } // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return vfs.EROFS } // Put in to the remote path with the modTime given of the given size // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) { return nil, vfs.EROFS } // Hashes returns the supported hash sets. 
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.CRC32) } // UnWrap returns the Fs that this Fs is wrapping func (f *Fs) UnWrap() fs.Fs { return f.f } // WrapFs returns the Fs that is wrapping this Fs func (f *Fs) WrapFs() fs.Fs { return f.wrapper } // SetWrapper sets the Fs that is wrapping this Fs func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper } // Object describes an object to be read from the raw zip file type Object struct { f *Fs remote string fh *zip.FileHeader file *zip.File } // Fs returns read only access to the Fs that this object is part of func (o *Object) Fs() fs.Info { return o.f } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.Remote() } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Size returns the size of the file func (o *Object) Size() int64 { return int64(o.fh.UncompressedSize64) } // ModTime returns the modification time of the object // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) ModTime(ctx context.Context) time.Time { return o.fh.Modified } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { return vfs.EROFS } // Storable raturns a boolean indicating if this object is storable func (o *Object) Storable() bool { return true } // Hash returns the selected checksum of the file // If no checksum is available it returns "" func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { if ht == hash.CRC32 { // FIXME return empty CRC if writing if o.f.dt == nil { return "", nil } return fmt.Sprintf("%08x", o.fh.CRC32), nil } return "", hash.ErrUnsupported } // Open opens the file for read. 
Call Close() on the returned io.ReadCloser func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) { var offset, limit int64 = 0, -1 for _, option := range options { switch x := option.(type) { case *fs.SeekOption: offset = x.Offset case *fs.RangeOption: offset, limit = x.Decode(o.Size()) default: if option.Mandatory() { fs.Logf(o, "Unsupported mandatory option: %v", option) } } } rc, err = o.file.Open() if err != nil { return nil, err } // discard data from start as necessary if offset > 0 { _, err = io.CopyN(io.Discard, rc, offset) if err != nil { return nil, err } } // If limited then don't return everything if limit >= 0 { return readers.NewLimitedReadCloser(rc, limit), nil } return rc, nil } // Update in to the object with the modTime given of the given size func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { return vfs.EROFS } // Remove an object func (o *Object) Remove(ctx context.Context) error { return vfs.EROFS } // Check the interfaces are satisfied var ( _ fs.Fs = (*Fs)(nil) _ fs.UnWrapper = (*Fs)(nil) _ fs.Wrapper = (*Fs)(nil) _ fs.Object = (*Object)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/gofile/gofile.go
backend/gofile/gofile.go
// Package gofile provides an interface to the Gofile // object storage system. package gofile import ( "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "path" "strconv" "strings" "time" "github.com/rclone/rclone/backend/gofile/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" ) const ( minSleep = 10 * time.Millisecond maxSleep = 20 * time.Second decayConstant = 1 // bigger for slower decay, exponential rootURL = "https://api.gofile.io" rateLimitSleep = 5 * time.Second // penalise a goroutine by this long for making a rate limit error maxDepth = 4 // in ListR recursive list this deep (maximum is 16) ) /* // TestGoFile{sb0-v} stringNeedsEscaping = []rune{ '!', '*', '.', '/', ':', '<', '>', '?', '\"', '\\', '\a', '\b', '\f', '\n', '\r', '\t', '\v', '\x00', '\x01', '\x02', '\x03', '\x04', '\x05', '\x06', '\x0e', '\x0f', '\x10', '\x11', '\x12', '\x13', '\x14', '\x15', '\x16', '\x17', '\x18', '\x19', '\x1a', '\x1b', '\x1c', '\x1d', '\x1e', '\x1f', '\x7f', '\xbf', '\xfe', '|' } maxFileLength = 255 // for 1 byte unicode characters maxFileLength = 255 // for 2 byte unicode characters maxFileLength = 255 // for 3 byte unicode characters maxFileLength = 255 // for 4 byte unicode characters canWriteUnnormalized = true canReadUnnormalized = true canReadRenormalized = false canStream = true base32768isOK = false // make sure maxFileLength for 2 byte unicode chars is the same as for 1 byte characters */ // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "gofile", Description: "Gofile", NewFs: NewFs, Options: []fs.Option{{ Name: 
"access_token", Help: `API Access token You can get this from the web control panel.`, Sensitive: true, }, { Name: "root_folder_id", Help: `ID of the root folder Leave this blank normally, rclone will fill it in automatically. If you want rclone to be restricted to a particular folder you can fill it in - see the docs for more info. `, Default: "", Advanced: true, Sensitive: true, }, { Name: "account_id", Help: `Account ID Leave this blank normally, rclone will fill it in automatically. `, Default: "", Advanced: true, Sensitive: true, }, { Name: "list_chunk", Help: `Number of items to list in each call`, Default: 1000, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.Display | // Slash Control Delete Dot encoder.EncodeDoubleQuote | encoder.EncodeAsterisk | encoder.EncodeColon | encoder.EncodeLtGt | encoder.EncodeQuestion | encoder.EncodeBackSlash | encoder.EncodePipe | encoder.EncodeExclamation | encoder.EncodeLeftPeriod | encoder.EncodeRightPeriod | encoder.EncodeInvalidUtf8), }}, }) } // Options defines the configuration for this backend type Options struct { AccessToken string `config:"access_token"` RootFolderID string `config:"root_folder_id"` AccountID string `config:"account_id"` ListChunk int `config:"list_chunk"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote gofile type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options features *fs.Features // optional features srv *rest.Client // the connection to the server dirCache *dircache.DirCache // Map of directory path to directory id pacer *fs.Pacer // pacer for API calls } // Object describes a gofile object // // The full set of metadata will always be present type Object struct { fs *Fs // what this object is part of remote string // The remote path size int64 // size of the object modTime time.Time // modification time of the object id string // ID 
of the object dirID string // ID of the object's directory mimeType string // mime type of the object md5 string // MD5 of the object content url string // where to download this object } // Directory describes a gofile directory type Directory struct { Object items int64 // number of items in the directory } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("gofile root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // parsePath parses a gofile 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // Return true if the api error has the status given func isAPIErr(err error, status string) bool { var apiErr api.Error if errors.As(err, &apiErr) { return apiErr.Status == status } return false } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. 
It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } if isAPIErr(err, "error-rateLimit") { // Give an immediate penalty to rate limits fs.Debugf(nil, "Rate limited, sleep for %v", rateLimitSleep) time.Sleep(rateLimitSleep) //return true, pacer.RetryAfterError(err, 2*time.Second) return true, err } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // readMetaDataForPath reads the metadata from the path func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) { // defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound } return nil, err } found, err := f.listAll(ctx, directoryID, false, true, leaf, func(item *api.Item) bool { if item.Name == leaf { info = item return true } return false }) if err != nil { return nil, err } if !found { return nil, fs.ErrorObjectNotFound } return info, nil } // readMetaDataForID reads the metadata for the ID given func (f *Fs) readMetaDataForID(ctx context.Context, id string) (info *api.Item, err error) { opts := rest.Opts{ Method: "GET", Path: "/contents/" + id, Parameters: url.Values{ "page": {"1"}, "pageSize": {"1"}, // not interested in children so just ask for 1 }, } var result api.Contents err = f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, nil, &result) // Retry not found errors - when looking for an ID it should really exist if isAPIErr(err, "error-notFound") { return true, err } return shouldRetry(ctx, resp, err) }) if err = result.Err(err); err != nil { return nil, fmt.Errorf("failed to get item info: %w", err) } return &result.Data.Item, nil } // errorHandler parses a non 2xx error response into an error func errorHandler(resp 
*http.Response) error { body, err := rest.ReadBody(resp) if err != nil { fs.Debugf(nil, "Couldn't read error out of body: %v", err) body = nil } // Decode error response if there was one - they can be blank var errResponse api.Error if len(body) > 0 { err = json.Unmarshal(body, &errResponse) if err != nil { fs.Debugf(nil, "Couldn't decode error response: %v", err) } } if errResponse.Status == "" { errResponse.Status = fmt.Sprintf("%s (%d): %s", resp.Status, resp.StatusCode, string(body)) } return errResponse } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } root = parsePath(root) client := fshttp.NewClient(ctx) f := &Fs{ name: name, root: root, opt: *opt, srv: rest.NewClient(client).SetRoot(rootURL), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } f.features = (&fs.Features{ CaseInsensitive: false, CanHaveEmptyDirectories: true, DuplicateFiles: true, ReadMimeType: true, WriteMimeType: false, WriteDirSetModTime: true, DirModTimeUpdatesOnWrite: true, }).Fill(ctx, f) f.srv.SetErrorHandler(errorHandler) f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken) // Read account ID if not present err = f.readAccountID(ctx, m) if err != nil { return nil, err } // Read Root Folder ID if not present err = f.readRootFolderID(ctx, m) if err != nil { return nil, err } // Get rootFolderID rootID := f.opt.RootFolderID f.dirCache = dircache.New(root, rootID, f) // Find the current root err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) tempF := *f tempF.dirCache = dircache.New(newRoot, rootID, &tempF) tempF.root = newRoot // Make new Fs which is the parent err = tempF.dirCache.FindRoot(ctx, false) if err != nil 
{ // No root so return old f return f, nil } _, err := tempF.newObjectWithInfo(ctx, remote, nil) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f return f, nil } return nil, err } f.features.Fill(ctx, &tempF) // XXX: update the old f here instead of returning tempF, since // `features` were already filled with functions having *f as a receiver. // See https://github.com/rclone/rclone/issues/2182 f.dirCache = tempF.dirCache f.root = tempF.root // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // Read the AccountID into f.opt if not set and cache in the config file as account_id func (f *Fs) readAccountID(ctx context.Context, m configmap.Mapper) (err error) { if f.opt.AccountID != "" { return nil } opts := rest.Opts{ Method: "GET", Path: "/accounts/getid", } var result api.AccountsGetID var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err = result.Err(err); err != nil { return fmt.Errorf("failed to read account ID: %w", err) } f.opt.AccountID = result.Data.ID m.Set("account_id", f.opt.AccountID) return nil } // Read the Accounts info func (f *Fs) getAccounts(ctx context.Context) (result *api.AccountsGet, err error) { opts := rest.Opts{ Method: "GET", Path: "/accounts/" + f.opt.AccountID, } result = new(api.AccountsGet) var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err = result.Err(err); err != nil { return nil, fmt.Errorf("failed to read accountd info: %w", err) } return result, nil } // Read the RootFolderID into f.opt if not set and cache in the config file as root_folder_id func (f *Fs) readRootFolderID(ctx context.Context, m configmap.Mapper) (err error) { if f.opt.RootFolderID != "" { return nil } result, err := f.getAccounts(ctx) if err != nil { return 
err } f.opt.RootFolderID = result.Data.RootFolder m.Set("root_folder_id", f.opt.RootFolderID) return nil } // rootSlash returns root with a slash on if it is empty, otherwise empty string func (f *Fs) rootSlash() string { if f.root == "" { return f.root } return f.root + "/" } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } var err error if info != nil { // Set info err = o.setMetaData(info) } else { err = o.readMetaData(ctx) // reads info and meta, returning an error } if err != nil { return nil, err } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(ctx, remote, nil) } // FindLeaf finds a directory of name leaf in the folder with ID pathID func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { // Find the leaf in pathID found, err = f.listAll(ctx, pathID, true, false, leaf, func(item *api.Item) bool { if item.Name == leaf { pathIDOut = item.ID return true } return false }) return pathIDOut, found, err } // createDir makes a directory with pathID as parent and name leaf and modTime func (f *Fs) createDir(ctx context.Context, pathID, leaf string, modTime time.Time) (item *api.Item, err error) { var resp *http.Response var result api.CreateFolderResponse opts := rest.Opts{ Method: "POST", Path: "/contents/createFolder", } mkdir := api.CreateFolderRequest{ FolderName: f.opt.Enc.FromStandardName(leaf), ParentFolderID: pathID, ModTime: api.ToNativeTime(modTime), } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &result) return shouldRetry(ctx, resp, err) }) if err = result.Err(err); err != nil { 
return nil, fmt.Errorf("failed to create folder: %w", err) } return &result.Data, nil } // CreateDir makes a directory with pathID as parent and name leaf func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) item, err := f.createDir(ctx, pathID, leaf, time.Now()) if err != nil { return "", err } return item.ID, nil } // list the objects into the function supplied // // If directories is set it only sends directories // User function to process a File item from listAll // // Should return true to finish processing type listAllFn func(*api.Item) bool // Lists the directory required calling the user function on each item found // // If name is set then the server will limit the returned items to those // with that name. // // If the user fn ever returns true then it early exits with found = true func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, name string, fn listAllFn) (found bool, err error) { opts := rest.Opts{ Method: "GET", Path: "/contents/" + dirID, Parameters: url.Values{}, } if name != "" { opts.Parameters.Add("contentname", f.opt.Enc.FromStandardName(name)) } page := 1 OUTER: for { opts.Parameters.Set("page", strconv.Itoa(page)) opts.Parameters.Set("pageSize", strconv.Itoa(f.opt.ListChunk)) var result api.Contents var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err = result.Err(err); err != nil { if isAPIErr(err, "error-notFound") { return found, fs.ErrorDirNotFound } return found, fmt.Errorf("couldn't list files: %w", err) } for id, item := range result.Data.Children { _ = id if item.Type == api.ItemTypeFolder { if filesOnly { continue } } else if item.Type == api.ItemTypeFile { if directoriesOnly { continue } } else { fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type) continue } item.Name = 
f.opt.Enc.ToStandardName(item.Name) if fn(item) { found = true break OUTER } } if !result.Metadata.HasNextPage { break } page += 1 } return found, err } // Convert a list item into a DirEntry func (f *Fs) itemToDirEntry(ctx context.Context, remote string, info *api.Item) (entry fs.DirEntry, err error) { if info.Type == api.ItemTypeFolder { // cache the directory ID for later lookups f.dirCache.Put(remote, info.ID) d := &Directory{ Object: Object{ fs: f, remote: remote, }, items: int64(info.ChildrenCount), } d.setMetaDataAny(info) entry = d } else if info.Type == api.ItemTypeFile { entry, err = f.newObjectWithInfo(ctx, remote, info) if err != nil { return nil, err } } return entry, nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } var iErr error _, err = f.listAll(ctx, directoryID, false, false, "", func(info *api.Item) bool { remote := path.Join(dir, info.Name) entry, err := f.itemToDirEntry(ctx, remote, info) if err != nil { iErr = err return true } entries = append(entries, entry) return false }) if err != nil { return nil, err } if iErr != nil { return nil, iErr } return entries, nil } // implementation of ListR func (f *Fs) listR(ctx context.Context, dir string, list *list.Helper) (err error) { directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return err } opts := rest.Opts{ Method: "GET", Path: "/contents/" + directoryID, Parameters: url.Values{"maxdepth": {strconv.Itoa(maxDepth)}}, } page := 1 for { opts.Parameters.Set("page", strconv.Itoa(page)) opts.Parameters.Set("pageSize", strconv.Itoa(f.opt.ListChunk)) var 
result api.Contents var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err = result.Err(err); err != nil { if isAPIErr(err, "error-notFound") { return fs.ErrorDirNotFound } return fmt.Errorf("couldn't recursively list files: %w", err) } // Result.Data.Item now contains a recursive listing so we will have to decode recursively var decode func(string, *api.Item) error decode = func(dir string, dirItem *api.Item) error { // If we have ChildrenCount but no Children this means the recursion stopped here if dirItem.ChildrenCount > 0 && len(dirItem.Children) == 0 { return f.listR(ctx, dir, list) } for _, item := range dirItem.Children { if item.Type != api.ItemTypeFolder && item.Type != api.ItemTypeFile { fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type) continue } item.Name = f.opt.Enc.ToStandardName(item.Name) remote := path.Join(dir, item.Name) entry, err := f.itemToDirEntry(ctx, remote, item) if err != nil { return err } err = list.Add(entry) if err != nil { return err } if item.Type == api.ItemTypeFolder { err := decode(remote, item) if err != nil { return err } } } return nil } err = decode(dir, &result.Data.Item) if err != nil { return err } if !result.Metadata.HasNextPage { break } page += 1 } return err } // ListR lists the objects and directories of the Fs starting // from dir recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. // // Don't implement this unless you have a more efficient way // of listing recursively than doing a directory traversal. 
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { list := list.NewHelper(callback) err = f.listR(ctx, dir, list) if err != nil { return err } return list.Flush() } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Returns the object, leaf, directoryID and error. // // Used to create new objects func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { // Create the directory for the object if it doesn't exist leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true) if err != nil { return } // Temporary Object under construction o = &Object{ fs: f, remote: remote, } return o, leaf, directoryID, nil } // Put the object // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { existingObj, err := f.NewObject(ctx, src.Remote()) switch err { case nil: return existingObj, existingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // Not found so create it return f.PutUnchecked(ctx, in, src, options...) default: return nil, err } } // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) } // PutUnchecked the object into the container // // This will produce a duplicate if the object already exists. // // Copy the reader in to the new object which is returned. 
// // The new object may have been created if an error is returned func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() modTime := src.ModTime(ctx) o, _, _, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } return o, o.Update(ctx, in, src, options...) } // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, err := f.dirCache.FindDir(ctx, dir, true) return err } // DirSetModTime sets the directory modtime for dir func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error { dirID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return err } _, err = f.setModTime(ctx, dirID, modTime) return err } // deleteObject removes an object by ID func (f *Fs) deleteObject(ctx context.Context, id string) error { opts := rest.Opts{ Method: "DELETE", Path: "/contents/", } request := api.DeleteRequest{ ContentsID: id, } var result api.DeleteResponse err := f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, &request, &result) return shouldRetry(ctx, resp, err) }) if err = result.Err(err); err != nil { return fmt.Errorf("failed to delete item: %w", err) } // Check the individual result codes also for _, err := range result.Data { if err.IsError() { return fmt.Errorf("failed to delete item: %w", err) } } return nil } // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } dc := f.dirCache rootID, err := dc.FindDir(ctx, dir, false) if err != nil { return err } // Check to see if there is contents in the directory if check { found, err := f.listAll(ctx, rootID, false, false, "", func(item *api.Item) 
bool { return true }) if err != nil { return err } if found { return fs.ErrorDirectoryNotEmpty } } // Delete the directory err = f.deleteObject(ctx, rootID) if err != nil { return err } f.dirCache.FlushDir(dir) return nil } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return time.Second } // Purge deletes all the files and the container // // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, false) } // About gets quota information func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { result, err := f.getAccounts(ctx) if err != nil { return nil, err } used := result.Data.StatsCurrent.Storage files := result.Data.StatsCurrent.FileCount total := result.Data.SubscriptionLimitStorage usage = &fs.Usage{ Used: fs.NewUsageValue(used), // bytes in use Total: fs.NewUsageValue(total), // bytes total Free: fs.NewUsageValue(total - used), // bytes free Objects: fs.NewUsageValue(files), // total objects } return usage, nil } // patch an attribute on an object to value func (f *Fs) patch(ctx context.Context, id, attribute string, value any) (item *api.Item, err error) { var resp *http.Response var request = api.UpdateItemRequest{ Attribute: attribute, Value: value, } var result api.UpdateItemResponse opts := rest.Opts{ Method: "PUT", Path: "/contents/" + id + "/update", } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) return shouldRetry(ctx, resp, err) }) if err = result.Err(err); err != nil { return nil, fmt.Errorf("failed to patch item %q to %v: %w", attribute, value, err) } return &result.Data, nil } // rename a 
file or a folder func (f *Fs) rename(ctx context.Context, id, newLeaf string) (item *api.Item, err error) { return f.patch(ctx, id, "name", f.opt.Enc.FromStandardName(newLeaf)) } // setModTime sets the modification time of a file or folder func (f *Fs) setModTime(ctx context.Context, id string, modTime time.Time) (item *api.Item, err error) { return f.patch(ctx, id, "modTime", api.ToNativeTime(modTime)) } // move a file or a folder to a new directory func (f *Fs) move(ctx context.Context, id, newDirID string) (item *api.Item, err error) { var resp *http.Response var request = api.MoveRequest{ FolderID: newDirID, ContentsID: id, } var result api.MoveResponse opts := rest.Opts{ Method: "PUT", Path: "/contents/move", } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) return shouldRetry(ctx, resp, err) }) if err = result.Err(err); err != nil { return nil, fmt.Errorf("failed to move item: %w", err) } itemResult, ok := result.Data[id] if !ok || itemResult.Item.ID == "" { return nil, errors.New("failed to read result of move") } return &itemResult.Item, nil } // move and rename a file or folder to directoryID with leaf func (f *Fs) moveTo(ctx context.Context, id, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID string) (info *api.Item, err error) { // Can have duplicates so don't have to be careful here // Rename if required if srcLeaf != dstLeaf { info, err = f.rename(ctx, id, dstLeaf) if err != nil { return nil, err } } // Move if required if srcDirectoryID != dstDirectoryID { info, err = f.move(ctx, id, dstDirectoryID) if err != nil { return nil, err } } if info == nil { return f.readMetaDataForID(ctx, id) } return info, nil } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } // Find existing object srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false) if err != nil { return nil, err } // Create temporary object dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } // Do the move info, err := f.moveTo(ctx, srcObj.id, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID) if err != nil { return nil, err } err = dstObj.setMetaData(info) if err != nil { return nil, err } return dstObj, nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) if err != nil { return err } // Do the move _, err = f.moveTo(ctx, srcID, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID) if err != nil { return err } srcFs.dirCache.FlushDir(srcRemote) return nil } // copy a file or a folder to a new directory func (f *Fs) copy(ctx context.Context, id, newDirID string) (item *api.Item, err error) { var resp *http.Response var request = api.CopyRequest{ FolderID: newDirID, ContentsID: id, } var result api.CopyResponse opts := rest.Opts{ Method: "POST", Path: "/contents/copy", 
} err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) return shouldRetry(ctx, resp, err) }) if err = result.Err(err); err != nil { return nil, fmt.Errorf("failed to copy item: %w", err) } itemResult, ok := result.Data[id] if !ok || itemResult.Item.ID == "" { return nil, errors.New("failed to read result of copy") } return &itemResult.Item, nil } // copy and rename a file or folder to directoryID with leaf func (f *Fs) copyTo(ctx context.Context, srcID, srcLeaf, dstLeaf, dstDirectoryID string) (info *api.Item, err error) { // Can have duplicates so don't have to be careful here // Copy to dstDirectoryID first info, err = f.copy(ctx, srcID, dstDirectoryID) if err != nil { return nil, err } // Rename if required if srcLeaf != dstLeaf { info, err = f.rename(ctx, info.ID, dstLeaf) if err != nil { return nil, err } } return info, nil } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } srcLeaf := path.Base(srcObj.remote) srcPath := srcObj.fs.rootSlash() + srcObj.remote dstPath := f.rootSlash() + remote if srcPath == dstPath { return nil, fmt.Errorf("can't copy %q -> %q as are same name", srcPath, dstPath) } // Find existing object existingObj, err := f.NewObject(ctx, remote) if err == nil { defer func() { // Don't remove existing object if returning an error if err != nil { return } fs.Debugf(existingObj, "Server side copy: removing existing object after successful copy")
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/gofile/gofile_test.go
backend/gofile/gofile_test.go
// Test Gofile filesystem interface package gofile_test import ( "testing" "github.com/rclone/rclone/backend/gofile" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestGoFile:", NilObject: (*gofile.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/gofile/api/types.go
backend/gofile/api/types.go
// Package api has type definitions for gofile // // Converted from the API docs with help from https://mholt.github.io/json-to-go/ package api import ( "fmt" "time" ) const ( // 2017-05-03T07:26:10-07:00 timeFormat = `"` + time.RFC3339 + `"` ) // Time represents date and time information for the // gofile API, by using RFC3339 type Time time.Time // MarshalJSON turns a Time into JSON (in UTC) func (t *Time) MarshalJSON() (out []byte, err error) { timeString := (*time.Time)(t).Format(timeFormat) return []byte(timeString), nil } // UnmarshalJSON turns JSON into a Time func (t *Time) UnmarshalJSON(data []byte) error { newT, err := time.Parse(timeFormat, string(data)) if err != nil { return err } *t = Time(newT) return nil } // Error is returned from gofile when things go wrong type Error struct { Status string `json:"status"` } // Error returns a string for the error and satisfies the error interface func (e Error) Error() string { out := fmt.Sprintf("Error %q", e.Status) return out } // IsError returns true if there is an error func (e Error) IsError() bool { return e.Status != "ok" } // Err returns err if not nil, or e if IsError or nil func (e Error) Err(err error) error { if err != nil { return err } if e.IsError() { return e } return nil } // Check Error satisfies the error interface var _ error = (*Error)(nil) // Types of things in Item const ( ItemTypeFolder = "folder" ItemTypeFile = "file" ) // Item describes a folder or a file as returned by /contents type Item struct { ID string `json:"id"` ParentFolder string `json:"parentFolder"` Type string `json:"type"` Name string `json:"name"` Size int64 `json:"size"` Code string `json:"code"` CreateTime int64 `json:"createTime"` ModTime int64 `json:"modTime"` Link string `json:"link"` MD5 string `json:"md5"` MimeType string `json:"mimetype"` ChildrenCount int `json:"childrenCount"` DirectLinks map[string]*DirectLink `json:"directLinks"` //Public bool `json:"public"` //ServerSelected string `json:"serverSelected"` 
//Thumbnail string `json:"thumbnail"` //DownloadCount int `json:"downloadCount"` //TotalDownloadCount int64 `json:"totalDownloadCount"` //TotalSize int64 `json:"totalSize"` //ChildrenIDs []string `json:"childrenIds"` Children map[string]*Item `json:"children"` } // ToNativeTime converts a go time to a native time func ToNativeTime(t time.Time) int64 { return t.Unix() } // FromNativeTime converts native time to a go time func FromNativeTime(t int64) time.Time { return time.Unix(t, 0) } // DirectLink describes a direct link to a file so it can be // downloaded by third parties. type DirectLink struct { ExpireTime int64 `json:"expireTime"` SourceIpsAllowed []any `json:"sourceIpsAllowed"` DomainsAllowed []any `json:"domainsAllowed"` Auth []any `json:"auth"` IsReqLink bool `json:"isReqLink"` DirectLink string `json:"directLink"` } // Contents is returned from the /contents call type Contents struct { Error Data struct { Item } `json:"data"` Metadata Metadata `json:"metadata"` } // Metadata is returned when paging is in use type Metadata struct { TotalCount int `json:"totalCount"` TotalPages int `json:"totalPages"` Page int `json:"page"` PageSize int `json:"pageSize"` HasNextPage bool `json:"hasNextPage"` } // AccountsGetID is the result of /accounts/getid type AccountsGetID struct { Error Data struct { ID string `json:"id"` } `json:"data"` } // Stats of storage and traffic type Stats struct { FolderCount int64 `json:"folderCount"` FileCount int64 `json:"fileCount"` Storage int64 `json:"storage"` TrafficDirectGenerated int64 `json:"trafficDirectGenerated"` TrafficReqDownloaded int64 `json:"trafficReqDownloaded"` TrafficWebDownloaded int64 `json:"trafficWebDownloaded"` } // AccountsGet is the result of /accounts/{id} type AccountsGet struct { Error Data struct { ID string `json:"id"` Email string `json:"email"` Tier string `json:"tier"` PremiumType string `json:"premiumType"` Token string `json:"token"` RootFolder string `json:"rootFolder"` SubscriptionProvider string 
`json:"subscriptionProvider"` SubscriptionEndDate int `json:"subscriptionEndDate"` SubscriptionLimitDirectTraffic int64 `json:"subscriptionLimitDirectTraffic"` SubscriptionLimitStorage int64 `json:"subscriptionLimitStorage"` StatsCurrent Stats `json:"statsCurrent"` // StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"` } `json:"data"` } // CreateFolderRequest is the input to /contents/createFolder type CreateFolderRequest struct { ParentFolderID string `json:"parentFolderId"` FolderName string `json:"folderName"` ModTime int64 `json:"modTime,omitempty"` } // CreateFolderResponse is the output from /contents/createFolder type CreateFolderResponse struct { Error Data Item `json:"data"` } // DeleteRequest is the input to DELETE /contents type DeleteRequest struct { ContentsID string `json:"contentsId"` // comma separated list of IDs } // DeleteResponse is the input to DELETE /contents type DeleteResponse struct { Error Data map[string]Error } // DirectUploadURL returns the direct upload URL for Gofile func DirectUploadURL() string { return "https://upload.gofile.io/uploadfile" } // UploadResponse is returned by POST /contents/uploadfile type UploadResponse struct { Error Data Item `json:"data"` } // DirectLinksRequest specifies the parameters for the direct link type DirectLinksRequest struct { ExpireTime int64 `json:"expireTime,omitempty"` SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"` DomainsAllowed []any `json:"domainsAllowed,omitempty"` Auth []any `json:"auth,omitempty"` } // DirectLinksResult is returned from POST /contents/{id}/directlinks type DirectLinksResult struct { Error Data struct { ExpireTime int64 `json:"expireTime"` SourceIpsAllowed []any `json:"sourceIpsAllowed"` DomainsAllowed []any `json:"domainsAllowed"` Auth []any `json:"auth"` IsReqLink bool `json:"isReqLink"` ID string `json:"id"` DirectLink string `json:"directLink"` } `json:"data"` } // UpdateItemRequest describes the updates to be done to an item for PUT 
/contents/{id}/update // // The Value of the attribute to define : // For Attribute "name" : The name of the content (file or folder) // For Attribute "description" : The description displayed on the download page (folder only) // For Attribute "tags" : A comma-separated list of tags (folder only) // For Attribute "public" : either true or false (folder only) // For Attribute "expiry" : A unix timestamp of the expiration date (folder only) // For Attribute "password" : The password to set (folder only) type UpdateItemRequest struct { Attribute string `json:"attribute"` Value any `json:"attributeValue"` } // UpdateItemResponse is returned by PUT /contents/{id}/update type UpdateItemResponse struct { Error Data Item `json:"data"` } // MoveRequest is the input to /contents/move type MoveRequest struct { FolderID string `json:"folderId"` ContentsID string `json:"contentsId"` // comma separated list of IDs } // MoveResponse is returned by POST /contents/move type MoveResponse struct { Error Data map[string]struct { Error Item `json:"data"` } `json:"data"` } // CopyRequest is the input to /contents/copy type CopyRequest struct { FolderID string `json:"folderId"` ContentsID string `json:"contentsId"` // comma separated list of IDs } // CopyResponse is returned by POST /contents/copy type CopyResponse struct { Error Data map[string]struct { Error Item `json:"data"` } `json:"data"` } // UploadServerStatus is returned when fetching the root of an upload server type UploadServerStatus struct { Error Data struct { Server string `json:"server"` Test string `json:"test"` } `json:"data"` }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/putio/error.go
backend/putio/error.go
package putio import ( "context" "fmt" "net/http" "slices" "strconv" "time" "github.com/putdotio/go-putio/putio" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/lib/pacer" ) func checkStatusCode(resp *http.Response, expected ...int) error { if slices.Contains(expected, resp.StatusCode) { return nil } return &statusCodeError{response: resp} } type statusCodeError struct { response *http.Response } func (e *statusCodeError) Error() string { return fmt.Sprintf("unexpected status code (%d) response while doing %s to %s", e.response.StatusCode, e.response.Request.Method, e.response.Request.URL.String()) } // This method is called from fserrors.ShouldRetry() to determine if an error should be retried. // Some errors (e.g. 429 Too Many Requests) are handled before this step, so they are not included here. func (e *statusCodeError) Temporary() bool { return e.response.StatusCode >= 500 } // shouldRetry returns a boolean as to whether this err deserves to be // retried. It returns the err as a convenience func shouldRetry(ctx context.Context, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } if err == nil { return false, nil } if perr, ok := err.(*putio.ErrorResponse); ok { err = &statusCodeError{response: perr.Response} } if scerr, ok := err.(*statusCodeError); ok && scerr.response.StatusCode == 429 { delay := defaultRateLimitSleep header := scerr.response.Header.Get("x-ratelimit-reset") if header != "" { if resetTime, cerr := strconv.ParseInt(header, 10, 64); cerr == nil { delay = time.Until(time.Unix(resetTime+1, 0)) } } return true, pacer.RetryAfterError(scerr, delay) } if fserrors.ShouldRetry(err) { return true, err } return false, err }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/putio/putio_test.go
backend/putio/putio_test.go
// Test Put.io filesystem interface package putio import ( "testing" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestPutio:", NilObject: (*Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/putio/object.go
backend/putio/object.go
package putio import ( "context" "fmt" "io" "net/http" "net/url" "path" "strconv" "time" "github.com/putdotio/go-putio/putio" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/hash" ) // Object describes a Putio object // // Putio Objects always have full metadata type Object struct { fs *Fs // what this object is part of file *putio.File remote string // The remote path modtime time.Time } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) { // defer log.Trace(f, "remote=%v", remote)("o=%+v, err=%v", &o, &err) obj := &Object{ fs: f, remote: remote, } err = obj.readEntryAndSetMetadata(ctx) if err != nil { return nil, err } return obj, err } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info putio.File) (o fs.Object, err error) { // defer log.Trace(f, "remote=%v, info=+v", remote, &info)("o=%+v, err=%v", &o, &err) obj := &Object{ fs: f, remote: remote, } err = obj.setMetadataFromEntry(info) if err != nil { return nil, err } return obj, err } // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Hash returns the dropbox special hash func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.CRC32 { return "", hash.ErrUnsupported } err := o.readEntryAndSetMetadata(ctx) if err != nil { return "", fmt.Errorf("failed to read hash from metadata: %w", err) } return o.file.CRC32, nil } // Size returns the size of an object in bytes func (o *Object) Size() int64 { if o.file == nil { return 0 } return o.file.Size } // ID 
returns the ID of the Object if known, or "" if not func (o *Object) ID() string { if o.file == nil { return "" } return itoa(o.file.ID) } // MimeType returns the content type of the Object if // known, or "" if not func (o *Object) MimeType(ctx context.Context) string { err := o.readEntryAndSetMetadata(ctx) if err != nil { return "" } return o.file.ContentType } // setMetadataFromEntry sets the fs data from a putio.File // // This isn't a complete set of metadata and has an inaccurate date func (o *Object) setMetadataFromEntry(info putio.File) error { o.file = &info o.modtime = info.UpdatedAt.Time return nil } // Reads the entry for a file from putio func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) { // defer log.Trace(o, "")("f=%+v, err=%v", f, &err) leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound } return nil, err } var resp struct { File putio.File `json:"file"` } err = o.fs.pacer.Call(func() (bool, error) { // fs.Debugf(o, "requesting child. 
directoryID: %s, name: %s", directoryID, leaf) req, err := o.fs.client.NewRequest(ctx, "GET", "/v2/files/"+directoryID+"/child?name="+url.QueryEscape(o.fs.opt.Enc.FromStandardName(leaf)), nil) if err != nil { return false, err } _, err = o.fs.client.Do(req, &resp) if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 { return false, fs.ErrorObjectNotFound } return shouldRetry(ctx, err) }) if err != nil { return nil, err } if resp.File.IsDir() { return nil, fs.ErrorIsDir } return &resp.File, err } // Read entry if not set and set metadata from it func (o *Object) readEntryAndSetMetadata(ctx context.Context) error { if o.file != nil { return nil } entry, err := o.readEntry(ctx) if err != nil { return err } return o.setMetadataFromEntry(*entry) } // Returns the remote path for the object func (o *Object) remotePath() string { return path.Join(o.fs.root, o.remote) } // ModTime returns the modification time of the object // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) ModTime(ctx context.Context) time.Time { if o.modtime.IsZero() { err := o.readEntryAndSetMetadata(ctx) if err != nil { fs.Debugf(o, "Failed to read metadata: %v", err) return time.Now() } } return o.modtime } // SetModTime sets the modification time of the local fs object // // Commits the datastore func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) { // defer log.Trace(o, "modTime=%v", modTime.String())("err=%v", &err) req, err := o.fs.client.NewRequest(ctx, "POST", "/v2/files/touch?file_id="+strconv.FormatInt(o.file.ID, 10)+"&updated_at="+url.QueryEscape(modTime.Format(time.RFC3339)), nil) if err != nil { return err } // fs.Debugf(o, "setting modtime: %s", modTime.String()) _, err = o.fs.client.Do(req, nil) if err != nil { return err } o.modtime = modTime if o.file != nil { o.file.UpdatedAt.Time = modTime } return nil } // Storable returns whether this object is 
storable func (o *Object) Storable() bool { return true } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { // defer log.Trace(o, "")("err=%v", &err) var storageURL string err = o.fs.pacer.Call(func() (bool, error) { storageURL, err = o.fs.client.Files.URL(ctx, o.file.ID, true) return shouldRetry(ctx, err) }) if err != nil { return } var resp *http.Response headers := fs.OpenOptionHeaders(options) err = o.fs.pacer.Call(func() (bool, error) { req, err := http.NewRequestWithContext(ctx, http.MethodGet, storageURL, nil) if err != nil { return shouldRetry(ctx, err) } req.Header.Set("User-Agent", o.fs.client.UserAgent) // merge headers with extra headers for header, value := range headers { req.Header.Set(header, value) } // fs.Debugf(o, "opening file: id=%d", o.file.ID) resp, err = o.fs.httpClient.Do(req) if err != nil { return shouldRetry(ctx, err) } if err := checkStatusCode(resp, 200, 206); err != nil { return shouldRetry(ctx, err) } return false, nil }) if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 { _ = resp.Body.Close() return nil, fserrors.NoRetryError(err) } if err != nil { return nil, err } return resp.Body, nil } // Update the already existing object // // Copy the reader into the object updating modTime and size. // // The new object may have been created if an error is returned func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { // defer log.Trace(o, "src=%+v", src)("err=%v", &err) remote := o.remotePath() if ignoredFiles.MatchString(remote) { fs.Logf(o, "File name disallowed - not uploading") return nil } err = o.Remove(ctx) if err != nil { return err } newObj, err := o.fs.putUnchecked(ctx, in, src, o.remote, options...) 
if err != nil { return err } *o = *(newObj.(*Object)) return err } // Remove an object func (o *Object) Remove(ctx context.Context) (err error) { // defer log.Trace(o, "")("err=%v", &err) return o.fs.pacer.Call(func() (bool, error) { // fs.Debugf(o, "removing file: id=%d", o.file.ID) err = o.fs.client.Files.Delete(ctx, o.file.ID) return shouldRetry(ctx, err) }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/putio/fs.go
backend/putio/fs.go
package putio import ( "bytes" "context" "encoding/base64" "errors" "fmt" "io" "net/http" "net/url" "path" "strconv" "strings" "time" "github.com/putdotio/go-putio/putio" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/readers" ) // Fs represents a remote Putio server type Fs struct { name string // name of this remote root string // the path we are working on features *fs.Features // optional features opt Options // options for this Fs client *putio.Client // client for making API calls to Put.io pacer *fs.Pacer // To pace the API calls dirCache *dircache.DirCache // Map of directory path to directory id httpClient *http.Client // base http client oAuthClient *http.Client // http client with oauth Authorization } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("Putio root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // parsePath parses a putio 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (f fs.Fs, err error) { // defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err) // Parse config into Options struct opt := new(Options) err = configstruct.Set(m, opt) if err != nil { return nil, err } root = 
parsePath(root) httpClient := fshttp.NewClient(ctx) oAuthClient, _, err := oauthutil.NewClientWithBaseClient(ctx, name, m, putioConfig, httpClient) if err != nil { return nil, fmt.Errorf("failed to configure putio: %w", err) } p := &Fs{ name: name, root: root, opt: *opt, pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), client: putio.NewClient(oAuthClient), httpClient: httpClient, oAuthClient: oAuthClient, } p.features = (&fs.Features{ DuplicateFiles: true, ReadMimeType: true, CanHaveEmptyDirectories: true, }).Fill(ctx, p) p.dirCache = dircache.New(root, "0", p) // Find the current root err = p.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) tempF := *p tempF.dirCache = dircache.New(newRoot, "0", &tempF) tempF.root = newRoot // Make new Fs which is the parent err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return p, nil } _, err := tempF.NewObject(ctx, remote) if err != nil { // unable to list folder so return old f return p, nil } // XXX: update the old f here instead of returning tempF, since // `features` were already filled with functions having *f as a receiver. 
// See https://github.com/rclone/rclone/issues/2182 p.dirCache = tempF.dirCache p.root = tempF.root return p, fs.ErrorIsFile } // fs.Debugf(p, "Root id: %s", p.dirCache.RootID()) return p, nil } func itoa(i int64) string { return strconv.FormatInt(i, 10) } func atoi(a string) int64 { i, err := strconv.ParseInt(a, 10, 64) if err != nil { panic(err) } return i } // CreateDir makes a directory with pathID as parent and name leaf func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { // defer log.Trace(f, "pathID=%v, leaf=%v", pathID, leaf)("newID=%v, err=%v", newID, &err) parentID := atoi(pathID) var entry putio.File err = f.pacer.Call(func() (bool, error) { // fs.Debugf(f, "creating folder. part: %s, parentID: %d", leaf, parentID) entry, err = f.client.Files.CreateFolder(ctx, f.opt.Enc.FromStandardName(leaf), parentID) return shouldRetry(ctx, err) }) return itoa(entry.ID), err } // FindLeaf finds a directory of name leaf in the folder with ID pathID func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { // defer log.Trace(f, "pathID=%v, leaf=%v", pathID, leaf)("pathIDOut=%v, found=%v, err=%v", pathIDOut, found, &err) if pathID == "0" && leaf == "" { // that's the root directory return pathID, true, nil } fileID := atoi(pathID) var children []putio.File err = f.pacer.Call(func() (bool, error) { // fs.Debugf(f, "listing file: %d", fileID) children, _, err = f.client.Files.List(ctx, fileID) return shouldRetry(ctx, err) }) if err != nil { if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 { err = nil } return } for _, child := range children { if f.opt.Enc.ToStandardName(child.Name) == leaf { found = true pathIDOut = itoa(child.ID) if !child.IsDir() { err = fs.ErrorIsFile } return } } return } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. 
// // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { // defer log.Trace(f, "dir=%v", dir)("err=%v", &err) directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } parentID := atoi(directoryID) var children []putio.File err = f.pacer.Call(func() (bool, error) { // fs.Debugf(f, "listing files inside List: %d", parentID) children, _, err = f.client.Files.List(ctx, parentID) return shouldRetry(ctx, err) }) if err != nil { return } for _, child := range children { remote := path.Join(dir, f.opt.Enc.ToStandardName(child.Name)) // fs.Debugf(f, "child: %s", remote) if child.IsDir() { f.dirCache.Put(remote, itoa(child.ID)) d := fs.NewDir(remote, child.UpdatedAt.Time) entries = append(entries, d) } else { o, err := f.newObjectWithInfo(ctx, remote, child) if err != nil { return nil, err } entries = append(entries, o) } } return } // Put the object // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) { // defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err) existingObj, err := f.NewObject(ctx, src.Remote()) switch err { case nil: return existingObj, existingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // Not found so create it return f.PutUnchecked(ctx, in, src, options...) default: return nil, err } } // PutUnchecked uploads the object // // This will create a duplicate if we upload a new file without // checking to see if there is one already - use Put() for that. 
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) { return f.putUnchecked(ctx, in, src, src.Remote(), options...) } func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options ...fs.OpenOption) (o fs.Object, err error) { // defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err) size := src.Size() leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true) if err != nil { return nil, err } loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx), options) if err != nil { return nil, err } fileID, err := f.sendUpload(ctx, loc, size, in) if err != nil { return nil, err } var entry putio.File err = f.pacer.Call(func() (bool, error) { // fs.Debugf(f, "getting file: %d", fileID) entry, err = f.client.Files.Get(ctx, fileID) return shouldRetry(ctx, err) }) if err != nil { return nil, err } return f.newObjectWithInfo(ctx, remote, entry) } func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time, options []fs.OpenOption) (location string, err error) { // defer log.Trace(f, "name=%v, size=%v, parentID=%v, modTime=%v", name, size, parentID, modTime.String())("location=%v, err=%v", location, &err) err = f.pacer.Call(func() (bool, error) { req, err := http.NewRequestWithContext(ctx, "POST", "https://upload.put.io/files/", nil) if err != nil { return false, err } req.Header.Set("tus-resumable", "1.0.0") req.Header.Set("upload-length", strconv.FormatInt(size, 10)) b64name := base64.StdEncoding.EncodeToString([]byte(f.opt.Enc.FromStandardName(name))) b64true := base64.StdEncoding.EncodeToString([]byte("true")) b64parentID := base64.StdEncoding.EncodeToString([]byte(parentID)) b64modifiedAt := base64.StdEncoding.EncodeToString([]byte(modTime.Format(time.RFC3339))) req.Header.Set("upload-metadata", fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at %s", b64name, b64true, 
b64parentID, b64modifiedAt)) fs.OpenOptionAddHTTPHeaders(req.Header, options) resp, err := f.oAuthClient.Do(req) retry, err := shouldRetry(ctx, err) if retry { return true, err } if err != nil { return false, err } if err := checkStatusCode(resp, 201); err != nil { return shouldRetry(ctx, err) } location = resp.Header.Get("location") if location == "" { return false, errors.New("empty location header from upload create") } return false, nil }) return } func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.Reader) (fileID int64, err error) { // defer log.Trace(f, "location=%v, size=%v", location, size)("fileID=%v, err=%v", &fileID, &err) if size == 0 { err = f.pacer.Call(func() (bool, error) { fs.Debugf(f, "Sending zero length chunk") _, fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0) return shouldRetry(ctx, err) }) return } var clientOffset int64 var offsetMismatch bool buf := make([]byte, defaultChunkSize) for clientOffset < size { chunkSize := min(size-clientOffset, int64(defaultChunkSize)) chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize) chunkStart := clientOffset reqSize := chunkSize transferOffset := clientOffset fs.Debugf(f, "chunkStart: %d, reqSize: %d", chunkStart, reqSize) // Transfer the chunk err = f.pacer.Call(func() (bool, error) { if offsetMismatch { // Get file offset and seek to the position offset, err := f.getServerOffset(ctx, location) if err != nil { return shouldRetry(ctx, err) } sentBytes := offset - chunkStart fs.Debugf(f, "sentBytes: %d", sentBytes) _, err = chunk.Seek(sentBytes, io.SeekStart) if err != nil { return shouldRetry(ctx, err) } transferOffset = offset reqSize = chunkSize - sentBytes offsetMismatch = false } fs.Debugf(f, "Sending chunk. 
transferOffset: %d length: %d", transferOffset, reqSize) var serverOffset int64 serverOffset, fileID, err = f.transferChunk(ctx, location, transferOffset, chunk, reqSize) if cerr, ok := err.(*statusCodeError); ok && cerr.response.StatusCode == 409 { offsetMismatch = true return true, err } if serverOffset != (transferOffset + reqSize) { offsetMismatch = true return true, errors.New("connection broken") } return shouldRetry(ctx, err) }) if err != nil { return } clientOffset += chunkSize } return } func (f *Fs) getServerOffset(ctx context.Context, location string) (offset int64, err error) { // defer log.Trace(f, "location=%v", location)("offset=%v, err=%v", &offset, &err) req, err := f.makeUploadHeadRequest(ctx, location) if err != nil { return 0, err } resp, err := f.oAuthClient.Do(req) if err != nil { return 0, err } err = checkStatusCode(resp, 200) if err != nil { return 0, err } return strconv.ParseInt(resp.Header.Get("upload-offset"), 10, 64) } func (f *Fs) transferChunk(ctx context.Context, location string, start int64, chunk io.ReadSeeker, chunkSize int64) (serverOffset, fileID int64, err error) { // defer log.Trace(f, "location=%v, start=%v, chunkSize=%v", location, start, chunkSize)("fileID=%v, err=%v", &fileID, &err) req, err := f.makeUploadPatchRequest(ctx, location, chunk, start, chunkSize) if err != nil { return } resp, err := f.oAuthClient.Do(req) if err != nil { return } defer func() { _ = resp.Body.Close() }() err = checkStatusCode(resp, 204) if err != nil { return } serverOffset, err = strconv.ParseInt(resp.Header.Get("upload-offset"), 10, 64) if err != nil { return } sfid := resp.Header.Get("putio-file-id") if sfid != "" { fileID, err = strconv.ParseInt(sfid, 10, 64) if err != nil { return } } return } func (f *Fs) makeUploadHeadRequest(ctx context.Context, location string) (*http.Request, error) { req, err := http.NewRequestWithContext(ctx, "HEAD", location, nil) if err != nil { return nil, err } req.Header.Set("tus-resumable", "1.0.0") return 
req, nil } func (f *Fs) makeUploadPatchRequest(ctx context.Context, location string, in io.Reader, offset, length int64) (*http.Request, error) { req, err := http.NewRequestWithContext(ctx, "PATCH", location, in) if err != nil { return nil, err } req.Header.Set("tus-resumable", "1.0.0") req.Header.Set("upload-offset", strconv.FormatInt(offset, 10)) req.Header.Set("content-length", strconv.FormatInt(length, 10)) req.Header.Set("content-type", "application/offset+octet-stream") return req, nil } // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { // defer log.Trace(f, "dir=%v", dir)("err=%v", &err) _, err = f.dirCache.FindDir(ctx, dir, true) return err } // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) { // defer log.Trace(f, "dir=%v", dir)("err=%v", &err) root := strings.Trim(path.Join(f.root, dir), "/") // can't remove root if root == "" { return errors.New("can't remove root directory") } // check directory exists directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return fmt.Errorf("Rmdir: %w", err) } dirID := atoi(directoryID) if check { // check directory empty var children []putio.File err = f.pacer.Call(func() (bool, error) { // fs.Debugf(f, "listing files: %d", dirID) children, _, err = f.client.Files.List(ctx, dirID) return shouldRetry(ctx, err) }) if err != nil { return fmt.Errorf("Rmdir: %w", err) } if len(children) != 0 { return errors.New("directory not empty") } } // remove it err = f.pacer.Call(func() (bool, error) { // fs.Debugf(f, "deleting file: %d", dirID) err = f.client.Files.Delete(ctx, dirID) return shouldRetry(ctx, err) }) f.dirCache.FlushDir(dir) return err } // Rmdir deletes the container // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) { return f.purgeCheck(ctx, 
dir, true) } // Precision returns the precision func (f *Fs) Precision() time.Duration { return time.Second } // Purge deletes all the files in the directory // // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge(ctx context.Context, dir string) (err error) { // defer log.Trace(f, "")("err=%v", &err) return f.purgeCheck(ctx, dir, false) } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Object, err error) { // defer log.Trace(f, "src=%+v, remote=%v", src, remote)("o=%+v, err=%v", &o, &err) srcObj, ok := src.(*Object) if !ok { return nil, fs.ErrorCantCopy } leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true) if err != nil { return nil, err } modTime := src.ModTime(ctx) var resp struct { File putio.File `json:"file"` } // For some unknown reason the API sometimes returns the name // already exists unless we upload to a temporary name and // rename // // {"error_id":null,"error_message":"Name already exist","error_type":"NAME_ALREADY_EXIST","error_uri":"http://api.put.io/v2/docs","extra":{},"status":"ERROR","status_code":400} suffix := "." 
+ random.String(8) err = f.pacer.Call(func() (bool, error) { params := url.Values{} params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10)) params.Set("parent_id", directoryID) params.Set("name", f.opt.Enc.FromStandardName(leaf+suffix)) req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode())) if err != nil { return false, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") // fs.Debugf(f, "copying file (%d) to parent_id: %s", srcObj.file.ID, directoryID) _, err = f.client.Do(req, &resp) return shouldRetry(ctx, err) }) if err != nil { return nil, err } // We have successfully copied the file to random name // Check to see if file already exists first and delete it if so existingObj, err := f.NewObject(ctx, remote) if err == nil { err = existingObj.Remove(ctx) if err != nil { return nil, fmt.Errorf("server side copy: failed to remove existing file: %w", err) } } err = f.pacer.Call(func() (bool, error) { params := url.Values{} params.Set("file_id", strconv.FormatInt(resp.File.ID, 10)) params.Set("name", f.opt.Enc.FromStandardName(leaf)) req, err := f.client.NewRequest(ctx, "POST", "/v2/files/rename", strings.NewReader(params.Encode())) if err != nil { return false, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") _, err = f.client.Do(req, &resp) return shouldRetry(ctx, err) }) if err != nil { return nil, err } o, err = f.newObjectWithInfo(ctx, remote, resp.File) if err != nil { return nil, err } err = o.SetModTime(ctx, modTime) if err != nil { return nil, err } return o, nil } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Object, err error) { // defer log.Trace(f, "src=%+v, remote=%v", src, remote)("o=%+v, err=%v", &o, &err) srcObj, ok := src.(*Object) if !ok { return nil, fs.ErrorCantMove } leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true) if err != nil { return nil, err } modTime := src.ModTime(ctx) err = f.pacer.Call(func() (bool, error) { params := url.Values{} params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10)) params.Set("parent_id", directoryID) params.Set("name", f.opt.Enc.FromStandardName(leaf)) req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode())) if err != nil { return false, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") // fs.Debugf(f, "moving file (%d) to parent_id: %s", srcObj.file.ID, directoryID) _, err = f.client.Do(req, nil) return shouldRetry(ctx, err) }) if err != nil { return nil, err } o, err = f.NewObject(ctx, remote) if err != nil { return nil, err } err = o.SetModTime(ctx, modTime) if err != nil { return nil, err } return o, nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) { // defer log.Trace(f, "src=%+v, srcRemote=%v, dstRemote", src, srcRemote, dstRemote)("err=%v", &err) srcFs, ok := src.(*Fs) if !ok { return fs.ErrorCantDirMove } srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) if err != nil { return err } err = f.pacer.Call(func() (bool, error) { params := url.Values{} params.Set("file_id", srcID) params.Set("parent_id", dstDirectoryID) params.Set("name", f.opt.Enc.FromStandardName(dstLeaf)) req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode())) if err != nil { return false, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") // fs.Debugf(f, "moving file (%s) to parent_id: %s", srcID, dstDirectoryID) _, err = f.client.Do(req, nil) return shouldRetry(ctx, err) }) srcFs.dirCache.FlushDir(srcRemote) return err } // About gets quota information func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { // defer log.Trace(f, "")("usage=%+v, err=%v", usage, &err) var ai putio.AccountInfo err = f.pacer.Call(func() (bool, error) { // fs.Debugf(f, "getting account info") ai, err = f.client.Account.Info(ctx) return shouldRetry(ctx, err) }) if err != nil { return nil, err } return &fs.Usage{ Total: fs.NewUsageValue(ai.Disk.Size), // quota of bytes that can be used Used: fs.NewUsageValue(ai.Disk.Used), // bytes in use Free: fs.NewUsageValue(ai.Disk.Avail), // bytes which can be uploaded before reaching the quota }, nil } // Hashes returns the supported hash sets. 
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.CRC32) } // DirCacheFlush resets the directory cache - used in testing as an // optional interface func (f *Fs) DirCacheFlush() { // defer log.Trace(f, "")("") f.dirCache.ResetRoot() } // CleanUp the trash in the Fs func (f *Fs) CleanUp(ctx context.Context) (err error) { // defer log.Trace(f, "")("err=%v", &err) return f.pacer.Call(func() (bool, error) { req, err := f.client.NewRequest(ctx, "POST", "/v2/trash/empty", nil) if err != nil { return false, err } // fs.Debugf(f, "emptying trash") _, err = f.client.Do(req, nil) return shouldRetry(ctx, err) }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/putio/putio.go
backend/putio/putio.go
// Package putio provides an interface to the put.io storage system. package putio import ( "context" "regexp" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/oauthutil" ) /* // TestPutio stringNeedsEscaping = []rune{ '/', '\x00' } maxFileLength = 255 canWriteUnnormalized = true canReadUnnormalized = true canReadRenormalized = true canStream = false */ // Constants const ( rcloneClientID = "4131" rcloneObscuredClientSecret = "cMwrjWVmrHZp3gf1ZpCrlyGAmPpB-YY5BbVnO1fj-G9evcd8" minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 1 // bigger for slower decay, exponential defaultChunkSize = 48 * fs.Mebi defaultRateLimitSleep = 60 * time.Second ) var ( // Description of how to auth for this app putioConfig = &oauthutil.Config{ Scopes: []string{}, AuthURL: "https://api.put.io/v2/oauth2/authenticate", TokenURL: "https://api.put.io/v2/oauth2/access_token", ClientID: rcloneClientID, ClientSecret: obscure.MustReveal(rcloneObscuredClientSecret), RedirectURL: oauthutil.RedirectLocalhostURL, } // A regexp matching path names for ignoring unnecessary files ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r)$`) ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "putio", Description: "Put.io", NewFs: NewFs, Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { return oauthutil.ConfigOut("", &oauthutil.Options{ OAuth2Config: putioConfig, NoOffline: true, }) }, Options: append(oauthutil.SharedOptions, []fs.Option{{ Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // Note that \ is renamed to - // // Encode invalid UTF-8 bytes as json doesn't handle them properly. 
Default: (encoder.Display | encoder.EncodeBackSlash | encoder.EncodeInvalidUtf8), }}...), }) } // Options defines the configuration for this backend type Options struct { Enc encoder.MultiEncoder `config:"encoding"` } // Check the interfaces are satisfied var ( _ fs.Fs = (*Fs)(nil) _ fs.Purger = (*Fs)(nil) _ fs.PutUncheckeder = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil) _ fs.Mover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil) _ dircache.DirCacher = (*Fs)(nil) _ fs.DirCacheFlusher = (*Fs)(nil) _ fs.CleanUpper = (*Fs)(nil) _ fs.Copier = (*Fs)(nil) _ fs.Object = (*Object)(nil) _ fs.MimeTyper = (*Object)(nil) _ fs.IDer = (*Object)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sia/sia_test.go
backend/sia/sia_test.go
// Test Sia filesystem interface package sia_test import ( "testing" "github.com/rclone/rclone/backend/sia" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestSia:", NilObject: (*sia.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sia/sia.go
backend/sia/sia.go
// Package sia provides an interface to the Sia storage system. package sia import ( "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "path" "strings" "time" "github.com/rclone/rclone/backend/sia/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" ) const ( minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "sia", Description: "Sia Decentralized Cloud", NewFs: NewFs, Options: []fs.Option{{ Name: "api_url", Help: `Sia daemon API URL, like http://sia.daemon.host:9980. Note that siad must run with --disable-api-security to open API port for other hosts (not recommended). Keep default if Sia daemon runs on localhost.`, Default: "http://127.0.0.1:9980", Sensitive: true, }, { Name: "api_password", Help: `Sia Daemon API Password. 
Can be found in the apipassword file located in HOME/.sia/ or in the daemon directory.`, IsPassword: true, }, { Name: "user_agent", Help: `Siad User Agent Sia daemon requires the 'Sia-Agent' user agent by default for security`, Default: "Sia-Agent", Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: encoder.EncodeInvalidUtf8 | encoder.EncodeCtl | encoder.EncodeDel | encoder.EncodeHashPercent | encoder.EncodeQuestion | encoder.EncodeDot | encoder.EncodeSlash, }, }}) } // Options defines the configuration for this backend type Options struct { APIURL string `config:"api_url"` APIPassword string `config:"api_password"` UserAgent string `config:"user_agent"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote siad type Fs struct { name string // name of this remote root string // the path we are working on if any opt Options // parsed config options features *fs.Features // optional features srv *rest.Client // the connection to siad pacer *fs.Pacer // pacer for API calls } // Object describes a Sia object type Object struct { fs *Fs remote string modTime time.Time size int64 } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // ModTime is the last modified time (read-only) func (o *Object) ModTime(ctx context.Context) time.Time { return o.modTime } // Size is the file length func (o *Object) Size() int64 { return o.size } // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Hash is not supported func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) { return "", hash.ErrUnsupported } // Storable returns if this object is storable func (o *Object) Storable() bool { return true } // SetModTime is not supported func (o *Object) SetModTime(ctx context.Context, t time.Time) error { return 
fs.ErrorCantSetModTime } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { var optionsFixed []fs.OpenOption for _, opt := range options { if optRange, ok := opt.(*fs.RangeOption); ok { // Ignore range option if file is empty if o.Size() == 0 && optRange.Start == 0 && optRange.End > 0 { continue } } optionsFixed = append(optionsFixed, opt) } var resp *http.Response opts := rest.Opts{ Method: "GET", Path: path.Join("/renter/stream/", o.fs.root, o.fs.opt.Enc.FromStandardPath(o.remote)), Options: optionsFixed, } err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) return o.fs.shouldRetry(resp, err) }) if err != nil { return nil, err } return resp.Body, err } // Update the object with the contents of the io.Reader func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { size := src.Size() var resp *http.Response opts := rest.Opts{ Method: "POST", Path: path.Join("/renter/uploadstream/", o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))), Body: in, ContentLength: &size, Parameters: url.Values{}, } opts.Parameters.Set("force", "true") err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) return o.fs.shouldRetry(resp, err) }) if err == nil { err = o.readMetaData(ctx) } return err } // Remove an object func (o *Object) Remove(ctx context.Context) (err error) { var resp *http.Response opts := rest.Opts{ Method: "POST", Path: path.Join("/renter/delete/", o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))), } err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) return o.fs.shouldRetry(resp, err) }) return err } // sync the size and other metadata down for the object func (o *Object) readMetaData(ctx context.Context) (err error) { opts := rest.Opts{ Method: "GET", Path: path.Join("/renter/file/", 
o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))), } var result api.FileResponse var resp *http.Response err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result) return o.fs.shouldRetry(resp, err) }) if err != nil { return err } o.size = int64(result.File.Filesize) o.modTime = result.File.ModTime return nil } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("Sia %s", f.opt.APIURL) } // Precision is unsupported because ModTime is not changeable func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } // Hashes are not exposed anywhere func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } // Features for this fs func (f *Fs) Features() *fs.Features { return f.features } // List files and directories in a directory func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { dirPrefix := f.opt.Enc.FromStandardPath(path.Join(f.root, dir)) + "/" var result api.DirectoriesResponse var resp *http.Response opts := rest.Opts{ Method: "GET", Path: path.Join("/renter/dir/", dirPrefix) + "/", } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(resp, err) }) if err != nil { return nil, err } for _, directory := range result.Directories { if directory.SiaPath+"/" == dirPrefix { continue } d := fs.NewDir(f.opt.Enc.ToStandardPath(strings.TrimPrefix(directory.SiaPath, f.opt.Enc.FromStandardPath(f.root)+"/")), directory.MostRecentModTime) entries = append(entries, d) } for _, file := range result.Files { o := &Object{fs: f, remote: f.opt.Enc.ToStandardPath(strings.TrimPrefix(file.SiaPath, f.opt.Enc.FromStandardPath(f.root)+"/")), modTime: file.ModTime, size: int64(file.Filesize)} entries = 
append(entries, o) } return entries, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) { obj := &Object{ fs: f, remote: remote, } err = obj.readMetaData(ctx) if err != nil { return nil, err } return obj, nil } // Put the object into the remote siad via uploadstream func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { o := &Object{ fs: f, remote: src.Remote(), modTime: src.ModTime(ctx), size: src.Size(), } err := o.Update(ctx, in, src, options...) if err == nil { return o, nil } // Cleanup stray files left after failed upload for range 5 { cleanObj, cleanErr := f.NewObject(ctx, src.Remote()) if cleanErr == nil { cleanErr = cleanObj.Remove(ctx) } if cleanErr == nil { break } if cleanErr != fs.ErrorObjectNotFound { fs.Logf(f, "%q: cleanup failed upload: %v", src.Remote(), cleanErr) break } time.Sleep(100 * time.Millisecond) } return nil, err } // PutStream the object into the remote siad via uploadstream func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) 
} // Mkdir creates a directory func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { var resp *http.Response opts := rest.Opts{ Method: "POST", Path: path.Join("/renter/dir/", f.opt.Enc.FromStandardPath(path.Join(f.root, dir))), Parameters: url.Values{}, } opts.Parameters.Set("action", "create") err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.Call(ctx, &opts) return f.shouldRetry(resp, err) }) if err == fs.ErrorDirExists { err = nil } return err } // Rmdir removes a directory func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) { var resp *http.Response opts := rest.Opts{ Method: "GET", Path: path.Join("/renter/dir/", f.opt.Enc.FromStandardPath(path.Join(f.root, dir))), } var result api.DirectoriesResponse err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return f.shouldRetry(resp, err) }) if len(result.Directories) == 0 { return fs.ErrorDirNotFound } else if len(result.Files) > 0 || len(result.Directories) > 1 { return fs.ErrorDirectoryNotEmpty } opts = rest.Opts{ Method: "POST", Path: path.Join("/renter/dir/", f.opt.Enc.FromStandardPath(path.Join(f.root, dir))), Parameters: url.Values{}, } opts.Parameters.Set("action", "delete") err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.Call(ctx, &opts) return f.shouldRetry(resp, err) }) return err } // NewFs constructs an Fs from the path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } opt.APIURL = strings.TrimSuffix(opt.APIURL, "/") // Parse the endpoint u, err := url.Parse(opt.APIURL) if err != nil { return nil, err } rootIsDir := strings.HasSuffix(root, "/") root = strings.Trim(root, "/") f := &Fs{ name: name, opt: *opt, root: root, } f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))) 
f.features = (&fs.Features{ CanHaveEmptyDirectories: true, }).Fill(ctx, f) // Adjust client config and pass it attached to context cliCtx, cliCfg := fs.AddConfig(ctx) if opt.UserAgent != "" { cliCfg.UserAgent = opt.UserAgent } f.srv = rest.NewClient(fshttp.NewClient(cliCtx)) f.srv.SetRoot(u.String()) f.srv.SetErrorHandler(errorHandler) if opt.APIPassword != "" { opt.APIPassword, err = obscure.Reveal(opt.APIPassword) if err != nil { return nil, fmt.Errorf("couldn't decrypt API password: %w", err) } f.srv.SetUserPass("", opt.APIPassword) } if root != "" && !rootIsDir { // Check to see if the root actually an existing file remote := path.Base(root) f.root = path.Dir(root) if f.root == "." { f.root = "" } _, err := f.NewObject(ctx, remote) if err != nil { if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorNotAFile) { // File doesn't exist so return old f f.root = root return f, nil } return nil, err } // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // errorHandler translates Siad errors into native rclone filesystem errors. // Sadly this is using string matching since Siad can't expose meaningful codes. 
func errorHandler(resp *http.Response) error { body, err := rest.ReadBody(resp) if err != nil { return fmt.Errorf("error when trying to read error body: %w", err) } // Decode error response errResponse := new(api.Error) err = json.Unmarshal(body, &errResponse) if err != nil { // Set the Message to be the body if we can't parse the JSON errResponse.Message = strings.TrimSpace(string(body)) } errResponse.Status = resp.Status errResponse.StatusCode = resp.StatusCode msg := strings.Trim(errResponse.Message, "[]") code := errResponse.StatusCode switch { case code == 400 && msg == "no file known with that path": return fs.ErrorObjectNotFound case code == 400 && strings.HasPrefix(msg, "unable to get the fileinfo from the filesystem") && strings.HasSuffix(msg, "path does not exist"): return fs.ErrorObjectNotFound case code == 500 && strings.HasPrefix(msg, "failed to create directory") && strings.HasSuffix(msg, "a siadir already exists at that location"): return fs.ErrorDirExists case code == 500 && strings.HasPrefix(msg, "failed to get directory contents") && strings.HasSuffix(msg, "path does not exist"): return fs.ErrorDirNotFound case code == 500 && strings.HasSuffix(msg, "no such file or directory"): return fs.ErrorDirNotFound } return errResponse } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) { return fserrors.ShouldRetry(err), err }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/sia/api/types.go
backend/sia/api/types.go
// Package api provides types used by the Sia API. package api import ( "strings" "time" ) // DirectoriesResponse is the response for https://sia.tech/docs/#renter-dir-siapath-get type DirectoriesResponse struct { Directories []DirectoryInfo `json:"directories"` Files []FileInfo `json:"files"` } // FilesResponse is the response for https://sia.tech/docs/#renter-files-get type FilesResponse struct { Files []FileInfo `json:"files"` } // FileResponse is the response for https://sia.tech/docs/#renter-file-siapath-get type FileResponse struct { File FileInfo `json:"file"` } // FileInfo is used in https://sia.tech/docs/#renter-files-get type FileInfo struct { AccessTime time.Time `json:"accesstime"` Available bool `json:"available"` ChangeTime time.Time `json:"changetime"` CipherType string `json:"ciphertype"` CreateTime time.Time `json:"createtime"` Expiration uint64 `json:"expiration"` Filesize uint64 `json:"filesize"` Health float64 `json:"health"` LocalPath string `json:"localpath"` MaxHealth float64 `json:"maxhealth"` MaxHealthPercent float64 `json:"maxhealthpercent"` ModTime time.Time `json:"modtime"` NumStuckChunks uint64 `json:"numstuckchunks"` OnDisk bool `json:"ondisk"` Recoverable bool `json:"recoverable"` Redundancy float64 `json:"redundancy"` Renewing bool `json:"renewing"` SiaPath string `json:"siapath"` Stuck bool `json:"stuck"` StuckHealth float64 `json:"stuckhealth"` UploadedBytes uint64 `json:"uploadedbytes"` UploadProgress float64 `json:"uploadprogress"` } // DirectoryInfo is used in https://sia.tech/docs/#renter-dir-siapath-get type DirectoryInfo struct { AggregateHealth float64 `json:"aggregatehealth"` AggregateLastHealthCheckTime time.Time `json:"aggregatelasthealthchecktime"` AggregateMaxHealth float64 `json:"aggregatemaxhealth"` AggregateMaxHealthPercentage float64 `json:"aggregatemaxhealthpercentage"` AggregateMinRedundancy float64 `json:"aggregateminredundancy"` AggregateMostRecentModTime time.Time `json:"aggregatemostrecentmodtime"` 
AggregateNumFiles uint64 `json:"aggregatenumfiles"` AggregateNumStuckChunks uint64 `json:"aggregatenumstuckchunks"` AggregateNumSubDirs uint64 `json:"aggregatenumsubdirs"` AggregateSize uint64 `json:"aggregatesize"` AggregateStuckHealth float64 `json:"aggregatestuckhealth"` Health float64 `json:"health"` LastHealthCheckTime time.Time `json:"lasthealthchecktime"` MaxHealthPercentage float64 `json:"maxhealthpercentage"` MaxHealth float64 `json:"maxhealth"` MinRedundancy float64 `json:"minredundancy"` MostRecentModTime time.Time `json:"mostrecentmodtime"` NumFiles uint64 `json:"numfiles"` NumStuckChunks uint64 `json:"numstuckchunks"` NumSubDirs uint64 `json:"numsubdirs"` SiaPath string `json:"siapath"` Size uint64 `json:"size"` StuckHealth float64 `json:"stuckhealth"` } // Error contains an error message per https://sia.tech/docs/#error type Error struct { Message string `json:"message"` Status string StatusCode int } // Error returns a string for the error and satisfies the error interface func (e *Error) Error() string { var out []string if e.Message != "" { out = append(out, e.Message) } if e.Status != "" { out = append(out, e.Status) } if len(out) == 0 { return "Siad Error" } return strings.Join(out, ": ") }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/qingstor/qingstor.go
backend/qingstor/qingstor.go
//go:build !plan9 && !js // Package qingstor provides an interface to QingStor object storage // Home: https://www.qingcloud.com/ package qingstor import ( "context" "errors" "fmt" "io" "net/http" "path" "regexp" "strconv" "strings" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/encoder" qsConfig "github.com/yunify/qingstor-sdk-go/v3/config" qsErr "github.com/yunify/qingstor-sdk-go/v3/request/errors" qs "github.com/yunify/qingstor-sdk-go/v3/service" ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "qingstor", Description: "QingCloud Object Storage", NewFs: NewFs, Options: []fs.Option{{ Name: "env_auth", Help: "Get QingStor credentials from runtime.\n\nOnly applies if access_key_id and secret_access_key is blank.", Default: false, Examples: []fs.OptionExample{{ Value: "false", Help: "Enter QingStor credentials in the next step.", }, { Value: "true", Help: "Get QingStor credentials from the environment (env vars or IAM).", }}, }, { Name: "access_key_id", Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.", Sensitive: true, }, { Name: "secret_access_key", Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.", Sensitive: true, }, { Name: "endpoint", Help: "Enter an endpoint URL to connection QingStor API.\n\nLeave blank will use the default value \"https://qingstor.com:443\".", }, { Name: "zone", Help: "Zone to connect to.\n\nDefault is \"pek3a\".", Examples: []fs.OptionExample{{ Value: "pek3a", Help: "The Beijing (China) Three Zone.\nNeeds location constraint pek3a.", }, { Value: "sh1a", Help: "The Shanghai (China) First Zone.\nNeeds location constraint sh1a.", }, { 
Value: "gd2a", Help: "The Guangdong (China) Second Zone.\nNeeds location constraint gd2a.", }}, }, { Name: "connection_retries", Help: "Number of connection retries.", Default: 3, Advanced: true, }, { Name: "upload_cutoff", Help: `Cutoff for switching to chunked upload. Any files larger than this will be uploaded in chunks of chunk_size. The minimum is 0 and the maximum is 5 GiB.`, Default: defaultUploadCutoff, Advanced: true, }, { Name: "chunk_size", Help: `Chunk size to use for uploading. When uploading files larger than upload_cutoff they will be uploaded as multipart uploads using this chunk size. Note that "--qingstor-upload-concurrency" chunks of this size are buffered in memory per transfer. If you are transferring large files over high-speed links and you have enough memory, then increasing this will speed up the transfers.`, Default: minChunkSize, Advanced: true, }, { Name: "upload_concurrency", Help: `Concurrency for multipart uploads. This is the number of chunks of the same file that are uploaded concurrently. NB if you set this to > 1 then the checksums of multipart uploads become corrupted (the uploads themselves are not corrupted though). 
If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers.`, Default: 1, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.EncodeInvalidUtf8 | encoder.EncodeCtl | encoder.EncodeSlash), }}, }) } // Constants const ( listLimitSize = 1000 // Number of items to read at once maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY minChunkSize = fs.SizeSuffix(minMultiPartSize) defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024) maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024) ) // Globals func timestampToTime(tp int64) time.Time { timeLayout := time.RFC3339Nano ts := time.Unix(tp, 0).Format(timeLayout) tm, _ := time.Parse(timeLayout, ts) return tm.UTC() } // Options defines the configuration for this backend type Options struct { EnvAuth bool `config:"env_auth"` AccessKeyID string `config:"access_key_id"` SecretAccessKey string `config:"secret_access_key"` Endpoint string `config:"endpoint"` Zone string `config:"zone"` ConnectionRetries int `config:"connection_retries"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` ChunkSize fs.SizeSuffix `config:"chunk_size"` UploadConcurrency int `config:"upload_concurrency"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote qingstor server type Fs struct { name string // The name of the remote root string // The root is a subdir, is a special object opt Options // parsed options features *fs.Features // optional features svc *qs.Service // The connection to the qingstor server zone string // The zone we are working on rootBucket string // bucket part of root (if any) rootDirectory string // directory part of root (if any) cache *bucket.Cache // cache for bucket creation status } // Object describes a qingstor object type Object struct { // Will definitely have everything but meta 
which may be nil // // List will read everything but meta & mimeType - to fill // that in you need to call readMetaData fs *Fs // what this object is part of remote string // object of remote etag string // md5sum of the object size int64 // length of the object content mimeType string // ContentType of object - may be "" lastModified time.Time // Last modified encrypted bool // whether the object is encryption algo string // Custom encryption algorithms } // ------------------------------------------------------------ // parsePath parses a remote 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // split returns bucket and bucketPath from the rootRelativePath // relative to f.root func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) { bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath)) return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath) } // split returns bucket and bucketPath from the object func (o *Object) split() (bucket, bucketPath string) { return o.fs.split(o.remote) } // Split a URL into three parts: protocol host and port func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) { /* Pattern to match an endpoint, e.g.: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", 443 "http(s)//qingstor.com" --> "http(s)", "qingstor.com", "" "qingstor.com" --> "", "qingstor.com", "" */ defer func() { if r := recover(); r != nil { switch x := r.(type) { case error: err = x default: err = nil } } }() var mather = regexp.MustCompile(`^(?:(http|https)://)*(\w+\.(?:[\w\.])*)(?::(\d{0,5}))*$`) parts := mather.FindStringSubmatch(endpoint) protocol, host, port = parts[1], parts[2], parts[3] return } // qsConnection makes a connection to qingstor func qsServiceConnection(ctx context.Context, opt *Options) (*qs.Service, error) { accessKeyID := opt.AccessKeyID secretAccessKey := opt.SecretAccessKey switch { case opt.EnvAuth: // No 
need for empty checks if "env_auth" is true case accessKeyID == "" && secretAccessKey == "": // if no access key/secret and iam is explicitly disabled then fall back to anon interaction case accessKeyID == "": return nil, errors.New("access_key_id not found") case secretAccessKey == "": return nil, errors.New("secret_access_key not found") } protocol := "https" host := "qingstor.com" port := 443 endpoint := opt.Endpoint if endpoint != "" { _protocol, _host, _port, err := qsParseEndpoint(endpoint) if err != nil { return nil, fmt.Errorf("the endpoint \"%s\" format error", endpoint) } if _protocol != "" { protocol = _protocol } host = _host if _port != "" { port, _ = strconv.Atoi(_port) } else if protocol == "http" { port = 80 } } cf, err := qsConfig.NewDefault() if err != nil { return nil, err } cf.AccessKeyID = accessKeyID cf.SecretAccessKey = secretAccessKey cf.Protocol = protocol cf.Host = host cf.Port = port // unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries cf.Connection = fshttp.NewClient(ctx) return qs.Init(cf) } func checkUploadChunkSize(cs fs.SizeSuffix) error { if cs < minChunkSize { return fmt.Errorf("%s is less than %s", cs, minChunkSize) } return nil } func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadChunkSize(cs) if err == nil { old, f.opt.ChunkSize = f.opt.ChunkSize, cs } return } func checkUploadCutoff(cs fs.SizeSuffix) error { if cs > maxUploadCutoff { return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff) } return nil } func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { err = checkUploadCutoff(cs) if err == nil { old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs } return } // setRoot changes the root of the Fs func (f *Fs) setRoot(root string) { f.root = parsePath(root) f.rootBucket, f.rootDirectory = bucket.Split(f.root) } // NewFs constructs an Fs from the path, bucket:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) 
(fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } err = checkUploadChunkSize(opt.ChunkSize) if err != nil { return nil, fmt.Errorf("qingstor: chunk size: %w", err) } err = checkUploadCutoff(opt.UploadCutoff) if err != nil { return nil, fmt.Errorf("qingstor: upload cutoff: %w", err) } svc, err := qsServiceConnection(ctx, opt) if err != nil { return nil, err } if opt.Zone == "" { opt.Zone = "pek3a" } f := &Fs{ name: name, opt: *opt, svc: svc, zone: opt.Zone, cache: bucket.NewCache(), } f.setRoot(root) f.features = (&fs.Features{ ReadMimeType: true, WriteMimeType: true, BucketBased: true, BucketBasedRootOK: true, SlowModTime: true, }).Fill(ctx, f) if f.rootBucket != "" && f.rootDirectory != "" { // Check to see if the object exists bucketInit, err := svc.Bucket(f.rootBucket, opt.Zone) if err != nil { return nil, err } encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory) _, err = bucketInit.HeadObject(encodedDirectory, &qs.HeadObjectInput{}) if err == nil { newRoot := path.Dir(f.root) if newRoot == "." { newRoot = "" } f.setRoot(newRoot) // return an error with an fs which points to the parent return f, fs.ErrorIsFile } } return f, nil } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { if f.rootBucket == "" { return "QingStor root" } if f.rootDirectory == "" { return fmt.Sprintf("QingStor bucket %s", f.rootBucket) } return fmt.Sprintf("QingStor bucket %s path %s", f.rootBucket, f.rootDirectory) } // Precision of the remote func (f *Fs) Precision() time.Duration { //return time.Nanosecond //Not supported temporary return fs.ModTimeNotSupported } // Hashes returns the supported hash sets. 
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.MD5) //return hash.HashSet(hash.HashNone) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // Put created a new object func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { fsObj := &Object{ fs: f, remote: src.Remote(), } return fsObj, fsObj.Update(ctx, in, src, options...) } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { dstBucket, dstPath := f.split(remote) err := f.makeBucket(ctx, dstBucket) if err != nil { return nil, err } srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } srcBucket, srcPath := srcObj.split() source := path.Join("/", srcBucket, srcPath) // fs.Debugf(f, "Copied, source key is: %s, and dst key is: %s", source, key) req := qs.PutObjectInput{ XQSCopySource: &source, } bucketInit, err := f.svc.Bucket(dstBucket, f.zone) if err != nil { return nil, err } _, err = bucketInit.PutObject(dstPath, &req) if err != nil { // fs.Debugf(f, "Copy Failed, API Error: %v", err) return nil, err } return f.NewObject(ctx, remote) } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(remote, nil) } // Return an Object from a path // // If it can't be found it returns the error ErrorObjectNotFound. 
func (f *Fs) newObjectWithInfo(remote string, info *qs.KeyType) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } if info != nil { // Set info if info.Size != nil { o.size = *info.Size } if info.Etag != nil { o.etag = qs.StringValue(info.Etag) } if info.Modified == nil { fs.Logf(o, "Failed to read last modified") o.lastModified = time.Now() } else { o.lastModified = timestampToTime(int64(*info.Modified)) } if info.MimeType != nil { o.mimeType = qs.StringValue(info.MimeType) } if info.Encrypted != nil { o.encrypted = qs.BoolValue(info.Encrypted) } } else { err := o.readMetaData() // reads info and meta, returning an error if err != nil { return nil, err } } return o, nil } // listFn is called from list to handle an object. type listFn func(remote string, object *qs.KeyType, isDirectory bool) error // list the objects into the function supplied // // dir is the starting directory, "" for root // // Set recurse to read sub directories func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error { if prefix != "" { prefix += "/" } if directory != "" { directory += "/" } delimiter := "" if !recurse { delimiter = "/" } maxLimit := int(listLimitSize) var marker *string for { bucketInit, err := f.svc.Bucket(bucket, f.zone) if err != nil { return err } req := qs.ListObjectsInput{ Delimiter: &delimiter, Prefix: &directory, Limit: &maxLimit, Marker: marker, } resp, err := bucketInit.ListObjects(&req) if err != nil { if e, ok := err.(*qsErr.QingStorError); ok { if e.StatusCode == http.StatusNotFound { err = fs.ErrorDirNotFound } } return err } if !recurse { for _, commonPrefix := range resp.CommonPrefixes { if commonPrefix == nil { fs.Logf(f, "Nil common prefix received") continue } remote := *commonPrefix remote = f.opt.Enc.ToStandardPath(remote) if !strings.HasPrefix(remote, prefix) { fs.Logf(f, "Odd name received %q", remote) continue } remote = remote[len(prefix):] if addBucket { remote = 
path.Join(bucket, remote) } remote = strings.TrimSuffix(remote, "/") err = fn(remote, &qs.KeyType{Key: &remote}, true) if err != nil { return err } } } for _, object := range resp.Keys { remote := qs.StringValue(object.Key) remote = f.opt.Enc.ToStandardPath(remote) if !strings.HasPrefix(remote, prefix) { fs.Logf(f, "Odd name received %q", remote) continue } remote = remote[len(prefix):] if addBucket { remote = path.Join(bucket, remote) } err = fn(remote, object, false) if err != nil { return err } } if resp.HasMore != nil && !*resp.HasMore { break } // Use NextMarker if set, otherwise use last Key if resp.NextMarker == nil || *resp.NextMarker == "" { fs.Errorf(f, "Expecting NextMarker but didn't find one") break } else { marker = resp.NextMarker } } return nil } // Convert a list item into a BasicInfo func (f *Fs) itemToDirEntry(remote string, object *qs.KeyType, isDirectory bool) (fs.DirEntry, error) { if isDirectory { size := int64(0) if object.Size != nil { size = *object.Size } d := fs.NewDir(remote, time.Time{}).SetSize(size) return d, nil } o, err := f.newObjectWithInfo(remote, object) if err != nil { return nil, err } return o, nil } // listDir lists files and directories to out func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) { // List the objects and directories err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *qs.KeyType, isDirectory bool) error { entry, err := f.itemToDirEntry(remote, object, isDirectory) if err != nil { return err } if entry != nil { entries = append(entries, entry) } return nil }) if err != nil { return nil, err } // bucket must be present if listing succeeded f.cache.MarkOK(bucket) return entries, nil } // listBuckets lists the buckets to out func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) { req := qs.ListBucketsInput{ Location: &f.zone, } resp, err := f.svc.ListBuckets(&req) if err != 
nil { return nil, err } for _, bucket := range resp.Buckets { d := fs.NewDir(f.opt.Enc.ToStandardName(qs.StringValue(bucket.Name)), qs.TimeValue(bucket.Created)) entries = append(entries, d) } return entries, nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { bucket, directory := f.split(dir) if bucket == "" { if directory != "" { return nil, fs.ErrorListBucketRequired } return f.listBuckets(ctx) } return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "") } // ListR lists the objects and directories of the Fs starting // from dir recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. 
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { bucket, directory := f.split(dir) list := list.NewHelper(callback) listR := func(bucket, directory, prefix string, addBucket bool) error { return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *qs.KeyType, isDirectory bool) error { entry, err := f.itemToDirEntry(remote, object, isDirectory) if err != nil { return err } return list.Add(entry) }) } if bucket == "" { entries, err := f.listBuckets(ctx) if err != nil { return err } for _, entry := range entries { err = list.Add(entry) if err != nil { return err } bucket := entry.Remote() err = listR(bucket, "", f.rootDirectory, true) if err != nil { return err } // bucket must be present if listing succeeded f.cache.MarkOK(bucket) } } else { err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "") if err != nil { return err } // bucket must be present if listing succeeded f.cache.MarkOK(bucket) } return list.Flush() } // Mkdir creates the bucket if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { bucket, _ := f.split(dir) return f.makeBucket(ctx, bucket) } // makeBucket creates the bucket if it doesn't exist func (f *Fs) makeBucket(ctx context.Context, bucket string) error { return f.cache.Create(bucket, func() error { bucketInit, err := f.svc.Bucket(bucket, f.zone) if err != nil { return err } /* When delete a bucket, qingstor need about 60 second to sync status; So, need wait for it sync end if we try to operation a just deleted bucket */ wasDeleted := false retries := 0 for retries <= 120 { statistics, err := bucketInit.GetStatistics() if statistics == nil || err != nil { break } switch *statistics.Status { case "deleted": fs.Debugf(f, "Wait for qingstor bucket to be deleted, retries: %d", retries) time.Sleep(time.Second * 1) retries++ wasDeleted = true continue } break } retries = 0 for retries <= 120 { _, err = bucketInit.Put() if e, ok := 
err.(*qsErr.QingStorError); ok { if e.StatusCode == http.StatusConflict { if wasDeleted { fs.Debugf(f, "Wait for qingstor bucket to be creatable, retries: %d", retries) time.Sleep(time.Second * 1) retries++ continue } err = nil } } break } return err }, nil) } // bucketIsEmpty check if the bucket empty func (f *Fs) bucketIsEmpty(bucket string) (bool, error) { bucketInit, err := f.svc.Bucket(bucket, f.zone) if err != nil { return true, err } statistics, err := bucketInit.GetStatistics() if err != nil { return true, err } if *statistics.Count == 0 { return true, nil } return false, nil } // Rmdir delete a bucket func (f *Fs) Rmdir(ctx context.Context, dir string) error { bucket, directory := f.split(dir) if bucket == "" || directory != "" { return nil } isEmpty, err := f.bucketIsEmpty(bucket) if err != nil { return err } if !isEmpty { // fs.Debugf(f, "The bucket %s you tried to delete not empty.", bucket) return errors.New("BucketNotEmpty: The bucket you tried to delete is not empty") } return f.cache.Remove(bucket, func() error { // fs.Debugf(f, "Deleting the bucket %s", bucket) bucketInit, err := f.svc.Bucket(bucket, f.zone) if err != nil { return err } retries := 0 for retries <= 10 { _, delErr := bucketInit.Delete() if delErr != nil { if e, ok := delErr.(*qsErr.QingStorError); ok { switch e.Code { // The status of "lease" takes a few seconds to "ready" when creating a new bucket // wait for lease status ready case "lease_not_ready": fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries) retries++ time.Sleep(time.Second * 1) continue default: err = e } } } else { err = delErr } break } return err }) } // cleanUpBucket removes all pending multipart uploads for a given bucket func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) { fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than 24 hours", bucket) bucketInit, err := f.svc.Bucket(bucket, f.zone) if err != nil { return err } // maxLimit := int(listLimitSize) 
var marker *string for { req := qs.ListMultipartUploadsInput{ // The default is 200 but this errors if more than 200 is put in so leave at the default // Limit: &maxLimit, KeyMarker: marker, } var resp *qs.ListMultipartUploadsOutput resp, err = bucketInit.ListMultipartUploads(&req) if err != nil { return fmt.Errorf("clean up bucket list multipart uploads: %w", err) } for _, upload := range resp.Uploads { if upload.Created != nil && upload.Key != nil && upload.UploadID != nil { age := time.Since(*upload.Created) if age > 24*time.Hour { fs.Infof(f, "removing pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age) req := qs.AbortMultipartUploadInput{ UploadID: upload.UploadID, } _, abortErr := bucketInit.AbortMultipartUpload(*upload.Key, &req) if abortErr != nil { err = fmt.Errorf("failed to remove multipart upload for %q: %w", *upload.Key, abortErr) fs.Errorf(f, "%v", err) } } else { fs.Debugf(f, "ignoring pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age) } } } if resp.HasMore != nil && !*resp.HasMore { break } // Use NextMarker if set, otherwise use last Key if resp.NextKeyMarker == nil || *resp.NextKeyMarker == "" { fs.Errorf(f, "Expecting NextKeyMarker but didn't find one") break } else { marker = resp.NextKeyMarker } } return err } // CleanUp removes all pending multipart uploads func (f *Fs) CleanUp(ctx context.Context) (err error) { if f.rootBucket != "" { return f.cleanUpBucket(ctx, f.rootBucket) } entries, err := f.listBuckets(ctx) if err != nil { return err } for _, entry := range entries { cleanErr := f.cleanUpBucket(ctx, f.opt.Enc.FromStandardName(entry.Remote())) if cleanErr != nil { fs.Errorf(f, "Failed to cleanup bucket: %q", cleanErr) err = cleanErr } } return err } // readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info func (o *Object) readMetaData() (err error) { bucket, bucketPath := o.split() bucketInit, err := o.fs.svc.Bucket(bucket, 
o.fs.zone) if err != nil { return err } // fs.Debugf(o, "Read metadata of key: %s", key) resp, err := bucketInit.HeadObject(bucketPath, &qs.HeadObjectInput{}) if err != nil { // fs.Debugf(o, "Read metadata failed, API Error: %v", err) if e, ok := err.(*qsErr.QingStorError); ok { if e.StatusCode == http.StatusNotFound { return fs.ErrorObjectNotFound } } return err } // Ignore missing Content-Length assuming it is 0 if resp.ContentLength != nil { o.size = *resp.ContentLength } if resp.ETag != nil { o.etag = qs.StringValue(resp.ETag) } if resp.LastModified == nil { fs.Logf(o, "Failed to read last modified from HEAD: %v", err) o.lastModified = time.Now() } else { o.lastModified = *resp.LastModified } if resp.ContentType != nil { o.mimeType = qs.StringValue(resp.ContentType) } if resp.XQSEncryptionCustomerAlgorithm != nil { o.algo = qs.StringValue(resp.XQSEncryptionCustomerAlgorithm) o.encrypted = true } return nil } // ModTime returns the modification date of the file // It should return a best guess if one isn't available func (o *Object) ModTime(ctx context.Context) time.Time { err := o.readMetaData() if err != nil { fs.Logf(o, "Failed to read metadata, %v", err) return time.Now() } modTime := o.lastModified return modTime } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { err := o.readMetaData() if err != nil { return err } o.lastModified = modTime mimeType := fs.MimeType(ctx, o) if o.size >= maxSizeForCopy { fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy)) return nil } // Copy the object to itself to update the metadata bucket, bucketPath := o.split() sourceKey := path.Join("/", bucket, bucketPath) bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone) if err != nil { return err } req := qs.PutObjectInput{ XQSCopySource: &sourceKey, ContentType: &mimeType, } _, err = bucketInit.PutObject(bucketPath, &req) return err } // 
Open opens the file for read. Call Close() on the returned io.ReadCloser func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { bucket, bucketPath := o.split() bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone) if err != nil { return nil, err } req := qs.GetObjectInput{} fs.FixRangeOption(options, o.size) for _, option := range options { switch option.(type) { case *fs.RangeOption, *fs.SeekOption: _, value := option.Header() req.Range = &value default: if option.Mandatory() { fs.Logf(o, "Unsupported mandatory option: %v", option) } } } resp, err := bucketInit.GetObject(bucketPath, &req) if err != nil { return nil, err } return resp.Body, nil } // Update in to the object func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { // The maximum size of upload object is multipartUploadSize * MaxMultipleParts bucket, bucketPath := o.split() err := o.fs.makeBucket(ctx, bucket) if err != nil { return err } // Guess the content type mimeType := fs.MimeType(ctx, src) req := uploadInput{ body: in, qsSvc: o.fs.svc, bucket: bucket, zone: o.fs.zone, key: bucketPath, mimeType: mimeType, partSize: int64(o.fs.opt.ChunkSize), concurrency: o.fs.opt.UploadConcurrency, } uploader := newUploader(&req) size := src.Size() multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff) if multipart { err = uploader.upload() } else { err = uploader.singlePartUpload(in, size) } if err != nil { return err } // Read Metadata of object err = o.readMetaData() return err } // Remove this object func (o *Object) Remove(ctx context.Context) error { bucket, bucketPath := o.split() bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone) if err != nil { return err } _, err = bucketInit.DeleteObject(bucketPath) return err } // Fs returns read only access to the Fs that this object is part of func (o *Object) Fs() fs.Info { return o.fs } var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`) // Hash returns the 
selected checksum of the file // If no checksum is available it returns "" func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } etag := strings.Trim(strings.ToLower(o.etag), `"`) // Check the etag is a valid md5sum if !matchMd5.MatchString(etag) { fs.Debugf(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag) return "", nil } return etag, nil } // Storable says whether this object can be stored func (o *Object) Storable() bool { return true } // String returns a description of the Object func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Size returns the size of the file func (o *Object) Size() int64 { return o.size } // MimeType of an Object if known, "" otherwise func (o *Object) MimeType(ctx context.Context) string { err := o.readMetaData() if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return "" } return o.mimeType } // Check the interfaces are satisfied var ( _ fs.Fs = &Fs{} _ fs.CleanUpper = &Fs{} _ fs.Copier = &Fs{} _ fs.Object = &Object{} _ fs.ListRer = &Fs{} _ fs.MimeTyper = &Object{} )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/qingstor/qingstor_test.go
backend/qingstor/qingstor_test.go
// Test QingStor filesystem interface //go:build !plan9 && !js package qingstor import ( "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestQingStor:", NilObject: (*Object)(nil), ChunkedUpload: fstests.ChunkedUploadConfig{ MinChunkSize: minChunkSize, }, }) } func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadChunkSize(cs) } func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadCutoff(cs) } var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/qingstor/upload.go
backend/qingstor/upload.go
// Upload object to QingStor //go:build !plan9 && !js package qingstor import ( "bytes" "crypto/md5" "errors" "fmt" "hash" "io" "sort" "sync" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/lib/atexit" qs "github.com/yunify/qingstor-sdk-go/v3/service" ) const ( // maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor // maxMultiPartSize = 1024 * 1024 * 1024 * 1 // The maximum allowed part size when uploading a part to QingStor minMultiPartSize = 1024 * 1024 * 4 // The minimum allowed part size when uploading a part to QingStor maxMultiParts = 10000 // The maximum allowed number of parts in a multi-part upload ) const ( defaultUploadPartSize = 1024 * 1024 * 64 // The default part size to buffer chunks of a payload into. defaultUploadConcurrency = 4 // the default number of goroutines to spin up when using multiPartUpload. ) func readFillBuf(r io.Reader, b []byte) (offset int, err error) { for offset < len(b) && err == nil { var n int n, err = r.Read(b[offset:]) offset += n } return offset, err } // uploadInput contains all input for upload requests to QingStor. type uploadInput struct { body io.Reader qsSvc *qs.Service mimeType string zone string bucket string key string partSize int64 concurrency int maxUploadParts int } // uploader internal structure to manage an upload to QingStor. type uploader struct { cfg *uploadInput totalSize int64 // set to -1 if the size is not known readerPos int64 // current reader position readerSize int64 // current reader content size } // newUploader creates a new Uploader instance to upload objects to QingStor. 
func newUploader(in *uploadInput) *uploader { u := &uploader{ cfg: in, } return u } // bucketInit initiate as bucket controller func (u *uploader) bucketInit() (*qs.Bucket, error) { bucketInit, err := u.cfg.qsSvc.Bucket(u.cfg.bucket, u.cfg.zone) return bucketInit, err } // String converts uploader to a string func (u *uploader) String() string { return fmt.Sprintf("QingStor bucket %s key %s", u.cfg.bucket, u.cfg.key) } // nextReader returns a seekable reader representing the next packet of data. // This operation increases the shared u.readerPos counter, but note that it // does not need to be wrapped in a mutex because nextReader is only called // from the main thread. func (u *uploader) nextReader() (io.ReadSeeker, int, error) { type readerAtSeeker interface { io.ReaderAt io.ReadSeeker } switch r := u.cfg.body.(type) { case readerAtSeeker: var err error n := u.cfg.partSize if u.totalSize >= 0 { bytesLeft := u.totalSize - u.readerPos if bytesLeft <= u.cfg.partSize { err = io.EOF n = bytesLeft } } reader := io.NewSectionReader(r, u.readerPos, n) u.readerPos += n u.readerSize = n return reader, int(n), err default: part := make([]byte, u.cfg.partSize) n, err := readFillBuf(r, part) u.readerPos += int64(n) u.readerSize = int64(n) return bytes.NewReader(part[0:n]), n, err } } // init will initialize all default options. func (u *uploader) init() { if u.cfg.concurrency == 0 { u.cfg.concurrency = defaultUploadConcurrency } if u.cfg.partSize == 0 { u.cfg.partSize = defaultUploadPartSize } if u.cfg.maxUploadParts == 0 { u.cfg.maxUploadParts = maxMultiParts } // Try to get the total size for some optimizations u.totalSize = -1 switch r := u.cfg.body.(type) { case io.Seeker: pos, _ := r.Seek(0, io.SeekCurrent) defer func() { _, _ = r.Seek(pos, io.SeekStart) }() n, err := r.Seek(0, io.SeekEnd) if err != nil { return } u.totalSize = n // Try to adjust partSize if it is too small and account for // integer division truncation. 
if u.totalSize/u.cfg.partSize >= u.cfg.partSize { // Add one to the part size to account for remainders // during the size calculation. e.g odd number of bytes. u.cfg.partSize = (u.totalSize / int64(u.cfg.maxUploadParts)) + 1 } } } // singlePartUpload upload a single object that contentLength less than "defaultUploadPartSize" func (u *uploader) singlePartUpload(buf io.Reader, size int64) error { bucketInit, _ := u.bucketInit() req := qs.PutObjectInput{ ContentLength: &size, ContentType: &u.cfg.mimeType, Body: buf, } _, err := bucketInit.PutObject(u.cfg.key, &req) if err == nil { fs.Debugf(u, "Upload single object finished") } return err } // Upload upload an object into QingStor func (u *uploader) upload() error { u.init() if u.cfg.partSize < minMultiPartSize { return fmt.Errorf("part size must be at least %d bytes", minMultiPartSize) } // Do one read to determine if we have more than one part reader, _, err := u.nextReader() if err == io.EOF { // single part fs.Debugf(u, "Uploading as single part object to QingStor") return u.singlePartUpload(reader, u.readerPos) } else if err != nil { return fmt.Errorf("read upload data failed: %w", err) } fs.Debugf(u, "Uploading as multi-part object to QingStor") mu := multiUploader{uploader: u} return mu.multiPartUpload(reader) } // internal structure to manage a specific multipart upload to QingStor. type multiUploader struct { *uploader wg sync.WaitGroup mtx sync.Mutex err error uploadID *string objectParts completedParts hashMd5 hash.Hash } // keeps track of a single chunk of data being sent to QingStor. type chunk struct { buffer io.ReadSeeker partNumber int size int64 } // completedParts is a wrapper to make parts sortable by their part number, // since QingStor required this list to be sent in sorted order. 
type completedParts []*qs.ObjectPartType func (a completedParts) Len() int { return len(a) } func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } // String converts multiUploader to a string func (mu *multiUploader) String() string { if uploadID := mu.uploadID; uploadID != nil { return fmt.Sprintf("QingStor bucket %s key %s uploadID %s", mu.cfg.bucket, mu.cfg.key, *uploadID) } return fmt.Sprintf("QingStor bucket %s key %s uploadID <nil>", mu.cfg.bucket, mu.cfg.key) } // getErr is a thread-safe getter for the error object func (mu *multiUploader) getErr() error { mu.mtx.Lock() defer mu.mtx.Unlock() return mu.err } // setErr is a thread-safe setter for the error object func (mu *multiUploader) setErr(e error) { mu.mtx.Lock() defer mu.mtx.Unlock() mu.err = e } // readChunk runs in worker goroutines to pull chunks off of the ch channel // and send() them as UploadPart requests. func (mu *multiUploader) readChunk(ch chan chunk) { defer mu.wg.Done() for { c, ok := <-ch if !ok { break } if mu.getErr() == nil { if err := mu.send(c); err != nil { mu.setErr(err) } } } } // initiate init a Multiple Object and obtain UploadID func (mu *multiUploader) initiate() error { bucketInit, _ := mu.bucketInit() req := qs.InitiateMultipartUploadInput{ ContentType: &mu.cfg.mimeType, } fs.Debugf(mu, "Initiating a multi-part upload") rsp, err := bucketInit.InitiateMultipartUpload(mu.cfg.key, &req) if err == nil { mu.uploadID = rsp.UploadID mu.hashMd5 = md5.New() } return err } // send upload a part into QingStor func (mu *multiUploader) send(c chunk) error { bucketInit, _ := mu.bucketInit() req := qs.UploadMultipartInput{ PartNumber: &c.partNumber, UploadID: mu.uploadID, ContentLength: &c.size, Body: c.buffer, } fs.Debugf(mu, "Uploading a part to QingStor with partNumber %d and partSize %d", c.partNumber, c.size) _, err := bucketInit.UploadMultipart(mu.cfg.key, &req) if err != nil { 
return err } fs.Debugf(mu, "Done uploading part partNumber %d and partSize %d", c.partNumber, c.size) mu.mtx.Lock() defer mu.mtx.Unlock() _, _ = c.buffer.Seek(0, 0) _, _ = io.Copy(mu.hashMd5, c.buffer) parts := qs.ObjectPartType{PartNumber: &c.partNumber, Size: &c.size} mu.objectParts = append(mu.objectParts, &parts) return err } // complete complete a multipart upload func (mu *multiUploader) complete() error { var err error if err = mu.getErr(); err != nil { return err } bucketInit, _ := mu.bucketInit() //if err = mu.list(); err != nil { // return err //} //md5String := fmt.Sprintf("\"%s\"", hex.EncodeToString(mu.hashMd5.Sum(nil))) md5String := fmt.Sprintf("\"%x\"", mu.hashMd5.Sum(nil)) sort.Sort(mu.objectParts) req := qs.CompleteMultipartUploadInput{ UploadID: mu.uploadID, ObjectParts: mu.objectParts, ETag: &md5String, } fs.Debugf(mu, "Completing multi-part object") _, err = bucketInit.CompleteMultipartUpload(mu.cfg.key, &req) if err == nil { fs.Debugf(mu, "Complete multi-part finished") } return err } // abort abort a multipart upload func (mu *multiUploader) abort() error { var err error bucketInit, _ := mu.bucketInit() if uploadID := mu.uploadID; uploadID != nil { req := qs.AbortMultipartUploadInput{ UploadID: uploadID, } fs.Debugf(mu, "Aborting multi-part object %q", *uploadID) _, err = bucketInit.AbortMultipartUpload(mu.cfg.key, &req) } return err } // multiPartUpload upload a multiple object into QingStor func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) { // Initiate a multi-part upload if err = mu.initiate(); err != nil { return err } // Cancel the session if something went wrong defer atexit.OnError(&err, func() { fs.Debugf(mu, "Cancelling multipart upload: %v", err) cancelErr := mu.abort() if cancelErr != nil { fs.Logf(mu, "Failed to cancel multipart upload: %v", cancelErr) } })() ch := make(chan chunk, mu.cfg.concurrency) for range mu.cfg.concurrency { mu.wg.Add(1) go mu.readChunk(ch) } var partNumber int ch <- 
chunk{partNumber: partNumber, buffer: firstBuf, size: mu.readerSize} for mu.getErr() == nil { partNumber++ // This upload exceeded maximum number of supported parts, error now. if partNumber > mu.cfg.maxUploadParts || partNumber > maxMultiParts { var msg string if partNumber > mu.cfg.maxUploadParts { msg = fmt.Sprintf("exceeded total allowed configured maxUploadParts (%d). "+ "Adjust PartSize to fit in this limit", mu.cfg.maxUploadParts) } else { msg = fmt.Sprintf("exceeded total allowed QingStor limit maxUploadParts (%d). "+ "Adjust PartSize to fit in this limit", maxMultiParts) } mu.setErr(errors.New(msg)) break } var reader io.ReadSeeker var nextChunkLen int reader, nextChunkLen, err = mu.nextReader() if err != nil && err != io.EOF { // empty ch go func() { for range ch { } }() // Wait for all goroutines finish close(ch) mu.wg.Wait() return err } if nextChunkLen == 0 && partNumber > 0 { // No need to upload empty part, if file was empty to start // with empty single part would of been created and never // started multipart upload. break } num := partNumber ch <- chunk{partNumber: num, buffer: reader, size: mu.readerSize} } // Wait for all goroutines finish close(ch) mu.wg.Wait() // Complete Multipart Upload return mu.complete() }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/qingstor/qingstor_unsupported.go
backend/qingstor/qingstor_unsupported.go
// Build for unsupported platforms to stop go complaining // about "no buildable Go source files " //go:build plan9 || js // Package qingstor provides an interface to QingStor object storage // Home: https://www.qingcloud.com/ package qingstor
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/mailru/mailru.go
backend/mailru/mailru.go
// Package mailru provides an interface to the Mail.ru Cloud storage system. package mailru import ( "bytes" "context" "errors" "fmt" gohash "hash" "io" "path" "path/filepath" "sort" "strconv" "strings" "sync" "time" "encoding/hex" "encoding/json" "net/http" "net/url" "github.com/rclone/rclone/backend/mailru/api" "github.com/rclone/rclone/backend/mailru/mrhash" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/rest" "golang.org/x/oauth2" ) // Global constants const ( minSleepPacer = 100 * time.Millisecond maxSleepPacer = 5 * time.Second decayConstPacer = 2 // bigger for slower decay, exponential metaExpirySec = 20 * 60 // meta server expiration time serverExpirySec = 3 * 60 // download server expiration time shardExpirySec = 30 * 60 // upload server expiration time maxServerLocks = 4 // maximum number of locks per single download server maxInt32 = 2147483647 // used as limit in directory list request speedupMinSize = 512 // speedup is not optimal if data is smaller than average packet ) // Global errors var ( ErrorDirAlreadyExists = errors.New("directory already exists") ErrorDirSourceNotExists = errors.New("directory source does not exist") ErrorInvalidName = errors.New("invalid characters in object name") // MrHashType is the hash.Type for Mailru MrHashType hash.Type ) // Description of how to authorize var oauthConfig = &oauthutil.Config{ ClientID: api.OAuthClientID, ClientSecret: "", AuthURL: api.OAuthURL, TokenURL: api.OAuthURL, 
AuthStyle: oauth2.AuthStyleInParams, } // Register with Fs func init() { MrHashType = hash.RegisterHash("mailru", "MailruHash", 40, mrhash.New) fs.Register(&fs.RegInfo{ Name: "mailru", Description: "Mail.ru Cloud", NewFs: NewFs, Options: append(oauthutil.SharedOptions, []fs.Option{{ Name: "user", Help: "User name (usually email).", Required: true, Sensitive: true, }, { Name: "pass", Help: `Password. This must be an app password - rclone will not work with your normal password. See the Configuration section in the docs for how to make an app password. `, Required: true, IsPassword: true, }, { Name: "speedup_enable", Default: true, Advanced: false, Help: `Skip full upload if there is another file with same data hash. This feature is called "speedup" or "put by hash". It is especially efficient in case of generally available files like popular books, video or audio clips, because files are searched by hash in all accounts of all mailru users. It is meaningless and ineffective if source file is unique or encrypted. Please note that rclone may need local memory and disk space to calculate content hash in advance and decide whether full upload is required. Also, if rclone does not know file size in advance (e.g. in case of streaming or partial uploads), it will not even try this optimization.`, Examples: []fs.OptionExample{{ Value: "true", Help: "Enable", }, { Value: "false", Help: "Disable", }}, }, { Name: "speedup_file_patterns", Default: "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf", Advanced: true, Help: `Comma separated list of file name patterns eligible for speedup (put by hash). Patterns are case insensitive and can contain '*' or '?' 
meta characters.`, Examples: []fs.OptionExample{{ Value: "", Help: "Empty list completely disables speedup (put by hash).", }, { Value: "*", Help: "All files will be attempted for speedup.", }, { Value: "*.mkv,*.avi,*.mp4,*.mp3", Help: "Only common audio/video files will be tried for put by hash.", }, { Value: "*.zip,*.gz,*.rar,*.pdf", Help: "Only common archives or PDF books will be tried for speedup.", }}, }, { Name: "speedup_max_disk", Default: fs.SizeSuffix(3 * 1024 * 1024 * 1024), Advanced: true, Help: `This option allows you to disable speedup (put by hash) for large files. Reason is that preliminary hashing can exhaust your RAM or disk space.`, Examples: []fs.OptionExample{{ Value: "0", Help: "Completely disable speedup (put by hash).", }, { Value: "1G", Help: "Files larger than 1Gb will be uploaded directly.", }, { Value: "3G", Help: "Choose this option if you have less than 3Gb free on local disk.", }}, }, { Name: "speedup_max_memory", Default: fs.SizeSuffix(32 * 1024 * 1024), Advanced: true, Help: `Files larger than the size given below will always be hashed on disk.`, Examples: []fs.OptionExample{{ Value: "0", Help: "Preliminary hashing will always be done in a temporary disk location.", }, { Value: "32M", Help: "Do not dedicate more than 32Mb RAM for preliminary hashing.", }, { Value: "256M", Help: "You have at most 256Mb RAM free for hash calculations.", }}, }, { Name: "check_hash", Default: true, Advanced: true, Help: "What should copy do if file checksum is mismatched or invalid.", Examples: []fs.OptionExample{{ Value: "true", Help: "Fail with error.", }, { Value: "false", Help: "Ignore and continue.", }}, }, { Name: "user_agent", Default: "", Advanced: true, Hide: fs.OptionHideBoth, Help: `HTTP user agent used internally by client. Defaults to "rclone/VERSION" or "--user-agent" provided on command line.`, }, { Name: "quirks", Default: "", Advanced: true, Hide: fs.OptionHideBoth, Help: `Comma separated list of internal maintenance flags. 
This option must not be used by an ordinary user. It is intended only to facilitate remote troubleshooting of backend issues. Strict meaning of flags is not documented and not guaranteed to persist between releases. Quirks will be removed when the backend grows stable. Supported quirks: atomicmkdir binlist unknowndirs`, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // Encode invalid UTF-8 bytes as json doesn't handle them properly. Default: (encoder.Display | encoder.EncodeWin | // :?"*<>| encoder.EncodeBackSlash | encoder.EncodeInvalidUtf8), }}...), }) } // Options defines the configuration for this backend type Options struct { Username string `config:"user"` Password string `config:"pass"` UserAgent string `config:"user_agent"` CheckHash bool `config:"check_hash"` SpeedupEnable bool `config:"speedup_enable"` SpeedupPatterns string `config:"speedup_file_patterns"` SpeedupMaxDisk fs.SizeSuffix `config:"speedup_max_disk"` SpeedupMaxMem fs.SizeSuffix `config:"speedup_max_memory"` Quirks string `config:"quirks"` Enc encoder.MultiEncoder `config:"encoding"` } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this response and err // deserve to be retried. It returns the err as a convenience. // Retries password authorization (once) in a special case of access denied. 
func shouldRetry(ctx context.Context, res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } if res != nil && res.StatusCode == 403 && f.opt.Password != "" && !f.passFailed { reAuthErr := f.reAuthorize(opts, err) return reAuthErr == nil, err // return an original error } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err } // errorHandler parses a non 2xx error response into an error func errorHandler(res *http.Response) (err error) { data, err := rest.ReadBody(res) if err != nil { return err } fileError := &api.FileErrorResponse{} err = json.NewDecoder(bytes.NewReader(data)).Decode(fileError) if err == nil { fileError.Message = fileError.Body.Home.Error return fileError } serverError := &api.ServerErrorResponse{} err = json.NewDecoder(bytes.NewReader(data)).Decode(serverError) if err == nil { return serverError } serverError.Message = string(data) if serverError.Message == "" || strings.HasPrefix(serverError.Message, "{") { // Replace empty or JSON response with a human-readable text. 
serverError.Message = res.Status } serverError.Status = res.StatusCode return serverError } // Fs represents a remote mail.ru type Fs struct { name string root string // root path opt Options // parsed options ci *fs.ConfigInfo // global config speedupGlobs []string // list of file name patterns eligible for speedup speedupAny bool // true if all file names are eligible for speedup features *fs.Features // optional features srv *rest.Client // REST API client cli *http.Client // underlying HTTP client (for authorize) m configmap.Mapper // config reader (for authorize) source oauth2.TokenSource // OAuth token refresher pacer *fs.Pacer // pacer for API calls metaMu sync.Mutex // lock for meta server switcher metaURL string // URL of meta server metaExpiry time.Time // time to refresh meta server shardMu sync.Mutex // lock for upload shard switcher shardURL string // URL of upload shard shardExpiry time.Time // time to refresh upload shard fileServers serverPool // file server dispatcher authMu sync.Mutex // mutex for authorize() passFailed bool // true if authorize() failed after 403 quirks quirks // internal maintenance flags } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // fs.Debugf(nil, ">>> NewFs %q %q", name, root) // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } if opt.Password != "" { opt.Password = obscure.MustReveal(opt.Password) } // Trailing slash signals us to optimize out one file check rootIsDir := strings.HasSuffix(root, "/") // However the f.root string should not have leading or trailing slashes root = strings.Trim(root, "/") ci := fs.GetConfig(ctx) f := &Fs{ name: name, root: root, opt: *opt, ci: ci, m: m, } if err := f.parseSpeedupPatterns(opt.SpeedupPatterns); err != nil { return nil, err } f.quirks.parseQuirks(opt.Quirks) f.pacer = fs.NewPacer(ctx, 
pacer.NewDefault(pacer.MinSleep(minSleepPacer), pacer.MaxSleep(maxSleepPacer), pacer.DecayConstant(decayConstPacer))) f.features = (&fs.Features{ CaseInsensitive: true, CanHaveEmptyDirectories: true, // Can copy/move across mailru configs (almost, thus true here), but // only when they share common account (this is checked in Copy/Move). ServerSideAcrossConfigs: true, }).Fill(ctx, f) // Override few config settings and create a client newCtx, clientConfig := fs.AddConfig(ctx) if opt.UserAgent != "" { clientConfig.UserAgent = opt.UserAgent } clientConfig.NoGzip = true // Mimic official client, skip sending "Accept-Encoding: gzip" f.cli = fshttp.NewClient(newCtx) f.srv = rest.NewClient(f.cli) f.srv.SetRoot(api.APIServerURL) f.srv.SetHeader("Accept", "*/*") // Send "Accept: */*" with every request like official client f.srv.SetErrorHandler(errorHandler) if err = f.authorize(ctx, false); err != nil { return nil, err } f.fileServers = serverPool{ pool: make(pendingServerMap), fs: f, path: "/d", expirySec: serverExpirySec, } if !rootIsDir { _, dirSize, err := f.readItemMetaData(ctx, f.root) rootIsDir = (dirSize >= 0) // Ignore non-existing item and other errors if err == nil && !rootIsDir { root = path.Dir(f.root) if root == "." { root = "" } f.root = root // Return fs that points to the parent and signal rclone to do filtering return f, fs.ErrorIsFile } } return f, nil } // Internal maintenance flags (to be removed when the backend matures). // Primarily intended to facilitate remote support and troubleshooting. type quirks struct { binlist bool atomicmkdir bool unknowndirs bool } func (q *quirks) parseQuirks(option string) { for flag := range strings.SplitSeq(option, ",") { switch strings.ToLower(strings.TrimSpace(flag)) { case "binlist": // The official client sometimes uses a so called "bin" protocol, // implemented in the listBin file system method below. 
This method // is generally faster than non-recursive listM1 but results in // sporadic deserialization failures if total size of tree data // approaches 8Kb (?). The recursive method is normally disabled. // This quirk can be used to enable it for further investigation. // Remove this quirk when the "bin" protocol support is complete. q.binlist = true case "atomicmkdir": // At the moment rclone requires Mkdir to return success if the // directory already exists. However, such programs as borgbackup // use mkdir as a locking primitive and depend on its atomicity. // Remove this quirk when the above issue is investigated. q.atomicmkdir = true case "unknowndirs": // Accepts unknown resource types as folders. q.unknowndirs = true default: // Ignore unknown flags } } } // Note: authorize() is not safe for concurrent access as it updates token source func (f *Fs) authorize(ctx context.Context, force bool) (err error) { var t *oauth2.Token if !force { t, err = oauthutil.GetToken(f.name, f.m) } if err != nil || !tokenIsValid(t) { fs.Infof(f, "Valid token not found, authorizing.") ctx := oauthutil.Context(ctx, f.cli) oauth2Conf := oauthConfig.MakeOauth2Config() t, err = oauth2Conf.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password) } if err == nil && !tokenIsValid(t) { err = errors.New("invalid token") } if err != nil { return fmt.Errorf("failed to authorize: %w", err) } if err = oauthutil.PutToken(f.name, f.m, t, false); err != nil { return err } // Mailru API server expects access token not in the request header but // in the URL query string, so we must use a bare token source rather than // client provided by oauthutil. 
// // WARNING: direct use of the returned token source triggers a bug in the // `(*token != *ts.token)` comparison in oauthutil.TokenSource.Token() // crashing with panic `comparing uncomparable type map[string]interface{}` // As a workaround, mimic oauth2.NewClient() wrapping token source in // oauth2.ReuseTokenSource _, ts, err := oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, f.cli) if err == nil { f.source = oauth2.ReuseTokenSource(nil, ts) } return err } func tokenIsValid(t *oauth2.Token) bool { return t.Valid() && t.RefreshToken != "" && t.Type() == "Bearer" } // reAuthorize is called after getting 403 (access denied) from the server. // It handles the case when user has changed password since a previous // rclone invocation and obtains a new access token, if needed. func (f *Fs) reAuthorize(opts *rest.Opts, origErr error) error { // lock and recheck the flag to ensure authorize() is attempted only once f.authMu.Lock() defer f.authMu.Unlock() if f.passFailed { return origErr } ctx := context.Background() // Note: reAuthorize is called by ShouldRetry, no context! 
fs.Debugf(f, "re-authorize with new password") if err := f.authorize(ctx, true); err != nil { f.passFailed = true return err } // obtain new token, if needed tokenParameter := "" if opts != nil && opts.Parameters.Get("token") != "" { tokenParameter = "token" } if opts != nil && opts.Parameters.Get("access_token") != "" { tokenParameter = "access_token" } if tokenParameter != "" { token, err := f.accessToken() if err != nil { f.passFailed = true return err } opts.Parameters.Set(tokenParameter, token) } return nil } // accessToken() returns OAuth token and possibly refreshes it func (f *Fs) accessToken() (string, error) { token, err := f.source.Token() if err != nil { return "", fmt.Errorf("cannot refresh access token: %w", err) } return token.AccessToken, nil } // absPath converts root-relative remote to absolute home path func (f *Fs) absPath(remote string) string { return path.Join("/", f.root, remote) } // relPath converts absolute home path to root-relative remote // Note that f.root can not have leading and trailing slashes func (f *Fs) relPath(absPath string) (string, error) { target := strings.Trim(absPath, "/") if f.root == "" { return target, nil } if target == f.root { return "", nil } if strings.HasPrefix(target+"/", f.root+"/") { return target[len(f.root)+1:], nil } return "", fmt.Errorf("path %q should be under %q", absPath, f.root) } // metaServer returns URL of current meta server func (f *Fs) metaServer(ctx context.Context) (string, error) { f.metaMu.Lock() defer f.metaMu.Unlock() if f.metaURL != "" && time.Now().Before(f.metaExpiry) { return f.metaURL, nil } opts := rest.Opts{ RootURL: api.DispatchServerURL, Method: "GET", Path: "/m", } var ( res *http.Response url string err error ) err = f.pacer.Call(func() (bool, error) { res, err = f.srv.Call(ctx, &opts) if err == nil { url, err = readBodyWord(res) } return fserrors.ShouldRetry(err), err }) if err != nil { closeBody(res) return "", err } f.metaURL = url f.metaExpiry = 
time.Now().Add(metaExpirySec * time.Second) fs.Debugf(f, "new meta server: %s", f.metaURL) return f.metaURL, nil } // readBodyWord reads the single line response to completion // and extracts the first word from the first line. func readBodyWord(res *http.Response) (word string, err error) { var body []byte body, err = rest.ReadBody(res) if err == nil { line := strings.Trim(string(body), " \r\n") word = strings.Split(line, " ")[0] } if word == "" { return "", errors.New("empty reply from dispatcher") } return word, nil } // readItemMetaData returns a file/directory info at given full path // If it can't be found it fails with fs.ErrorObjectNotFound // For the return value `dirSize` please see Fs.itemToEntry() func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEntry, dirSize int, err error) { token, err := f.accessToken() if err != nil { return nil, -1, err } opts := rest.Opts{ Method: "GET", Path: "/api/m1/file", Parameters: url.Values{ "access_token": {token}, "home": {f.opt.Enc.FromStandardPath(path)}, "offset": {"0"}, "limit": {strconv.Itoa(maxInt32)}, }, } var info api.ItemInfoResponse err = f.pacer.Call(func() (bool, error) { res, err := f.srv.CallJSON(ctx, &opts, nil, &info) return shouldRetry(ctx, res, err, f, &opts) }) if err != nil { if apiErr, ok := err.(*api.FileErrorResponse); ok { switch apiErr.Status { case 404: err = fs.ErrorObjectNotFound case 400: fs.Debugf(f, "object %q status %d (%s)", path, apiErr.Status, apiErr.Message) err = fs.ErrorObjectNotFound } } return } entry, dirSize, err = f.itemToDirEntry(ctx, &info.Body) return } // itemToDirEntry converts API item to rclone directory entry // The dirSize return value is: // // <0 - for a file or in case of error // =0 - for an empty directory // >0 - for a non-empty directory func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.DirEntry, dirSize int, err error) { remote, err := f.relPath(f.opt.Enc.ToStandardPath(item.Home)) if err != nil { return 
nil, -1, err } modTime := time.Unix(int64(item.Mtime), 0) isDir, err := f.isDir(item.Kind, remote) if err != nil { return nil, -1, err } if isDir { dir := fs.NewDir(remote, modTime).SetSize(item.Size) return dir, item.Count.Files + item.Count.Folders, nil } binHash, err := mrhash.DecodeString(item.Hash) if err != nil { return nil, -1, err } file := &Object{ fs: f, remote: remote, hasMetaData: true, size: item.Size, mrHash: binHash, modTime: modTime, } return file, -1, nil } // isDir returns true for directories, false for files func (f *Fs) isDir(kind, path string) (bool, error) { switch kind { case "": return false, errors.New("empty resource type") case "file": return false, nil case "folder": // fall thru case "camera-upload", "mounted", "shared": fs.Debugf(f, "[%s]: folder has type %q", path, kind) default: if !f.quirks.unknowndirs { return false, fmt.Errorf("unknown resource type %q", kind) } fs.Errorf(f, "[%s]: folder has unknown type %q", path, kind) } return true, nil } // List the objects and directories in dir into entries. // The entries can be returned in any order but should be for a complete directory. // dir should be "" to list the root, and should not have trailing slashes. // This should return ErrDirNotFound if the directory isn't found. 
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { // fs.Debugf(f, ">>> List: %q", dir) if f.quirks.binlist { entries, err = f.listBin(ctx, f.absPath(dir), 1) } else { entries, err = f.listM1(ctx, f.absPath(dir), 0, maxInt32) } if err == nil && f.ci.LogLevel >= fs.LogLevelDebug { names := []string{} for _, entry := range entries { names = append(names, entry.Remote()) } sort.Strings(names) // fs.Debugf(f, "List(%q): %v", dir, names) } return } // list using protocol "m1" func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int) (entries fs.DirEntries, err error) { token, err := f.accessToken() if err != nil { return nil, err } params := url.Values{} params.Set("access_token", token) params.Set("offset", strconv.Itoa(offset)) params.Set("limit", strconv.Itoa(limit)) data := url.Values{} data.Set("home", f.opt.Enc.FromStandardPath(dirPath)) opts := rest.Opts{ Method: "POST", Path: "/api/m1/folder", Parameters: params, Body: strings.NewReader(data.Encode()), ContentType: api.BinContentType, } var ( info api.FolderInfoResponse res *http.Response ) err = f.pacer.Call(func() (bool, error) { res, err = f.srv.CallJSON(ctx, &opts, nil, &info) return shouldRetry(ctx, res, err, f, &opts) }) if err != nil { apiErr, ok := err.(*api.FileErrorResponse) if ok && apiErr.Status == 404 { return nil, fs.ErrorDirNotFound } return nil, err } isDir, err := f.isDir(info.Body.Kind, dirPath) if err != nil { return nil, err } if !isDir { return nil, fs.ErrorIsFile } for _, item := range info.Body.List { entry, _, err := f.itemToDirEntry(ctx, &item) if err == nil { entries = append(entries, entry) } else { fs.Debugf(f, "Excluding path %q from list: %v", item.Home, err) } } return entries, nil } // list using protocol "bin" func (f *Fs) listBin(ctx context.Context, dirPath string, depth int) (entries fs.DirEntries, err error) { options := api.ListOptDefaults req := api.NewBinWriter() req.WritePu16(api.OperationFolderList) 
req.WriteString(f.opt.Enc.FromStandardPath(dirPath)) req.WritePu32(int64(depth)) req.WritePu32(int64(options)) req.WritePu32(0) token, err := f.accessToken() if err != nil { return nil, err } metaURL, err := f.metaServer(ctx) if err != nil { return nil, err } opts := rest.Opts{ Method: "POST", RootURL: metaURL, Parameters: url.Values{ "client_id": {api.OAuthClientID}, "token": {token}, }, ContentType: api.BinContentType, Body: req.Reader(), } var res *http.Response err = f.pacer.Call(func() (bool, error) { res, err = f.srv.Call(ctx, &opts) return shouldRetry(ctx, res, err, f, &opts) }) if err != nil { closeBody(res) return nil, err } r := api.NewBinReader(res.Body) defer closeBody(res) // read status switch status := r.ReadByteAsInt(); status { case api.ListResultOK: // go on... case api.ListResultNotExists: return nil, fs.ErrorDirNotFound default: return nil, fmt.Errorf("directory list error %d", status) } t := &treeState{ f: f, r: r, options: options, rootDir: parentDir(dirPath), lastDir: "", level: 0, } t.currDir = t.rootDir // read revision if err := t.revision.Read(r); err != nil { return nil, err } // read space if (options & api.ListOptTotalSpace) != 0 { t.totalSpace = int64(r.ReadULong()) } if (options & api.ListOptUsedSpace) != 0 { t.usedSpace = int64(r.ReadULong()) } t.fingerprint = r.ReadBytesByLength() // deserialize for { entry, err := t.NextRecord() if err != nil { break } if entry != nil { entries = append(entries, entry) } } if err != nil && err != fs.ErrorListAborted { fs.Debugf(f, "listBin failed at offset %d: %v", r.Count(), err) return nil, err } return entries, nil } func (t *treeState) NextRecord() (fs.DirEntry, error) { r := t.r parseOp := r.ReadByteAsShort() if r.Error() != nil { return nil, r.Error() } switch parseOp { case api.ListParseDone: return nil, fs.ErrorListAborted case api.ListParsePin: if t.lastDir == "" { return nil, errors.New("last folder is null") } t.currDir = t.lastDir t.level++ return nil, nil case api.ListParsePinUpper: 
if t.currDir == t.rootDir { return nil, nil } if t.level <= 0 { return nil, errors.New("no parent folder") } t.currDir = parentDir(t.currDir) t.level-- return nil, nil case api.ListParseUnknown15: skip := int(r.ReadPu32()) for range skip { r.ReadPu32() r.ReadPu32() } return nil, nil case api.ListParseReadItem: // get item (see below) default: return nil, fmt.Errorf("unknown parse operation %d", parseOp) } // get item head := r.ReadIntSpl() itemType := head & 3 if (head & 4096) != 0 { t.dunnoNodeID = r.ReadNBytes(api.DunnoNodeIDLength) } name := t.f.opt.Enc.FromStandardPath(string(r.ReadBytesByLength())) t.dunno1 = int(r.ReadULong()) t.dunno2 = 0 t.dunno3 = 0 if r.Error() != nil { return nil, r.Error() } var ( modTime time.Time size int64 binHash []byte dirSize int64 isDir = true ) switch itemType { case api.ListItemMountPoint: t.treeID = r.ReadNBytes(api.TreeIDLength) t.dunno2 = int(r.ReadULong()) t.dunno3 = int(r.ReadULong()) case api.ListItemFolder: t.dunno2 = int(r.ReadULong()) case api.ListItemSharedFolder: t.dunno2 = int(r.ReadULong()) t.treeID = r.ReadNBytes(api.TreeIDLength) case api.ListItemFile: isDir = false modTime = r.ReadDate() size = int64(r.ReadULong()) binHash = r.ReadNBytes(mrhash.Size) default: return nil, fmt.Errorf("unknown item type %d", itemType) } if isDir { t.lastDir = path.Join(t.currDir, name) if (t.options & api.ListOptDelete) != 0 { t.dunnoDel1 = int(r.ReadPu32()) t.dunnoDel2 = int(r.ReadPu32()) } if (t.options & api.ListOptFolderSize) != 0 { dirSize = int64(r.ReadULong()) } } if r.Error() != nil { return nil, r.Error() } if t.f.ci.LogLevel >= fs.LogLevelDebug { ctime, _ := modTime.MarshalJSON() fs.Debugf(t.f, "binDir %d.%d %q %q (%d) %s", t.level, itemType, t.currDir, name, size, ctime) } if t.level != 1 { // TODO: implement recursion and ListR // Note: recursion is broken because maximum buffer size is 8K return nil, nil } remote, err := t.f.relPath(path.Join(t.currDir, name)) if err != nil { return nil, err } if isDir { return 
fs.NewDir(remote, modTime).SetSize(dirSize), nil } obj := &Object{ fs: t.f, remote: remote, hasMetaData: true, size: size, mrHash: binHash, modTime: modTime, } return obj, nil } type treeState struct { f *Fs r *api.BinReader options int rootDir string currDir string lastDir string level int revision treeRevision totalSpace int64 usedSpace int64 fingerprint []byte dunno1 int dunno2 int dunno3 int dunnoDel1 int dunnoDel2 int dunnoNodeID []byte treeID []byte } type treeRevision struct { ver int16 treeID []byte treeIDNew []byte bgn uint64 bgnNew uint64 } func (rev *treeRevision) Read(data *api.BinReader) error { rev.ver = data.ReadByteAsShort() switch rev.ver { case 0: // Revision() case 1, 2: rev.treeID = data.ReadNBytes(api.TreeIDLength) rev.bgn = data.ReadULong() case 3, 4: rev.treeID = data.ReadNBytes(api.TreeIDLength) rev.bgn = data.ReadULong() rev.treeIDNew = data.ReadNBytes(api.TreeIDLength) rev.bgnNew = data.ReadULong() case 5: rev.treeID = data.ReadNBytes(api.TreeIDLength) rev.bgn = data.ReadULong() rev.treeIDNew = data.ReadNBytes(api.TreeIDLength) default: return fmt.Errorf("unknown directory revision %d", rev.ver) } return data.Error() } // CreateDir makes a directory (parent must exist) func (f *Fs) CreateDir(ctx context.Context, path string) error { // fs.Debugf(f, ">>> CreateDir %q", path) req := api.NewBinWriter() req.WritePu16(api.OperationCreateFolder) req.WritePu16(0) // revision req.WriteString(f.opt.Enc.FromStandardPath(path)) req.WritePu32(0) token, err := f.accessToken() if err != nil { return err } metaURL, err := f.metaServer(ctx) if err != nil { return err } opts := rest.Opts{ Method: "POST", RootURL: metaURL, Parameters: url.Values{ "client_id": {api.OAuthClientID}, "token": {token}, }, ContentType: api.BinContentType, Body: req.Reader(), } var res *http.Response err = f.pacer.Call(func() (bool, error) { res, err = f.srv.Call(ctx, &opts) return shouldRetry(ctx, res, err, f, &opts) }) if err != nil { closeBody(res) return err } reply := 
api.NewBinReader(res.Body) defer closeBody(res) switch status := reply.ReadByteAsInt(); status { case api.MkdirResultOK: return nil case api.MkdirResultAlreadyExists, api.MkdirResultExistsDifferentCase: return ErrorDirAlreadyExists case api.MkdirResultSourceNotExists: return ErrorDirSourceNotExists case api.MkdirResultInvalidName: return ErrorInvalidName default: return fmt.Errorf("mkdir error %d", status) } } // Mkdir creates the container (and its parents) if it doesn't exist. // Normally it ignores the ErrorDirAlreadyExist, as required by rclone tests. // Nevertheless, such programs as borgbackup or restic use mkdir as a locking // primitive and depend on its atomicity, i.e. mkdir should fail if directory // already exists. As a workaround, users can add string "atomicmkdir" in the // hidden `quirks` parameter or in the `--mailru-quirks` command-line option. func (f *Fs) Mkdir(ctx context.Context, dir string) error { // fs.Debugf(f, ">>> Mkdir %q", dir) err := f.mkDirs(ctx, f.absPath(dir)) if err == ErrorDirAlreadyExists && !f.quirks.atomicmkdir { return nil } return err } // mkDirs creates container and its parents by absolute path, // fails with ErrorDirAlreadyExists if it already exists. func (f *Fs) mkDirs(ctx context.Context, path string) error { if path == "/" || path == "" { return nil } switch err := f.CreateDir(ctx, path); err { case nil: return nil case ErrorDirSourceNotExists: fs.Debugf(f, "mkDirs by part %q", path) // fall thru... default: return err } parts := strings.Split(strings.Trim(path, "/"), "/") path = "" for _, part := range parts { if part == "" { continue } path += "/" + part switch err := f.CreateDir(ctx, path); err { case nil, ErrorDirAlreadyExists: continue default: return err } } return nil } func parentDir(absPath string) string { parent := path.Dir(strings.TrimRight(absPath, "/")) if parent == "." { parent = "" } return parent } // mkParentDirs creates parent containers by absolute path, // ignores the ErrorDirAlreadyExists
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/mailru/mailru_test.go
backend/mailru/mailru_test.go
// Test Mailru filesystem interface
package mailru_test

import (
	"testing"

	"github.com/rclone/rclone/backend/mailru"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
// named "TestMailru:" using the shared fstests conformance harness.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		// Remote name the harness looks up in the test configuration.
		RemoteName: "TestMailru:",
		// Typed nil so the harness can check Object interface behavior.
		NilObject: (*mailru.Object)(nil),
		// Skip test names containing characters invalid on Windows.
		SkipBadWindowsCharacters: true,
	})
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/mailru/api/bin.go
backend/mailru/api/bin.go
package api

// BIN protocol constants
const (
	BinContentType    = "application/x-www-form-urlencoded"
	TreeIDLength      = 12
	DunnoNodeIDLength = 16
)

// Operations in binary protocol
const (
	OperationAddFile           = 103 // 0x67
	OperationRename            = 105 // 0x69
	OperationCreateFolder      = 106 // 0x6A
	OperationFolderList        = 117 // 0x75
	OperationSharedFoldersList = 121 // 0x79
	// TODO investigate opcodes below
	Operation154MaybeItemInfo = 154 // 0x9A
	Operation102MaybeAbout    = 102 // 0x66
	Operation104MaybeDelete   = 104 // 0x68
)

// CreateDir protocol constants (status byte of the mkdir reply)
const (
	MkdirResultOK                  = 0
	MkdirResultSourceNotExists     = 1
	MkdirResultAlreadyExists       = 4
	MkdirResultExistsDifferentCase = 9
	MkdirResultInvalidName         = 10
	MkdirResultFailed254           = 254
)

// Move result codes
const (
	MoveResultOK              = 0
	MoveResultSourceNotExists = 1
	MoveResultFailed002       = 2
	MoveResultAlreadyExists   = 4
	MoveResultFailed005       = 5
	MoveResultFailed254       = 254
)

// AddFile result codes
const (
	AddResultOK          = 0
	AddResultError01     = 1
	AddResultDunno04     = 4
	AddResultWrongPath   = 5
	AddResultNoFreeSpace = 7
	AddResultDunno09     = 9
	AddResultInvalidName = 10
	AddResultNotModified = 12
	AddResultFailedA     = 253
	AddResultFailedB     = 254
)

// List request options (bit flags combined into the request)
const (
	ListOptTotalSpace  = 1
	ListOptDelete      = 2
	ListOptFingerprint = 4
	ListOptUnknown8    = 8
	ListOptUnknown16   = 16
	ListOptFolderSize  = 32
	ListOptUsedSpace   = 64
	ListOptUnknown128  = 128
	ListOptUnknown256  = 256
)

// ListOptDefaults is the default set of list option flags sent with
// folder list requests.
const ListOptDefaults = ListOptUnknown128 | ListOptUnknown256 | ListOptFolderSize | ListOptTotalSpace | ListOptUsedSpace

// List parse flags (per-record opcodes in a list reply stream)
const (
	ListParseDone      = 0
	ListParseReadItem  = 1
	ListParsePin       = 2
	ListParsePinUpper  = 3
	ListParseUnknown15 = 15
)

// List operation results (status byte of the list reply)
const (
	ListResultOK              = 0
	ListResultNotExists       = 1
	ListResultDunno02         = 2
	ListResultDunno03         = 3
	ListResultAlreadyExists04 = 4
	ListResultDunno05         = 5
	ListResultDunno06         = 6
	ListResultDunno07         = 7
	ListResultDunno08         = 8
	ListResultAlreadyExists09 = 9
	ListResultDunno10         = 10
	ListResultDunno11         = 11
	ListResultDunno12         = 12
	ListResultFailedB         = 253
	ListResultFailedA         = 254
)

// Directory item types
const (
	ListItemMountPoint   = 0
	ListItemFile         = 1
	ListItemFolder       = 2
	ListItemSharedFolder = 3
)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/mailru/api/m1.go
backend/mailru/api/m1.go
// Package api provides types used by the Mail.ru API.
package api

import (
	"fmt"
)

// M1 protocol constants and structures
const (
	APIServerURL      = "https://cloud.mail.ru"
	PublicLinkURL     = "https://cloud.mail.ru/public/"
	DispatchServerURL = "https://dispatcher.cloud.mail.ru"
	OAuthURL          = "https://o2.mail.ru/token"
	OAuthClientID     = "cloud-win"
)

// ServerErrorResponse represents erroneous API response.
type ServerErrorResponse struct {
	Message string `json:"body"`
	Time    int64  `json:"time"`
	Status  int    `json:"status"`
}

// Error implements the error interface for ServerErrorResponse.
func (e *ServerErrorResponse) Error() string {
	return fmt.Sprintf("server error %d (%s)", e.Status, e.Message)
}

// FileErrorResponse represents erroneous API response for a file
type FileErrorResponse struct {
	Body struct {
		Home struct {
			Value string `json:"value"`
			Error string `json:"error"`
		} `json:"home"`
	} `json:"body"`
	Status  int    `json:"status"`
	Account string `json:"email,omitempty"`
	Time    int64  `json:"time,omitempty"`
	Message string // non-json, calculated field
}

// Error implements the error interface for FileErrorResponse.
func (e *FileErrorResponse) Error() string {
	return fmt.Sprintf("file error %d (%s)", e.Status, e.Body.Home.Error)
}

// UserInfoResponse contains account metadata
type UserInfoResponse struct {
	Body struct {
		AccountType     string `json:"account_type"`
		AccountVerified bool   `json:"account_verified"`
		Cloud           struct {
			Beta struct {
				Allowed bool `json:"allowed"`
				Asked   bool `json:"asked"`
			} `json:"beta"`
			Billing struct {
				ActiveCostID string `json:"active_cost_id"`
				ActiveRateID string `json:"active_rate_id"`
				AutoProlong  bool   `json:"auto_prolong"`
				Basequota    int64  `json:"basequota"`
				Enabled      bool   `json:"enabled"`
				Expires      int64  `json:"expires"`
				Prolong      bool   `json:"prolong"`
				Promocodes   struct {
				} `json:"promocodes"`
				Subscription []any  `json:"subscription"`
				Version      string `json:"version"`
			} `json:"billing"`
			Bonuses struct {
				CameraUpload bool `json:"camera_upload"`
				Complete     bool `json:"complete"`
				Desktop      bool `json:"desktop"`
				Feedback     bool `json:"feedback"`
				Links        bool `json:"links"`
				Mobile       bool `json:"mobile"`
				Registration bool `json:"registration"`
			} `json:"bonuses"`
			Enable struct {
				Sharing bool `json:"sharing"`
			} `json:"enable"`
			FileSizeLimit int64 `json:"file_size_limit"`
			Space         struct {
				BytesTotal int64 `json:"bytes_total"`
				BytesUsed  int64 `json:"bytes_used"`
				Overquota  bool  `json:"overquota"`
			} `json:"space"`
		} `json:"cloud"`
		Cloudflags struct {
			Exists bool `json:"exists"`
		} `json:"cloudflags"`
		Domain string `json:"domain"`
		Login  string `json:"login"`
		Newbie bool   `json:"newbie"`
		UI     struct {
			ExpandLoader bool   `json:"expand_loader"`
			Kind         string `json:"kind"`
			Sidebar      bool   `json:"sidebar"`
			Sort         struct {
				Order string `json:"order"`
				Type  string `json:"type"`
			} `json:"sort"`
			Thumbs bool `json:"thumbs"`
		} `json:"ui"`
	} `json:"body"`
	Email  string `json:"email"`
	Status int    `json:"status"`
	Time   int64  `json:"time"`
}

// ListItem describes a single file or folder entry as returned in
// listing and metadata replies.
type ListItem struct {
	Count struct {
		Folders int `json:"folders"`
		Files   int `json:"files"`
	} `json:"count,omitempty"`
	Kind      string `json:"kind"`
	Type      string `json:"type"`
	Name      string `json:"name"`
	Home      string `json:"home"`
	Size      int64  `json:"size"`
	Mtime     uint64 `json:"mtime,omitempty"`
	Hash      string `json:"hash,omitempty"`
	VirusScan string `json:"virus_scan,omitempty"`
	Tree      string `json:"tree,omitempty"`
	Grev      int    `json:"grev,omitempty"`
	Rev       int    `json:"rev,omitempty"`
}

// ItemInfoResponse is the reply to a single item metadata request,
// carrying one ListItem in its body.
type ItemInfoResponse struct {
	Email  string   `json:"email"`
	Body   ListItem `json:"body"`
	Time   int64    `json:"time"`
	Status int      `json:"status"`
}

// FolderInfoResponse is the reply to a folder listing request; the
// folder contents are in Body.List.
type FolderInfoResponse struct {
	Body struct {
		Count struct {
			Folders int `json:"folders"`
			Files   int `json:"files"`
		} `json:"count"`
		Tree string `json:"tree"`
		Name string `json:"name"`
		Grev int    `json:"grev"`
		Size int64  `json:"size"`
		Sort struct {
			Order string `json:"order"`
			Type  string `json:"type"`
		} `json:"sort"`
		Kind string     `json:"kind"`
		Rev  int        `json:"rev"`
		Type string     `json:"type"`
		Home string     `json:"home"`
		List []ListItem `json:"list"`
	} `json:"body,omitempty"`
	Time   int64  `json:"time"`
	Status int    `json:"status"`
	Email  string `json:"email"`
}

// CleanupResponse is the reply to a cleanup request. Note that its
// status field is a string, unlike the other response types.
type CleanupResponse struct {
	Email     string `json:"email"`
	Time      int64  `json:"time"`
	StatusStr string `json:"status"`
}

// GenericResponse is a minimal reply carrying only status metadata.
type GenericResponse struct {
	Email  string `json:"email"`
	Time   int64  `json:"time"`
	Status int    `json:"status"`
	// ignore other fields
}

// GenericBodyResponse is a generic reply whose body is a plain string.
type GenericBodyResponse struct {
	Email  string `json:"email"`
	Body   string `json:"body"`
	Time   int64  `json:"time"`
	Status int    `json:"status"`
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/mailru/api/helpers.go
backend/mailru/api/helpers.go
package api // BIN protocol helpers import ( "bufio" "bytes" "encoding/binary" "errors" "fmt" "io" "time" "github.com/rclone/rclone/lib/readers" ) // protocol errors var ( ErrorPrematureEOF = errors.New("premature EOF") ErrorInvalidLength = errors.New("invalid length") ErrorZeroTerminate = errors.New("string must end with zero") ) // BinWriter is a binary protocol writer type BinWriter struct { b *bytes.Buffer // growing byte buffer a []byte // temporary buffer for next varint } // NewBinWriter creates a binary protocol helper func NewBinWriter() *BinWriter { return &BinWriter{ b: new(bytes.Buffer), a: make([]byte, binary.MaxVarintLen64), } } // Bytes returns binary data func (w *BinWriter) Bytes() []byte { return w.b.Bytes() } // Reader returns io.Reader with binary data func (w *BinWriter) Reader() io.Reader { return bytes.NewReader(w.b.Bytes()) } // WritePu16 writes a short as unsigned varint func (w *BinWriter) WritePu16(val int) { if val < 0 || val > 65535 { panic(fmt.Sprintf("Invalid UInt16 %v", val)) } w.WritePu64(int64(val)) } // WritePu32 writes a signed long as unsigned varint func (w *BinWriter) WritePu32(val int64) { if val < 0 || val > 4294967295 { panic(fmt.Sprintf("Invalid UInt32 %v", val)) } w.WritePu64(val) } // WritePu64 writes an unsigned (actually, signed) long as unsigned varint func (w *BinWriter) WritePu64(val int64) { if val < 0 { panic(fmt.Sprintf("Invalid UInt64 %v", val)) } w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))]) } // WriteP64 writes an signed long as unsigned varint func (w *BinWriter) WriteP64(val int64) { w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))]) } // WriteString writes a zero-terminated string func (w *BinWriter) WriteString(str string) { buf := []byte(str) w.WritePu64(int64(len(buf) + 1)) w.b.Write(buf) w.b.WriteByte(0) } // Write writes a byte buffer func (w *BinWriter) Write(buf []byte) { w.b.Write(buf) } // WriteWithLength writes a byte buffer prepended with its length as varint func (w *BinWriter) 
WriteWithLength(buf []byte) { w.WritePu64(int64(len(buf))) w.b.Write(buf) } // BinReader is a binary protocol reader helper type BinReader struct { b *bufio.Reader count *readers.CountingReader err error // keeps the first error encountered } // NewBinReader creates a binary protocol reader helper func NewBinReader(reader io.Reader) *BinReader { r := &BinReader{} r.count = readers.NewCountingReader(reader) r.b = bufio.NewReader(r.count) return r } // Count returns number of bytes read func (r *BinReader) Count() uint64 { return r.count.BytesRead() } // Error returns first encountered error or nil func (r *BinReader) Error() error { return r.err } // check() keeps the first error encountered in a stream func (r *BinReader) check(err error) bool { if err == nil { return true } if r.err == nil { // keep the first error r.err = err } if err != io.EOF { panic(fmt.Sprintf("Error parsing response: %v", err)) } return false } // ReadByteAsInt reads a single byte as uint32, returns -1 for EOF or errors func (r *BinReader) ReadByteAsInt() int { if octet, err := r.b.ReadByte(); r.check(err) { return int(octet) } return -1 } // ReadByteAsShort reads a single byte as uint16, returns -1 for EOF or errors func (r *BinReader) ReadByteAsShort() int16 { if octet, err := r.b.ReadByte(); r.check(err) { return int16(octet) } return -1 } // ReadIntSpl reads two bytes as little-endian uint16, returns -1 for EOF or errors func (r *BinReader) ReadIntSpl() int { var val uint16 if r.check(binary.Read(r.b, binary.LittleEndian, &val)) { return int(val) } return -1 } // ReadULong returns uint64 equivalent of -1 for EOF or errors func (r *BinReader) ReadULong() uint64 { if val, err := binary.ReadUvarint(r.b); r.check(err) { return val } return 0xffffffffffffffff } // ReadPu32 returns -1 for EOF or errors func (r *BinReader) ReadPu32() int64 { if val, err := binary.ReadUvarint(r.b); r.check(err) { return int64(val) } return -1 } // ReadNBytes reads given number of bytes, returns invalid data for 
EOF or errors func (r *BinReader) ReadNBytes(len int) []byte { buf := make([]byte, len) n, err := r.b.Read(buf) if r.check(err) { return buf } if n != len { r.check(ErrorPrematureEOF) } return buf } // ReadBytesByLength reads buffer length and its bytes func (r *BinReader) ReadBytesByLength() []byte { len := r.ReadPu32() if len < 0 { r.check(ErrorInvalidLength) return []byte{} } return r.ReadNBytes(int(len)) } // ReadString reads a zero-terminated string with length func (r *BinReader) ReadString() string { len := int(r.ReadPu32()) if len < 1 { r.check(ErrorInvalidLength) return "" } buf := make([]byte, len-1) n, err := r.b.Read(buf) if !r.check(err) { return "" } if n != len-1 { r.check(ErrorPrematureEOF) return "" } zeroByte, err := r.b.ReadByte() if !r.check(err) { return "" } if zeroByte != 0 { r.check(ErrorZeroTerminate) return "" } return string(buf) } // ReadDate reads a Unix encoded time func (r *BinReader) ReadDate() time.Time { return time.Unix(r.ReadPu32(), 0) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/mailru/mrhash/mrhash.go
backend/mailru/mrhash/mrhash.go
// Package mrhash implements the mailru hash, which is a modified SHA1. // If file size is less than or equal to the SHA1 block size (20 bytes), // its hash is simply its data right-padded with zero bytes. // Hash sum of a larger file is computed as a SHA1 sum of the file data // bytes concatenated with a decimal representation of the data length. package mrhash import ( "crypto/sha1" "encoding" "encoding/hex" "errors" "hash" "strconv" ) const ( // BlockSize of the checksum in bytes. BlockSize = sha1.BlockSize // Size of the checksum in bytes. Size = sha1.Size startString = "mrCloud" hashError = "hash function returned error" ) // Global errors var ( ErrorInvalidHash = errors.New("invalid hash") ) type digest struct { total int // bytes written into hash so far sha hash.Hash // underlying SHA1 small []byte // small content } // New returns a new hash.Hash computing the Mailru checksum. func New() hash.Hash { d := &digest{} d.Reset() return d } // Write writes len(p) bytes from p to the underlying data stream. It returns // the number of bytes written from p (0 <= n <= len(p)) and any error // encountered that caused the write to stop early. Write must return a non-nil // error if it returns n < len(p). Write must not modify the slice data, even // temporarily. // // Implementations must not retain p. func (d *digest) Write(p []byte) (n int, err error) { n, err = d.sha.Write(p) if err != nil { panic(hashError) } d.total += n if d.total <= Size { d.small = append(d.small, p...) } return n, nil } // Sum appends the current hash to b and returns the resulting slice. // It does not change the underlying hash state. func (d *digest) Sum(b []byte) []byte { // If content is small, return it padded to Size if d.total <= Size { padded := make([]byte, Size) copy(padded, d.small) return append(b, padded...) 
} endString := strconv.Itoa(d.total) copy, err := cloneSHA1(d.sha) if err == nil { _, err = copy.Write([]byte(endString)) } if err != nil { panic(hashError) } return copy.Sum(b) } // cloneSHA1 clones state of SHA1 hash func cloneSHA1(orig hash.Hash) (clone hash.Hash, err error) { state, err := orig.(encoding.BinaryMarshaler).MarshalBinary() if err != nil { return nil, err } clone = sha1.New() err = clone.(encoding.BinaryUnmarshaler).UnmarshalBinary(state) return } // Reset resets the Hash to its initial state. func (d *digest) Reset() { d.sha = sha1.New() _, _ = d.sha.Write([]byte(startString)) d.total = 0 } // Size returns the number of bytes Sum will return. func (d *digest) Size() int { return Size } // BlockSize returns the hash's underlying block size. // The Write method must be able to accept any amount // of data, but it may operate more efficiently if all writes // are a multiple of the block size. func (d *digest) BlockSize() int { return BlockSize } // Sum returns the Mailru checksum of the data. func Sum(data []byte) []byte { var d digest d.Reset() _, _ = d.Write(data) return d.Sum(nil) } // DecodeString converts a string to the Mailru hash func DecodeString(s string) ([]byte, error) { b, err := hex.DecodeString(s) if err != nil || len(b) != Size { return nil, ErrorInvalidHash } return b, nil } // must implement this interface var ( _ hash.Hash = (*digest)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/mailru/mrhash/mrhash_test.go
backend/mailru/mrhash/mrhash_test.go
package mrhash_test import ( "encoding/hex" "fmt" "testing" "github.com/rclone/rclone/backend/mailru/mrhash" "github.com/stretchr/testify/assert" ) func testChunk(t *testing.T, chunk int) { data := make([]byte, chunk) for i := range chunk { data[i] = 'A' } for _, test := range []struct { n int want string }{ {0, "0000000000000000000000000000000000000000"}, {1, "4100000000000000000000000000000000000000"}, {2, "4141000000000000000000000000000000000000"}, {19, "4141414141414141414141414141414141414100"}, {20, "4141414141414141414141414141414141414141"}, {21, "eb1d05e78a18691a5aa196a6c2b60cd40b5faafb"}, {22, "037e6d960601118a0639afbeff30fe716c66ed2d"}, {4096, "45a16aa192502b010280fb5b44274c601a91fd9f"}, {4194303, "fa019d5bd26498cf6abe35e0d61801bf19bf704b"}, {4194304, "5ed0e07aa6ea5c1beb9402b4d807258f27d40773"}, {4194305, "67bd0b9247db92e0e7d7e29a0947a50fedcb5452"}, {8388607, "41a8e2eb044c2e242971b5445d7be2a13fc0dd84"}, {8388608, "267a970917c624c11fe624276ec60233a66dc2c0"}, {8388609, "37b60b308d553d2732aefb62b3ea88f74acfa13f"}, } { d := mrhash.New() var toWrite int for toWrite = test.n; toWrite >= chunk; toWrite -= chunk { n, err := d.Write(data) assert.Nil(t, err) assert.Equal(t, chunk, n) } n, err := d.Write(data[:toWrite]) assert.Nil(t, err) assert.Equal(t, toWrite, n) got1 := hex.EncodeToString(d.Sum(nil)) assert.Equal(t, test.want, got1, fmt.Sprintf("when testing length %d", n)) got2 := hex.EncodeToString(d.Sum(nil)) assert.Equal(t, test.want, got2, fmt.Sprintf("when testing length %d (2nd sum)", n)) } } func TestHashChunk16M(t *testing.T) { testChunk(t, 16*1024*1024) } func TestHashChunk8M(t *testing.T) { testChunk(t, 8*1024*1024) } func TestHashChunk4M(t *testing.T) { testChunk(t, 4*1024*1024) } func TestHashChunk2M(t *testing.T) { testChunk(t, 2*1024*1024) } func TestHashChunk1M(t *testing.T) { testChunk(t, 1*1024*1024) } func TestHashChunk64k(t *testing.T) { testChunk(t, 64*1024) } func TestHashChunk32k(t *testing.T) { testChunk(t, 32*1024) } func 
TestHashChunk2048(t *testing.T) { testChunk(t, 2048) } func TestHashChunk2047(t *testing.T) { testChunk(t, 2047) } func TestSumCalledTwice(t *testing.T) { d := mrhash.New() assert.NotPanics(t, func() { d.Sum(nil) }) d.Reset() assert.NotPanics(t, func() { d.Sum(nil) }) assert.NotPanics(t, func() { d.Sum(nil) }) _, _ = d.Write([]byte{1}) assert.NotPanics(t, func() { d.Sum(nil) }) } func TestSize(t *testing.T) { d := mrhash.New() assert.Equal(t, 20, d.Size()) } func TestBlockSize(t *testing.T) { d := mrhash.New() assert.Equal(t, 64, d.BlockSize()) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hasher/kv.go
backend/hasher/kv.go
package hasher import ( "bytes" "context" "encoding/gob" "errors" "fmt" "maps" "strings" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/kv" ) const ( timeFormat = "2006-01-02T15:04:05.000000000-0700" anyFingerprint = "*" ) type hashMap map[hash.Type]string type hashRecord struct { Fp string // fingerprint Hashes operations.HashSums Created time.Time } func (r *hashRecord) encode(key string) ([]byte, error) { var buf bytes.Buffer if err := gob.NewEncoder(&buf).Encode(r); err != nil { fs.Debugf(key, "hasher encoding %v: %v", r, err) return nil, err } return buf.Bytes(), nil } func (r *hashRecord) decode(key string, data []byte) error { if err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(r); err != nil { fs.Debugf(key, "hasher decoding %q failed: %v", data, err) return err } return nil } // kvPrune: prune a single hash type kvPrune struct { key string } func (op *kvPrune) Do(ctx context.Context, b kv.Bucket) error { return b.Delete([]byte(op.key)) } // kvPurge: delete a subtree type kvPurge struct { dir string } func (op *kvPurge) Do(ctx context.Context, b kv.Bucket) error { dir := op.dir if !strings.HasSuffix(dir, "/") { dir += "/" } var items []string cur := b.Cursor() bkey, _ := cur.Seek([]byte(dir)) for bkey != nil { key := string(bkey) if !strings.HasPrefix(key, dir) { break } items = append(items, key[len(dir):]) bkey, _ = cur.Next() } nerr := 0 for _, sub := range items { if err := b.Delete([]byte(dir + sub)); err != nil { nerr++ } } fs.Debugf(dir, "%d hashes purged, %d failed", len(items)-nerr, nerr) return nil } // kvMove: assign hashes to new path type kvMove struct { src string dst string dir bool fs *Fs } func (op *kvMove) Do(ctx context.Context, b kv.Bucket) error { src, dst := op.src, op.dst if !op.dir { err := moveHash(b, src, dst) fs.Debugf(op.fs, "moving cached hash %s to %s (err: %v)", src, dst, err) return err } if !strings.HasSuffix(src, "/") { src += 
"/" } if !strings.HasSuffix(dst, "/") { dst += "/" } var items []string cur := b.Cursor() bkey, _ := cur.Seek([]byte(src)) for bkey != nil { key := string(bkey) if !strings.HasPrefix(key, src) { break } items = append(items, key[len(src):]) bkey, _ = cur.Next() } nerr := 0 for _, suffix := range items { srcKey, dstKey := src+suffix, dst+suffix err := moveHash(b, srcKey, dstKey) fs.Debugf(op.fs, "Rename cache record %s -> %s (err: %v)", srcKey, dstKey, err) if err != nil { nerr++ } } fs.Debugf(op.fs, "%d hashes moved, %d failed", len(items)-nerr, nerr) return nil } func moveHash(b kv.Bucket, src, dst string) error { data := b.Get([]byte(src)) err := b.Delete([]byte(src)) if err != nil || len(data) == 0 { return err } return b.Put([]byte(dst), data) } // kvGet: get single hash from database type kvGet struct { key string fp string hash string val string age time.Duration } func (op *kvGet) Do(ctx context.Context, b kv.Bucket) error { data := b.Get([]byte(op.key)) if len(data) == 0 { return errors.New("no record") } var r hashRecord if err := r.decode(op.key, data); err != nil { return errors.New("invalid record") } if !(r.Fp == anyFingerprint || op.fp == anyFingerprint || r.Fp == op.fp) { return errors.New("fingerprint changed") } if time.Since(r.Created) > op.age { return errors.New("record timed out") } if r.Hashes != nil { op.val = r.Hashes[op.hash] } return nil } // kvPut: set hashes for an object by key type kvPut struct { key string fp string hashes operations.HashSums age time.Duration } func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) { data := b.Get([]byte(op.key)) var r hashRecord if len(data) > 0 { err = r.decode(op.key, data) if err != nil || r.Fp != op.fp || time.Since(r.Created) > op.age { r.Hashes = nil } } if len(r.Hashes) == 0 { r.Created = time.Now() r.Hashes = operations.HashSums{} r.Fp = op.fp } maps.Copy(r.Hashes, op.hashes) if data, err = r.encode(op.key); err != nil { return fmt.Errorf("marshal failed: %w", err) } if err = 
b.Put([]byte(op.key), data); err != nil { return fmt.Errorf("put failed: %w", err) } return err } // kvDump: dump the database. // Note: long dump can cause concurrent operations to fail. type kvDump struct { full bool root string path string fs *Fs num int total int } func (op *kvDump) Do(ctx context.Context, b kv.Bucket) error { f, baseRoot, dbPath := op.fs, op.root, op.path if op.full { total := 0 num := 0 _ = b.ForEach(func(bkey, data []byte) error { total++ key := string(bkey) include := (baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/")) var r hashRecord if err := r.decode(key, data); err != nil { fs.Errorf(nil, "%s: invalid record: %v", key, err) return nil } fmt.Println(f.dumpLine(&r, key, include, nil)) if include { num++ } return nil }) fs.Infof(dbPath, "%d records out of %d", num, total) op.num, op.total = num, total // for unit tests return nil } num := 0 cur := b.Cursor() var bkey, data []byte if baseRoot != "" { bkey, data = cur.Seek([]byte(baseRoot)) } else { bkey, data = cur.First() } for bkey != nil { key := string(bkey) if !(baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/")) { break } var r hashRecord if err := r.decode(key, data); err != nil { fs.Errorf(nil, "%s: invalid record: %v", key, err) continue } if key = strings.TrimPrefix(key[len(baseRoot):], "/"); key == "" { key = "/" } fmt.Println(f.dumpLine(&r, key, true, nil)) num++ bkey, data = cur.Next() } fs.Infof(dbPath, "%d records", num) op.num = num // for unit tests return nil } func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) string { var status string switch { case !include: status = "ext" case err != nil: status = "bad" case r.Fp == anyFingerprint: status = "stk" default: status = "ok " } var hashes []string for _, hashType := range f.keepHashes.Array() { hashName := hashType.String() hashVal := r.Hashes[hashName] if hashVal == "" || err != nil { hashVal = "-" } hashVal = fmt.Sprintf("%-*s", hash.Width(hashType, 
false), hashVal) hashes = append(hashes, hashName+":"+hashVal) } hashesStr := strings.Join(hashes, " ") age := time.Since(r.Created).Round(time.Second) if age > 24*time.Hour { age = age.Round(time.Hour) } if err != nil { age = 0 } ageStr := age.String() if strings.HasSuffix(ageStr, "h0m0s") { ageStr = strings.TrimSuffix(ageStr, "0m0s") } return fmt.Sprintf("%s %s %9s %s", status, hashesStr, ageStr, path) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hasher/hasher.go
backend/hasher/hasher.go
// Package hasher implements a checksum handling overlay backend package hasher import ( "context" "encoding/gob" "errors" "fmt" "io" "path" "strings" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/lib/kv" ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "hasher", Description: "Better checksums for other remotes", NewFs: NewFs, MetadataInfo: &fs.MetadataInfo{ Help: `Any metadata supported by the underlying remote is read and written.`, }, CommandHelp: commandHelp, Options: []fs.Option{{ Name: "remote", Required: true, Help: "Remote to cache checksums for (e.g. myRemote:path).", }, { Name: "hashes", Default: fs.CommaSepList{"md5", "sha1"}, Advanced: false, Help: "Comma separated list of supported checksum types.", }, { Name: "max_age", Advanced: false, Default: fs.DurationOff, Help: "Maximum time to keep checksums in cache (0 = no cache, off = cache forever).", }, { Name: "auto_size", Advanced: true, Default: fs.SizeSuffix(0), Help: "Auto-update checksum for files smaller than this size (disabled by default).", }}, }) } // Options defines the configuration for this backend type Options struct { Remote string `config:"remote"` Hashes fs.CommaSepList `config:"hashes"` AutoSize fs.SizeSuffix `config:"auto_size"` MaxAge fs.Duration `config:"max_age"` } // Fs represents a wrapped fs.Fs type Fs struct { fs.Fs name string root string wrapper fs.Fs features *fs.Features opt *Options db *kv.DB // fingerprinting fpTime bool // true if using time in fingerprints fpHash hash.Type // hash type to use in fingerprints or None // hash types triaged by groups suppHashes hash.Set // all supported checksum types passHashes hash.Set // passed directly to the base without caching slowHashes hash.Set // passed 
to the base and then cached autoHashes hash.Set // calculated in-house and cached keepHashes hash.Set // checksums to keep in cache (slow + auto) } var warnExperimental sync.Once // NewFs constructs an Fs from the remote:path string func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs.Fs, error) { if !kv.Supported() { return nil, errors.New("hasher is not supported on this OS") } warnExperimental.Do(func() { fs.Infof(nil, "Hasher is EXPERIMENTAL!") }) opt := &Options{} err := configstruct.Set(cmap, opt) if err != nil { return nil, err } if strings.HasPrefix(opt.Remote, fsname+":") { return nil, errors.New("can't point remote at itself") } remotePath := fspath.JoinRootPath(opt.Remote, rpath) baseFs, err := cache.Get(ctx, remotePath) if err != nil && err != fs.ErrorIsFile { return nil, fmt.Errorf("failed to derive base remote %q: %w", opt.Remote, err) } f := &Fs{ Fs: baseFs, name: fsname, root: rpath, opt: opt, } // Correct root if definitely pointing to a file if err == fs.ErrorIsFile { f.root = path.Dir(f.root) if f.root == "." || f.root == "/" { f.root = "" } } baseFeatures := baseFs.Features() f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported if baseFeatures.SlowHash { f.slowHashes = f.Fs.Hashes() } else { f.passHashes = f.Fs.Hashes() f.fpHash = f.passHashes.GetOne() } f.suppHashes = f.passHashes f.suppHashes.Add(f.slowHashes.Array()...) 
for _, hashName := range opt.Hashes { var ht hash.Type if err := ht.Set(hashName); err != nil { return nil, fmt.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String()) } if !f.slowHashes.Contains(ht) { f.autoHashes.Add(ht) } f.keepHashes.Add(ht) f.suppHashes.Add(ht) } fs.Debugf(f, "Groups by usage: cached %s, passed %s, auto %s, slow %s, supported %s", f.keepHashes, f.passHashes, f.autoHashes, f.slowHashes, f.suppHashes) var nilSet hash.Set if f.keepHashes == nilSet { return nil, errors.New("configured hash_names have nothing to keep in cache") } if f.opt.MaxAge > 0 { gob.Register(hashRecord{}) db, err := kv.Start(ctx, "hasher", f.Fs) if err != nil { return nil, err } f.db = db } stubFeatures := &fs.Features{ CanHaveEmptyDirectories: true, IsLocal: true, ReadMimeType: true, WriteMimeType: true, SetTier: true, GetTier: true, ReadMetadata: true, WriteMetadata: true, UserMetadata: true, ReadDirMetadata: true, WriteDirMetadata: true, WriteDirSetModTime: true, UserDirMetadata: true, DirModTimeUpdatesOnWrite: true, PartialUploads: true, } f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs) // Enable ListP always f.features.ListP = f.ListP cache.PinUntilFinalized(f.Fs, f) return f, err } // // Filesystem // // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // Hashes returns the supported hash sets. func (f *Fs) Hashes() hash.Set { return f.suppHashes } // String returns a description of the FS // The "hasher::" prefix is a distinctive feature. 
func (f *Fs) String() string { return fmt.Sprintf("hasher::%s:%s", f.name, f.root) } // UnWrap returns the Fs that this Fs is wrapping func (f *Fs) UnWrap() fs.Fs { return f.Fs } // WrapFs returns the Fs that is wrapping this Fs func (f *Fs) WrapFs() fs.Fs { return f.wrapper } // SetWrapper sets the Fs that is wrapping this Fs func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper } // Wrap base entries into hasher entries. func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries, err error) { hashEntries = baseEntries[:0] // work inplace for _, entry := range baseEntries { switch x := entry.(type) { case fs.Object: obj, err := f.wrapObject(x, nil) if err != nil { return nil, err } hashEntries = append(hashEntries, obj) default: hashEntries = append(hashEntries, entry) // trash in - trash out } } return hashEntries, nil } // List the objects and directories in dir into entries. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { return list.WithListP(ctx, dir, f) } // ListP lists the objects and directories of the Fs starting // from dir non recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error { wrappedCallback := func(entries fs.DirEntries) error { entries, err := f.wrapEntries(entries) if err != nil { return err } return callback(entries) } listP := f.Fs.Features().ListP if listP == nil { entries, err := f.Fs.List(ctx, dir) if err != nil { return err } return wrappedCallback(entries) } return listP(ctx, dir, wrappedCallback) } // ListR lists the objects and directories recursively into out. 
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { return f.Fs.Features().ListR(ctx, dir, func(baseEntries fs.DirEntries) error { hashEntries, err := f.wrapEntries(baseEntries) if err != nil { return err } return callback(hashEntries) }) } // Purge a directory func (f *Fs) Purge(ctx context.Context, dir string) error { if do := f.Fs.Features().Purge; do != nil { if err := do(ctx, dir); err != nil { return err } err := f.db.Do(true, &kvPurge{ dir: path.Join(f.Fs.Root(), dir), }) if err != nil { fs.Errorf(f, "Failed to purge some hashes: %v", err) } return nil } return fs.ErrorCantPurge } // PutStream uploads to the remote path with undeterminate size. func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { if do := f.Fs.Features().PutStream; do != nil { _ = f.pruneHash(src.Remote()) oResult, err := do(ctx, in, src, options...) return f.wrapObject(oResult, err) } return nil, errors.New("PutStream not supported") } // PutUnchecked uploads the object, allowing duplicates. func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { if do := f.Fs.Features().PutUnchecked; do != nil { _ = f.pruneHash(src.Remote()) oResult, err := do(ctx, in, src, options...) 
return f.wrapObject(oResult, err) } return nil, errors.New("PutUnchecked not supported") } // pruneHash deletes hash for a path func (f *Fs) pruneHash(remote string) error { return f.db.Do(true, &kvPrune{ key: path.Join(f.Fs.Root(), remote), }) } // CleanUp the trash in the Fs func (f *Fs) CleanUp(ctx context.Context) error { if do := f.Fs.Features().CleanUp; do != nil { return do(ctx) } return errors.New("not supported by underlying remote") } // About gets quota information from the Fs func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { if do := f.Fs.Features().About; do != nil { return do(ctx) } return nil, errors.New("not supported by underlying remote") } // ChangeNotify calls the passed function with a path that has had changes. func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { if do := f.Fs.Features().ChangeNotify; do != nil { do(ctx, notifyFunc, pollIntervalChan) } } // UserInfo returns info about the connected user func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) { if do := f.Fs.Features().UserInfo; do != nil { return do(ctx) } return nil, fs.ErrorNotImplemented } // Disconnect the current user func (f *Fs) Disconnect(ctx context.Context) error { if do := f.Fs.Features().Disconnect; do != nil { return do(ctx) } return fs.ErrorNotImplemented } // MergeDirs merges the contents of all the directories passed // in into the first one and rmdirs the other directories. 
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { if do := f.Fs.Features().MergeDirs; do != nil { return do(ctx, dirs) } return errors.New("MergeDirs not supported") } // DirSetModTime sets the directory modtime for dir func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error { if do := f.Fs.Features().DirSetModTime; do != nil { return do(ctx, dir, modTime) } return fs.ErrorNotImplemented } // MkdirMetadata makes the root directory of the Fs object func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) { if do := f.Fs.Features().MkdirMetadata; do != nil { return do(ctx, dir, metadata) } return nil, fs.ErrorNotImplemented } // DirCacheFlush resets the directory cache - used in testing // as an optional interface func (f *Fs) DirCacheFlush() { if do := f.Fs.Features().DirCacheFlush; do != nil { do() } } // PublicLink generates a public link to the remote path (usually readable by anyone) func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) { if do := f.Fs.Features().PublicLink; do != nil { return do(ctx, remote, expire, unlink) } return "", errors.New("PublicLink not supported") } // Copy src to this remote using server-side copy operations. func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { do := f.Fs.Features().Copy if do == nil { return nil, fs.ErrorCantCopy } o, ok := src.(*Object) if !ok { return nil, fs.ErrorCantCopy } oResult, err := do(ctx, o.Object, remote) return f.wrapObject(oResult, err) } // Move src to this remote using server-side move operations. 
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { do := f.Fs.Features().Move if do == nil { return nil, fs.ErrorCantMove } o, ok := src.(*Object) if !ok { return nil, fs.ErrorCantMove } oResult, err := do(ctx, o.Object, remote) if err != nil { return nil, err } _ = f.db.Do(true, &kvMove{ src: path.Join(f.Fs.Root(), src.Remote()), dst: path.Join(f.Fs.Root(), remote), dir: false, fs: f, }) return f.wrapObject(oResult, nil) } // DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations. func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { do := f.Fs.Features().DirMove if do == nil { return fs.ErrorCantDirMove } srcFs, ok := src.(*Fs) if !ok { return fs.ErrorCantDirMove } err := do(ctx, srcFs.Fs, srcRemote, dstRemote) if err == nil { _ = f.db.Do(true, &kvMove{ src: path.Join(srcFs.Fs.Root(), srcRemote), dst: path.Join(f.Fs.Root(), dstRemote), dir: true, fs: f, }) } return err } // Shutdown the backend, closing any background tasks and any cached connections. func (f *Fs) Shutdown(ctx context.Context) (err error) { if f.db != nil && !f.db.IsStopped() { err = f.db.Stop(false) } if do := f.Fs.Features().Shutdown; do != nil { if err2 := do(ctx); err2 != nil { err = err2 } } return } // NewObject finds the Object at remote. 
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { o, err := f.Fs.NewObject(ctx, remote) return f.wrapObject(o, err) } // // Object // // Object represents a composite file wrapping one or more data chunks type Object struct { fs.Object f *Fs } // Wrap base object into hasher object func (f *Fs) wrapObject(o fs.Object, err error) (obj fs.Object, outErr error) { // log.Trace(o, "err=%v", err)("obj=%#v, outErr=%v", &obj, &outErr) if err != nil { return nil, err } if o == nil { return nil, fs.ErrorObjectNotFound } return &Object{Object: o, f: f}, nil } // Fs returns read only access to the Fs that this object is part of func (o *Object) Fs() fs.Info { return o.f } // UnWrap returns the wrapped Object func (o *Object) UnWrap() fs.Object { return o.Object } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.Object.String() } // ID returns the ID of the Object if possible func (o *Object) ID() string { if doer, ok := o.Object.(fs.IDer); ok { return doer.ID() } return "" } // GetTier returns the Tier of the Object if possible func (o *Object) GetTier() string { if doer, ok := o.Object.(fs.GetTierer); ok { return doer.GetTier() } return "" } // SetTier set the Tier of the Object if possible func (o *Object) SetTier(tier string) error { if doer, ok := o.Object.(fs.SetTierer); ok { return doer.SetTier(tier) } return errors.New("SetTier not supported") } // MimeType of an Object if known, "" otherwise func (o *Object) MimeType(ctx context.Context) string { if doer, ok := o.Object.(fs.MimeTyper); ok { return doer.MimeType(ctx) } return "" } // Metadata returns metadata for an object // // It should return nil if there is no Metadata func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) { do, ok := o.Object.(fs.Metadataer) if !ok { return nil, nil } return do.Metadata(ctx) } // SetMetadata sets metadata for an Object // // It should return fs.ErrorNotImplemented if it can't set 
metadata func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error { do, ok := o.Object.(fs.SetMetadataer) if !ok { return fs.ErrorNotImplemented } return do.SetMetadata(ctx, metadata) } // Check the interfaces are satisfied var ( _ fs.Fs = (*Fs)(nil) _ fs.Purger = (*Fs)(nil) _ fs.Copier = (*Fs)(nil) _ fs.Mover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil) _ fs.Commander = (*Fs)(nil) _ fs.PutUncheckeder = (*Fs)(nil) _ fs.PutStreamer = (*Fs)(nil) _ fs.CleanUpper = (*Fs)(nil) _ fs.UnWrapper = (*Fs)(nil) _ fs.ListRer = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil) _ fs.Wrapper = (*Fs)(nil) _ fs.MergeDirser = (*Fs)(nil) _ fs.DirSetModTimer = (*Fs)(nil) _ fs.MkdirMetadataer = (*Fs)(nil) _ fs.DirCacheFlusher = (*Fs)(nil) _ fs.ChangeNotifier = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil) _ fs.UserInfoer = (*Fs)(nil) _ fs.Disconnecter = (*Fs)(nil) _ fs.Shutdowner = (*Fs)(nil) _ fs.FullObject = (*Object)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hasher/commands.go
backend/hasher/commands.go
package hasher import ( "context" "errors" "fmt" "path" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/kv" ) // Command the backend to run a named command // // The command run is name // args may be used to read arguments from // opts may be used to read optional arguments from // // The result should be capable of being JSON encoded // If it is a string or a []string it will be shown to the user // otherwise it will be JSON encoded and shown to the user like that func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { switch name { case "drop": return nil, f.db.Stop(true) case "dump", "fulldump": return nil, f.dbDump(ctx, name == "fulldump", "") case "import", "stickyimport": sticky := name == "stickyimport" if len(arg) != 2 { return nil, errors.New("please provide checksum type and path to sum file") } return nil, f.dbImport(ctx, arg[0], arg[1], sticky) default: return nil, fs.ErrorCommandNotFound } } var commandHelp = []fs.CommandHelp{{ Name: "drop", Short: "Drop cache.", Long: `Completely drop checksum cache. Usage example: ` + "```console" + ` rclone backend drop hasher: ` + "```", }, { Name: "dump", Short: "Dump the database.", Long: "Dump cache records covered by the current remote.", }, { Name: "fulldump", Short: "Full dump of the database.", Long: "Dump all cache records in the database.", }, { Name: "import", Short: "Import a SUM file.", Long: `Amend hash cache from a SUM file and bind checksums to files by size/time. Usage example: ` + "```console" + ` rclone backend import hasher:subdir md5 /path/to/sum.md5 ` + "```", }, { Name: "stickyimport", Short: "Perform fast import of a SUM file.", Long: `Fill hash cache from a SUM file without verifying file fingerprints. 
Usage example: ` + "```console" + ` rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5 ` + "```", }} func (f *Fs) dbDump(ctx context.Context, full bool, root string) error { if root == "" { remoteFs, err := cache.Get(ctx, f.opt.Remote) if err != nil { return err } root = fspath.JoinRootPath(remoteFs.Root(), f.Root()) } if f.db == nil { if f.opt.MaxAge == 0 { fs.Errorf(f, "db not found. (disabled with max_age = 0)") } else { fs.Errorf(f, "db not found.") } return kv.ErrInactive } op := &kvDump{ full: full, root: root, path: f.db.Path(), fs: f, } err := f.db.Do(false, op) if err == kv.ErrEmpty { fs.Infof(op.path, "empty") err = nil } return err } func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bool) error { var hashType hash.Type if err := hashType.Set(hashName); err != nil { return err } if hashType == hash.None { return errors.New("please provide a valid hash type") } if !f.suppHashes.Contains(hashType) { return errors.New("unsupported hash type") } if !f.keepHashes.Contains(hashType) { fs.Infof(nil, "Need not import hashes of this type") return nil } _, sumPath, err := fspath.SplitFs(sumRemote) if err != nil { return err } sumFs, err := cache.Get(ctx, sumRemote) switch err { case fs.ErrorIsFile: // ok case nil: return fmt.Errorf("not a file: %s", sumRemote) default: return err } sumObj, err := sumFs.NewObject(ctx, path.Base(sumPath)) if err != nil { return fmt.Errorf("cannot open sum file: %w", err) } hashes, err := operations.ParseSumFile(ctx, sumObj) if err != nil { return fmt.Errorf("failed to parse sum file: %w", err) } if sticky { rootPath := f.Fs.Root() for remote, hashVal := range hashes { key := path.Join(rootPath, remote) hashSums := operations.HashSums{hashName: hashVal} if err := f.putRawHashes(ctx, key, anyFingerprint, hashSums); err != nil { fs.Errorf(nil, "%s: failed to import: %v", remote, err) } } fs.Infof(nil, "Summary: %d checksum(s) imported", len(hashes)) return nil } const longImportThreshold = 
100 if len(hashes) > longImportThreshold { fs.Infof(nil, "Importing %d checksums. Please wait...", len(hashes)) } doneCount := 0 err = operations.ListFn(ctx, f, func(obj fs.Object) { remote := obj.Remote() hash := hashes[remote] hashes[remote] = "" // mark as handled o, ok := obj.(*Object) if ok && hash != "" { if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil { fs.Errorf(nil, "%s: failed to import: %v", remote, err) } accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err) doneCount++ } }) if err != nil { fs.Errorf(nil, "Import failed: %v", err) } skipCount := 0 for remote, emptyOrDone := range hashes { if emptyOrDone != "" { fs.Infof(nil, "Skip vanished object: %s", remote) skipCount++ } } fs.Infof(nil, "Summary: %d imported, %d skipped", doneCount, skipCount) return err }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hasher/hasher_test.go
backend/hasher/hasher_test.go
package hasher_test import ( "os" "path/filepath" "testing" "github.com/rclone/rclone/backend/hasher" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/lib/kv" _ "github.com/rclone/rclone/backend/all" // for integration tests ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { if !kv.Supported() { t.Skip("hasher is not supported on this OS") } opt := fstests.Opt{ RemoteName: *fstest.RemoteName, NilObject: (*hasher.Object)(nil), UnimplementableFsMethods: []string{ "OpenWriterAt", "OpenChunkWriter", }, UnimplementableObjectMethods: []string{}, } if *fstest.RemoteName == "" { tempDir := filepath.Join(os.TempDir(), "rclone-hasher-test") opt.ExtraConfig = []fstests.ExtraConfigItem{ {Name: "TestHasher", Key: "type", Value: "hasher"}, {Name: "TestHasher", Key: "remote", Value: tempDir}, } opt.RemoteName = "TestHasher:" opt.QuickTestOK = true } fstests.Run(t, &opt) // test again with MaxAge = 0 if *fstest.RemoteName == "" { opt.ExtraConfig = append(opt.ExtraConfig, fstests.ExtraConfigItem{Name: "TestHasher", Key: "max_age", Value: "0"}) fstests.Run(t, &opt) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hasher/hasher_internal_test.go
backend/hasher/hasher_internal_test.go
package hasher import ( "context" "fmt" "os" "testing" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/lib/kv" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object { mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z") item := fstest.Item{Path: name, ModTime: mtime1} o := fstests.PutTestContents(ctx, t, f, &item, data, true) require.NotNil(t, o) return o } func (f *Fs) testUploadFromCrypt(t *testing.T) { // make a temporary local remote tempRoot, err := fstest.LocalRemote() require.NoError(t, err) defer func() { _ = os.RemoveAll(tempRoot) }() // make a temporary crypt remote ctx := context.Background() pass := obscure.MustObscure("crypt") remote := fmt.Sprintf(`:crypt,remote="%s",password="%s":`, tempRoot, pass) cryptFs, err := fs.NewFs(ctx, remote) require.NoError(t, err) // make a test file on the crypt remote const dirName = "from_crypt_1" const fileName = dirName + "/file_from_crypt_1" const longTime = fs.ModTimeNotSupported src := putFile(ctx, t, cryptFs, fileName, "doggy froggy") // ensure that hash does not exist yet _ = f.pruneHash(fileName) hashType := f.keepHashes.GetOne() hash, err := f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime) assert.Error(t, err) assert.Empty(t, hash) // upload file to hasher in, err := src.Open(ctx) require.NoError(t, err) dst, err := f.Put(ctx, in, src) require.NoError(t, err) assert.NotNil(t, dst) // check that hash was created if f.opt.MaxAge > 0 { hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime) assert.NoError(t, err) assert.NotEmpty(t, hash) } //t.Logf("hash is %q", hash) _ = operations.Purge(ctx, f, dirName) } // InternalTest dispatches all internal tests func (f *Fs) InternalTest(t *testing.T) { if 
!kv.Supported() { t.Skip("hasher is not supported on this OS") } t.Run("UploadFromCrypt", f.testUploadFromCrypt) } var _ fstests.InternalTester = (*Fs)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/hasher/object.go
backend/hasher/object.go
package hasher import ( "context" "errors" "fmt" "io" "path" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" ) // obtain hash for an object func (o *Object) getHash(ctx context.Context, hashType hash.Type) (string, error) { maxAge := time.Duration(o.f.opt.MaxAge) if maxAge <= 0 { return "", nil } fp := o.fingerprint(ctx) if fp == "" { return "", errors.New("fingerprint failed") } return o.f.getRawHash(ctx, hashType, o.Remote(), fp, maxAge) } // obtain hash for a path func (f *Fs) getRawHash(ctx context.Context, hashType hash.Type, remote, fp string, age time.Duration) (string, error) { key := path.Join(f.Fs.Root(), remote) op := &kvGet{ key: key, fp: fp, hash: hashType.String(), age: age, } err := f.db.Do(false, op) return op.val, err } // put new hashes for an object func (o *Object) putHashes(ctx context.Context, rawHashes hashMap) error { if o.f.opt.MaxAge <= 0 { return nil } fp := o.fingerprint(ctx) if fp == "" { return nil } key := path.Join(o.f.Fs.Root(), o.Remote()) hashes := operations.HashSums{} for hashType, hashVal := range rawHashes { hashes[hashType.String()] = hashVal } return o.f.putRawHashes(ctx, key, fp, hashes) } // set hashes for a path without any validation func (f *Fs) putRawHashes(ctx context.Context, key, fp string, hashes operations.HashSums) error { return f.db.Do(true, &kvPut{ key: key, fp: fp, hashes: hashes, age: time.Duration(f.opt.MaxAge), }) } // Hash returns the selected checksum of the file or "" if unavailable. 
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string, err error) { f := o.f if f.passHashes.Contains(hashType) { fs.Debugf(o, "pass %s", hashType) hashVal, err = o.Object.Hash(ctx, hashType) if hashVal != "" { return hashVal, err } if err != nil { fs.Debugf(o, "error passing %s: %v", hashType, err) } fs.Debugf(o, "passed %s is blank -- trying other methods", hashType) } if !f.suppHashes.Contains(hashType) { fs.Debugf(o, "unsupp %s", hashType) return "", hash.ErrUnsupported } if hashVal, err = o.getHash(ctx, hashType); err != nil { fs.Debugf(o, "getHash: %v", err) err = nil hashVal = "" } if hashVal != "" { fs.Debugf(o, "cached %s = %q", hashType, hashVal) return hashVal, nil } if f.slowHashes.Contains(hashType) { fs.Debugf(o, "slow %s", hashType) hashVal, err = o.Object.Hash(ctx, hashType) if err == nil && hashVal != "" && f.keepHashes.Contains(hashType) { if err = o.putHashes(ctx, hashMap{hashType: hashVal}); err != nil { fs.Debugf(o, "putHashes: %v", err) err = nil } } return hashVal, err } if f.autoHashes.Contains(hashType) && o.Size() < int64(f.opt.AutoSize) { _ = o.updateHashes(ctx) if hashVal, err = o.getHash(ctx, hashType); err != nil { fs.Debugf(o, "auto %s = %q (%v)", hashType, hashVal, err) err = nil } } return hashVal, err } // updateHashes performs implicit "rclone hashsum --download" and updates cache. func (o *Object) updateHashes(ctx context.Context) error { r, err := o.Open(ctx) if err != nil { fs.Infof(o, "update failed (open): %v", err) return err } defer func() { _ = r.Close() }() if _, err = io.Copy(io.Discard, r); err != nil { fs.Infof(o, "update failed (copy): %v", err) return err } return nil } // Update the object with the given data, time and size. func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { _ = o.f.pruneHash(src.Remote()) return o.Object.Update(ctx, in, src, options...) } // Remove an object. 
func (o *Object) Remove(ctx context.Context) error { _ = o.f.pruneHash(o.Remote()) return o.Object.Remove(ctx) } // SetModTime sets the modification time of the file. // Also prunes the cache entry when modtime changes so that // touching a file will trigger checksum recalculation even // on backends that don't provide modTime with fingerprint. func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error { if mtime != o.Object.ModTime(ctx) { _ = o.f.pruneHash(o.Remote()) } return o.Object.SetModTime(ctx, mtime) } // Open opens the file for read. // Full reads will also update object hashes. func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadCloser, err error) { size := o.Size() var offset, limit int64 = 0, -1 for _, option := range options { switch opt := option.(type) { case *fs.SeekOption: offset = opt.Offset case *fs.RangeOption: offset, limit = opt.Decode(size) } } if offset < 0 { return nil, errors.New("invalid offset") } if limit < 0 { limit = size - offset } if r, err = o.Object.Open(ctx, options...); err != nil { return nil, err } if offset != 0 || limit < size { // It's a partial read return r, err } return o.f.newHashingReader(ctx, r, func(sums hashMap) { if err := o.putHashes(ctx, sums); err != nil { fs.Infof(o, "auto hashing error: %v", err) } }) } // Put data into the remote path with given modTime and size func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { var ( o fs.Object common hash.Set rehash bool hashes hashMap ) if fsrc := src.Fs(); fsrc != nil { common = fsrc.Hashes().Overlap(f.keepHashes) // Rehash if source does not have all required hashes or hashing is slow rehash = fsrc.Features().SlowHash || common != f.keepHashes } wrapIn := in if rehash { r, err := f.newHashingReader(ctx, in, func(sums hashMap) { hashes = sums }) fs.Debugf(src, "Rehash in-fly due to incomplete or slow source set %v (err: %v)", common, err) if err == nil { wrapIn = r 
} else { rehash = false } } _ = f.pruneHash(src.Remote()) oResult, err := f.Fs.Put(ctx, wrapIn, src, options...) o, err = f.wrapObject(oResult, err) if err != nil { return nil, err } if !rehash { hashes = hashMap{} for _, ht := range common.Array() { if h, e := src.Hash(ctx, ht); e == nil && h != "" { hashes[ht] = h } } } if len(hashes) > 0 { err := o.(*Object).putHashes(ctx, hashes) fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err) } return o, err } type hashingReader struct { rd io.Reader hasher *hash.MultiHasher fun func(hashMap) } func (f *Fs) newHashingReader(ctx context.Context, rd io.Reader, fun func(hashMap)) (*hashingReader, error) { hasher, err := hash.NewMultiHasherTypes(f.keepHashes) if err != nil { return nil, err } hr := &hashingReader{ rd: rd, hasher: hasher, fun: fun, } return hr, nil } func (r *hashingReader) Read(p []byte) (n int, err error) { n, err = r.rd.Read(p) if err != nil && err != io.EOF { r.hasher = nil } if r.hasher != nil { if _, errHash := r.hasher.Write(p[:n]); errHash != nil { r.hasher = nil err = errHash } } if err == io.EOF && r.hasher != nil { r.fun(r.hasher.Sums()) r.hasher = nil } return } func (r *hashingReader) Close() error { if rc, ok := r.rd.(io.ReadCloser); ok { return rc.Close() } return nil } // Return object fingerprint or empty string in case of errors // // Note that we can't use the generic `fs.Fingerprint` here because // this fingerprint is used to pick _derived hashes_ that are slow // to calculate or completely unsupported by the base remote. // // The hasher fingerprint must be based on `fsHash`, the first _fast_ // hash supported _by the underlying remote_ (if there is one), // while `fs.Fingerprint` would select a hash _produced by hasher_ // creating unresolvable fingerprint loop. 
func (o *Object) fingerprint(ctx context.Context) string { size := o.Object.Size() timeStr := "-" if o.f.fpTime { timeStr = o.Object.ModTime(ctx).UTC().Format(timeFormat) if timeStr == "" { return "" } } hashStr := "-" if o.f.fpHash != hash.None { var err error hashStr, err = o.Object.Hash(ctx, o.f.fpHash) if hashStr == "" || err != nil { return "" } } return fmt.Sprintf("%d,%s,%s", size, timeStr, hashStr) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/memory/memory_test.go
backend/memory/memory_test.go
// Test memory filesystem interface package memory import ( "testing" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: ":memory:", NilObject: (*Object)(nil), QuickTestOK: true, }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/memory/memory.go
backend/memory/memory.go
// Package memory provides an interface to an in memory object storage system package memory import ( "bytes" "context" "crypto/md5" "encoding/hex" "errors" "fmt" "io" "path" "strings" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/lib/bucket" ) var ( hashType = hash.MD5 // the object storage is persistent buckets = newBucketsInfo() errWriteOnly = errors.New("can't read when using --memory-discard") ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "memory", Description: "In memory object storage system.", NewFs: NewFs, Options: []fs.Option{{ Name: "discard", Default: false, Advanced: true, Help: `If set all writes will be discarded and reads will return an error If set then when files are uploaded the contents not be saved. The files will appear to have been uploaded but will give an error on read. Files will have their MD5 sum calculated on upload which takes very little CPU time and allows the transfers to be checked. This can be useful for testing performance. 
Probably most easily used by using the connection string syntax: :memory,discard:bucket `, }}, }) } // Options defines the configuration for this backend type Options struct { Discard bool `config:"discard"` } // Fs represents a remote memory server type Fs struct { name string // name of this remote root string // the path we are working on if any opt Options // parsed config options rootBucket string // bucket part of root (if any) rootDirectory string // directory part of root (if any) features *fs.Features // optional features } // bucketsInfo holds info about all the buckets type bucketsInfo struct { mu sync.RWMutex buckets map[string]*bucketInfo } func newBucketsInfo() *bucketsInfo { return &bucketsInfo{ buckets: make(map[string]*bucketInfo, 16), } } // getBucket gets a names bucket or nil func (bi *bucketsInfo) getBucket(name string) (b *bucketInfo) { bi.mu.RLock() b = bi.buckets[name] bi.mu.RUnlock() return b } // makeBucket returns the bucket or makes it func (bi *bucketsInfo) makeBucket(name string) (b *bucketInfo) { bi.mu.Lock() defer bi.mu.Unlock() b = bi.buckets[name] if b != nil { return b } b = newBucketInfo() bi.buckets[name] = b return b } // deleteBucket deleted the bucket or returns an error func (bi *bucketsInfo) deleteBucket(name string) error { bi.mu.Lock() defer bi.mu.Unlock() b := bi.buckets[name] if b == nil { return fs.ErrorDirNotFound } if !b.isEmpty() { return fs.ErrorDirectoryNotEmpty } delete(bi.buckets, name) return nil } // getObjectData gets an object from (bucketName, bucketPath) or nil func (bi *bucketsInfo) getObjectData(bucketName, bucketPath string) (od *objectData) { b := bi.getBucket(bucketName) if b == nil { return nil } return b.getObjectData(bucketPath) } // updateObjectData updates an object from (bucketName, bucketPath) func (bi *bucketsInfo) updateObjectData(bucketName, bucketPath string, od *objectData) { b := bi.makeBucket(bucketName) b.mu.Lock() b.objects[bucketPath] = od b.mu.Unlock() } // removeObjectData removes 
an object from (bucketName, bucketPath) returning true if removed func (bi *bucketsInfo) removeObjectData(bucketName, bucketPath string) (removed bool) { b := bi.getBucket(bucketName) if b != nil { b.mu.Lock() od := b.objects[bucketPath] if od != nil { delete(b.objects, bucketPath) removed = true } b.mu.Unlock() } return removed } // bucketInfo holds info about a single bucket type bucketInfo struct { mu sync.RWMutex objects map[string]*objectData } func newBucketInfo() *bucketInfo { return &bucketInfo{ objects: make(map[string]*objectData, 16), } } // getBucket gets a names bucket or nil func (bi *bucketInfo) getObjectData(name string) (od *objectData) { bi.mu.RLock() od = bi.objects[name] bi.mu.RUnlock() return od } // getBucket gets a names bucket or nil func (bi *bucketInfo) isEmpty() (empty bool) { bi.mu.RLock() empty = len(bi.objects) == 0 bi.mu.RUnlock() return empty } // the object data and metadata type objectData struct { modTime time.Time hash string mimeType string data []byte size int64 } // Object describes a memory object type Object struct { fs *Fs // what this object is part of remote string // The remote path od *objectData // the object data } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("Memory root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // parsePath parses a remote 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // split returns bucket and bucketPath from the rootRelativePath // relative to f.root func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) { return bucket.Split(path.Join(f.root, rootRelativePath)) } 
// split returns bucket and bucketPath from the object func (o *Object) split() (bucket, bucketPath string) { return o.fs.split(o.remote) } // setRoot changes the root of the Fs func (f *Fs) setRoot(root string) { f.root = parsePath(root) f.rootBucket, f.rootDirectory = bucket.Split(f.root) } // NewFs constructs an Fs from the path, bucket:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } root = strings.Trim(root, "/") f := &Fs{ name: name, root: root, opt: *opt, } f.setRoot(root) f.features = (&fs.Features{ ReadMimeType: true, WriteMimeType: true, BucketBased: true, BucketBasedRootOK: true, }).Fill(ctx, f) if f.rootBucket != "" && f.rootDirectory != "" { od := buckets.getObjectData(f.rootBucket, f.rootDirectory) if od != nil { newRoot := path.Dir(f.root) if newRoot == "." { newRoot = "" } f.setRoot(newRoot) // return an error with an fs which points to the parent err = fs.ErrorIsFile } } return f, err } // newObject makes an object from a remote and an objectData func (f *Fs) newObject(remote string, od *objectData) *Object { return &Object{fs: f, remote: remote, od: od} } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { bucket, bucketPath := f.split(remote) od := buckets.getObjectData(bucket, bucketPath) if od == nil { return nil, fs.ErrorObjectNotFound } return f.newObject(remote, od), nil } // listFn is called from list to handle an object. 
type listFn func(remote string, entry fs.DirEntry, isDirectory bool) error // list the buckets to fn func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) { if prefix != "" { prefix += "/" } if directory != "" { directory += "/" } b := buckets.getBucket(bucket) if b == nil { return fs.ErrorDirNotFound } b.mu.RLock() defer b.mu.RUnlock() dirs := make(map[string]struct{}) for absPath, od := range b.objects { if strings.HasPrefix(absPath, directory) { remote := absPath[len(prefix):] if !recurse { localPath := absPath[len(directory):] slash := strings.IndexRune(localPath, '/') if slash >= 0 { // send a directory if have a slash dir := strings.TrimPrefix(directory, f.rootDirectory+"/") + localPath[:slash] if addBucket { dir = path.Join(bucket, dir) } _, found := dirs[dir] if !found { err = fn(dir, fs.NewDir(dir, time.Time{}), true) if err != nil { return err } dirs[dir] = struct{}{} } continue // don't send this file if not recursing } } // send an object if addBucket { remote = path.Join(bucket, remote) } err = fn(remote, f.newObject(remote, od), false) if err != nil { return err } } } return nil } // listDir lists the bucket to the entries func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) { // List the objects and directories err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, entry fs.DirEntry, isDirectory bool) error { return callback(entry) }) return err } // listBuckets lists the buckets to entries func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) { buckets.mu.RLock() defer buckets.mu.RUnlock() for name := range buckets.buckets { entries = append(entries, fs.NewDir(name, time.Time{})) } return entries, nil } // List the objects and directories in dir into entries. 
The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { return list.WithListP(ctx, dir, f) } // ListP lists the objects and directories of the Fs starting // from dir non recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error { list := list.NewHelper(callback) bucket, directory := f.split(dir) if bucket == "" { if directory != "" { return fs.ErrorListBucketRequired } entries, err := f.listBuckets(ctx) if err != nil { return err } for _, entry := range entries { err = list.Add(entry) if err != nil { return err } } } else { err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add) if err != nil { return err } } return list.Flush() } // ListR lists the objects and directories of the Fs starting // from dir recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. 
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { bucket, directory := f.split(dir) list := list.NewHelper(callback) entries := fs.DirEntries{} listR := func(bucket, directory, prefix string, addBucket bool) error { err = f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, entry fs.DirEntry, isDirectory bool) error { entries = append(entries, entry) // can't list.Add here -- could deadlock return nil }) if err != nil { return err } for _, entry := range entries { err = list.Add(entry) if err != nil { return err } } return nil } if bucket == "" { entries, err := f.listBuckets(ctx) if err != nil { return err } for _, entry := range entries { err = list.Add(entry) if err != nil { return err } bucket := entry.Remote() err = listR(bucket, "", f.rootDirectory, true) if err != nil { return err } } } else { err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "") if err != nil { return err } } return list.Flush() } // Put the object into the bucket // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // Temporary Object under construction fs := &Object{ fs: f, remote: src.Remote(), od: &objectData{ modTime: src.ModTime(ctx), }, } return fs, fs.Update(ctx, in, src, options...) } // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) 
} // Mkdir creates the bucket if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { bucket, _ := f.split(dir) buckets.makeBucket(bucket) return nil } // Rmdir deletes the bucket if the fs is at the root // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { bucket, directory := f.split(dir) if bucket == "" || directory != "" { return nil } return buckets.deleteBucket(bucket) } // Precision of the remote func (f *Fs) Precision() time.Duration { return time.Nanosecond } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { dstBucket, dstPath := f.split(remote) _ = buckets.makeBucket(dstBucket) srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } srcBucket, srcPath := srcObj.split() od := buckets.getObjectData(srcBucket, srcPath) if od == nil { return nil, fs.ErrorObjectNotFound } odCopy := *od buckets.updateObjectData(dstBucket, dstPath, &odCopy) return f.NewObject(ctx, remote) } // Hashes returns the supported hash sets. 
func (f *Fs) Hashes() hash.Set { return hash.Set(hashType) } // ------------------------------------------------------------ // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.Remote() } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Hash returns the hash of an object returning a lowercase hex string func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hashType { return "", hash.ErrUnsupported } if o.od.hash == "" && !o.fs.opt.Discard { sum := md5.Sum(o.od.data) o.od.hash = hex.EncodeToString(sum[:]) } return o.od.hash, nil } // Size returns the size of an object in bytes func (o *Object) Size() int64 { return o.od.size } // ModTime returns the modification time of the object // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers // // SHA-1 will also be updated once the request has completed. 
func (o *Object) ModTime(ctx context.Context) (result time.Time) { return o.od.modTime } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { o.od.modTime = modTime return nil } // Storable returns if this object is storable func (o *Object) Storable() bool { return true } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { if o.fs.opt.Discard { return nil, errWriteOnly } var offset, limit int64 = 0, -1 for _, option := range options { switch x := option.(type) { case *fs.RangeOption: offset, limit = x.Decode(int64(len(o.od.data))) case *fs.SeekOption: offset = x.Offset default: if option.Mandatory() { fs.Logf(o, "Unsupported mandatory option: %v", option) } } } if offset > int64(len(o.od.data)) { offset = int64(len(o.od.data)) } data := o.od.data[offset:] if limit >= 0 { if limit > int64(len(data)) { limit = int64(len(data)) } data = data[:limit] } return io.NopCloser(bytes.NewBuffer(data)), nil } // Update the object with the contents of the io.Reader, modTime and size // // The new object may have been created if an error is returned func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { bucket, bucketPath := o.split() var data []byte var size int64 var hash string if o.fs.opt.Discard { h := md5.New() size, err = io.Copy(h, in) hash = hex.EncodeToString(h.Sum(nil)) } else { data, err = io.ReadAll(in) size = int64(len(data)) } if err != nil { return fmt.Errorf("failed to update memory object: %w", err) } o.od = &objectData{ data: data, size: size, hash: hash, modTime: src.ModTime(ctx), mimeType: fs.MimeType(ctx, src), } buckets.updateObjectData(bucket, bucketPath, o.od) return nil } // Remove an object func (o *Object) Remove(ctx context.Context) error { bucket, bucketPath := o.split() removed := buckets.removeObjectData(bucket, bucketPath) 
if !removed { return fs.ErrorObjectNotFound } return nil } // MimeType of an Object if known, "" otherwise func (o *Object) MimeType(ctx context.Context) string { return o.od.mimeType } // Check the interfaces are satisfied var ( _ fs.Fs = &Fs{} _ fs.Copier = &Fs{} _ fs.PutStreamer = &Fs{} _ fs.ListRer = &Fs{} _ fs.ListPer = &Fs{} _ fs.Object = &Object{} _ fs.MimeTyper = &Object{} )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/memory/memory_internal_test.go
backend/memory/memory_internal_test.go
package memory import ( "context" "fmt" "testing" _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" "github.com/stretchr/testify/require" ) var t1 = fstest.Time("2001-02-03T04:05:06.499999999Z") // InternalTest dispatches all internal tests func (f *Fs) InternalTest(t *testing.T) { t.Run("PurgeListDeadlock", func(t *testing.T) { testPurgeListDeadlock(t) }) } // test that Purge fallback does not result in deadlock from concurrently listing and removing func testPurgeListDeadlock(t *testing.T) { ctx := context.Background() r := fstest.NewRunIndividual(t) r.Mkdir(ctx, r.Fremote) r.Fremote.Features().Disable("Purge") // force fallback-purge // make a lot of files to prevent it from finishing too quickly for i := range 100 { dst := "file" + fmt.Sprint(i) + ".txt" r.WriteObject(ctx, dst, "hello", t1) } require.NoError(t, operations.Purge(ctx, r.Fremote, "")) } var _ fstests.InternalTester = (*Fs)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cloudinary/cloudinary_test.go
backend/cloudinary/cloudinary_test.go
// Test Cloudinary filesystem interface package cloudinary_test import ( "testing" "github.com/rclone/rclone/backend/cloudinary" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { name := "TestCloudinary" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":", NilObject: (*cloudinary.Object)(nil), SkipInvalidUTF8: true, ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "eventually_consistent_delay", Value: "7"}, }, }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cloudinary/cloudinary.go
backend/cloudinary/cloudinary.go
// Package cloudinary provides an interface to the Cloudinary DAM package cloudinary import ( "context" "encoding/hex" "errors" "fmt" "io" "net/http" "net/url" "path" "slices" "strconv" "strings" "time" "github.com/cloudinary/cloudinary-go/v2" SDKApi "github.com/cloudinary/cloudinary-go/v2/api" "github.com/cloudinary/cloudinary-go/v2/api/admin" "github.com/cloudinary/cloudinary-go/v2/api/admin/search" "github.com/cloudinary/cloudinary-go/v2/api/uploader" "github.com/rclone/rclone/backend/cloudinary/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" "github.com/zeebo/blake3" ) // Cloudinary shouldn't have a trailing dot if there is no path func cldPathDir(somePath string) string { if somePath == "" || somePath == "." { return somePath } dir := path.Dir(somePath) if dir == "." 
{ return "" } return dir } // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "cloudinary", Description: "Cloudinary", NewFs: NewFs, Options: []fs.Option{ { Name: "cloud_name", Help: "Cloudinary Environment Name", Required: true, Sensitive: true, }, { Name: "api_key", Help: "Cloudinary API Key", Required: true, Sensitive: true, }, { Name: "api_secret", Help: "Cloudinary API Secret", Required: true, Sensitive: true, }, { Name: "upload_prefix", Help: "Specify the API endpoint for environments out of the US", }, { Name: "upload_preset", Help: "Upload Preset to select asset manipulation on upload", }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot encoder.EncodeSlash | encoder.EncodeLtGt | encoder.EncodeDoubleQuote | encoder.EncodeQuestion | encoder.EncodeAsterisk | encoder.EncodePipe | encoder.EncodeHash | encoder.EncodePercent | encoder.EncodeBackSlash | encoder.EncodeDel | encoder.EncodeCtl | encoder.EncodeRightSpace | encoder.EncodeInvalidUtf8 | encoder.EncodeDot), }, { Name: "eventually_consistent_delay", Default: fs.Duration(0), Advanced: true, Help: "Wait N seconds for eventual consistency of the databases that support the backend operation", }, { Name: "adjust_media_files_extensions", Default: true, Advanced: true, Help: "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems", }, { Name: "media_extensions", Default: []string{ "3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw", "cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif", "glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe", "jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts", "mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga", "tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"}, 
Advanced: true, Help: "Cloudinary supported media extensions", }, }, }) } // Options defines the configuration for this backend type Options struct { CloudName string `config:"cloud_name"` APIKey string `config:"api_key"` APISecret string `config:"api_secret"` UploadPrefix string `config:"upload_prefix"` UploadPreset string `config:"upload_preset"` Enc encoder.MultiEncoder `config:"encoding"` EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"` MediaExtensions []string `config:"media_extensions"` AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"` } // Fs represents a remote cloudinary server type Fs struct { name string root string opt Options features *fs.Features pacer *fs.Pacer srv *rest.Client // For downloading assets via the Cloudinary CDN cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK lastCRUD time.Time } // Object describes a cloudinary object type Object struct { fs *Fs remote string size int64 modTime time.Time url string md5sum string publicID string resourceType string deliveryType string } // NewFs constructs an Fs from the path, bucket:path func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) { opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } // Initialize the Cloudinary client cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret) if err != nil { return nil, fmt.Errorf("failed to create Cloudinary client: %w", err) } cld.Admin.Client = *fshttp.NewClient(ctx) cld.Upload.Client = *fshttp.NewClient(ctx) if opt.UploadPrefix != "" { cld.Config.API.UploadPrefix = opt.UploadPrefix } client := fshttp.NewClient(ctx) f := &Fs{ name: name, root: root, opt: *opt, cld: cld, pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))), srv: rest.NewClient(client), } f.features = (&fs.Features{ CanHaveEmptyDirectories: true, }).Fill(ctx, f) 
if root != "" { // Check to see if the root actually an existing file remote := path.Base(root) f.root = cldPathDir(root) _, err := f.NewObject(ctx, remote) if err != nil { if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) { // File doesn't exist so return the previous root f.root = root return f, nil } return nil, err } // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // ------------------------------------------------------------ // FromStandardPath implementation of the api.CloudinaryEncoder func (f *Fs) FromStandardPath(s string) string { return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06") } // FromStandardName implementation of the api.CloudinaryEncoder func (f *Fs) FromStandardName(s string) string { if f.opt.AdjustMediaFilesExtensions { parsedURL, err := url.Parse(s) ext := "" if err != nil { fs.Logf(nil, "Error parsing URL: %v", err) } else { ext = path.Ext(parsedURL.Path) if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) { s = strings.TrimSuffix(parsedURL.Path, ext) } } } return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06") } // ToStandardPath implementation of the api.CloudinaryEncoder func (f *Fs) ToStandardPath(s string) string { return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&") } // ToStandardName implementation of the api.CloudinaryEncoder func (f *Fs) ToStandardName(s string, assetURL string) string { ext := "" if f.opt.AdjustMediaFilesExtensions { parsedURL, err := url.Parse(assetURL) if err != nil { fs.Logf(nil, "Error parsing URL: %v", err) } else { ext = path.Ext(parsedURL.Path) if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) { ext = "" } } } return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext } // FromStandardFullPath encodes a full path to Cloudinary standard func (f *Fs) FromStandardFullPath(dir string) string { 
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir)) } // ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK func (f *Fs) ToAssetFolderAPI(dir string) string { return strings.ReplaceAll(dir, "%", "%25") } // ToDisplayNameElastic encodes a special case of elasticsearch func (f *Fs) ToDisplayNameElastic(dir string) string { return strings.ReplaceAll(dir, "!", "\\!") } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // WaitEventuallyConsistent waits till the FS is eventually consistent func (f *Fs) WaitEventuallyConsistent() { if f.opt.EventuallyConsistentDelay == fs.Duration(0) { return } delay := time.Duration(f.opt.EventuallyConsistentDelay) timeSinceLastCRUD := time.Since(f.lastCRUD) if timeSinceLastCRUD < delay { time.Sleep(delay - timeSinceLastCRUD) } } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("Cloudinary root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // List the objects and directories in dir into entries func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) { remotePrefix := f.FromStandardFullPath(dir) if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") { remotePrefix += "/" } var entries fs.DirEntries dirs := make(map[string]struct{}) nextCursor := "" f.WaitEventuallyConsistent() for { // user the folders api to list folders. 
folderParams := admin.SubFoldersParams{ Folder: f.ToAssetFolderAPI(remotePrefix), MaxResults: 500, } if nextCursor != "" { folderParams.NextCursor = nextCursor } results, err := f.cld.Admin.SubFolders(ctx, folderParams) if err != nil { return nil, fmt.Errorf("failed to list sub-folders: %w", err) } if results.Error.Message != "" { if strings.HasPrefix(results.Error.Message, "Can't find folder with path") { return nil, fs.ErrorDirNotFound } return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message) } for _, folder := range results.Folders { relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix)) parts := strings.Split(relativePath, "/") // It's a directory dirName := parts[len(parts)-1] if _, found := dirs[dirName]; !found { d := fs.NewDir(path.Join(dir, dirName), time.Time{}) entries = append(entries, d) dirs[dirName] = struct{}{} } } // Break if there are no more results if results.NextCursor == "" { break } nextCursor = results.NextCursor } for { // Use the assets.AssetsByAssetFolder API to list assets assetsParams := admin.AssetsByAssetFolderParams{ AssetFolder: remotePrefix, MaxResults: 500, } if nextCursor != "" { assetsParams.NextCursor = nextCursor } results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams) if err != nil { return nil, fmt.Errorf("failed to list assets: %w", err) } for _, asset := range results.Assets { remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL)) o := &Object{ fs: f, remote: remote, size: int64(asset.Bytes), modTime: asset.CreatedAt, url: asset.SecureURL, publicID: asset.PublicID, resourceType: asset.AssetType, deliveryType: asset.Type, } entries = append(entries, o) } // Break if there are no more results if results.NextCursor == "" { break } nextCursor = results.NextCursor } return entries, nil } // NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { searchParams := search.Query{ Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"", f.FromStandardFullPath(cldPathDir(remote)), f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))), SortBy: []search.SortByField{{"uploaded_at": "desc"}}, MaxResults: 2, } var results *admin.SearchResult f.WaitEventuallyConsistent() err := f.pacer.Call(func() (bool, error) { var err1 error results, err1 = f.cld.Admin.Search(ctx, searchParams) if err1 == nil && results.TotalCount != len(results.Assets) { err1 = errors.New("partial response so waiting for eventual consistency") } return shouldRetry(ctx, nil, err1) }) if err != nil { return nil, fs.ErrorObjectNotFound } if results.TotalCount == 0 || len(results.Assets) == 0 { return nil, fs.ErrorObjectNotFound } asset := results.Assets[0] o := &Object{ fs: f, remote: remote, size: int64(asset.Bytes), modTime: asset.UploadedAt, url: asset.SecureURL, md5sum: asset.Etag, publicID: asset.PublicID, resourceType: asset.ResourceType, deliveryType: asset.Type, } return o, nil } func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string { payload := []byte(path.Join(assetFolder, displayName)) hash := blake3.Sum256(payload) return hex.EncodeToString(hash[:]) } // Put uploads content to Cloudinary func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { if src.Size() == 0 { return nil, fs.ErrorCantUploadEmptyFiles } params := uploader.UploadParams{ UploadPreset: f.opt.UploadPreset, } updateObject := false var modTime time.Time for _, option := range options { if updateOptions, ok := option.(*api.UpdateOptions); ok { if updateOptions.PublicID != "" { updateObject = true params.Overwrite = SDKApi.Bool(true) params.Invalidate = SDKApi.Bool(true) params.PublicID = updateOptions.PublicID params.ResourceType = 
updateOptions.ResourceType params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType) params.AssetFolder = updateOptions.AssetFolder params.DisplayName = updateOptions.DisplayName modTime = src.ModTime(ctx) } } } if !updateObject { params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote())) params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote())) // We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified). // We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case. // Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support. params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx)) } uploadResult, err := f.cld.Upload.Upload(ctx, in, params) f.lastCRUD = time.Now() if err != nil { return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err) } if !updateObject { modTime = uploadResult.CreatedAt } if uploadResult.Error.Message != "" { return nil, errors.New(uploadResult.Error.Message) } o := &Object{ fs: f, remote: src.Remote(), size: int64(uploadResult.Bytes), modTime: modTime, url: uploadResult.SecureURL, md5sum: uploadResult.Etag, publicID: uploadResult.PublicID, resourceType: uploadResult.ResourceType, deliveryType: uploadResult.Type, } return o, nil } // Precision of the remote func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } // Hashes returns the supported hash sets func (f *Fs) Hashes() hash.Set { return hash.Set(hash.MD5) } // Mkdir creates empty folders func (f *Fs) Mkdir(ctx context.Context, dir string) error { params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))} res, err := f.cld.Admin.CreateFolder(ctx, params) f.lastCRUD = time.Now() if err != nil { return err } if res.Error.Message != "" { return errors.New(res.Error.Message) } return nil } 
// Rmdir deletes empty folders func (f *Fs) Rmdir(ctx context.Context, dir string) error { // Additional test because Cloudinary will delete folders without // assets, regardless of empty sub-folders folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir)) folderParams := admin.SubFoldersParams{ Folder: folder, MaxResults: 1, } results, err := f.cld.Admin.SubFolders(ctx, folderParams) if err != nil { return err } if results.TotalCount > 0 { return fs.ErrorDirectoryNotEmpty } params := admin.DeleteFolderParams{Folder: folder} res, err := f.cld.Admin.DeleteFolder(ctx, params) f.lastCRUD = time.Now() if err != nil { return err } if res.Error.Message != "" { if strings.HasPrefix(res.Error.Message, "Can't find folder with path") { return fs.ErrorDirNotFound } return errors.New(res.Error.Message) } return nil } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 420, // Too Many Requests (legacy) 429, // Too Many Requests 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. 
It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } if err != nil { tryAgain := "Try again on " if idx := strings.Index(err.Error(), tryAgain); idx != -1 { layout := "2006-01-02 15:04:05 UTC" dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)] timestamp, err2 := time.Parse(layout, dateStr) if err2 == nil { return true, fserrors.NewErrorRetryAfter(time.Until(timestamp)) } } fs.Debugf(nil, "Retrying API error %v", err) return true, err } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // ------------------------------------------------------------ // Hash returns the MD5 of an object func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) { if ty != hash.MD5 { return "", hash.ErrUnsupported } return o.md5sum, nil } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // ModTime returns the modification time of the object func (o *Object) ModTime(ctx context.Context) time.Time { return o.modTime } // Size of object in bytes func (o *Object) Size() int64 { return o.size } // Storable returns if this object is storable func (o *Object) Storable() bool { return true } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { return fs.ErrorCantSetModTime } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { var resp *http.Response opts := rest.Opts{ Method: "GET", RootURL: o.url, Options: options, } var offset int64 var count int64 var key string var value string fs.FixRangeOption(options, 
o.size) for _, option := range options { switch x := option.(type) { case *fs.RangeOption: offset, count = x.Decode(o.size) if count < 0 { count = o.size - offset } key, value = option.Header() case *fs.SeekOption: offset = x.Offset count = o.size - offset key, value = option.Header() default: if option.Mandatory() { fs.Logf(o, "Unsupported mandatory option: %v", option) } } } if key != "" && value != "" { opts.ExtraHeaders = make(map[string]string) opts.ExtraHeaders[key] = value } // Make sure that the asset is fully available err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) if err == nil { cl, clErr := strconv.Atoi(resp.Header.Get("content-length")) if clErr == nil && count == int64(cl) { return false, nil } } return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err) } return resp.Body, err } // Update the object with the contents of the io.Reader func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { options = append(options, &api.UpdateOptions{ PublicID: o.publicID, ResourceType: o.resourceType, DeliveryType: o.deliveryType, DisplayName: api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())), AssetFolder: o.fs.FromStandardFullPath(cldPathDir(o.Remote())), }) updatedObj, err := o.fs.Put(ctx, in, src, options...) 
if err != nil { return err } if uo, ok := updatedObj.(*Object); ok { o.size = uo.size o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time o.url = uo.url o.md5sum = uo.md5sum o.publicID = uo.publicID o.resourceType = uo.resourceType o.deliveryType = uo.deliveryType } return nil } // Remove an object func (o *Object) Remove(ctx context.Context) error { params := uploader.DestroyParams{ PublicID: o.publicID, ResourceType: o.resourceType, Type: o.deliveryType, } res, dErr := o.fs.cld.Upload.Destroy(ctx, params) o.fs.lastCRUD = time.Now() if dErr != nil { return dErr } if res.Error.Message != "" { return errors.New(res.Error.Message) } if res.Result != "ok" { return errors.New(res.Result) } return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/cloudinary/api/types.go
backend/cloudinary/api/types.go
// Package api has type definitions for cloudinary package api import ( "fmt" ) // CloudinaryEncoder extends the built-in encoder type CloudinaryEncoder interface { // FromStandardPath takes a / separated path in Standard encoding // and converts it to a / separated path in this encoding. FromStandardPath(string) string // FromStandardName takes name in Standard encoding and converts // it in this encoding. FromStandardName(string) string // ToStandardPath takes a / separated path in this encoding // and converts it to a / separated path in Standard encoding. ToStandardPath(string) string // ToStandardName takes name in this encoding and converts // it in Standard encoding. ToStandardName(string, string) string // Encoded root of the remote (as passed into NewFs) FromStandardFullPath(string) string } // UpdateOptions was created to pass options from Update to Put type UpdateOptions struct { PublicID string ResourceType string DeliveryType string AssetFolder string DisplayName string } // Header formats the option as a string func (o *UpdateOptions) Header() (string, string) { return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID) } // Mandatory returns whether the option must be parsed or can be ignored func (o *UpdateOptions) Mandatory() bool { return false } // String formats the option into human-readable form func (o *UpdateOptions) String() string { return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/zoho/zoho_test.go
backend/zoho/zoho_test.go
// Test Zoho filesystem interface package zoho_test import ( "testing" "github.com/rclone/rclone/backend/zoho" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestZoho:", SkipInvalidUTF8: true, NilObject: (*zoho.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/zoho/zoho.go
backend/zoho/zoho.go
// Package zoho provides an interface to the Zoho Workdrive // storage system. package zoho import ( "context" "errors" "fmt" "io" "net/http" "net/url" "path" "strconv" "strings" "time" "github.com/google/uuid" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/backend/zoho/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/rest" "golang.org/x/oauth2" ) const ( rcloneClientID = "1000.46MXF275FM2XV7QCHX5A7K3LGME66B" rcloneEncryptedClientSecret = "U-2gxclZQBcOG9NPhjiXAhj-f0uQ137D0zar8YyNHXHkQZlTeSpIOQfmCb4oSpvosJp_SJLXmLLeUA" minSleep = 10 * time.Millisecond maxSleep = 60 * time.Second decayConstant = 2 // bigger for slower decay, exponential configRootID = "root_folder_id" defaultUploadCutoff = 10 * 1024 * 1024 // 10 MiB ) // Globals var ( // Description of how to auth for this app oauthConfig = &oauthutil.Config{ Scopes: []string{ "aaaserver.profile.read", "WorkDrive.team.READ", "WorkDrive.workspace.READ", "WorkDrive.files.ALL", "ZohoFiles.files.ALL", }, AuthURL: "https://accounts.zoho.eu/oauth/v2/auth", TokenURL: "https://accounts.zoho.eu/oauth/v2/token", AuthStyle: oauth2.AuthStyleInParams, ClientID: rcloneClientID, ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), RedirectURL: oauthutil.RedirectLocalhostURL, } rootURL = "https://workdrive.zoho.eu/api/v1" downloadURL = "https://download.zoho.eu/v1/workdrive" uploadURL = "http://upload.zoho.eu/workdrive-api/v1/" accountsURL = "https://accounts.zoho.eu" ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "zoho", Description: "Zoho", NewFs: NewFs, Config: 
func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { // Need to setup region before configuring oauth err := setupRegion(m) if err != nil { return nil, err } getSrvs := func() (authSrv, apiSrv *rest.Client, err error) { oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig) if err != nil { return nil, nil, fmt.Errorf("failed to load OAuth client: %w", err) } authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL) apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL) return authSrv, apiSrv, nil } switch config.State { case "": return oauthutil.ConfigOut("type", &oauthutil.Options{ OAuth2Config: oauthConfig, // No refresh token unless ApprovalForce is set OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce}, }) case "type": // We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants // it's own custom type token, err := oauthutil.GetToken(name, m) if err != nil { return nil, fmt.Errorf("failed to read token: %w", err) } if token.TokenType != "Zoho-oauthtoken" { token.TokenType = "Zoho-oauthtoken" err = oauthutil.PutToken(name, m, token, false) if err != nil { return nil, fmt.Errorf("failed to configure token: %w", err) } } _, apiSrv, err := getSrvs() if err != nil { return nil, err } userInfo, err := getUserInfo(ctx, apiSrv) if err != nil { return nil, err } // If personal Edition only one private Space is available. Directly configure that. 
if userInfo.Data.Attributes.Edition == "PERSONAL" { return fs.ConfigResult("private_space", userInfo.Data.ID) } // Otherwise go to team selection return fs.ConfigResult("team", userInfo.Data.ID) case "private_space": _, apiSrv, err := getSrvs() if err != nil { return nil, err } workspaces, err := getPrivateSpaces(ctx, config.Result, apiSrv) if err != nil { return nil, err } return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) { workspace := workspaces[i] return workspace.ID, workspace.Name }) case "team": _, apiSrv, err := getSrvs() if err != nil { return nil, err } // Get the teams teams, err := listTeams(ctx, config.Result, apiSrv) if err != nil { return nil, err } return fs.ConfigChoose("workspace", "config_team_drive_id", "Team Drive ID", len(teams), func(i int) (string, string) { team := teams[i] return team.ID, team.Attributes.Name }) case "workspace": _, apiSrv, err := getSrvs() if err != nil { return nil, err } teamID := config.Result workspaces, err := listWorkspaces(ctx, teamID, apiSrv) if err != nil { return nil, err } currentTeamInfo, err := getCurrentTeamInfo(ctx, teamID, apiSrv) if err != nil { return nil, err } privateSpaces, err := getPrivateSpaces(ctx, currentTeamInfo.Data.ID, apiSrv) if err != nil { return nil, err } workspaces = append(workspaces, privateSpaces...) return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) { workspace := workspaces[i] return workspace.ID, workspace.Name }) case "workspace_end": workspaceID := config.Result m.Set(configRootID, workspaceID) return nil, nil } return nil, fmt.Errorf("unknown state %q", config.State) }, Options: append(oauthutil.SharedOptions, []fs.Option{{ Name: "region", Help: `Zoho region to connect to. You'll have to use the region your organization is registered in. 
If not sure use the same top level domain as you connect to in your browser.`, Examples: []fs.OptionExample{{ Value: "com", Help: "United states / Global", }, { Value: "eu", Help: "Europe", }, { Value: "in", Help: "India", }, { Value: "jp", Help: "Japan", }, { Value: "com.cn", Help: "China", }, { Value: "com.au", Help: "Australia", }}, }, { Name: "upload_cutoff", Help: "Cutoff for switching to large file upload api (>= 10 MiB).", Default: fs.SizeSuffix(defaultUploadCutoff), Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.EncodeZero | encoder.EncodeCtl | encoder.EncodeDel | encoder.EncodeInvalidUtf8), }}...), }) } // Options defines the configuration for this backend type Options struct { UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` RootFolderID string `config:"root_folder_id"` Region string `config:"region"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote workdrive type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options features *fs.Features // optional features srv *rest.Client // the connection to the server downloadsrv *rest.Client // the connection to the download server uploadsrv *rest.Client // the connection to the upload server dirCache *dircache.DirCache // Map of directory path to directory id pacer *fs.Pacer // pacer for API calls } // Object describes a Zoho WorkDrive object // // Will definitely have info but maybe not meta type Object struct { fs *Fs // what this object is part of remote string // The remote path hasMetaData bool // whether info below has been set size int64 // size of the object modTime time.Time // modification time of the object id string // ID of the object } // ------------------------------------------------------------ func setupRegion(m configmap.Mapper) error { region, ok := m.Get("region") if !ok || region == "" { return errors.New("no region set") } rootURL 
= fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region) downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region) uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region) accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region) oauthConfig.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region) oauthConfig.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region) return nil } // ------------------------------------------------------------ type workspaceInfo struct { ID string Name string } func getUserInfo(ctx context.Context, srv *rest.Client) (*api.UserInfoResponse, error) { var userInfo api.UserInfoResponse opts := rest.Opts{ Method: "GET", Path: "/users/me", ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, } _, err := srv.CallJSON(ctx, &opts, nil, &userInfo) if err != nil { return nil, err } return &userInfo, nil } func getCurrentTeamInfo(ctx context.Context, teamID string, srv *rest.Client) (*api.CurrentTeamInfo, error) { var currentTeamInfo api.CurrentTeamInfo opts := rest.Opts{ Method: "GET", Path: "/teams/" + teamID + "/currentuser", ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, } _, err := srv.CallJSON(ctx, &opts, nil, &currentTeamInfo) if err != nil { return nil, err } return &currentTeamInfo, err } func getPrivateSpaces(ctx context.Context, teamUserID string, srv *rest.Client) ([]workspaceInfo, error) { var privateSpaceListResponse api.TeamWorkspaceResponse opts := rest.Opts{ Method: "GET", Path: "/users/" + teamUserID + "/privatespace", ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, } _, err := srv.CallJSON(ctx, &opts, nil, &privateSpaceListResponse) if err != nil { return nil, err } workspaceList := make([]workspaceInfo, 0, len(privateSpaceListResponse.TeamWorkspace)) for _, workspace := range privateSpaceListResponse.TeamWorkspace { workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: 
"My Space"}) } return workspaceList, err } func listTeams(ctx context.Context, zuid string, srv *rest.Client) ([]api.TeamWorkspace, error) { var teamList api.TeamWorkspaceResponse opts := rest.Opts{ Method: "GET", Path: "/users/" + zuid + "/teams", ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, } _, err := srv.CallJSON(ctx, &opts, nil, &teamList) if err != nil { return nil, err } return teamList.TeamWorkspace, nil } func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]workspaceInfo, error) { var workspaceListResponse api.TeamWorkspaceResponse opts := rest.Opts{ Method: "GET", Path: "/teams/" + teamID + "/workspaces", ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, } _, err := srv.CallJSON(ctx, &opts, nil, &workspaceListResponse) if err != nil { return nil, err } workspaceList := make([]workspaceInfo, 0, len(workspaceListResponse.TeamWorkspace)) for _, workspace := range workspaceListResponse.TeamWorkspace { workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: workspace.Attributes.Name}) } return workspaceList, nil } // -------------------------------------------------------------- // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } authRetry := false // Bail out early if we are missing OAuth Scopes. if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Status, "INVALID_OAUTHSCOPE") { fs.Errorf(nil, "zoho: missing OAuth Scope. 
Run rclone config reconnect to fix this issue.") return false, err } if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") { authRetry = true fs.Debugf(nil, "Should retry: %v", err) } if resp != nil && resp.StatusCode == 429 { err = pacer.RetryAfterError(err, 60*time.Second) fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", 60) return true, err } return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // -------------------------------------------------------------- // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("zoho root '%s'", f.root) } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } // Hashes returns the supported hash sets. 
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // parsePath parses a zoho 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // readMetaDataForPath reads the metadata from the path func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) { // defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound } return nil, err } found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool { if item.Attributes.Name == leaf { info = item return true } return false }) if err != nil { return nil, err } if !found { return nil, fs.ErrorObjectNotFound } return info, nil } // readMetaDataForID reads the metadata for the object with given ID func (f *Fs) readMetaDataForID(ctx context.Context, id string) (*api.Item, error) { opts := rest.Opts{ Method: "GET", Path: "/files/" + id, ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, Parameters: url.Values{}, } var result *api.ItemInfo var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } return &result.Item, nil } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) if err := configstruct.Set(m, opt); err != nil { return nil, err } if opt.UploadCutoff < defaultUploadCutoff { return nil, fmt.Errorf("zoho: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(defaultUploadCutoff)) } err := setupRegion(m) if err != nil { 
return nil, err } root = parsePath(root) oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig) if err != nil { return nil, err } f := &Fs{ name: name, root: root, opt: *opt, srv: rest.NewClient(oAuthClient).SetRoot(rootURL), downloadsrv: rest.NewClient(oAuthClient).SetRoot(downloadURL), uploadsrv: rest.NewClient(oAuthClient).SetRoot(uploadURL), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } f.features = (&fs.Features{ CanHaveEmptyDirectories: true, }).Fill(ctx, f) // Get rootFolderID rootID := f.opt.RootFolderID f.dirCache = dircache.New(root, rootID, f) // Find the current root err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) tempF := *f tempF.dirCache = dircache.New(newRoot, rootID, &tempF) tempF.root = newRoot // Make new Fs which is the parent err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } _, err := tempF.newObjectWithInfo(ctx, remote, nil) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f return f, nil } return nil, err } f.features.Fill(ctx, &tempF) f.dirCache = tempF.dirCache f.root = tempF.root // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // list the objects into the function supplied // // If directories is set it only sends directories // User function to process a File item from listAll // // Should return true to finish processing type listAllFn func(*api.Item) bool // Lists the directory required calling the user function on each item found // // If the user fn ever returns true then it early exits with found = true func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) { const listItemsLimit = 1000 opts := rest.Opts{ Method: "GET", Path: "/files/" + dirID + 
"/files", ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, Parameters: url.Values{ "page[limit]": {strconv.Itoa(listItemsLimit)}, "page[next]": {"0"}, }, } OUTER: for { var result api.ItemList var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return found, fmt.Errorf("couldn't list files: %w", err) } if len(result.Items) == 0 { break } for i := range result.Items { item := &result.Items[i] if item.Attributes.IsFolder { if filesOnly { continue } } else { if directoriesOnly { continue } } item.Attributes.Name = f.opt.Enc.ToStandardName(item.Attributes.Name) if fn(item) { found = true break OUTER } } if !result.Links.Cursor.HasNext { break } // Fetch the next from the URL in the response nextURL, err := url.Parse(result.Links.Cursor.Next) if err != nil { return found, fmt.Errorf("failed to parse next link as URL: %w", err) } opts.Parameters.Set("page[next]", nextURL.Query().Get("page[next]")) } return } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. 
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } var iErr error _, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool { remote := path.Join(dir, info.Attributes.Name) if info.Attributes.IsFolder { // cache the directory ID for later lookups f.dirCache.Put(remote, info.ID) d := fs.NewDir(remote, time.Time(info.Attributes.ModifiedTime)).SetID(info.ID) entries = append(entries, d) } else { o, err := f.newObjectWithInfo(ctx, remote, info) if err != nil { iErr = err return true } entries = append(entries, o) } return false }) if err != nil { return nil, err } if iErr != nil { return nil, iErr } return entries, nil } // FindLeaf finds a directory of name leaf in the folder with ID pathID func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { // Find the leaf in pathID found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool { if item.Attributes.Name == leaf { pathIDOut = item.ID return true } return false }) return pathIDOut, found, err } // CreateDir makes a directory with pathID as parent and name leaf func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { //fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) var resp *http.Response var info *api.ItemInfo opts := rest.Opts{ Method: "POST", Path: "/files", ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, } mkdir := api.WriteMetadataRequest{ Data: api.WriteMetadata{ Attributes: api.WriteAttributes{ Name: f.opt.Enc.FromStandardName(leaf), ParentID: pathID, }, Type: "files", }, } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info) return shouldRetry(ctx, resp, err) }) if err != nil { //fmt.Printf("...Error %v\n", err) return "", err } // fmt.Printf("...Id %q\n", *info.Id) return info.Item.ID, nil } // 
Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } var err error if info != nil { // Set info err = o.setMetaData(info) } else { err = o.readMetaData(ctx) // reads info and meta, returning an error } if err != nil { return nil, err } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(ctx, remote, nil) } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Used to create new objects func (f *Fs) createObject(ctx context.Context, remote string, size int64, modTime time.Time) (o *Object, leaf string, directoryID string, err error) { leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true) if err != nil { return } // Temporary Object under construction o = &Object{ fs: f, remote: remote, size: size, modTime: modTime, } return } func (f *Fs) uploadLargeFile(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) { opts := rest.Opts{ Method: "POST", Path: "/stream/upload", Body: in, ContentLength: &size, ContentType: "application/octet-stream", Options: options, ExtraHeaders: map[string]string{ "x-filename": url.QueryEscape(name), "x-parent_id": parent, "override-name-exist": "true", "upload-id": uuid.New().String(), "x-streammode": "1", }, } var err error var resp *http.Response var uploadResponse *api.LargeUploadResponse err = f.pacer.CallNoRetry(func() (bool, error) { resp, err = f.uploadsrv.CallJSON(ctx, &opts, nil, &uploadResponse) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("upload large error: %v", err) } if 
len(uploadResponse.Uploads) != 1 { return nil, errors.New("upload: invalid response") } upload := uploadResponse.Uploads[0] uploadInfo, err := upload.GetUploadFileInfo() if err != nil { return nil, fmt.Errorf("upload error: %w", err) } // Fill in the api.Item from the api.UploadFileInfo var info api.Item info.ID = upload.Attributes.RessourceID info.Attributes.Name = upload.Attributes.FileName // info.Attributes.Type = not used info.Attributes.IsFolder = false // info.Attributes.CreatedTime = not used info.Attributes.ModifiedTime = uploadInfo.GetModTime() // info.Attributes.UploadedTime = 0 not used info.Attributes.StorageInfo.Size = uploadInfo.Size info.Attributes.StorageInfo.FileCount = 0 info.Attributes.StorageInfo.FolderCount = 0 return &info, nil } func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) { params := url.Values{} params.Set("filename", url.QueryEscape(name)) params.Set("parent_id", parent) params.Set("override-name-exist", strconv.FormatBool(true)) formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name) if err != nil { return nil, fmt.Errorf("failed to make multipart upload: %w", err) } contentLength := overhead + size opts := rest.Opts{ Method: "POST", Path: "/upload", Body: formReader, ContentType: contentType, ContentLength: &contentLength, Options: options, Parameters: params, TransferEncoding: []string{"identity"}, } var resp *http.Response var uploadResponse *api.UploadResponse err = f.pacer.CallNoRetry(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &uploadResponse) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("upload error: %w", err) } if len(uploadResponse.Uploads) != 1 { return nil, errors.New("upload: invalid response") } upload := uploadResponse.Uploads[0] uploadInfo, err := upload.GetUploadFileInfo() if err != nil { return nil, fmt.Errorf("upload error: %w", err) 
} // Fill in the api.Item from the api.UploadFileInfo var info api.Item info.ID = upload.Attributes.RessourceID info.Attributes.Name = upload.Attributes.FileName // info.Attributes.Type = not used info.Attributes.IsFolder = false // info.Attributes.CreatedTime = not used info.Attributes.ModifiedTime = uploadInfo.GetModTime() // info.Attributes.UploadedTime = 0 not used info.Attributes.StorageInfo.Size = uploadInfo.Size info.Attributes.StorageInfo.FileCount = 0 info.Attributes.StorageInfo.FolderCount = 0 return &info, nil } // Put the object into the container // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { existingObj, err := f.NewObject(ctx, src.Remote()) switch err { case nil: return existingObj, existingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: size := src.Size() remote := src.Remote() // Create the directory for the object if it doesn't exist leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true) if err != nil { return nil, err } // use normal upload API for small sizes (<10MiB) if size < int64(f.opt.UploadCutoff) { info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...) if err != nil { return nil, err } return f.newObjectWithInfo(ctx, remote, info) } // large file API otherwise info, err := f.uploadLargeFile(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...) 
if err != nil { return nil, err } return f.newObjectWithInfo(ctx, remote, info) default: return nil, err } } // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, err := f.dirCache.FindDir(ctx, dir, true) return err } // deleteObject removes an object by ID func (f *Fs) deleteObject(ctx context.Context, id string) (err error) { var resp *http.Response opts := rest.Opts{ Method: "PATCH", Path: "/files", ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, } delete := api.WriteMultiMetadataRequest{ Meta: []api.WriteMetadata{ { Attributes: api.WriteAttributes{ Status: "51", // Status "51" is deleted }, ID: id, Type: "files", }, }, } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &delete, nil) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("delete object failed: %w", err) } return nil } // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } rootID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return err } info, err := f.readMetaDataForID(ctx, rootID) if err != nil { return err } if check && info.Attributes.StorageInfo.Size > 0 { return fs.ErrorDirectoryNotEmpty } err = f.deleteObject(ctx, rootID) if err != nil { return fmt.Errorf("rmdir failed: %w", err) } f.dirCache.FlushDir(dir) return nil } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) } // Purge deletes all the files and the container // // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge(ctx 
context.Context, dir string) error { return f.purgeCheck(ctx, dir, false) } func (f *Fs) rename(ctx context.Context, id, name string) (item *api.Item, err error) { var resp *http.Response opts := rest.Opts{ Method: "PATCH", Path: "/files/" + id, ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, } rename := api.WriteMetadataRequest{ Data: api.WriteMetadata{ Attributes: api.WriteAttributes{ Name: f.opt.Enc.FromStandardName(name), }, Type: "files", }, } var result *api.ItemInfo err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &rename, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("rename failed: %w", err) } return &result.Item, nil } // Copy src to this remote using server side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } err := srcObj.readMetaData(ctx) if err != nil { return nil, err } // Create temporary object dstObject, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.size, srcObj.modTime) if err != nil { return nil, err } // Copy the object opts := rest.Opts{ Method: "POST", Path: "/files/" + directoryID + "/copy", ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, } copyFile := api.WriteMultiMetadataRequest{ Meta: []api.WriteMetadata{ { Attributes: api.WriteAttributes{ RessourceID: srcObj.id, }, Type: "files", }, }, } var resp *http.Response var result *api.ItemList err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, 
fmt.Errorf("couldn't copy file: %w", err) } // Server acts weird some times make sure we actually got // an item if len(result.Items) != 1 { return nil, errors.New("couldn't copy file: invalid response") } // Only set ID here because response is not complete Item struct dstObject.id = result.Items[0].ID // Can't copy and change name in one step so we have to check if we have // the correct name after copy if f.opt.Enc.ToStandardName(result.Items[0].Attributes.Name) != leaf { if err = dstObject.rename(ctx, leaf); err != nil { return nil, fmt.Errorf("copy: couldn't rename copied file: %w", err) } } return dstObject, nil } func (f *Fs) move(ctx context.Context, srcID, parentID string) (item *api.Item, err error) { // Move the object opts := rest.Opts{ Method: "PATCH", Path: "/files", ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, } moveFile := api.WriteMultiMetadataRequest{ Meta: []api.WriteMetadata{ { Attributes: api.WriteAttributes{ ParentID: parentID, }, ID: srcID, Type: "files", }, },
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/zoho/api/types.go
backend/zoho/api/types.go
// Package api provides types used by the Zoho API.

// Time represents date and time information for Zoho
// Zoho uses milliseconds since unix epoch (Java currentTimeMillis)
type Time time.Time

// UnmarshalJSON turns JSON into a Time
//
// The value may be a bare integer or a quoted integer number of
// milliseconds. A JSON null leaves the receiver unchanged, matching the
// encoding/json convention that Unmarshalers treat null as a no-op.
func (t *Time) UnmarshalJSON(data []byte) error {
	s := string(data)
	// By convention, unmarshalling JSON null is a no-op.
	if s == "null" {
		return nil
	}
	// If the time is a quoted string, strip quotes
	if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' {
		s = s[1 : len(s)-1]
	}
	millis, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return err
	}
	*t = Time(time.Unix(0, millis*int64(time.Millisecond)))
	return nil
}

// OAuthUser is a Zoho user we are only interested in the ZUID here
type OAuthUser struct {
	FirstName   string `json:"First_Name"`
	Email       string `json:"Email"`
	LastName    string `json:"Last_Name"`
	DisplayName string `json:"Display_Name"`
	ZUID        int64  `json:"ZUID"`
}

// UserInfoResponse is returned by the user info API.
type UserInfoResponse struct {
	Data struct {
		ID         string `json:"id"`
		Type       string `json:"users"`
		Attributes struct {
			EmailID string `json:"email_id"`
			Edition string `json:"edition"`
		} `json:"attributes"`
	} `json:"data"`
}

// PrivateSpaceInfo gives basic information about a users private folder.
type PrivateSpaceInfo struct {
	Data struct {
		ID   string `json:"id"`
		Type string `json:"string"`
	} `json:"data"`
}

// CurrentTeamInfo gives information about the current user in a team.
type CurrentTeamInfo struct {
	Data struct {
		ID   string `json:"id"`
		Type string `json:"string"`
	}
}

// TeamWorkspace represents a Zoho Team, Workspace or Private Space
// It's actually a VERY large json object that differs between
// Team and Workspace and Private Space but we are only interested in some fields
// that all of them have so we can use the same struct.
type TeamWorkspace struct { ID string `json:"id"` Type string `json:"type"` Attributes struct { Name string `json:"name"` Created Time `json:"created_time_in_millisecond"` IsPart bool `json:"is_partof"` } `json:"attributes"` } // TeamWorkspaceResponse is the response by the list teams API, list workspace API // or list team private spaces API. type TeamWorkspaceResponse struct { TeamWorkspace []TeamWorkspace `json:"data"` } // Item is may represent a file or a folder in Zoho Workdrive type Item struct { ID string `json:"id"` Attributes struct { Name string `json:"name"` Type string `json:"type"` IsFolder bool `json:"is_folder"` CreatedTime Time `json:"created_time_in_millisecond"` ModifiedTime Time `json:"modified_time_in_millisecond"` UploadedTime Time `json:"uploaded_time_in_millisecond"` StorageInfo struct { Size int64 `json:"size_in_bytes"` FileCount int64 `json:"files_count"` FolderCount int64 `json:"folders_count"` } `json:"storage_info"` } `json:"attributes"` } // ItemInfo contains a single Zoho Item type ItemInfo struct { Item Item `json:"data"` } // Links contains Cursor information type Links struct { Cursor struct { HasNext bool `json:"has_next"` Next string `json:"next"` } `json:"cursor"` } // ItemList contains multiple Zoho Items type ItemList struct { Links Links `json:"links"` Items []Item `json:"data"` } // UploadFileInfo is what the FileInfo field in the UnloadInfo struct decodes to type UploadFileInfo struct { OrgID string `json:"ORG_ID"` ResourceID string `json:"RESOURCE_ID"` LibraryID string `json:"LIBRARY_ID"` Md5Checksum string `json:"MD5_CHECKSUM"` ParentModelID string `json:"PARENT_MODEL_ID"` ParentID string `json:"PARENT_ID"` ResourceType int `json:"RESOURCE_TYPE"` WmsSentTime string `json:"WMS_SENT_TIME"` TabID string `json:"TAB_ID"` Owner string `json:"OWNER"` ResourceGroup string `json:"RESOURCE_GROUP"` ParentModelName string `json:"PARENT_MODEL_NAME"` Size int64 `json:"size"` Operation string `json:"OPERATION"` EventID string 
`json:"EVENT_ID"` AuditInfo struct { VersionInfo struct { VersionAuthors []string `json:"versionAuthors"` VersionID string `json:"versionId"` IsMinorVersion bool `json:"isMinorVersion"` VersionTime Time `json:"versionTime"` VersionAuthorZuid []string `json:"versionAuthorZuid"` VersionNotes string `json:"versionNotes"` VersionNumber string `json:"versionNumber"` } `json:"versionInfo"` Resource struct { Owner string `json:"owner"` CreatedTime Time `json:"created_time"` Creator string `json:"creator"` ServiceType int `json:"service_type"` Extension string `json:"extension"` StatusChangeTime Time `json:"status_change_time"` ResourceType int `json:"resource_type"` Name string `json:"name"` } `json:"resource"` ParentInfo struct { ParentName string `json:"parentName"` ParentID string `json:"parentId"` ParentType int `json:"parentType"` } `json:"parentInfo"` LibraryInfo struct { LibraryName string `json:"libraryName"` LibraryID string `json:"libraryId"` LibraryType int `json:"libraryType"` } `json:"libraryInfo"` UpdateType string `json:"updateType"` StatusCode string `json:"statusCode"` } `json:"AUDIT_INFO"` ZUID int64 `json:"ZUID"` TeamID string `json:"TEAM_ID"` } // GetModTime fetches the modification time of the upload // // This tries a few places and if all fails returns the current time func (ufi *UploadFileInfo) GetModTime() Time { if t := ufi.AuditInfo.Resource.CreatedTime; !time.Time(t).IsZero() { return t } if t := ufi.AuditInfo.Resource.StatusChangeTime; !time.Time(t).IsZero() { return t } return Time(time.Now()) } // UploadInfo is a simplified and slightly different version of // the Item struct only used in the response to uploads type UploadInfo struct { Attributes struct { ParentID string `json:"parent_id"` FileName string `json:"notes.txt"` RessourceID string `json:"resource_id"` Permalink string `json:"Permalink"` FileInfo string `json:"File INFO"` // JSON encoded UploadFileInfo } `json:"attributes"` } // GetUploadFileInfo decodes the embedded FileInfo 
func (ui *UploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) { var ufi UploadFileInfo err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &ufi) if err != nil { return nil, fmt.Errorf("failed to decode FileInfo: %w", err) } return &ufi, nil } // LargeUploadInfo is once again a slightly different version of UploadInfo // returned as part of an LargeUploadResponse by the large file upload API. type LargeUploadInfo struct { Attributes struct { ParentID string `json:"parent_id"` FileName string `json:"file_name"` RessourceID string `json:"resource_id"` FileInfo string `json:"file_info"` } `json:"attributes"` } // GetUploadFileInfo decodes the embedded FileInfo func (ui *LargeUploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) { var ufi UploadFileInfo err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &ufi) if err != nil { return nil, fmt.Errorf("failed to decode FileInfo: %w", err) } return &ufi, nil } // UploadResponse is the response to a file Upload type UploadResponse struct { Uploads []UploadInfo `json:"data"` } // LargeUploadResponse is the response returned by large file upload API. 
type LargeUploadResponse struct { Uploads []LargeUploadInfo `json:"data"` Status string `json:"status"` } // WriteMetadataRequest is used to write metadata for a // single item type WriteMetadataRequest struct { Data WriteMetadata `json:"data"` } // WriteMultiMetadataRequest can be used to write metadata for // multiple items at once but we don't use it that way type WriteMultiMetadataRequest struct { Meta []WriteMetadata `json:"data"` } // WriteMetadata is used to write item metadata type WriteMetadata struct { Attributes WriteAttributes `json:"attributes,omitempty"` ID string `json:"id,omitempty"` Type string `json:"type"` } // WriteAttributes is used to set various attributes for on items // this is used for Move, Copy, Delete, Rename type WriteAttributes struct { Name string `json:"name,omitempty"` ParentID string `json:"parent_id,omitempty"` RessourceID string `json:"resource_id,omitempty"` Status string `json:"status,omitempty"` }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/iclouddrive/iclouddrive_test.go
backend/iclouddrive/iclouddrive_test.go
//go:build !plan9 && !solaris package iclouddrive_test import ( "testing" "github.com/rclone/rclone/backend/iclouddrive" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestICloudDrive:", NilObject: (*iclouddrive.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/iclouddrive/iclouddrive_unsupported.go
backend/iclouddrive/iclouddrive_unsupported.go
// Build for iclouddrive for unsupported platforms to stop go complaining // about "no buildable Go source files " //go:build plan9 || solaris // Package iclouddrive implements the iCloud Drive backend package iclouddrive
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/iclouddrive/iclouddrive.go
backend/iclouddrive/iclouddrive.go
//go:build !plan9 && !solaris // Package iclouddrive implements the iCloud Drive backend package iclouddrive import ( "bytes" "context" "path" "errors" "fmt" "io" "net/http" "strings" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/backend/iclouddrive/api" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" ) /* - dirCache operates on relative path to root - path sanitization - rule of thumb: sanitize before use, but store things as-is - the paths cached in dirCache are after sanitizing - the remote/dir passed in aren't, and are stored as-is */ const ( configAppleID = "apple_id" configPassword = "password" configClientID = "client_id" configCookies = "cookies" configTrustToken = "trust_token" minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "iclouddrive", Description: "iCloud Drive", Config: Config, NewFs: NewFs, Options: []fs.Option{{ Name: configAppleID, Help: "Apple ID.", Required: true, Sensitive: true, }, { Name: configPassword, Help: "Password.", Required: true, IsPassword: true, Sensitive: true, }, { Name: configTrustToken, Help: "Trust token (internal use)", IsPassword: false, Required: false, Sensitive: true, Hide: fs.OptionHideBoth, }, { Name: configCookies, Help: "cookies (internal use only)", Required: false, Advanced: false, Sensitive: true, Hide: fs.OptionHideBoth, }, { Name: configClientID, Help: "Client id", Required: false, Advanced: true, Default: "d39ba9916b7251055b22c7f910e2ea796ee65e98b2ddecea8f5dde8d9d1a815d", }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.Display | 
//encoder.EncodeDot | encoder.EncodeBackSlash | encoder.EncodeInvalidUtf8), }}, }) } // Options defines the configuration for this backend type Options struct { AppleID string `config:"apple_id"` Password string `config:"password"` Photos bool `config:"photos"` TrustToken string `config:"trust_token"` Cookies string `config:"cookies"` ClientID string `config:"client_id"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote icloud drive type Fs struct { name string // name of this remote root string // the path we are working on. rootID string opt Options // parsed config options features *fs.Features // optional features dirCache *dircache.DirCache // Map of directory path to directory id icloud *api.Client service *api.DriveService pacer *fs.Pacer // pacer for API calls } // Object describes an icloud drive object type Object struct { fs *Fs // what this object is part of remote string // The remote path (relative to the fs.root) size int64 // size of the object (on server, after encryption) modTime time.Time // modification time of the object createdTime time.Time // creation time of the object driveID string // item ID of the object docID string // document ID of the object itemID string // item ID of the object etag string downloadURL string } // Config configures the iCloud remote. 
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { var err error appleid, _ := m.Get(configAppleID) if appleid == "" { return nil, errors.New("a apple ID is required") } password, _ := m.Get(configPassword) if password != "" { password, err = obscure.Reveal(password) if err != nil { return nil, err } } trustToken, _ := m.Get(configTrustToken) cookieRaw, _ := m.Get(configCookies) clientID, _ := m.Get(configClientID) cookies := ReadCookies(cookieRaw) switch config.State { case "": icloud, err := api.New(appleid, password, trustToken, clientID, cookies, nil) if err != nil { return nil, err } if err := icloud.Authenticate(ctx); err != nil { return nil, err } m.Set(configCookies, icloud.Session.GetCookieString()) if icloud.Session.Requires2FA() { return fs.ConfigInput("2fa_do", "config_2fa", "Two-factor authentication: please enter your 2FA code") } return nil, nil case "2fa_do": code := config.Result if code == "" { return fs.ConfigError("authenticate", "2FA codes can't be blank") } icloud, err := api.New(appleid, password, trustToken, clientID, cookies, nil) if err != nil { return nil, err } if err := icloud.SignIn(ctx); err != nil { return nil, err } if err := icloud.Session.Validate2FACode(ctx, code); err != nil { return nil, err } m.Set(configTrustToken, icloud.Session.TrustToken) m.Set(configCookies, icloud.Session.GetCookieString()) return nil, nil case "2fa_error": if config.Result == "true" { return fs.ConfigGoto("2fa") } return nil, errors.New("2fa authentication failed") } return nil, fmt.Errorf("unknown state %q", config.State) } // find item by path. 
Will not return any children for the item func (f *Fs) findItem(ctx context.Context, dir string) (item *api.DriveItem, found bool, err error) { var resp *http.Response if err = f.pacer.Call(func() (bool, error) { item, resp, err = f.service.GetItemByPath(ctx, path.Join(f.root, dir)) return shouldRetry(ctx, resp, err) }); err != nil { if item == nil && resp.StatusCode == 404 { return nil, false, nil } return nil, false, err } return item, true, nil } func (f *Fs) findLeafItem(ctx context.Context, pathID string, leaf string) (item *api.DriveItem, found bool, err error) { items, err := f.listAll(ctx, pathID) if err != nil { return nil, false, err } for _, item := range items { if strings.EqualFold(item.FullName(), leaf) { return item, true, nil } } return nil, false, nil } // FindLeaf finds a directory of name leaf in the folder with ID pathID func (f *Fs) FindLeaf(ctx context.Context, pathID string, leaf string) (pathIDOut string, found bool, err error) { item, found, err := f.findLeafItem(ctx, pathID, leaf) if err != nil { return "", found, err } if !found { return "", false, err } if !item.IsFolder() { return "", false, fs.ErrorIsFile } return f.IDJoin(item.Drivewsid, item.Etag), true, nil } // Features implements fs.Fs. 
func (f *Fs) Features() *fs.Features { return f.features } // Hashes are not exposed anywhere func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } directoryID, etag, err := f.FindDir(ctx, dir, false) if err != nil { return err } if check { item, found, err := f.findItem(ctx, dir) if err != nil { return err } if found && item.DirectChildrenCount > 0 { return fs.ErrorDirectoryNotEmpty } } var _ *api.DriveItem var resp *http.Response if err = f.pacer.Call(func() (bool, error) { _, resp, err = f.service.MoveItemToTrashByID(ctx, directoryID, etag, true) return retryResultUnknown(ctx, resp, err) }); err != nil { return err } // flush everything from the left of the dir f.dirCache.FlushDir(dir) return nil } // Purge all files in the directory specified // // Implement this if you have a way of deleting all the files // quicker than just running Remove() on the result of List() // // Return an error if it doesn't exist func (f *Fs) Purge(ctx context.Context, dir string) error { if dir == "" { return fs.ErrorCantPurge } return f.purgeCheck(ctx, dir, false) } func (f *Fs) listAll(ctx context.Context, dirID string) (items []*api.DriveItem, err error) { var item *api.DriveItem var resp *http.Response if err = f.pacer.Call(func() (bool, error) { id, _ := f.parseNormalizedID(dirID) item, resp, err = f.service.GetItemByDriveID(ctx, id, true) return shouldRetry(ctx, resp, err) }); err != nil { return nil, err } items = item.Items for i, item := range items { item.Name = f.opt.Enc.ToStandardName(item.Name) item.Extension = f.opt.Enc.ToStandardName(item.Extension) items[i] = item } return items, nil } // List implements fs.Fs. 
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { dirRemoteID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } entries = make(fs.DirEntries, 0) items, err := f.listAll(ctx, dirRemoteID) if err != nil { return nil, err } for _, item := range items { id := item.Drivewsid name := item.FullName() remote := path.Join(dir, name) if item.IsFolder() { jid := f.putFolderCache(id, item.Etag, remote) d := fs.NewDir(remote, item.DateModified).SetID(jid).SetSize(item.AssetQuota) entries = append(entries, d) } else { o, err := f.NewObjectFromDriveItem(ctx, remote, item) if err != nil { return nil, err } entries = append(entries, o) } } return entries, nil } // Mkdir implements fs.Fs. func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, _, err := f.FindDir(ctx, dir, true) return err } // Name implements fs.Fs. func (f *Fs) Name() string { return f.name } // Precision implements fs.Fs. func (f *Fs) Precision() time.Duration { return time.Second } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy // //nolint:all func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { // ICloud cooy endpoint is broken. Once they fixed it this can be re-enabled. return nil, fs.ErrorCantCopy // note: so many calls its only just faster then a reupload for big files. 
srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } file, pathID, _, err := f.FindPath(ctx, remote, true) if err != nil { return nil, err } var resp *http.Response var info *api.DriveItemRaw // make a copy if err = f.pacer.Call(func() (bool, error) { info, resp, err = f.service.CopyDocByItemID(ctx, srcObj.itemID) return retryResultUnknown(ctx, resp, err) }); err != nil { return nil, err } // renaming in CopyDocByID endpoint does not work :/ so do it the hard way // get new document var doc *api.Document if err = f.pacer.Call(func() (bool, error) { doc, resp, err = f.service.GetDocByItemID(ctx, info.ItemID) return shouldRetry(ctx, resp, err) }); err != nil { return nil, err } // get parentdrive id var dirDoc *api.Document if err = f.pacer.Call(func() (bool, error) { dirDoc, resp, err = f.service.GetDocByItemID(ctx, pathID) return shouldRetry(ctx, resp, err) }); err != nil { return nil, err } // build request // can't use normal rename as file needs to be "activated" first r := api.NewUpdateFileInfo() r.DocumentID = doc.DocumentID r.Path.Path = file r.Path.StartingDocumentID = dirDoc.DocumentID r.Data.Signature = doc.Data.Signature r.Data.ReferenceSignature = doc.Data.ReferenceSignature r.Data.WrappingKey = doc.Data.WrappingKey r.Data.Size = doc.Data.Size r.Mtime = srcObj.modTime.UnixMilli() r.Btime = srcObj.modTime.UnixMilli() var item *api.DriveItem if err = f.pacer.Call(func() (bool, error) { item, resp, err = f.service.UpdateFile(ctx, &r) return retryResultUnknown(ctx, resp, err) }); err != nil { return nil, err } o, err := f.NewObjectFromDriveItem(ctx, remote, item) if err != nil { return nil, err } obj := o.(*Object) // cheat unit tests obj.modTime = srcObj.modTime obj.createdTime = srcObj.createdTime return obj, nil } // Put in to the remote path with the modTime given of the given size // // When called from outside an Fs by rclone, src.Size() will always be >= 0. 
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either // return an error or upload it properly (rather than e.g. calling panic). // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { size := src.Size() if size < 0 { return nil, errors.New("file size unknown") } existingObj, err := f.NewObject(ctx, src.Remote()) switch err { case nil: // object is found return existingObj, existingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // object not found, so we need to create it remote := src.Remote() size := src.Size() modTime := src.ModTime(ctx) obj, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } return obj, obj.Update(ctx, in, src, options...) default: // real error caught return nil, err } } // DirCacheFlush resets the directory cache - used in testing as an // optional interface func (f *Fs) DirCacheFlush() { f.dirCache.ResetRoot() } // parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`) // and returns itemID, driveID, rootURL. 
// Such a normalized ID can come from (*Item).GetID() // // Parameters: // - rid: the normalized ID to be parsed // // Returns: // - id: the itemID extracted from the normalized ID // - etag: the driveID extracted from the normalized ID, or an empty string if not present func (f *Fs) parseNormalizedID(rid string) (id string, etag string) { split := strings.Split(rid, "#") if len(split) == 1 { return split[0], "" } return split[0], split[1] } // FindPath finds the leaf and directoryID from a normalized path func (f *Fs) FindPath(ctx context.Context, remote string, create bool) (leaf, directoryID, etag string, err error) { leaf, jDirectoryID, err := f.dirCache.FindPath(ctx, remote, create) if err != nil { return "", "", "", err } directoryID, etag = f.parseNormalizedID(jDirectoryID) return leaf, directoryID, etag, nil } // FindDir finds the directory passed in returning the directory ID // starting from pathID func (f *Fs) FindDir(ctx context.Context, path string, create bool) (pathID string, etag string, err error) { jDirectoryID, err := f.dirCache.FindDir(ctx, path, create) if err != nil { return "", "", err } directoryID, etag := f.parseNormalizedID(jDirectoryID) return directoryID, etag, nil } // IDJoin joins the given ID and ETag into a single string with a "#" delimiter. func (f *Fs) IDJoin(id string, etag string) string { if strings.Contains(id, "#") { // already contains an etag, replace id, _ = f.parseNormalizedID(id) } return strings.Join([]string{id, etag}, "#") } func (f *Fs) putFolderCache(id, etag, remote string) string { jid := f.IDJoin(id, etag) f.dirCache.Put(remote, f.IDJoin(id, etag)) return jid } // Rmdir implements fs.Fs. func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) } // Root implements fs.Fs. func (f *Fs) Root() string { return f.opt.Enc.ToStandardPath(f.root) } // String implements fs.Fs. 
func (f *Fs) String() string { return f.root } // CreateDir makes a directory with pathID as parent and name leaf // // This should be implemented by the backend and will be called by the // dircache package when appropriate. func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error) { var item *api.DriveItem var err error var found bool var resp *http.Response if err = f.pacer.Call(func() (bool, error) { id, _ := f.parseNormalizedID(pathID) item, resp, err = f.service.CreateNewFolderByDriveID(ctx, id, f.opt.Enc.FromStandardName(leaf)) // check if it went oke if requestError, ok := err.(*api.RequestError); ok { if requestError.Status == "unknown" { fs.Debugf(requestError, " checking if dir is created with separate call.") time.Sleep(1 * time.Second) // sleep to give icloud time to clear up its mind item, found, err = f.findLeafItem(ctx, pathID, leaf) if err != nil { return false, err } if !found { // lets assume it failed and retry return true, err } // success, clear err err = nil } } return ignoreResultUnknown(ctx, resp, err) }); err != nil { return "", err } return f.IDJoin(item.Drivewsid, item.Etag), err } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } srcID, jsrcDirectoryID, srcLeaf, jdstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) if err != nil { return err } srcDirectoryID, srcEtag := f.parseNormalizedID(jsrcDirectoryID) dstDirectoryID, _ := f.parseNormalizedID(jdstDirectoryID) _, err = f.move(ctx, srcID, srcDirectoryID, srcLeaf, srcEtag, dstDirectoryID, dstLeaf) if err != nil { return err } srcFs.dirCache.FlushDir(srcRemote) return nil } func (f *Fs) move(ctx context.Context, ID, srcDirectoryID, srcLeaf, srcEtag, dstDirectoryID, dstLeaf string) (*api.DriveItem, error) { var resp *http.Response var item *api.DriveItem var err error // move if srcDirectoryID != dstDirectoryID { if err = f.pacer.Call(func() (bool, error) { id, _ := f.parseNormalizedID(ID) item, resp, err = f.service.MoveItemByDriveID(ctx, id, srcEtag, dstDirectoryID, true) return ignoreResultUnknown(ctx, resp, err) }); err != nil { return nil, err } ID = item.Drivewsid srcEtag = item.Etag } // rename if srcLeaf != dstLeaf { if err = f.pacer.Call(func() (bool, error) { id, _ := f.parseNormalizedID(ID) item, resp, err = f.service.RenameItemByDriveID(ctx, id, srcEtag, dstLeaf, true) return ignoreResultUnknown(ctx, resp, err) }); err != nil { return item, err } } return item, err } // Move moves the src object to the specified remote. 
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } srcLeaf, srcDirectoryID, _, err := srcObj.fs.FindPath(ctx, srcObj.remote, true) if err != nil { return nil, err } dstLeaf, dstDirectoryID, _, err := f.FindPath(ctx, remote, true) if err != nil { return nil, err } item, err := f.move(ctx, srcObj.driveID, srcDirectoryID, srcLeaf, srcObj.etag, dstDirectoryID, dstLeaf) if err != nil { return src, err } return f.NewObjectFromDriveItem(ctx, remote, item) } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Returns the object, leaf, directoryID and error. // // Used to create new objects func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, err error) { // Create the directory for the object if it doesn't exist _, _, _, err = f.FindPath(ctx, remote, true) if err != nil { return } // Temporary Object under construction o = &Object{ fs: f, remote: remote, modTime: modTime, size: size, } return o, nil } // ReadCookies parses the raw cookie string and returns an array of http.Cookie objects. func ReadCookies(raw string) []*http.Cookie { header := http.Header{} header.Add("Cookie", raw) request := http.Request{Header: header} return request.Cookies() } var retryErrorCodes = []int{ 400, // icloud is a mess, sometimes returns 400 on a perfectly fine request. So just retry 408, // Request Timeout 409, // Conflict, retry could fix it. 429, // Rate exceeded. 
500, // Get occasional 500 Internal Server Error 502, // Server overload 503, // Service Unavailable 504, // Gateway Time-out } func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } func ignoreResultUnknown(ctx context.Context, resp *http.Response, err error) (bool, error) { if requestError, ok := err.(*api.RequestError); ok { if requestError.Status == "unknown" { fs.Debugf(requestError, " ignoring.") return false, nil } } return shouldRetry(ctx, resp, err) } func retryResultUnknown(ctx context.Context, resp *http.Response, err error) (bool, error) { if requestError, ok := err.(*api.RequestError); ok { if requestError.Status == "unknown" { fs.Debugf(requestError, " retrying.") return true, err } } return shouldRetry(ctx, resp, err) } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } if opt.Password != "" { var err error opt.Password, err = obscure.Reveal(opt.Password) if err != nil { return nil, fmt.Errorf("couldn't decrypt user password: %w", err) } } if opt.TrustToken == "" { return nil, fmt.Errorf("missing icloud trust token: try refreshing it with \"rclone config reconnect %s:\"", name) } cookies := ReadCookies(opt.Cookies) callback := func(session *api.Session) { m.Set(configCookies, session.GetCookieString()) } icloud, err := api.New( opt.AppleID, opt.Password, opt.TrustToken, opt.ClientID, cookies, callback, ) if err != nil { return nil, err } if err := icloud.Authenticate(ctx); err != nil { return nil, err } if icloud.Session.Requires2FA() { return nil, errors.New("trust token expired, please reauth") } root = strings.Trim(root, "/") f := &Fs{ name: name, root: root, 
icloud: icloud, rootID: "FOLDER::com.apple.CloudDocs::root", opt: *opt, pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } f.features = (&fs.Features{ CanHaveEmptyDirectories: true, PartialUploads: false, }).Fill(ctx, f) rootID := f.rootID f.service, err = icloud.DriveService() if err != nil { return nil, err } f.dirCache = dircache.New( root, rootID, f, ) err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) tempF := *f tempF.dirCache = dircache.New(newRoot, rootID, &tempF) tempF.root = newRoot // Make new Fs which is the parent err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } _, err := tempF.NewObject(ctx, remote) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f return f, nil } return nil, err } f.dirCache = tempF.dirCache f.root = tempF.root // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // NewObject creates a new fs.Object from a given remote string. // // ctx: The context.Context for the function. // remote: The remote string representing the object's location. // Returns an fs.Object and an error. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.NewObjectFromDriveItem(ctx, remote, nil) } // NewObjectFromDriveItem creates a new fs.Object from a given remote string and DriveItem. // // ctx: The context.Context for the function. // remote: The remote string representing the object's location. // item: The optional DriveItem to use for initializing the Object. If nil, the function will read the metadata from the remote location. // Returns an fs.Object and an error. 
func (f *Fs) NewObjectFromDriveItem(ctx context.Context, remote string, item *api.DriveItem) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } if item != nil { err := o.setMetaData(item) if err != nil { return nil, err } } else { item, err := f.readMetaData(ctx, remote) if err != nil { return nil, err } err = o.setMetaData(item) if err != nil { return nil, err } } return o, nil } func (f *Fs) readMetaData(ctx context.Context, path string) (item *api.DriveItem, err error) { leaf, ID, _, err := f.FindPath(ctx, path, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound } return nil, err } item, found, err := f.findLeafItem(ctx, ID, leaf) if err != nil { return nil, err } if !found { return nil, fs.ErrorObjectNotFound } return item, nil } func (o *Object) setMetaData(item *api.DriveItem) (err error) { if item.IsFolder() { return fs.ErrorIsDir } o.size = item.Size o.modTime = item.DateModified o.createdTime = item.DateCreated o.driveID = item.Drivewsid o.docID = item.Docwsid o.itemID = item.Itemid o.etag = item.Etag o.downloadURL = item.DownloadURL() return nil } // ID returns the ID of the Object if known, or "" if not func (o *Object) ID() string { return o.driveID } // Fs implements fs.Object. func (o *Object) Fs() fs.Info { return o.fs } // Hash implements fs.Object. func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { return "", hash.ErrUnsupported } // ModTime implements fs.Object. func (o *Object) ModTime(context.Context) time.Time { return o.modTime } // Open implements fs.Object. 
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { fs.FixRangeOption(options, o.size) // Drive does not support empty files, so we cheat if o.size == 0 { return io.NopCloser(bytes.NewBufferString("")), nil } var resp *http.Response var err error if err = o.fs.pacer.Call(func() (bool, error) { var url string //var doc *api.Document //if o.docID == "" { //doc, resp, err = o.fs.service.GetDocByItemID(ctx, o.itemID) //} // Can not get the download url on a item to work, so do it the hard way. url, _, err = o.fs.service.GetDownloadURLByDriveID(ctx, o.driveID) resp, err = o.fs.service.DownloadFile(ctx, url, options) return shouldRetry(ctx, resp, err) }); err != nil { return nil, err } return resp.Body, err } // Remote implements fs.Object. func (o *Object) Remote() string { return o.remote } // Remove implements fs.Object. func (o *Object) Remove(ctx context.Context) error { if o.itemID == "" { return nil } var resp *http.Response var err error if err = o.fs.pacer.Call(func() (bool, error) { _, resp, err = o.fs.service.MoveItemToTrashByID(ctx, o.driveID, o.etag, true) return retryResultUnknown(ctx, resp, err) }); err != nil { return err } return nil } // SetModTime implements fs.Object. func (o *Object) SetModTime(ctx context.Context, t time.Time) error { return fs.ErrorCantSetModTime } // Size implements fs.Object. func (o *Object) Size() int64 { return o.size } // Storable implements fs.Object. func (o *Object) Storable() bool { return true } // String implements fs.Object. func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Update implements fs.Object. 
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { size := src.Size() if size < 0 { return errors.New("file size unknown") } remote := o.Remote() modTime := src.ModTime(ctx) leaf, dirID, _, err := o.fs.FindPath(ctx, path.Clean(remote), true) if err != nil { return err } // Move current file to trash if o.driveID != "" { err = o.Remove(ctx) if err != nil { return err } } name := o.fs.opt.Enc.FromStandardName(leaf) var resp *http.Response // Create document var uploadInfo *api.UploadResponse if err = o.fs.pacer.Call(func() (bool, error) { uploadInfo, resp, err = o.fs.service.CreateUpload(ctx, size, name) return ignoreResultUnknown(ctx, resp, err) }); err != nil { return err } // Upload content var upload *api.SingleFileResponse if err = o.fs.pacer.Call(func() (bool, error) { upload, resp, err = o.fs.service.Upload(ctx, in, size, name, uploadInfo.URL) return ignoreResultUnknown(ctx, resp, err) }); err != nil { return err } //var doc *api.Document //if err = o.fs.pacer.Call(func() (bool, error) { // doc, resp, err = o.fs.service.GetDocByItemID(ctx, dirID) // return ignoreResultUnknown(ctx, resp, err) //}); err != nil { // return err //} r := api.NewUpdateFileInfo() r.DocumentID = uploadInfo.DocumentID r.Path.Path = name r.Path.StartingDocumentID = api.GetDocIDFromDriveID(dirID) //r.Path.StartingDocumentID = doc.DocumentID r.Data.Receipt = upload.SingleFile.Receipt r.Data.Signature = upload.SingleFile.Signature r.Data.ReferenceSignature = upload.SingleFile.ReferenceSignature r.Data.WrappingKey = upload.SingleFile.WrappingKey r.Data.Size = upload.SingleFile.Size r.Mtime = modTime.Unix() * 1000 r.Btime = modTime.Unix() * 1000 // Update metadata var item *api.DriveItem if err = o.fs.pacer.Call(func() (bool, error) { item, resp, err = o.fs.service.UpdateFile(ctx, &r) return ignoreResultUnknown(ctx, resp, err) }); err != nil { return err } err = o.setMetaData(item) if err != nil { return err } o.modTime = modTime 
o.size = src.Size() return nil } // Check interfaces are satisfied var ( _ fs.Fs = &Fs{} _ fs.Mover = (*Fs)(nil) _ fs.Purger = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil) _ fs.DirCacheFlusher = (*Fs)(nil) _ fs.Copier = (*Fs)(nil) _ fs.Object = &Object{} _ fs.IDer = (*Object)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/iclouddrive/api/drive.go
backend/iclouddrive/api/drive.go
package api import ( "bytes" "context" "io" "mime" "net/http" "net/url" "path/filepath" "strconv" "strings" "time" "github.com/google/uuid" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/lib/rest" ) const ( defaultZone = "com.apple.CloudDocs" statusOk = "OK" statusEtagConflict = "ETAG_CONFLICT" ) // DriveService represents an iCloud Drive service. type DriveService struct { icloud *Client RootID string endpoint string docsEndpoint string } // NewDriveService creates a new DriveService instance. func NewDriveService(icloud *Client) (*DriveService, error) { return &DriveService{icloud: icloud, RootID: "FOLDER::com.apple.CloudDocs::root", endpoint: icloud.Session.AccountInfo.Webservices["drivews"].URL, docsEndpoint: icloud.Session.AccountInfo.Webservices["docws"].URL}, nil } // GetItemByDriveID retrieves a DriveItem by its Drive ID. func (d *DriveService) GetItemByDriveID(ctx context.Context, id string, includeChildren bool) (*DriveItem, *http.Response, error) { items, resp, err := d.GetItemsByDriveID(ctx, []string{id}, includeChildren) if err != nil { return nil, resp, err } return items[0], resp, err } // GetItemsByDriveID retrieves DriveItems by their Drive IDs. 
func (d *DriveService) GetItemsByDriveID(ctx context.Context, ids []string, includeChildren bool) ([]*DriveItem, *http.Response, error) { var err error _items := []map[string]any{} for _, id := range ids { _items = append(_items, map[string]any{ "drivewsid": id, "partialData": false, "includeHierarchy": false, }) } var body *bytes.Reader var path string if !includeChildren { values := []map[string]any{{ "items": _items, }} body, err = IntoReader(values) if err != nil { return nil, nil, err } path = "/retrieveItemDetails" } else { values := _items body, err = IntoReader(values) if err != nil { return nil, nil, err } path = "/retrieveItemDetailsInFolders" } opts := rest.Opts{ Method: "POST", Path: path, ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: d.endpoint, Body: body, } var items []*DriveItem resp, err := d.icloud.Request(ctx, opts, nil, &items) if err != nil { return nil, resp, err } return items, resp, err } // GetDocByPath retrieves a document by its path. func (d *DriveService) GetDocByPath(ctx context.Context, path string) (*Document, *http.Response, error) { values := url.Values{} values.Set("unified_format", "false") body, err := IntoReader(path) if err != nil { return nil, nil, err } opts := rest.Opts{ Method: "POST", Path: "/ws/" + defaultZone + "/list/lookup_by_path", ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: d.docsEndpoint, Parameters: values, Body: body, } var item []*Document resp, err := d.icloud.Request(ctx, opts, nil, &item) if err != nil { return nil, resp, err } return item[0], resp, err } // GetItemByPath retrieves a DriveItem by its path. 
func (d *DriveService) GetItemByPath(ctx context.Context, path string) (*DriveItem, *http.Response, error) { values := url.Values{} values.Set("unified_format", "true") body, err := IntoReader(path) if err != nil { return nil, nil, err } opts := rest.Opts{ Method: "POST", Path: "/ws/" + defaultZone + "/list/lookup_by_path", ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: d.docsEndpoint, Parameters: values, Body: body, } var item []*DriveItem resp, err := d.icloud.Request(ctx, opts, nil, &item) if err != nil { return nil, resp, err } return item[0], resp, err } // GetDocByItemID retrieves a document by its item ID. func (d *DriveService) GetDocByItemID(ctx context.Context, id string) (*Document, *http.Response, error) { values := url.Values{} values.Set("document_id", id) values.Set("unified_format", "false") // important opts := rest.Opts{ Method: "GET", Path: "/ws/" + defaultZone + "/list/lookup_by_id", ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: d.docsEndpoint, Parameters: values, } var item *Document resp, err := d.icloud.Request(ctx, opts, nil, &item) if err != nil { return nil, resp, err } return item, resp, err } // GetItemRawByItemID retrieves a DriveItemRaw by its item ID. func (d *DriveService) GetItemRawByItemID(ctx context.Context, id string) (*DriveItemRaw, *http.Response, error) { opts := rest.Opts{ Method: "GET", Path: "/v1/item/" + id, ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: d.docsEndpoint, } var item *DriveItemRaw resp, err := d.icloud.Request(ctx, opts, nil, &item) if err != nil { return nil, resp, err } return item, resp, err } // GetItemsInFolder retrieves a list of DriveItemRaw objects in a folder with the given ID. 
func (d *DriveService) GetItemsInFolder(ctx context.Context, id string, limit int64) ([]*DriveItemRaw, *http.Response, error) { values := url.Values{} values.Set("limit", strconv.FormatInt(limit, 10)) opts := rest.Opts{ Method: "GET", Path: "/v1/enumerate/" + id, ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: d.docsEndpoint, Parameters: values, } items := struct { Items []*DriveItemRaw `json:"drive_item"` }{} resp, err := d.icloud.Request(ctx, opts, nil, &items) if err != nil { return nil, resp, err } return items.Items, resp, err } // GetDownloadURLByDriveID retrieves the download URL for a file in the DriveService. func (d *DriveService) GetDownloadURLByDriveID(ctx context.Context, id string) (string, *http.Response, error) { _, zone, docid := DeconstructDriveID(id) values := url.Values{} values.Set("document_id", docid) if zone == "" { zone = defaultZone } opts := rest.Opts{ Method: "GET", Path: "/ws/" + zone + "/download/by_id", Parameters: values, ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: d.docsEndpoint, } var filer *FileRequest resp, err := d.icloud.Request(ctx, opts, nil, &filer) if err != nil { return "", resp, err } var url string if filer.DataToken != nil { url = filer.DataToken.URL } else { url = filer.PackageToken.URL } return url, resp, err } // DownloadFile downloads a file from the given URL using the provided options. func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.OpenOption) (*http.Response, error) { opts := &rest.Opts{ Method: "GET", ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: url, Options: opt, } resp, err := d.icloud.srv.Call(ctx, opts) // icloud has some weird http codes if err != nil && resp != nil && resp.StatusCode == 330 { loc, err := resp.Location() if err == nil { return d.DownloadFile(ctx, loc.String(), opt) } } return resp, err } // MoveItemToTrashByItemID moves an item to the trash based on the item ID. 
func (d *DriveService) MoveItemToTrashByItemID(ctx context.Context, id, etag string, force bool) (*DriveItem, *http.Response, error) { doc, resp, err := d.GetDocByItemID(ctx, id) if err != nil { return nil, resp, err } return d.MoveItemToTrashByID(ctx, doc.DriveID(), etag, force) } // MoveItemToTrashByID moves an item to the trash based on the item ID. func (d *DriveService) MoveItemToTrashByID(ctx context.Context, drivewsid, etag string, force bool) (*DriveItem, *http.Response, error) { values := map[string]any{ "items": []map[string]any{{ "drivewsid": drivewsid, "etag": etag, "clientId": drivewsid, }}} body, err := IntoReader(values) if err != nil { return nil, nil, err } opts := rest.Opts{ Method: "POST", Path: "/moveItemsToTrash", ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: d.endpoint, Body: body, } item := struct { Items []*DriveItem `json:"items"` }{} resp, err := d.icloud.Request(ctx, opts, nil, &item) if err != nil { return nil, resp, err } if item.Items[0].Status != statusOk { // rerun with latest etag if force && item.Items[0].Status == "ETAG_CONFLICT" { return d.MoveItemToTrashByID(ctx, drivewsid, item.Items[0].Etag, false) } err = newRequestError(item.Items[0].Status, "unknown request status") } return item.Items[0], resp, err } // CreateNewFolderByItemID creates a new folder by item ID. func (d *DriveService) CreateNewFolderByItemID(ctx context.Context, id, name string) (*DriveItem, *http.Response, error) { doc, resp, err := d.GetDocByItemID(ctx, id) if err != nil { return nil, resp, err } return d.CreateNewFolderByDriveID(ctx, doc.DriveID(), name) } // CreateNewFolderByDriveID creates a new folder by its Drive ID. 
func (d *DriveService) CreateNewFolderByDriveID(ctx context.Context, drivewsid, name string) (*DriveItem, *http.Response, error) { values := map[string]any{ "destinationDrivewsId": drivewsid, "folders": []map[string]any{{ "clientId": "FOLDER::UNKNOWN_ZONE::TempId-" + uuid.New().String(), "name": name, }}, } body, err := IntoReader(values) if err != nil { return nil, nil, err } opts := rest.Opts{ Method: "POST", Path: "/createFolders", ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: d.endpoint, Body: body, } var fResp *CreateFoldersResponse resp, err := d.icloud.Request(ctx, opts, nil, &fResp) if err != nil { return nil, resp, err } status := fResp.Folders[0].Status if status != statusOk { err = newRequestError(status, "unknown request status") } return fResp.Folders[0], resp, err } // RenameItemByItemID renames a DriveItem by its item ID. func (d *DriveService) RenameItemByItemID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) { doc, resp, err := d.GetDocByItemID(ctx, id) if err != nil { return nil, resp, err } return d.RenameItemByDriveID(ctx, doc.DriveID(), doc.Etag, name, force) } // RenameItemByDriveID renames a DriveItem by its drive ID. 
func (d *DriveService) RenameItemByDriveID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) { values := map[string]any{ "items": []map[string]any{{ "drivewsid": id, "name": name, "etag": etag, // "extension": split[1], }}, } body, err := IntoReader(values) if err != nil { return nil, nil, err } opts := rest.Opts{ Method: "POST", Path: "/renameItems", ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: d.endpoint, Body: body, } var items *DriveItem resp, err := d.icloud.Request(ctx, opts, nil, &items) if err != nil { return nil, resp, err } status := items.Items[0].Status if status != statusOk { // rerun with latest etag if force && status == "ETAG_CONFLICT" { return d.RenameItemByDriveID(ctx, id, items.Items[0].Etag, name, false) } err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL) } return items.Items[0], resp, err } // MoveItemByItemID moves an item by its item ID to a destination item ID. func (d *DriveService) MoveItemByItemID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) { docSrc, resp, err := d.GetDocByItemID(ctx, id) if err != nil { return nil, resp, err } docDst, resp, err := d.GetDocByItemID(ctx, dstID) if err != nil { return nil, resp, err } return d.MoveItemByDriveID(ctx, docSrc.DriveID(), docSrc.Etag, docDst.DriveID(), force) } // MoveItemByDocID moves an item by its doc ID. // func (d *DriveService) MoveItemByDocID(ctx context.Context, srcDocID, srcEtag, dstDocID string, force bool) (*DriveItem, *http.Response, error) { // return d.MoveItemByDriveID(ctx, srcDocID, srcEtag, docDst.DriveID(), force) // } // MoveItemByDriveID moves an item by its drive ID. 
func (d *DriveService) MoveItemByDriveID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) { values := map[string]any{ "destinationDrivewsId": dstID, "items": []map[string]any{{ "drivewsid": id, "etag": etag, "clientId": id, }}, } body, err := IntoReader(values) if err != nil { return nil, nil, err } opts := rest.Opts{ Method: "POST", Path: "/moveItems", ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: d.endpoint, Body: body, } var items *DriveItem resp, err := d.icloud.Request(ctx, opts, nil, &items) if err != nil { return nil, resp, err } status := items.Items[0].Status if status != statusOk { // rerun with latest etag if force && status == "ETAG_CONFLICT" { return d.MoveItemByDriveID(ctx, id, items.Items[0].Etag, dstID, false) } err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL) } return items.Items[0], resp, err } // CopyDocByItemID copies a document by its item ID. func (d *DriveService) CopyDocByItemID(ctx context.Context, itemID string) (*DriveItemRaw, *http.Response, error) { // putting name in info doesn't work. extension does work so assume this is a bug in the endpoint values := map[string]any{ "info_to_update": map[string]any{}, } body, err := IntoReader(values) if err != nil { return nil, nil, err } opts := rest.Opts{ Method: "POST", Path: "/v1/item/copy/" + itemID, ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: d.docsEndpoint, Body: body, } var info *DriveItemRaw resp, err := d.icloud.Request(ctx, opts, nil, &info) if err != nil { return nil, resp, err } return info, resp, err } // CreateUpload creates an url for an upload. 
func (d *DriveService) CreateUpload(ctx context.Context, size int64, name string) (*UploadResponse, *http.Response, error) { // first we need to request an upload url values := map[string]any{ "filename": name, "type": "FILE", "size": strconv.FormatInt(size, 10), "content_type": GetContentTypeForFile(name), } body, err := IntoReader(values) if err != nil { return nil, nil, err } opts := rest.Opts{ Method: "POST", Path: "/ws/" + defaultZone + "/upload/web", ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: d.docsEndpoint, Body: body, } var responseInfo []*UploadResponse resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo) if err != nil { return nil, resp, err } return responseInfo[0], resp, err } // Upload uploads a file to the given url func (d *DriveService) Upload(ctx context.Context, in io.Reader, size int64, name, uploadURL string) (*SingleFileResponse, *http.Response, error) { // TODO: implement multipart upload opts := rest.Opts{ Method: "POST", ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: uploadURL, Body: in, ContentLength: &size, ContentType: GetContentTypeForFile(name), // MultipartContentName: "files", MultipartFileName: name, } var singleFileResponse *SingleFileResponse resp, err := d.icloud.Request(ctx, opts, nil, &singleFileResponse) if err != nil { return nil, resp, err } return singleFileResponse, resp, err } // UpdateFile updates a file in the DriveService. // // ctx: the context.Context object for the request. // r: a pointer to the UpdateFileInfo struct containing the information for the file update. // Returns a pointer to the DriveItem struct representing the updated file, the http.Response object, and an error if any. 
func (d *DriveService) UpdateFile(ctx context.Context, r *UpdateFileInfo) (*DriveItem, *http.Response, error) { body, err := IntoReader(r) if err != nil { return nil, nil, err } opts := rest.Opts{ Method: "POST", Path: "/ws/" + defaultZone + "/update/documents", ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), RootURL: d.docsEndpoint, Body: body, } var responseInfo *DocumentUpdateResponse resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo) if err != nil { return nil, resp, err } doc := responseInfo.Results[0].Document item := DriveItem{ Drivewsid: "FILE::com.apple.CloudDocs::" + doc.DocumentID, Docwsid: doc.DocumentID, Itemid: doc.ItemID, Etag: doc.Etag, ParentID: doc.ParentID, DateModified: time.Unix(r.Mtime, 0), DateCreated: time.Unix(r.Mtime, 0), Type: doc.Type, Name: doc.Name, Size: doc.Size, } return &item, resp, err } // UpdateFileInfo represents the information for an update to a file in the DriveService. type UpdateFileInfo struct { AllowConflict bool `json:"allow_conflict"` Btime int64 `json:"btime"` Command string `json:"command"` CreateShortGUID bool `json:"create_short_guid"` Data struct { Receipt string `json:"receipt,omitempty"` ReferenceSignature string `json:"reference_signature,omitempty"` Signature string `json:"signature,omitempty"` Size int64 `json:"size,omitempty"` WrappingKey string `json:"wrapping_key,omitempty"` } `json:"data,omitempty"` DocumentID string `json:"document_id"` FileFlags FileFlags `json:"file_flags"` Mtime int64 `json:"mtime"` Path struct { Path string `json:"path"` StartingDocumentID string `json:"starting_document_id"` } `json:"path"` } // FileFlags defines the file flags for a document. type FileFlags struct { IsExecutable bool `json:"is_executable"` IsHidden bool `json:"is_hidden"` IsWritable bool `json:"is_writable"` } // NewUpdateFileInfo creates a new UpdateFileInfo object with default values. // // Returns an UpdateFileInfo object. 
func NewUpdateFileInfo() UpdateFileInfo { return UpdateFileInfo{ Command: "add_file", CreateShortGUID: true, AllowConflict: true, FileFlags: FileFlags{ IsExecutable: true, IsHidden: false, IsWritable: true, }, } } // DriveItemRaw is a raw drive item. // not suure what to call this but there seems to be a "unified" and non "unified" drive item response. This is the non unified. type DriveItemRaw struct { ItemID string `json:"item_id"` ItemInfo *DriveItemRawInfo `json:"item_info"` } // SplitName splits the name of a DriveItemRaw into its name and extension. // // It returns the name and extension as separate strings. If the name ends with a dot, // it means there is no extension, so an empty string is returned for the extension. // If the name does not contain a dot, it means func (d *DriveItemRaw) SplitName() (string, string) { name := d.ItemInfo.Name // ends with a dot, no extension if strings.HasSuffix(name, ".") { return name, "" } lastInd := strings.LastIndex(name, ".") if lastInd == -1 { return name, "" } return name[:lastInd], name[lastInd+1:] } // ModTime returns the modification time of the DriveItemRaw. // // It parses the ModifiedAt field of the ItemInfo struct and converts it to a time.Time value. // If the parsing fails, it returns the zero value of time.Time. // The returned time.Time value represents the modification time of the DriveItemRaw. func (d *DriveItemRaw) ModTime() time.Time { i, err := strconv.ParseInt(d.ItemInfo.ModifiedAt, 10, 64) if err != nil { return time.Time{} } return time.UnixMilli(i) } // CreatedTime returns the creation time of the DriveItemRaw. // // It parses the CreatedAt field of the ItemInfo struct and converts it to a time.Time value. // If the parsing fails, it returns the zero value of time.Time. 
// The returned time.Time func (d *DriveItemRaw) CreatedTime() time.Time { i, err := strconv.ParseInt(d.ItemInfo.CreatedAt, 10, 64) if err != nil { return time.Time{} } return time.UnixMilli(i) } // DriveItemRawInfo is the raw information about a drive item. type DriveItemRawInfo struct { Name string `json:"name"` // Extension is absolutely borked on endpoints so dont use it. Extension string `json:"extension"` Size int64 `json:"size,string"` Type string `json:"type"` Version string `json:"version"` ModifiedAt string `json:"modified_at"` CreatedAt string `json:"created_at"` Urls struct { URLDownload string `json:"url_download"` } `json:"urls"` } // IntoDriveItem converts a DriveItemRaw into a DriveItem. // // It takes no parameters. // It returns a pointer to a DriveItem. func (d *DriveItemRaw) IntoDriveItem() *DriveItem { name, extension := d.SplitName() return &DriveItem{ Itemid: d.ItemID, Name: name, Extension: extension, Type: d.ItemInfo.Type, Etag: d.ItemInfo.Version, DateModified: d.ModTime(), DateCreated: d.CreatedTime(), Size: d.ItemInfo.Size, Urls: d.ItemInfo.Urls, } } // DocumentUpdateResponse is the response of a document update request. type DocumentUpdateResponse struct { Status struct { StatusCode int `json:"status_code"` ErrorMessage string `json:"error_message"` } `json:"status"` Results []struct { Status struct { StatusCode int `json:"status_code"` ErrorMessage string `json:"error_message"` } `json:"status"` OperationID any `json:"operation_id"` Document *Document `json:"document"` } `json:"results"` } // Document represents a document on iCloud. 
type Document struct { Status struct { StatusCode int `json:"status_code"` ErrorMessage string `json:"error_message"` } `json:"status"` DocumentID string `json:"document_id"` ItemID string `json:"item_id"` Urls struct { URLDownload string `json:"url_download"` } `json:"urls"` Etag string `json:"etag"` ParentID string `json:"parent_id"` Name string `json:"name"` Type string `json:"type"` Deleted bool `json:"deleted"` Mtime int64 `json:"mtime"` LastEditorName string `json:"last_editor_name"` Data DocumentData `json:"data"` Size int64 `json:"size"` Btime int64 `json:"btime"` Zone string `json:"zone"` FileFlags struct { IsExecutable bool `json:"is_executable"` IsWritable bool `json:"is_writable"` IsHidden bool `json:"is_hidden"` } `json:"file_flags"` LastOpenedTime int64 `json:"lastOpenedTime"` RestorePath any `json:"restorePath"` HasChainedParent bool `json:"hasChainedParent"` } // DriveID returns the drive ID of the Document. func (d *Document) DriveID() string { if d.Zone == "" { d.Zone = defaultZone } return d.Type + "::" + d.Zone + "::" + d.DocumentID } // DocumentData represents the data of a document. type DocumentData struct { Signature string `json:"signature"` Owner string `json:"owner"` Size int64 `json:"size"` ReferenceSignature string `json:"reference_signature"` WrappingKey string `json:"wrapping_key"` PcsInfo string `json:"pcsInfo"` } // SingleFileResponse is the response of a single file request. type SingleFileResponse struct { SingleFile *SingleFileInfo `json:"singleFile"` } // SingleFileInfo represents the information of a single file. type SingleFileInfo struct { ReferenceSignature string `json:"referenceChecksum"` Size int64 `json:"size"` Signature string `json:"fileChecksum"` WrappingKey string `json:"wrappingKey"` Receipt string `json:"receipt"` } // UploadResponse is the response of an upload request. 
type UploadResponse struct { URL string `json:"url"` DocumentID string `json:"document_id"` } // FileRequestToken represents the token of a file request. type FileRequestToken struct { URL string `json:"url"` Token string `json:"token"` Signature string `json:"signature"` WrappingKey string `json:"wrapping_key"` ReferenceSignature string `json:"reference_signature"` } // FileRequest represents the request of a file. type FileRequest struct { DocumentID string `json:"document_id"` ItemID string `json:"item_id"` OwnerDsid int64 `json:"owner_dsid"` DataToken *FileRequestToken `json:"data_token,omitempty"` PackageToken *FileRequestToken `json:"package_token,omitempty"` DoubleEtag string `json:"double_etag"` } // CreateFoldersResponse is the response of a create folders request. type CreateFoldersResponse struct { Folders []*DriveItem `json:"folders"` } // DriveItem represents an item on iCloud. type DriveItem struct { DateCreated time.Time `json:"dateCreated"` Drivewsid string `json:"drivewsid"` Docwsid string `json:"docwsid"` Itemid string `json:"item_id"` Zone string `json:"zone"` Name string `json:"name"` ParentID string `json:"parentId"` Hierarchy []DriveItem `json:"hierarchy"` Etag string `json:"etag"` Type string `json:"type"` AssetQuota int64 `json:"assetQuota"` FileCount int64 `json:"fileCount"` ShareCount int64 `json:"shareCount"` ShareAliasCount int64 `json:"shareAliasCount"` DirectChildrenCount int64 `json:"directChildrenCount"` Items []*DriveItem `json:"items"` NumberOfItems int64 `json:"numberOfItems"` Status string `json:"status"` Extension string `json:"extension,omitempty"` DateModified time.Time `json:"dateModified,omitempty"` DateChanged time.Time `json:"dateChanged,omitempty"` Size int64 `json:"size,omitempty"` LastOpenTime time.Time `json:"lastOpenTime,omitempty"` Urls struct { URLDownload string `json:"url_download"` } `json:"urls"` } // IsFolder returns true if the item is a folder. 
func (d *DriveItem) IsFolder() bool {
	switch d.Type {
	case "FOLDER", "APP_CONTAINER", "APP_LIBRARY":
		return true
	}
	return false
}

// DownloadURL returns the download URL of the item.
func (d *DriveItem) DownloadURL() string {
	return d.Urls.URLDownload
}

// FullName returns the full name of the item.
// name + extension
func (d *DriveItem) FullName() string {
	if d.Extension == "" {
		return d.Name
	}
	return d.Name + "." + d.Extension
}

// GetDocIDFromDriveID returns the DocumentID from the drive ID.
func GetDocIDFromDriveID(id string) string {
	parts := strings.Split(id, "::")
	return parts[len(parts)-1]
}

// DeconstructDriveID returns the document type, zone, and document ID from the drive ID.
func DeconstructDriveID(id string) (docType, zone, docid string) {
	parts := strings.Split(id, "::")
	if len(parts) >= 3 {
		return parts[0], parts[1], parts[2]
	}
	return "", "", id
}

// ConstructDriveID constructs a drive ID from the given components.
func ConstructDriveID(id string, zone string, t string) string {
	return t + "::" + zone + "::" + id
}

// GetContentTypeForFile detects content type for given file name.
func GetContentTypeForFile(name string) string {
	// detect MIME type by looking at the filename only
	mediaType := mime.TypeByExtension(filepath.Ext(name))
	if mediaType == "" {
		// api requires a mime type passed in
		mediaType = "text/plain"
	}
	// strip any parameters, e.g. "; charset=utf-8"
	mediaType, _, _ = strings.Cut(mediaType, ";")
	return mediaType
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/iclouddrive/api/client.go
backend/iclouddrive/api/client.go
// Package api provides functionality for interacting with the iCloud API. package api import ( "bytes" "context" "encoding/json" "errors" "fmt" "net/http" "strings" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/lib/rest" ) const ( baseEndpoint = "https://www.icloud.com" homeEndpoint = "https://www.icloud.com" setupEndpoint = "https://setup.icloud.com/setup/ws/1" authEndpoint = "https://idmsa.apple.com/appleauth/auth" ) type sessionSave func(*Session) // Client defines the client configuration type Client struct { appleID string password string srv *rest.Client Session *Session sessionSaveCallback sessionSave drive *DriveService } // New creates a new Client instance with the provided Apple ID, password, trust token, cookies, and session save callback. // // Parameters: // - appleID: the Apple ID of the user. // - password: the password of the user. // - trustToken: the trust token for the session. // - clientID: the client id for the session. // - cookies: the cookies for the session. // - sessionSaveCallback: the callback function to save the session. func New(appleID, password, trustToken string, clientID string, cookies []*http.Cookie, sessionSaveCallback sessionSave) (*Client, error) { icloud := &Client{ appleID: appleID, password: password, srv: rest.NewClient(fshttp.NewClient(context.Background())), Session: NewSession(), sessionSaveCallback: sessionSaveCallback, } icloud.Session.TrustToken = trustToken icloud.Session.Cookies = cookies icloud.Session.ClientID = clientID return icloud, nil } // DriveService returns the DriveService instance associated with the Client. func (c *Client) DriveService() (*DriveService, error) { var err error if c.drive == nil { c.drive, err = NewDriveService(c) if err != nil { return nil, err } } return c.drive, nil } // Request makes a request and retries it if the session is invalid. // // This function is the main entry point for making requests to the iCloud // API. 
If the initial request returns a 401 (Unauthorized), it will try to // reauthenticate and retry the request. func (c *Client) Request(ctx context.Context, opts rest.Opts, request any, response any) (resp *http.Response, err error) { resp, err = c.Session.Request(ctx, opts, request, response) if err != nil && resp != nil { // try to reauth if resp.StatusCode == 401 || resp.StatusCode == 421 { err = c.Authenticate(ctx) if err != nil { return nil, err } if c.Session.Requires2FA() { return nil, errors.New("trust token expired, please reauth") } return c.RequestNoReAuth(ctx, opts, request, response) } } return resp, err } // RequestNoReAuth makes a request without re-authenticating. // // This function is useful when you have a session that is already // authenticated, but you need to make a request without triggering // a re-authentication. func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request any, response any) (resp *http.Response, err error) { // Make the request without re-authenticating resp, err = c.Session.Request(ctx, opts, request, response) return resp, err } // Authenticate authenticates the client with the iCloud API. func (c *Client) Authenticate(ctx context.Context) error { if c.Session.Cookies != nil { if err := c.Session.ValidateSession(ctx); err == nil { fs.Debugf("icloud", "Valid session, no need to reauth") return nil } c.Session.Cookies = nil } fs.Debugf("icloud", "Authenticating as %s\n", c.appleID) err := c.Session.SignIn(ctx, c.appleID, c.password) if err == nil { err = c.Session.AuthWithToken(ctx) if err == nil && c.sessionSaveCallback != nil { c.sessionSaveCallback(c.Session) } } return err } // SignIn signs in the client using the provided context and credentials. 
func (c *Client) SignIn(ctx context.Context) error { return c.Session.SignIn(ctx, c.appleID, c.password) } // IntoReader marshals the provided values into a JSON encoded reader func IntoReader(values any) (*bytes.Reader, error) { m, err := json.Marshal(values) if err != nil { return nil, err } return bytes.NewReader(m), nil } // RequestError holds info on a result state, icloud can return a 200 but the result is unknown type RequestError struct { Status string Text string } // Error satisfy the error interface. func (e *RequestError) Error() string { return fmt.Sprintf("%s: %s", e.Text, e.Status) } func newRequestError(Status string, Text string) *RequestError { return &RequestError{ Status: strings.ToLower(Status), Text: Text, } } // newErr orf makes a new error from sprintf parameters. func newRequestErrorf(Status string, Text string, Parameters ...any) *RequestError { return newRequestError(strings.ToLower(Status), fmt.Sprintf(Text, Parameters...)) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/iclouddrive/api/session.go
backend/iclouddrive/api/session.go
package api

import (
	"context"
	"fmt"
	"maps"
	"net/http"
	"net/url"
	"slices"
	"strings"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// Session represents an iCloud session.
//
// All exported fields are JSON-serialisable so a session can be persisted
// and restored between runs; srv is excluded and rebuilt by NewSession.
type Session struct {
	SessionToken   string         `json:"session_token"`
	Scnt           string         `json:"scnt"`
	SessionID      string         `json:"session_id"`
	AccountCountry string         `json:"account_country"`
	TrustToken     string         `json:"trust_token"`
	ClientID       string         `json:"client_id"`
	Cookies        []*http.Cookie `json:"cookies"`
	AccountInfo    AccountInfo    `json:"account_info"`
	srv            *rest.Client   `json:"-"`
}

// String returns the session as a string
// func (s *Session) String() string {
// 	jsession, _ := json.Marshal(s)
// 	return string(jsession)
// }

// Request makes a request.
//
// On success it captures any Apple auth state returned in the response
// headers (account country, session id, session token, trust token, scnt)
// back into the session, so subsequent requests carry the updated state.
func (s *Session) Request(ctx context.Context, opts rest.Opts, request any, response any) (*http.Response, error) {
	resp, err := s.srv.CallJSON(ctx, &opts, &request, &response)
	if err != nil {
		return resp, err
	}
	if val := resp.Header.Get("X-Apple-ID-Account-Country"); val != "" {
		s.AccountCountry = val
	}
	if val := resp.Header.Get("X-Apple-ID-Session-Id"); val != "" {
		s.SessionID = val
	}
	if val := resp.Header.Get("X-Apple-Session-Token"); val != "" {
		s.SessionToken = val
	}
	if val := resp.Header.Get("X-Apple-TwoSV-Trust-Token"); val != "" {
		s.TrustToken = val
	}
	if val := resp.Header.Get("scnt"); val != "" {
		s.Scnt = val
	}
	return resp, nil
}

// Requires2FA returns true if the session requires 2FA
func (s *Session) Requires2FA() bool {
	return s.AccountInfo.DsInfo.HsaVersion == 2 && s.AccountInfo.HsaChallengeRequired
}

// SignIn signs in the session.
//
// An existing trust token, if present, is sent so a trusted device can skip
// the 2FA challenge.
func (s *Session) SignIn(ctx context.Context, appleID, password string) error {
	trustTokens := []string{}
	if s.TrustToken != "" {
		trustTokens = []string{s.TrustToken}
	}
	values := map[string]any{
		"accountName": appleID,
		"password":    password,
		"rememberMe":  true,
		"trustTokens": trustTokens,
	}
	body, err := IntoReader(values)
	if err != nil {
		return err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/signin",
		Parameters:   url.Values{},
		ExtraHeaders: s.GetAuthHeaders(map[string]string{}),
		RootURL:      authEndpoint,
		IgnoreStatus: true, // need to handle 409 for hsa2
		NoResponse:   true,
		Body:         body,
	}
	opts.Parameters.Set("isRememberMeEnabled", "true")
	_, err = s.Request(ctx, opts, nil, nil)
	return err
}

// AuthWithToken authenticates the session.
//
// On success the response cookies replace the session's cookies.
func (s *Session) AuthWithToken(ctx context.Context) error {
	values := map[string]any{
		"accountCountryCode": s.AccountCountry,
		"dsWebAuthToken":     s.SessionToken,
		"extended_login":     true,
		"trustToken":         s.TrustToken,
	}
	body, err := IntoReader(values)
	if err != nil {
		return err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/accountLogin",
		ExtraHeaders: GetCommonHeaders(map[string]string{}),
		RootURL:      setupEndpoint,
		Body:         body,
	}
	resp, err := s.Request(ctx, opts, nil, &s.AccountInfo)
	if err == nil {
		s.Cookies = resp.Cookies()
	}
	return err
}

// Validate2FACode validates the 2FA code.
//
// After a successful verification it immediately trusts the session so the
// next sign-in can skip 2FA.
func (s *Session) Validate2FACode(ctx context.Context, code string) error {
	values := map[string]any{"securityCode": map[string]string{"code": code}}
	body, err := IntoReader(values)
	if err != nil {
		return err
	}
	headers := s.GetAuthHeaders(map[string]string{})
	headers["scnt"] = s.Scnt
	headers["X-Apple-ID-Session-Id"] = s.SessionID
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/verify/trusteddevice/securitycode",
		ExtraHeaders: headers,
		RootURL:      authEndpoint,
		Body:         body,
		NoResponse:   true,
	}
	_, err = s.Request(ctx, opts, nil, nil)
	if err == nil {
		if err := s.TrustSession(ctx); err != nil {
			return err
		}
		return nil
	}
	return fmt.Errorf("validate2FACode failed: %w", err)
}

// TrustSession trusts the session, then re-authenticates with the
// newly-issued token.
func (s *Session) TrustSession(ctx context.Context) error {
	headers := s.GetAuthHeaders(map[string]string{})
	headers["scnt"] = s.Scnt
	headers["X-Apple-ID-Session-Id"] = s.SessionID
	opts := rest.Opts{
		Method:        "GET",
		Path:          "/2sv/trust",
		ExtraHeaders:  headers,
		RootURL:       authEndpoint,
		NoResponse:    true,
		ContentLength: common.Int64(0),
	}
	_, err := s.Request(ctx, opts, nil, nil)
	if err != nil {
		return fmt.Errorf("trustSession failed: %w", err)
	}
	return s.AuthWithToken(ctx)
}

// ValidateSession validates the session, refreshing AccountInfo on success.
func (s *Session) ValidateSession(ctx context.Context) error {
	opts := rest.Opts{
		Method:        "POST",
		Path:          "/validate",
		ExtraHeaders:  s.GetHeaders(map[string]string{}),
		RootURL:       setupEndpoint,
		ContentLength: common.Int64(0),
	}
	_, err := s.Request(ctx, opts, nil, &s.AccountInfo)
	if err != nil {
		return fmt.Errorf("validateSession failed: %w", err)
	}
	return nil
}

// GetAuthHeaders returns the authentication headers for the session.
//
// It takes an `overwrite` map[string]string parameter which allows
// overwriting the default headers. It returns a map[string]string.
func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string {
	headers := map[string]string{
		"Accept":                           "application/json",
		"Content-Type":                     "application/json",
		"X-Apple-OAuth-Client-Id":          s.ClientID,
		"X-Apple-OAuth-Client-Type":        "firstPartyAuth",
		"X-Apple-OAuth-Redirect-URI":       "https://www.icloud.com",
		"X-Apple-OAuth-Require-Grant-Code": "true",
		"X-Apple-OAuth-Response-Mode":      "web_message",
		"X-Apple-OAuth-Response-Type":      "code",
		"X-Apple-OAuth-State":              s.ClientID,
		"X-Apple-Widget-Key":               s.ClientID,
		"Origin":                           homeEndpoint,
		"Referer":                          fmt.Sprintf("%s/", homeEndpoint),
		"User-Agent":                       "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
	}
	maps.Copy(headers, overwrite)
	return headers
}

// GetHeaders Gets the authentication headers required for a request
func (s *Session) GetHeaders(overwrite map[string]string) map[string]string {
	headers := GetCommonHeaders(map[string]string{})
	headers["Cookie"] = s.GetCookieString()
	maps.Copy(headers, overwrite)
	return headers
}

// GetCookieString returns the cookie header string for the session.
func (s *Session) GetCookieString() string {
	cookieHeader := ""
	// we only care about name and value.
	for _, cookie := range s.Cookies {
		cookieHeader = cookieHeader + cookie.Name + "=" + cookie.Value + ";"
	}
	return cookieHeader
}

// GetCommonHeaders generates common HTTP headers with optional overwrite.
func GetCommonHeaders(overwrite map[string]string) map[string]string {
	headers := map[string]string{
		"Content-Type": "application/json",
		"Origin":       baseEndpoint,
		"Referer":      fmt.Sprintf("%s/", baseEndpoint),
		"User-Agent":   "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
	}
	maps.Copy(headers, overwrite)
	return headers
}

// MergeCookies merges two slices of http.Cookies, ensuring no duplicates are added.
//
// Duplicates are detected by comparing the cookies' Raw strings; entries from
// `left` that are not already in `right` are appended to `right`.
func MergeCookies(left []*http.Cookie, right []*http.Cookie) ([]*http.Cookie, error) {
	var hashes []string
	for _, cookie := range right {
		hashes = append(hashes, cookie.Raw)
	}
	for _, cookie := range left {
		if !slices.Contains(hashes, cookie.Raw) {
			right = append(right, cookie)
		}
	}
	return right, nil
}

// GetCookiesForDomain filters the provided cookies based on the domain of the given URL.
//
// NOTE(review): this is a plain suffix match on url.Host, so a Domain of
// "icloud.com" would also match "noticloud.com", and a Domain with a leading
// dot never matches a bare host — verify this is acceptable for the cookie
// domains actually seen here.
func GetCookiesForDomain(url *url.URL, cookies []*http.Cookie) ([]*http.Cookie, error) {
	var domainCookies []*http.Cookie
	for _, cookie := range cookies {
		if strings.HasSuffix(url.Host, cookie.Domain) {
			domainCookies = append(domainCookies, cookie)
		}
	}
	return domainCookies, nil
}

// NewSession creates a new Session instance with default values.
func NewSession() *Session {
	session := &Session{}
	session.srv = rest.NewClient(fshttp.NewClient(context.Background())).SetRoot(baseEndpoint)
	//session.ClientID = "auth-" + uuid.New().String()
	return session
}

// AccountInfo represents an account info
type AccountInfo struct {
	DsInfo                       *ValidateDataDsInfo    `json:"dsInfo"`
	HasMinimumDeviceForPhotosWeb bool                   `json:"hasMinimumDeviceForPhotosWeb"`
	ICDPEnabled                  bool                   `json:"iCDPEnabled"`
	Webservices                  map[string]*webService `json:"webservices"`
	PcsEnabled                   bool                   `json:"pcsEnabled"`
	TermsUpdateNeeded            bool                   `json:"termsUpdateNeeded"`
	ConfigBag                    struct {
		Urls struct {
			AccountCreateUI     string `json:"accountCreateUI"`
			AccountLoginUI      string `json:"accountLoginUI"`
			AccountLogin        string `json:"accountLogin"`
			AccountRepairUI     string `json:"accountRepairUI"`
			DownloadICloudTerms string `json:"downloadICloudTerms"`
			RepairDone          string `json:"repairDone"`
			AccountAuthorizeUI  string `json:"accountAuthorizeUI"`
			VettingURLForEmail  string `json:"vettingUrlForEmail"`
			AccountCreate       string `json:"accountCreate"`
			GetICloudTerms      string `json:"getICloudTerms"`
			VettingURLForPhone  string `json:"vettingUrlForPhone"`
		} `json:"urls"`
		AccountCreateEnabled bool `json:"accountCreateEnabled"`
	} `json:"configBag"`
	HsaTrustedBrowser            bool     `json:"hsaTrustedBrowser"`
	AppsOrder                    []string `json:"appsOrder"`
	Version                      int      `json:"version"`
	IsExtendedLogin              bool     `json:"isExtendedLogin"`
	PcsServiceIdentitiesIncluded bool     `json:"pcsServiceIdentitiesIncluded"`
	IsRepairNeeded               bool     `json:"isRepairNeeded"`
	HsaChallengeRequired         bool     `json:"hsaChallengeRequired"`
	RequestInfo                  struct {
		Country  string `json:"country"`
		TimeZone string `json:"timeZone"`
		Region   string `json:"region"`
	} `json:"requestInfo"`
	PcsDeleted bool `json:"pcsDeleted"`
	ICloudInfo struct {
		SafariBookmarksHasMigratedToCloudKit bool `json:"SafariBookmarksHasMigratedToCloudKit"`
	} `json:"iCloudInfo"`
	Apps map[string]*ValidateDataApp `json:"apps"`
}

// ValidateDataDsInfo represents an validation info
type ValidateDataDsInfo struct {
	HsaVersion                         int      `json:"hsaVersion"`
	LastName                           string   `json:"lastName"`
	ICDPEnabled                        bool     `json:"iCDPEnabled"`
	TantorMigrated                     bool     `json:"tantorMigrated"`
	Dsid                               string   `json:"dsid"`
	HsaEnabled                         bool     `json:"hsaEnabled"`
	IsHideMyEmailSubscriptionActive    bool     `json:"isHideMyEmailSubscriptionActive"`
	IroncadeMigrated                   bool     `json:"ironcadeMigrated"`
	Locale                             string   `json:"locale"`
	BrZoneConsolidated                 bool     `json:"brZoneConsolidated"`
	ICDRSCapableDeviceList             string   `json:"ICDRSCapableDeviceList"`
	IsManagedAppleID                   bool     `json:"isManagedAppleID"`
	IsCustomDomainsFeatureAvailable    bool     `json:"isCustomDomainsFeatureAvailable"`
	IsHideMyEmailFeatureAvailable      bool     `json:"isHideMyEmailFeatureAvailable"`
	ContinueOnDeviceEligibleDeviceInfo []string `json:"ContinueOnDeviceEligibleDeviceInfo"`
	Gilligvited                        bool     `json:"gilligvited"`
	AppleIDAliases                     []any    `json:"appleIdAliases"`
	UbiquityEOLEnabled                 bool     `json:"ubiquityEOLEnabled"`
	IsPaidDeveloper                    bool     `json:"isPaidDeveloper"`
	CountryCode                        string   `json:"countryCode"`
	NotificationID                     string   `json:"notificationId"`
	PrimaryEmailVerified               bool     `json:"primaryEmailVerified"`
	ADsID                              string   `json:"aDsID"`
	Locked                             bool     `json:"locked"`
	ICDRSCapableDeviceCount            int      `json:"ICDRSCapableDeviceCount"`
	HasICloudQualifyingDevice          bool     `json:"hasICloudQualifyingDevice"`
	PrimaryEmail                       string   `json:"primaryEmail"`
	AppleIDEntries                     []struct {
		IsPrimary bool   `json:"isPrimary"`
		Type      string `json:"type"`
		Value     string `json:"value"`
	} `json:"appleIdEntries"`
	GilliganEnabled    bool   `json:"gilligan-enabled"`
	IsWebAccessAllowed bool   `json:"isWebAccessAllowed"`
	FullName           string `json:"fullName"`
	MailFlags          struct {
		IsThreadingAvailable           bool `json:"isThreadingAvailable"`
		IsSearchV2Provisioned          bool `json:"isSearchV2Provisioned"`
		SCKMail                        bool `json:"sCKMail"`
		IsMppSupportedInCurrentCountry bool `json:"isMppSupportedInCurrentCountry"`
	} `json:"mailFlags"`
	LanguageCode         string `json:"languageCode"`
	AppleID              string `json:"appleId"`
	HasUnreleasedOS      bool   `json:"hasUnreleasedOS"`
	AnalyticsOptInStatus bool   `json:"analyticsOptInStatus"`
	FirstName            string `json:"firstName"`
	ICloudAppleIDAlias   string `json:"iCloudAppleIdAlias"`
	NotesMigrated        bool   `json:"notesMigrated"`
	BeneficiaryInfo      struct {
		IsBeneficiary bool `json:"isBeneficiary"`
	} `json:"beneficiaryInfo"`
	HasPaymentInfo bool   `json:"hasPaymentInfo"`
	PcsDelet       bool   `json:"pcsDelet"`
	AppleIDAlias   string `json:"appleIdAlias"`
	BrMigrated     bool   `json:"brMigrated"`
	StatusCode     int    `json:"statusCode"`
	FamilyEligible bool   `json:"familyEligible"`
}

// ValidateDataApp represents an app
type ValidateDataApp struct {
	CanLaunchWithOneFactor bool `json:"canLaunchWithOneFactor"`
	IsQualifiedForBeta     bool `json:"isQualifiedForBeta"`
}

// WebService represents a web service
type webService struct {
	PcsRequired bool   `json:"pcsRequired"`
	URL         string `json:"url"`
	UploadURL   string `json:"uploadUrl"`
	Status      string `json:"status"`
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/opendrive/types.go
backend/opendrive/types.go
package opendrive

import (
	"encoding/json"
	"fmt"
)

// Error describes an openDRIVE error response
type Error struct {
	Info struct {
		Code    int    `json:"code"`
		Message string `json:"message"`
	} `json:"error"`
}

// Error satisfies the error interface
func (e *Error) Error() string {
	return fmt.Sprintf("%s (Error %d)", e.Info.Message, e.Info.Code)
}

// Account describes an OpenDRIVE account
type Account struct {
	Username string `json:"username"`
	Password string `json:"passwd"`
}

// UserSessionInfo describes an OpenDRIVE session
type UserSessionInfo struct {
	Username string `json:"username"`
	Password string `json:"passwd"`

	SessionID          string          `json:"SessionID"`
	UserName           string          `json:"UserName"`
	UserFirstName      string          `json:"UserFirstName"`
	UserLastName       string          `json:"UserLastName"`
	AccType            string          `json:"AccType"`
	UserLang           string          `json:"UserLang"`
	UserID             string          `json:"UserID"`
	IsAccountUser      json.RawMessage `json:"IsAccountUser"`
	DriveName          string          `json:"DriveName"`
	UserLevel          string          `json:"UserLevel"`
	UserPlan           string          `json:"UserPlan"`
	FVersioning        string          `json:"FVersioning"`
	UserDomain         string          `json:"UserDomain"`
	PartnerUsersDomain string          `json:"PartnerUsersDomain"`
}

// FolderList describes an OpenDRIVE listing
type FolderList struct {
	// DirUpdateTime string `json:"DirUpdateTime,string"`
	Name             string   `json:"Name"`
	ParentFolderID   string   `json:"ParentFolderID"`
	DirectFolderLink string   `json:"DirectFolderLink"`
	ResponseType     int      `json:"ResponseType"`
	Folders          []Folder `json:"Folders"`
	Files            []File   `json:"Files"`
}

// Folder describes an OpenDRIVE folder
type Folder struct {
	FolderID      string `json:"FolderID"`
	Name          string `json:"Name"`
	DateCreated   int    `json:"DateCreated"`
	DirUpdateTime int    `json:"DirUpdateTime"`
	Access        int    `json:"Access"`
	DateModified  int64  `json:"DateModified"`
	Shared        string `json:"Shared"`
	ChildFolders  int    `json:"ChildFolders"`
	Link          string `json:"Link"`
	Encrypted     string `json:"Encrypted"`
}

// createFolder is the request body for creating a folder
type createFolder struct {
	SessionID           string `json:"session_id"`
	FolderName          string `json:"folder_name"`
	FolderSubParent     string `json:"folder_sub_parent"`
	FolderIsPublic      int64  `json:"folder_is_public"`      // (0 = private, 1 = public, 2 = hidden)
	FolderPublicUpl     int64  `json:"folder_public_upl"`     // (0 = disabled, 1 = enabled)
	FolderPublicDisplay int64  `json:"folder_public_display"` // (0 = disabled, 1 = enabled)
	FolderPublicDnl     int64  `json:"folder_public_dnl"`     // (0 = disabled, 1 = enabled).
}

// createFolderResponse is the response to a createFolder request
type createFolderResponse struct {
	FolderID      string `json:"FolderID"`
	Name          string `json:"Name"`
	DateCreated   int    `json:"DateCreated"`
	DirUpdateTime int    `json:"DirUpdateTime"`
	Access        int    `json:"Access"`
	DateModified  int    `json:"DateModified"`
	Shared        string `json:"Shared"`
	Description   string `json:"Description"`
	Link          string `json:"Link"`
}

// moveCopyFolder is the request body for moving or copying a folder
type moveCopyFolder struct {
	SessionID     string `json:"session_id"`
	FolderID      string `json:"folder_id"`
	DstFolderID   string `json:"dst_folder_id"`
	Move          string `json:"move"`
	NewFolderName string `json:"new_folder_name"` // New name for destination folder.
}

// renameFolder is the request body for renaming a folder in place
type renameFolder struct {
	SessionID  string `json:"session_id"`
	FolderID   string `json:"folder_id"`
	FolderName string `json:"folder_name"` // New name for destination folder (max 255).
	SharingID  string `json:"sharing_id"`
}

// moveCopyFolderResponse is the response to a moveCopyFolder request
type moveCopyFolderResponse struct {
	FolderID string `json:"FolderID"`
}

// removeFolder is the request body for deleting a folder
type removeFolder struct {
	SessionID string `json:"session_id"`
	FolderID  string `json:"folder_id"`
}

// File describes an OpenDRIVE file
type File struct {
	FileID            string `json:"FileId"`
	FileHash          string `json:"FileHash"`
	Name              string `json:"Name"`
	GroupID           int    `json:"GroupID"`
	Extension         string `json:"Extension"`
	Size              int64  `json:"Size,string"`
	Views             string `json:"Views"`
	Version           string `json:"Version"`
	Downloads         string `json:"Downloads"`
	DateModified      int64  `json:"DateModified,string"`
	Access            string `json:"Access"`
	Link              string `json:"Link"`
	DownloadLink      string `json:"DownloadLink"`
	StreamingLink     string `json:"StreamingLink"`
	TempStreamingLink string `json:"TempStreamingLink"`
	EditLink          string `json:"EditLink"`
	ThumbLink         string `json:"ThumbLink"`
	Password          string `json:"Password"`
	EditOnline        int    `json:"EditOnline"`
}

// moveCopyFile is the request body for moving or copying a file
type moveCopyFile struct {
	SessionID         string `json:"session_id"`
	SrcFileID         string `json:"src_file_id"`
	DstFolderID       string `json:"dst_folder_id"`
	Move              string `json:"move"`
	OverwriteIfExists string `json:"overwrite_if_exists"`
	NewFileName       string `json:"new_file_name"` // New name for destination file.
}

// moveCopyFileResponse is the response to a moveCopyFile request
type moveCopyFileResponse struct {
	FileID string `json:"FileID"`
	Size   string `json:"Size"`
}

// renameFile is the request body for renaming a file in place
type renameFile struct {
	SessionID      string `json:"session_id"`
	NewFileName    string `json:"new_file_name"` // New name for destination file.
	FileID         string `json:"file_id"`
	AccessFolderID string `json:"access_folder_id"`
	SharingID      string `json:"sharing_id"`
}

// createFile is the request body for creating a file entry
type createFile struct {
	SessionID string `json:"session_id"`
	FolderID  string `json:"folder_id"`
	Name      string `json:"file_name"`
}

// createFileResponse is the response to a createFile request
type createFileResponse struct {
	FileID             string `json:"FileId"`
	Name               string `json:"Name"`
	GroupID            int    `json:"GroupID"`
	Extension          string `json:"Extension"`
	Size               string `json:"Size"`
	Views              string `json:"Views"`
	Downloads          string `json:"Downloads"`
	DateModified       string `json:"DateModified"`
	Access             string `json:"Access"`
	Link               string `json:"Link"`
	DownloadLink       string `json:"DownloadLink"`
	StreamingLink      string `json:"StreamingLink"`
	TempStreamingLink  string `json:"TempStreamingLink"`
	DirUpdateTime      int    `json:"DirUpdateTime"`
	TempLocation       string `json:"TempLocation"`
	SpeedLimit         int    `json:"SpeedLimit"`
	RequireCompression int    `json:"RequireCompression"`
	RequireHash        int    `json:"RequireHash"`
	RequireHashOnly    int    `json:"RequireHashOnly"`
}

// modTimeFile is the request body for setting a file's modification time
type modTimeFile struct {
	SessionID            string `json:"session_id"`
	FileID               string `json:"file_id"`
	FileModificationTime string `json:"file_modification_time"`
}

// openUpload is the request body for opening an upload
type openUpload struct {
	SessionID string `json:"session_id"`
	FileID    string `json:"file_id"`
	Size      int64  `json:"file_size"`
}

// openUploadResponse is the response to an openUpload request
type openUploadResponse struct {
	TempLocation       string `json:"TempLocation"`
	RequireCompression bool   `json:"RequireCompression"`
	RequireHash        bool   `json:"RequireHash"`
	RequireHashOnly    bool   `json:"RequireHashOnly"`
	SpeedLimit         int    `json:"SpeedLimit"`
}

// closeUpload is the request body for finalising an upload
type closeUpload struct {
	SessionID    string `json:"session_id"`
	FileID       string `json:"file_id"`
	Size         int64  `json:"file_size"`
	TempLocation string `json:"temp_location"`
}

// closeUploadResponse is the response to a closeUpload request
type closeUploadResponse struct {
	FileID   string `json:"FileID"`
	FileHash string `json:"FileHash"`
	Size     int64  `json:"Size"`
}

// permissions is the request body for setting a file's access permission
type permissions struct {
	SessionID    string `json:"session_id"`
	FileID       string `json:"file_id"`
	FileIsPublic int64  `json:"file_ispublic"`
}

// uploadFileChunkReply is the response to uploading a file chunk
type uploadFileChunkReply struct {
	TotalWritten int64 `json:"TotalWritten"`
}

// usersInfoResponse describes OpenDrive users/info.json response
type usersInfoResponse struct {
	// This response contains many other values but these are the only ones currently in use
	StorageUsed int64 `json:"StorageUsed,string"`
	MaxStorage  int64 `json:"MaxStorage,string"`
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/opendrive/opendrive_test.go
backend/opendrive/opendrive_test.go
// Test Opendrive filesystem interface
package opendrive_test

import (
	"testing"

	"github.com/rclone/rclone/backend/opendrive"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
//
// RemoteName selects the configured test remote; NilObject supplies a typed
// nil so the fstests suite can check nil-object handling for this backend.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestOpenDrive:",
		NilObject:  (*opendrive.Object)(nil),
	})
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/opendrive/opendrive.go
backend/opendrive/opendrive.go
// Package opendrive provides an interface to the OpenDrive storage system. package opendrive import ( "context" "errors" "fmt" "io" "net/http" "net/url" "path" "strconv" "strings" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/rest" ) const ( defaultEndpoint = "https://dev.opendrive.com/api/v1" minSleep = 10 * time.Millisecond maxSleep = 5 * time.Minute decayConstant = 1 // bigger for slower decay, exponential ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "opendrive", Description: "OpenDrive", NewFs: NewFs, Options: []fs.Option{{ Name: "username", Help: "Username.", Required: true, Sensitive: true, }, { Name: "password", Help: "Password.", IsPassword: true, Required: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // List of replaced characters: // < (less than) -> '<' // FULLWIDTH LESS-THAN SIGN // > (greater than) -> '>' // FULLWIDTH GREATER-THAN SIGN // : (colon) -> ':' // FULLWIDTH COLON // " (double quote) -> '"' // FULLWIDTH QUOTATION MARK // \ (backslash) -> '\' // FULLWIDTH REVERSE SOLIDUS // | (vertical line) -> '|' // FULLWIDTH VERTICAL LINE // ? (question mark) -> '?' // FULLWIDTH QUESTION MARK // * (asterisk) -> '*' // FULLWIDTH ASTERISK // // Additionally names can't begin or end with an ASCII whitespace. 
// List of replaced characters: // (space) -> '␠' // SYMBOL FOR SPACE // (horizontal tab) -> '␉' // SYMBOL FOR HORIZONTAL TABULATION // (line feed) -> '␊' // SYMBOL FOR LINE FEED // (vertical tab) -> '␋' // SYMBOL FOR VERTICAL TABULATION // (carriage return) -> '␍' // SYMBOL FOR CARRIAGE RETURN // // Also encode invalid UTF-8 bytes as json doesn't handle them properly. // // https://www.opendrive.com/wp-content/uploads/guides/OpenDrive_API_guide.pdf Default: (encoder.Base | encoder.EncodeWin | encoder.EncodeLeftCrLfHtVt | encoder.EncodeRightCrLfHtVt | encoder.EncodeBackSlash | encoder.EncodeLeftSpace | encoder.EncodeRightSpace | encoder.EncodeInvalidUtf8), }, { Name: "chunk_size", Help: `Files will be uploaded in chunks this size. Note that these chunks are buffered in memory so increasing them will increase memory use.`, Default: 10 * fs.Mebi, Advanced: true, }, { Name: "access", Help: "Files and folders will be uploaded with this access permission (default private)", Default: "private", Advanced: true, Examples: []fs.OptionExample{{ Value: "private", Help: "The file or folder access can be granted in a way that will allow select users to view, read or write what is absolutely essential for them.", }, { Value: "public", Help: "The file or folder can be downloaded by anyone from a web browser. 
The link can be shared in any way,", }, { Value: "hidden", Help: "The file or folder can be accessed has the same restrictions as Public if the user knows the URL of the file or folder link in order to access the contents", }}, }}, }) } // Options defines the configuration for this backend type Options struct { UserName string `config:"username"` Password string `config:"password"` Enc encoder.MultiEncoder `config:"encoding"` ChunkSize fs.SizeSuffix `config:"chunk_size"` Access string `config:"access"` } // Fs represents a remote server type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options features *fs.Features // optional features srv *rest.Client // the connection to the server pacer *fs.Pacer // To pace and retry the API calls session UserSessionInfo // contains the session data dirCache *dircache.DirCache // Map of directory path to directory id } // Object describes an object type Object struct { fs *Fs // what this object is part of remote string // The remote path id string // ID of the file parent string // ID of the parent directory modTime time.Time // The modified time of the object if known md5 string // MD5 hash if known size int64 // Size of the object } // parsePath parses an incoming 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("OpenDrive root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // Hashes returns the supported hash sets. 
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.MD5) } // DirCacheFlush resets the directory cache - used in testing as an // optional interface func (f *Fs) DirCacheFlush() { f.dirCache.ResetRoot() } // NewFs constructs an Fs from the path, bucket:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } root = parsePath(root) if opt.UserName == "" { return nil, errors.New("username not found") } opt.Password, err = obscure.Reveal(opt.Password) if err != nil { return nil, errors.New("password could not revealed") } if opt.Password == "" { return nil, errors.New("password not found") } f := &Fs{ name: name, root: root, opt: *opt, srv: rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(errorHandler), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } f.dirCache = dircache.New(root, "0", f) // set the rootURL for the REST client f.srv.SetRoot(defaultEndpoint) // get sessionID var resp *http.Response err = f.pacer.Call(func() (bool, error) { account := Account{Username: opt.UserName, Password: opt.Password} opts := rest.Opts{ Method: "POST", Path: "/session/login.json", } resp, err = f.srv.CallJSON(ctx, &opts, &account, &f.session) return f.shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("failed to create session: %w", err) } fs.Debugf(nil, "Starting OpenDrive session with ID: %s", f.session.SessionID) f.features = (&fs.Features{ CaseInsensitive: true, CanHaveEmptyDirectories: true, }).Fill(ctx, f) // Find the current root err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) tempF := *f tempF.dirCache = dircache.New(newRoot, "0", &tempF) tempF.root = newRoot // Make new Fs which is the parent err = tempF.dirCache.FindRoot(ctx, false) if 
err != nil { // No root so return old f return f, nil } _, err := tempF.newObjectWithInfo(ctx, remote, nil, "") if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f return f, nil } return nil, err } // XXX: update the old f here instead of returning tempF, since // `features` were already filled with functions having *f as a receiver. // See https://github.com/rclone/rclone/issues/2182 f.dirCache = tempF.dirCache f.root = tempF.root // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // rootSlash returns root with a slash on if it is empty, otherwise empty string func (f *Fs) rootSlash() string { if f.root == "" { return f.root } return f.root + "/" } // errorHandler parses a non 2xx error response into an error func errorHandler(resp *http.Response) error { errResponse := new(Error) err := rest.DecodeJSON(resp, &errResponse) if err != nil { fs.Debugf(nil, "Couldn't decode error response: %v", err) } if errResponse.Info.Code == 0 { errResponse.Info.Code = resp.StatusCode } if errResponse.Info.Message == "" { errResponse.Info.Message = "Unknown " + resp.Status } return errResponse } // Mkdir creates the folder if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { // fs.Debugf(nil, "Mkdir(\"%s\")", dir) _, err := f.dirCache.FindDir(ctx, dir, true) return err } // deleteObject removes an object by ID func (f *Fs) deleteObject(ctx context.Context, id string) error { return f.pacer.Call(func() (bool, error) { removeDirData := removeFolder{SessionID: f.session.SessionID, FolderID: id} opts := rest.Opts{ Method: "POST", NoResponse: true, Path: "/folder/remove.json", } resp, err := f.srv.CallJSON(ctx, &opts, &removeDirData, nil) return f.shouldRetry(ctx, resp, err) }) } // purgeCheck remotes the root directory, if check is set then it // refuses to do so if it has anything in func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := 
path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } dc := f.dirCache rootID, err := dc.FindDir(ctx, dir, false) if err != nil { return err } item, err := f.readMetaDataForFolderID(ctx, rootID) if err != nil { return err } if check && len(item.Files) != 0 { return errors.New("folder not empty") } err = f.deleteObject(ctx, rootID) if err != nil { return err } f.dirCache.FlushDir(dir) return nil } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { // fs.Debugf(nil, "Rmdir(\"%s\")", path.Join(f.root, dir)) return f.purgeCheck(ctx, dir, true) } // Precision of the remote func (f *Fs) Precision() time.Duration { return time.Second } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { // fs.Debugf(nil, "Copy(%v)", remote) srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } err := srcObj.readMetaData(ctx) if err != nil { return nil, err } srcPath := srcObj.fs.rootSlash() + srcObj.remote dstPath := f.rootSlash() + remote if strings.EqualFold(srcPath, dstPath) { return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath) } // Create temporary object dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } // fs.Debugf(nil, "...%#v\n...%#v", remote, directoryID) // Copy the object var resp *http.Response response := moveCopyFileResponse{} err = f.pacer.Call(func() (bool, error) { copyFileData := moveCopyFile{ SessionID: f.session.SessionID, SrcFileID: srcObj.id, 
DstFolderID: directoryID, Move: "false", OverwriteIfExists: "true", NewFileName: leaf, } opts := rest.Opts{ Method: "POST", Path: "/file/move_copy.json", } resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response) return f.shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } size, _ := strconv.ParseInt(response.Size, 10, 64) dstObj.id = response.FileID dstObj.size = size return dstObj, nil } // About gets quota information func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { var uInfo usersInfoResponse var resp *http.Response err = f.pacer.Call(func() (bool, error) { opts := rest.Opts{ Method: "GET", Path: "/users/info.json/" + f.session.SessionID, } resp, err = f.srv.CallJSON(ctx, &opts, nil, &uInfo) return f.shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } usage = &fs.Usage{ Used: fs.NewUsageValue(uInfo.StorageUsed), Total: fs.NewUsageValue(uInfo.MaxStorage * 1024 * 1024), // MaxStorage appears to be in MB Free: fs.NewUsageValue(uInfo.MaxStorage*1024*1024 - uInfo.StorageUsed), } return usage, nil } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { // fs.Debugf(nil, "Move(%v)", remote) srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantCopy } err := srcObj.readMetaData(ctx) if err != nil { return nil, err } // Create temporary object dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } // move_copy will silently truncate new filenames if len(leaf) > 255 { fs.Debugf(src, "Can't move file: name (%q) exceeds 255 char", leaf) return nil, fs.ErrorFileNameTooLong } moveCopyFileData := moveCopyFile{ SessionID: f.session.SessionID, SrcFileID: srcObj.id, DstFolderID: directoryID, Move: "true", OverwriteIfExists: "true", NewFileName: leaf, } opts := rest.Opts{ Method: "POST", Path: "/file/move_copy.json", } var request any = moveCopyFileData // use /file/rename.json if moving within the same directory _, srcDirID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false) if err != nil { return nil, err } if srcDirID == directoryID { fs.Debugf(src, "same parent dir (%v) - using file/rename instead of move_copy for %s", directoryID, remote) renameFileData := renameFile{ SessionID: f.session.SessionID, FileID: srcObj.id, NewFileName: leaf, } opts.Path = "/file/rename.json" request = renameFileData } // Move the object var resp *http.Response response := moveCopyFileResponse{} err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &request, &response) return f.shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } size, _ := strconv.ParseInt(response.Size, 10, 64) dstObj.id = response.FileID dstObj.size = size return dstObj, nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } srcID, srcDirectoryID, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) if err != nil { return err } // move_copy will silently truncate new filenames if len(dstLeaf) > 255 { fs.Debugf(src, "Can't move folder: name (%q) exceeds 255 char", dstLeaf) return fs.ErrorFileNameTooLong } moveFolderData := moveCopyFolder{ SessionID: f.session.SessionID, FolderID: srcID, DstFolderID: dstDirectoryID, Move: "true", NewFolderName: dstLeaf, } opts := rest.Opts{ Method: "POST", Path: "/folder/move_copy.json", } var request any = moveFolderData // use /folder/rename.json if moving within the same parent directory if srcDirectoryID == dstDirectoryID { fs.Debugf(dstRemote, "same parent dir (%v) - using folder/rename instead of move_copy", srcDirectoryID) renameFolderData := renameFolder{ SessionID: f.session.SessionID, FolderID: srcID, FolderName: dstLeaf, } opts.Path = "/folder/rename.json" request = renameFolderData } // Do the move var resp *http.Response response := moveCopyFolderResponse{} err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &request, &response) return f.shouldRetry(ctx, resp, err) }) if err != nil { fs.Debugf(src, "DirMove error %v", err) return err } srcFs.dirCache.FlushDir(srcRemote) return nil } // Purge deletes all the files in the directory // // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge(ctx context.Context, dir string) error { return 
f.purgeCheck(ctx, dir, false) } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File, parent string) (fs.Object, error) { // fs.Debugf(nil, "newObjectWithInfo(%s, %v)", remote, file) var o *Object if nil != file { o = &Object{ fs: f, remote: remote, id: file.FileID, parent: parent, modTime: time.Unix(file.DateModified, 0), size: file.Size, md5: file.FileHash, } } else { o = &Object{ fs: f, remote: remote, } err := o.readMetaData(ctx) if err != nil { return nil, err } } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { // fs.Debugf(nil, "NewObject(\"%s\")", remote) return f.newObjectWithInfo(ctx, remote, nil, "") } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Returns the object, leaf, directoryID and error. 
// // Used to create new objects func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { // Create the directory for the object if it doesn't exist leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true) if err != nil { return nil, leaf, directoryID, err } // fs.Debugf(nil, "\n...leaf %#v\n...id %#v", leaf, directoryID) // Temporary Object under construction o = &Object{ fs: f, remote: remote, } return o, f.opt.Enc.FromStandardName(leaf), directoryID, nil } // readMetaDataForPath reads the metadata from the path func (f *Fs) readMetaDataForFolderID(ctx context.Context, id string) (info *FolderList, err error) { var resp *http.Response opts := rest.Opts{ Method: "GET", Path: "/folder/list.json/" + f.session.SessionID + "/" + id, } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &info) return f.shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } return info, err } // Put the object into the bucket // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() modTime := src.ModTime(ctx) // fs.Debugf(nil, "Put(%s)", remote) o, leaf, directoryID, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } if o.id == "" { // Attempt to read ID, ignore error // FIXME is this correct? 
_ = o.readMetaData(ctx) } if o.id == "" { // We need to create an ID for this file var resp *http.Response response := createFileResponse{} err := o.fs.pacer.Call(func() (bool, error) { createFileData := createFile{ SessionID: o.fs.session.SessionID, FolderID: directoryID, Name: leaf, } opts := rest.Opts{ Method: "POST", Options: options, Path: "/upload/create_file.json", } resp, err = o.fs.srv.CallJSON(ctx, &opts, &createFileData, &response) return o.fs.shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("failed to create file: %w", err) } o.id = response.FileID } return o, o.Update(ctx, in, src, options...) } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 401, // Unauthorized (seen in "Token has expired") 408, // Request Timeout 423, // Locked - get this on folders sometimes 429, // Rate exceeded. 500, // Get occasional 500 Internal Server Error 502, // Bad Gateway when doing big listings 503, // Service Unavailable 504, // Gateway Time-out } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. 
It returns the err as a convenience func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // getAccessLevel is a helper function to determine access level integer func getAccessLevel(access string) int64 { var accessLevel int64 switch access { case "private": accessLevel = 0 case "public": accessLevel = 1 case "hidden": accessLevel = 2 default: accessLevel = 0 fs.Errorf(nil, "Invalid access: %s, defaulting to private", access) } return accessLevel } // DirCacher methods // CreateDir makes a directory with pathID as parent and name leaf func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, replaceReservedChars(leaf)) var resp *http.Response response := createFolderResponse{} err = f.pacer.Call(func() (bool, error) { createDirData := createFolder{ SessionID: f.session.SessionID, FolderName: f.opt.Enc.FromStandardName(leaf), FolderSubParent: pathID, FolderIsPublic: getAccessLevel(f.opt.Access), FolderPublicUpl: 0, FolderPublicDisplay: 0, FolderPublicDnl: 0, } opts := rest.Opts{ Method: "POST", Path: "/folder.json", } resp, err = f.srv.CallJSON(ctx, &opts, &createDirData, &response) return f.shouldRetry(ctx, resp, err) }) if err != nil { return "", err } return response.FolderID, nil } // FindLeaf finds a directory of name leaf in the folder with ID pathID func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { // fs.Debugf(nil, "FindLeaf(\"%s\", \"%s\")", pathID, leaf) if pathID == "0" && leaf == "" { // fs.Debugf(nil, "Found OpenDrive root") // that's the root directory return pathID, true, nil } // get the folderIDs var resp *http.Response folderList := FolderList{} err = f.pacer.Call(func() (bool, error) { opts := rest.Opts{ Method: "GET", 
Path: "/folder/list.json/" + f.session.SessionID + "/" + pathID, } resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList) return f.shouldRetry(ctx, resp, err) }) if err != nil { return "", false, fmt.Errorf("failed to get folder list: %w", err) } leaf = f.opt.Enc.FromStandardName(leaf) for _, folder := range folderList.Folders { // fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID) if strings.EqualFold(leaf, folder.Name) { // found return folder.FolderID, true, nil } } return "", false, nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { // fs.Debugf(nil, "List(%v)", dir) directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } var resp *http.Response opts := rest.Opts{ Method: "GET", Path: "/folder/list.json/" + f.session.SessionID + "/" + directoryID, } folderList := FolderList{} err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList) return f.shouldRetry(ctx, resp, err) }) if err != nil { if apiError, ok := err.(*Error); ok { // Work around a bug maybe in opendrive or maybe in rclone. // // We should know whether the folder exists or not by the call to // FindDir above so exactly why it is not found here is a mystery. 
// // This manifests as a failure in fs/sync TestSyncOverlapWithFilter if apiError.Info.Message == "Folder is already deleted" { return fs.DirEntries{}, nil } } return nil, fmt.Errorf("failed to get folder list: %w", err) } for _, folder := range folderList.Folders { folder.Name = f.opt.Enc.ToStandardName(folder.Name) // fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID) remote := path.Join(dir, folder.Name) // cache the directory ID for later lookups f.dirCache.Put(remote, folder.FolderID) d := fs.NewDir(remote, time.Unix(folder.DateModified, 0)).SetID(folder.FolderID) d.SetItems(int64(folder.ChildFolders)) d.SetParentID(directoryID) entries = append(entries, d) } for _, file := range folderList.Files { file.Name = f.opt.Enc.ToStandardName(file.Name) // fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID) remote := path.Join(dir, file.Name) o, err := f.newObjectWithInfo(ctx, remote, &file, directoryID) if err != nil { return nil, err } entries = append(entries, o) } return entries, nil } // ------------------------------------------------------------ // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Hash returns the Md5sum of an object returning a lowercase hex string func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } return o.md5, nil } // Size returns the size of an object in bytes func (o *Object) Size() int64 { return o.size // Object is likely PENDING } // ModTime returns the modification time of the object // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) ModTime(ctx context.Context) time.Time { return o.modTime } // SetModTime sets the modification time of the 
local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { // fs.Debugf(nil, "SetModTime(%v)", modTime.String()) opts := rest.Opts{ Method: "PUT", NoResponse: true, Path: "/file/filesettings.json", } update := modTimeFile{ SessionID: o.fs.session.SessionID, FileID: o.id, FileModificationTime: strconv.FormatInt(modTime.Unix(), 10), } err := o.fs.pacer.Call(func() (bool, error) { resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, nil) return o.fs.shouldRetry(ctx, resp, err) }) o.modTime = modTime return err } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { // fs.Debugf(nil, "Open(\"%v\")", o.remote) fs.FixRangeOption(options, o.size) opts := rest.Opts{ Method: "GET", Path: "/download/file.json/" + o.id + "?session_id=" + o.fs.session.SessionID, Options: options, } var resp *http.Response err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) return o.fs.shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("failed to open file): %w", err) } return resp.Body, nil } // Remove an object func (o *Object) Remove(ctx context.Context) error { // fs.Debugf(nil, "Remove(\"%s\")", o.id) return o.fs.pacer.Call(func() (bool, error) { opts := rest.Opts{ Method: "DELETE", NoResponse: true, Path: "/file.json/" + o.fs.session.SessionID + "/" + o.id, } resp, err := o.fs.srv.Call(ctx, &opts) return o.fs.shouldRetry(ctx, resp, err) }) } // Storable returns a boolean showing whether this object storable func (o *Object) Storable() bool { return true } // Update the object with the contents of the io.Reader, modTime and size // // The new object may have been created if an error is returned func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { size := src.Size() modTime := src.ModTime(ctx) // fs.Debugf(nil, "Update(\"%s\", \"%s\")", o.id, o.remote) // Open file for upload var 
resp *http.Response openResponse := openUploadResponse{} err := o.fs.pacer.Call(func() (bool, error) { openUploadData := openUpload{SessionID: o.fs.session.SessionID, FileID: o.id, Size: size} // fs.Debugf(nil, "PreOpen: %#v", openUploadData) opts := rest.Opts{ Method: "POST", Options: options, Path: "/upload/open_file_upload.json", } resp, err := o.fs.srv.CallJSON(ctx, &opts, &openUploadData, &openResponse) return o.fs.shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("failed to create file: %w", err) } // resp.Body.Close() // fs.Debugf(nil, "PostOpen: %#v", openResponse) buf := make([]byte, o.fs.opt.ChunkSize) chunkOffset := int64(0) remainingBytes := size chunkCounter := 0 for remainingBytes > 0 { currentChunkSize := min(int64(o.fs.opt.ChunkSize), remainingBytes) remainingBytes -= currentChunkSize fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes) chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, currentChunkSize) var reply uploadFileChunkReply err = o.fs.pacer.Call(func() (bool, error) { // seek to the start in case this is a retry if _, err = chunk.Seek(0, io.SeekStart); err != nil { return false, err } opts := rest.Opts{ Method: "POST", Path: "/upload/upload_file_chunk.json", Body: chunk, MultipartParams: url.Values{ "session_id": []string{o.fs.session.SessionID}, "file_id": []string{o.id}, "temp_location": []string{openResponse.TempLocation}, "chunk_offset": []string{strconv.FormatInt(chunkOffset, 10)}, "chunk_size": []string{strconv.FormatInt(currentChunkSize, 10)}, }, MultipartContentName: "file_data", // ..name of the parameter which is the attached file MultipartFileName: o.remote, // ..name of the file for the attached file } resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &reply) return o.fs.shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("failed to create file: %w", err) } if reply.TotalWritten != currentChunkSize { return fmt.Errorf("failed to create file: 
incomplete write of %d/%d bytes", reply.TotalWritten, currentChunkSize) } chunkCounter++ chunkOffset += currentChunkSize } // Close file for upload closeResponse := closeUploadResponse{} err = o.fs.pacer.Call(func() (bool, error) { closeUploadData := closeUpload{SessionID: o.fs.session.SessionID, FileID: o.id, Size: size, TempLocation: openResponse.TempLocation} // fs.Debugf(nil, "PreClose: %#v", closeUploadData) opts := rest.Opts{ Method: "POST", Path: "/upload/close_file_upload.json", } resp, err = o.fs.srv.CallJSON(ctx, &opts, &closeUploadData, &closeResponse) return o.fs.shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("failed to create file: %w", err) } // fs.Debugf(nil, "PostClose: %#v", closeResponse) o.id = closeResponse.FileID o.size = closeResponse.Size // Set the mod time now err = o.SetModTime(ctx, modTime) if err != nil { return err } // Set permissions err = o.fs.pacer.Call(func() (bool, error) { update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: getAccessLevel(o.fs.opt.Access)}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/s3/providers.go
backend/s3/providers.go
package s3 import ( "embed" stdfs "io/fs" "os" "sort" "strings" "github.com/rclone/rclone/fs" orderedmap "github.com/wk8/go-ordered-map/v2" "gopkg.in/yaml.v3" ) // YamlMap is converted to YAML in the correct order type YamlMap = *orderedmap.OrderedMap[string, string] // NewYamlMap creates a new ordered map var NewYamlMap = orderedmap.New[string, string] // Quirks defines all the S3 provider quirks type Quirks struct { ListVersion *int `yaml:"list_version,omitempty"` // 1 or 2 ForcePathStyle *bool `yaml:"force_path_style,omitempty"` // true = path-style ListURLEncode *bool `yaml:"list_url_encode,omitempty"` UseMultipartEtag *bool `yaml:"use_multipart_etag,omitempty"` UseAlreadyExists *bool `yaml:"use_already_exists,omitempty"` UseAcceptEncodingGzip *bool `yaml:"use_accept_encoding_gzip,omitempty"` UseDataIntegrityProtections *bool `yaml:"use_data_integrity_protections,omitempty"` MightGzip *bool `yaml:"might_gzip,omitempty"` UseMultipartUploads *bool `yaml:"use_multipart_uploads,omitempty"` UseUnsignedPayload *bool `yaml:"use_unsigned_payload,omitempty"` UseXID *bool `yaml:"use_x_id,omitempty"` SignAcceptEncoding *bool `yaml:"sign_accept_encoding,omitempty"` CopyCutoff *int64 `yaml:"copy_cutoff,omitempty"` MaxUploadParts *int `yaml:"max_upload_parts,omitempty"` MinChunkSize *int64 `yaml:"min_chunk_size,omitempty"` } // Provider defines the configurable data in each provider.yaml type Provider struct { Name string `yaml:"name,omitempty"` Description string `yaml:"description,omitempty"` Region YamlMap `yaml:"region,omitempty"` Endpoint YamlMap `yaml:"endpoint,omitempty"` LocationConstraint YamlMap `yaml:"location_constraint,omitempty"` ACL YamlMap `yaml:"acl,omitempty"` StorageClass YamlMap `yaml:"storage_class,omitempty"` ServerSideEncryption YamlMap `yaml:"server_side_encryption,omitempty"` // other IBMApiKey bool `yaml:"ibm_api_key,omitempty"` IBMResourceInstanceID bool `yaml:"ibm_resource_instance_id,omitempty"` // advanced BucketACL bool 
`yaml:"bucket_acl,omitempty"` DirectoryBucket bool `yaml:"directory_bucket,omitempty"` LeavePartsOnError bool `yaml:"leave_parts_on_error,omitempty"` RequesterPays bool `yaml:"requester_pays,omitempty"` SSECustomerAlgorithm bool `yaml:"sse_customer_algorithm,omitempty"` SSECustomerKey bool `yaml:"sse_customer_key,omitempty"` SSECustomerKeyBase64 bool `yaml:"sse_customer_key_base64,omitempty"` SSECustomerKeyMd5 bool `yaml:"sse_customer_key_md5,omitempty"` SSEKmsKeyID bool `yaml:"sse_kms_key_id,omitempty"` STSEndpoint bool `yaml:"sts_endpoint,omitempty"` UseAccelerateEndpoint bool `yaml:"use_accelerate_endpoint,omitempty"` Quirks Quirks `yaml:"quirks,omitempty"` } //go:embed provider/*.yaml var providerFS embed.FS // addProvidersToInfo adds provider information to the fs.RegInfo func addProvidersToInfo(info *fs.RegInfo) *fs.RegInfo { providerMap := loadProviders() providerList := constructProviders(info.Options, providerMap) info.Description += strings.TrimSuffix(providerList, ", ") return info } // loadProvider loads a single provider // // It returns nil if it could not be found except if "Other" which is a fatal error. 
func loadProvider(name string) *Provider { data, err := stdfs.ReadFile(providerFS, "provider/"+name+".yaml") if err != nil { if os.IsNotExist(err) && name != "Other" { return nil } fs.Fatalf(nil, "internal error: failed to load provider %q: %v", name, err) } var p Provider err = yaml.Unmarshal(data, &p) if err != nil { fs.Fatalf(nil, "internal error: failed to unmarshal provider %q: %v", name, err) } return &p } // loadProviders loads provider definitions from embedded YAML files func loadProviders() map[string]*Provider { providers, err := stdfs.ReadDir(providerFS, "provider") if err != nil { fs.Fatalf(nil, "internal error: failed to read embedded providers: %v", err) } providerMap := make(map[string]*Provider, len(providers)) for _, provider := range providers { name, _ := strings.CutSuffix(provider.Name(), ".yaml") p := loadProvider(name) providerMap[p.Name] = p } return providerMap } // constructProviders populates fs.Options with provider-specific examples and information func constructProviders(options fs.Options, providerMap map[string]*Provider) string { // Defaults for map options set to {} defaults := providerMap["Other"] // sort providers: AWS first, Other last, rest alphabetically providers := make([]*Provider, 0, len(providerMap)) for _, p := range providerMap { providers = append(providers, p) } sort.Slice(providers, func(i, j int) bool { if providers[i].Name == "AWS" { return true } if providers[j].Name == "AWS" { return false } if providers[i].Name == "Other" { return false } if providers[j].Name == "Other" { return true } return strings.ToLower(providers[i].Name) < strings.ToLower(providers[j].Name) }) addProvider := func(sp *string, name string) { if *sp != "" { *sp += "," } *sp += name } addBool := func(opt *fs.Option, p *Provider, flag bool) { if flag { addProvider(&opt.Provider, p.Name) } } addExample := func(opt *fs.Option, p *Provider, examples, defaultExamples YamlMap) { if examples == nil { return } if examples.Len() == 0 { examples = 
defaultExamples } addProvider(&opt.Provider, p.Name) OUTER: for pair := examples.Oldest(); pair != nil; pair = pair.Next() { // Find an existing example to add to if possible for i, example := range opt.Examples { if example.Value == pair.Key && example.Help == pair.Value { addProvider(&opt.Examples[i].Provider, p.Name) continue OUTER } } // Otherwise add a new one opt.Examples = append(opt.Examples, fs.OptionExample{ Value: pair.Key, Help: pair.Value, Provider: p.Name, }) } } var providerList strings.Builder for _, p := range providers { for i := range options { opt := &options[i] switch opt.Name { case "provider": opt.Examples = append(opt.Examples, fs.OptionExample{ Value: p.Name, Help: p.Description, }) providerList.WriteString(p.Name + ", ") case "region": addExample(opt, p, p.Region, defaults.Region) case "endpoint": addExample(opt, p, p.Endpoint, defaults.Endpoint) case "location_constraint": addExample(opt, p, p.LocationConstraint, defaults.LocationConstraint) case "acl": addExample(opt, p, p.ACL, defaults.ACL) case "storage_class": addExample(opt, p, p.StorageClass, defaults.StorageClass) case "server_side_encryption": addExample(opt, p, p.ServerSideEncryption, defaults.ServerSideEncryption) case "bucket_acl": addBool(opt, p, p.BucketACL) case "requester_pays": addBool(opt, p, p.RequesterPays) case "sse_customer_algorithm": addBool(opt, p, p.SSECustomerAlgorithm) case "sse_kms_key_id": addBool(opt, p, p.SSEKmsKeyID) case "sse_customer_key": addBool(opt, p, p.SSECustomerKey) case "sse_customer_key_base64": addBool(opt, p, p.SSECustomerKeyBase64) case "sse_customer_key_md5": addBool(opt, p, p.SSECustomerKeyMd5) case "directory_bucket": addBool(opt, p, p.DirectoryBucket) case "ibm_api_key": addBool(opt, p, p.IBMApiKey) case "ibm_resource_instance_id": addBool(opt, p, p.IBMResourceInstanceID) case "leave_parts_on_error": addBool(opt, p, p.LeavePartsOnError) case "sts_endpoint": addBool(opt, p, p.STSEndpoint) case "use_accelerate_endpoint": addBool(opt, p, 
p.UseAccelerateEndpoint) } } } return strings.TrimSuffix(providerList.String(), ", ") }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/s3/s3_test.go
backend/s3/s3_test.go
// Test S3 filesystem interface package s3 import ( "context" "net/http" "testing" "github.com/aws/aws-sdk-go-v2/aws" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) { ctx, opt := context.Background(), new(Options) opt.Provider = "AWS" client := getClient(ctx, opt) return ctx, opt, client } // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { opt := &fstests.Opt{ RemoteName: "TestS3:", NilObject: (*Object)(nil), TiersToTest: []string{"STANDARD"}, ChunkedUpload: fstests.ChunkedUploadConfig{ MinChunkSize: minChunkSize, }, } // Test wider range of tiers on AWS if *fstest.RemoteName == "" || *fstest.RemoteName == "TestS3:" { opt.TiersToTest = []string{"STANDARD", "STANDARD_IA"} } fstests.Run(t, opt) } func TestIntegration2(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("skipping as -remote is set") } name := "TestS3" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":", NilObject: (*Object)(nil), TiersToTest: []string{"STANDARD", "STANDARD_IA"}, ChunkedUpload: fstests.ChunkedUploadConfig{ MinChunkSize: minChunkSize, }, ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "directory_markers", Value: "true"}, }, }) } func TestAWSDualStackOption(t *testing.T) { { // test enabled ctx, opt, client := SetupS3Test(t) opt.UseDualStack = true s3Conn, _, err := s3Connection(ctx, opt, client) require.NoError(t, err) assert.Equal(t, aws.DualStackEndpointStateEnabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint) } { // test default case ctx, opt, client := SetupS3Test(t) s3Conn, _, err := s3Connection(ctx, opt, client) require.NoError(t, err) assert.Equal(t, aws.DualStackEndpointStateDisabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint) } } func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) 
(fs.SizeSuffix, error) { return f.setUploadChunkSize(cs) } func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setUploadCutoff(cs) } func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) { return f.setCopyCutoff(cs) } var ( _ fstests.SetUploadChunkSizer = (*Fs)(nil) _ fstests.SetUploadCutoffer = (*Fs)(nil) _ fstests.SetCopyCutoffer = (*Fs)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/s3/s3.go
backend/s3/s3.go
// Package s3 provides an interface to Amazon S3 object storage package s3 //go:generate go run gen_setfrom.go -o setfrom.go import ( "context" "crypto/md5" "crypto/tls" "encoding/base64" "encoding/hex" "encoding/json" "encoding/xml" "errors" "fmt" "io" "math" "net/http" "net/url" "path" "regexp" "slices" "sort" "strconv" "strings" "sync" "time" "github.com/aws/aws-sdk-go-v2/aws" v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4" awsconfig "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/credentials/stscreds" "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/aws/smithy-go" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/ncw/swift/v2" "golang.org/x/net/http/httpguts" "golang.org/x/sync/errgroup" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/chunksize" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/multipart" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pool" "github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/version" ) // Register with Fs func init() { fs.Register(addProvidersToInfo(&fs.RegInfo{ Name: "s3", Description: "Amazon S3 Compliant Storage Providers including ", NewFs: NewFs, CommandHelp: commandHelp, Config: func(ctx 
context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { switch config.State { case "": return nil, setEndpointValueForIDriveE2(m) } return nil, fmt.Errorf("unknown state %q", config.State) }, MetadataInfo: &fs.MetadataInfo{ System: systemMetadataInfo, Help: `User metadata is stored as x-amz-meta- keys. S3 metadata keys are case insensitive and are always returned in lower case.`, }, Options: []fs.Option{{ Name: fs.ConfigProvider, Help: "Choose your S3 provider.", }, { Name: "env_auth", Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\n\nOnly applies if access_key_id and secret_access_key is blank.", Default: false, Examples: []fs.OptionExample{{ Value: "false", Help: "Enter AWS credentials in the next step.", }, { Value: "true", Help: "Get AWS credentials from the environment (env vars or IAM).", }}, }, { Name: "access_key_id", Help: "AWS Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.", Sensitive: true, }, { Name: "secret_access_key", Help: "AWS Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.", Sensitive: true, }, { Name: "region", Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.", }, { Name: "endpoint", Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.", }, { Name: "location_constraint", Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.", }, { Name: "acl", Help: `Canned ACL used when creating buckets and storing or copying objects. This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl Note that this ACL is applied when server-side copying objects as S3 doesn't copy the ACL from the source but rather writes a fresh one. 
If the acl is an empty string then no X-Amz-Acl: header is added and the default (private) will be used. `, }, { Name: "bucket_acl", Help: `Canned ACL used when creating buckets. For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl Note that this ACL is applied when only when creating buckets. If it isn't set then "acl" is used instead. If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: header is added and the default (private) will be used. `, Advanced: true, Examples: []fs.OptionExample{{ Value: "private", Help: "Owner gets FULL_CONTROL.\nNo one else has access rights (default).", }, { Value: "public-read", Help: "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ access.", }, { Value: "public-read-write", Help: "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.", }, { Value: "authenticated-read", Help: "Owner gets FULL_CONTROL.\nThe AuthenticatedUsers group gets READ access.", }}, }, { Name: "requester_pays", Help: "Enables requester pays option when interacting with S3 bucket.", Default: false, Advanced: true, }, { Name: "server_side_encryption", Help: "The server-side encryption algorithm used when storing this object in S3.", }, { Name: "sse_customer_algorithm", Help: "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", Advanced: true, Examples: []fs.OptionExample{{ Value: "", Help: "None", }, { Value: "AES256", Help: "AES256", }}, }, { Name: "sse_kms_key_id", Help: "If using KMS ID you must provide the ARN of Key.", Examples: []fs.OptionExample{{ Value: "", Help: "None", }, { Value: "arn:aws:kms:us-east-1:*", Help: "arn:aws:kms:*", }}, Sensitive: true, }, { Name: "sse_customer_key", Help: `To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. 
Alternatively you can provide --sse-customer-key-base64.`, Advanced: true, Examples: []fs.OptionExample{{ Value: "", Help: "None", }}, Sensitive: true, }, { Name: "sse_customer_key_base64", Help: `If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. Alternatively you can provide --sse-customer-key.`, Advanced: true, Examples: []fs.OptionExample{{ Value: "", Help: "None", }}, Sensitive: true, }, { Name: "sse_customer_key_md5", Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional). If you leave it blank, this is calculated automatically from the sse_customer_key provided. `, Advanced: true, Examples: []fs.OptionExample{{ Value: "", Help: "None", }}, Sensitive: true, }, { Name: "storage_class", Help: "The storage class to use when storing new objects in S3.", }, { Name: "upload_cutoff", Help: `Cutoff for switching to chunked upload. Any files larger than this will be uploaded in chunks of chunk_size. The minimum is 0 and the maximum is 5 GiB.`, Default: defaultUploadCutoff, Advanced: true, }, { Name: "chunk_size", Help: `Chunk size to use for uploading. When uploading files larger than upload_cutoff or files with unknown size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google photos or google docs) they will be uploaded as multipart uploads using this chunk size. Note that "--s3-upload-concurrency" chunks of this size are buffered in memory per transfer. If you are transferring large files over high-speed links and you have enough memory, then increasing this will speed up the transfers. Rclone will automatically increase the chunk size when uploading a large file of known size to stay below the 10,000 chunks limit. Files of unknown size are uploaded with the configured chunk_size. Since the default chunk size is 5 MiB and there can be at most 10,000 chunks, this means that by default the maximum size of a file you can stream upload is 48 GiB. 
If you wish to stream upload larger files then you will need to increase chunk_size. Increasing the chunk size decreases the accuracy of the progress statistics displayed with "-P" flag. Rclone treats chunk as sent when it's buffered by the AWS SDK, when in fact it may still be uploading. A bigger chunk size means a bigger AWS SDK buffer and progress reporting more deviating from the truth. `, Default: minChunkSize, Advanced: true, }, { Name: "max_upload_parts", Help: `Maximum number of parts in a multipart upload. This option defines the maximum number of multipart chunks to use when doing a multipart upload. This can be useful if a service does not support the AWS S3 specification of 10,000 chunks. Rclone will automatically increase the chunk size when uploading a large file of a known size to stay below this number of chunks limit. `, Default: maxUploadParts, Advanced: true, }, { Name: "copy_cutoff", Help: `Cutoff for switching to multipart copy. Any files larger than this that need to be server-side copied will be copied in chunks of this size. The minimum is 0 and the maximum is 5 GiB.`, Default: fs.SizeSuffix(maxSizeForCopy), Advanced: true, }, { Name: "disable_checksum", Help: `Don't store MD5 checksum with object metadata. Normally rclone will calculate the MD5 checksum of the input before uploading it so it can add it to metadata on the object. This is great for data integrity checking but can cause long delays for large files to start uploading.`, Default: false, Advanced: true, }, { Name: "shared_credentials_file", Help: `Path to the shared credentials file. If env_auth = true then rclone can use a shared credentials file. If this variable is empty rclone will look for the "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty it will default to the current user's home directory. 
Linux/OSX: "$HOME/.aws/credentials" Windows: "%USERPROFILE%\.aws\credentials" `, Advanced: true, }, { Name: "profile", Help: `Profile to use in the shared credentials file. If env_auth = true then rclone can use a shared credentials file. This variable controls which profile is used in that file. If empty it will default to the environment variable "AWS_PROFILE" or "default" if that environment variable is also not set. `, Advanced: true, }, { Name: "session_token", Help: "An AWS session token.", Advanced: true, Sensitive: true, }, { Name: "role_arn", Help: `ARN of the IAM role to assume. Leave blank if not using assume role.`, Advanced: true, }, { Name: "role_session_name", Help: `Session name for assumed role. If empty, a session name will be generated automatically.`, Advanced: true, }, { Name: "role_session_duration", Help: `Session duration for assumed role. If empty, the default session duration will be used.`, Advanced: true, }, { Name: "role_external_id", Help: `External ID for assumed role. Leave blank if not using an external ID.`, Advanced: true, }, { Name: "upload_concurrency", Help: `Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers.`, Default: 4, Advanced: true, }, { Name: "force_path_style", Help: `If true use path style access if false use virtual hosted style. If this is true (the default) then rclone will use path style access, if false then rclone will use virtual path style. See [the AWS S3 docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) for more info. Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. 
Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, you'll need to set this to true. `, Default: true, Advanced: true, }, { Name: "v2_auth", Help: `If true use v2 authentication. If this is false (the default) then rclone will use v4 authentication. If it is set then rclone will use v2 authentication. Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.`, Default: false, Advanced: true, }, { Name: "use_dual_stack", Help: `If true use AWS S3 dual-stack endpoint (IPv6 support). See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html)`, Default: false, Advanced: true, }, { Name: "use_accelerate_endpoint", Help: `If true use the AWS S3 accelerated endpoint. See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`, Default: false, Advanced: true, }, { Name: "use_arn_region", Help: `If true, enables arn region support for the service.`, Default: false, Advanced: true, }, { Name: "leave_parts_on_error", Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. It should be set to true for resuming uploads across different sessions. WARNING: Storing parts of an incomplete multipart upload counts towards space usage on S3 and will add additional costs if not cleaned up. `, Default: false, Advanced: true, }, { Name: "list_chunk", Help: `Size of listing chunk (response list for each ListObject S3 request). This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. Most services truncate the response list to 1000 objects even if requested more than that. In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). In Ceph, this can be increased with the "rgw list buckets max chunk" option. 
`, Default: 1000, Advanced: true, }, { Name: "list_version", Help: `Version of ListObjects to use: 1,2 or 0 for auto. When S3 originally launched it only provided the ListObjects call to enumerate objects in a bucket. However in May 2016 the ListObjectsV2 call was introduced. This is much higher performance and should be used if at all possible. If set to the default, 0, rclone will guess according to the provider set which list objects method to call. If it guesses wrong, then it may be set manually here. `, Default: 0, Advanced: true, }, { Name: "list_url_encode", Help: `Whether to url encode listings: true/false/unset Some providers support URL encoding listings and where this is available this is more reliable when using control characters in file names. If this is set to unset (the default) then rclone will choose according to the provider setting what to apply, but you can override rclone's choice here. `, Default: fs.Tristate{}, Advanced: true, }, { Name: "no_check_bucket", Help: `If set, don't attempt to check the bucket exists or create it. This can be useful when trying to minimise the number of transactions rclone does if you know the bucket exists already. It can also be needed if the user you are using does not have bucket creation permissions. Before v1.52.0 this would have passed silently due to a bug. `, Default: false, Advanced: true, }, { Name: "no_head", Help: `If set, don't HEAD uploaded objects to check integrity. This can be useful when trying to minimise the number of transactions rclone does. Setting it means that if rclone receives a 200 OK message after uploading an object with PUT then it will assume that it got uploaded properly. In particular it will assume: - the metadata, including modtime, storage class and content type was as uploaded - the size was as uploaded It reads the following items from the response for a single part PUT: - the MD5SUM - The uploaded date For multipart uploads these items aren't read. 
If an source object of unknown length is uploaded then rclone **will** do a HEAD request. Setting this flag increases the chance for undetected upload failures, in particular an incorrect size, so it isn't recommended for normal operation. In practice the chance of an undetected upload failure is very small even with this flag. `, Default: false, Advanced: true, }, { Name: "no_head_object", Help: `If set, do not do HEAD before GET when getting objects.`, Default: false, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // Any UTF-8 character is valid in a key, however it can't handle // invalid UTF-8 and / have a special meaning. // // The SDK can't seem to handle uploading files called '.' // // FIXME would be nice to add // - initial / encoding // - doubled / encoding // - trailing / encoding // so that AWS keys are always valid file names Default: encoder.EncodeInvalidUtf8 | encoder.EncodeSlash | encoder.EncodeDot, }, { Name: "memory_pool_flush_time", Default: fs.Duration(time.Minute), Advanced: true, Hide: fs.OptionHideBoth, Help: `How often internal memory buffer pools will be flushed. (no longer used)`, }, { Name: "memory_pool_use_mmap", Default: false, Advanced: true, Hide: fs.OptionHideBoth, Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`, }, { Name: "disable_http2", Default: false, Advanced: true, Help: `Disable usage of http2 for S3 backends. There is currently an unsolved issue with the s3 (specifically minio) backend and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be disabled here. When the issue is solved this flag will be removed. See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 `, }, { Name: "download_url", Help: `Custom endpoint for downloads. 
This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network.`, Advanced: true, }, { Name: "directory_markers", Default: false, Advanced: true, Help: `Upload an empty object with a trailing slash when a new directory is created Empty folders are unsupported for bucket based remotes, this option creates an empty object ending with "/", to persist the folder. `, }, { Name: "use_multipart_etag", Help: `Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. `, Default: fs.Tristate{}, Advanced: true, }, { Name: "use_unsigned_payload", Help: `Whether to use an unsigned payload in PutObject Rclone has to avoid the AWS SDK seeking the body when calling PutObject. The AWS provider can add checksums in the trailer to avoid seeking but other providers can't. This should be true, false or left unset to use the default for the provider. `, Default: fs.Tristate{}, Advanced: true, }, { Name: "use_presigned_request", Help: `Whether to use a presigned request or PutObject for single part uploads If this is false rclone will use PutObject from the AWS SDK to upload an object. Versions of rclone < 1.59 use presigned requests to upload a single part object and setting this flag to true will re-enable that functionality. This shouldn't be necessary except in exceptional circumstances or for testing. `, Default: false, Advanced: true, }, { Name: "use_data_integrity_protections", Help: `If true use AWS S3 data integrity protections. See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html)`, Default: fs.Tristate{}, Advanced: true, }, { Name: "versions", Help: "Include old versions in directory listings.", Default: false, Advanced: true, }, { Name: "version_at", Help: `Show file versions as they were at the specified time. 
The parameter should be a date, "2006-01-02", datetime "2006-01-02 15:04:05" or a duration for that long ago, eg "100d" or "1h". Note that when using this no file write operations are permitted, so you can't upload files or delete them. See [the time option docs](/docs/#time-options) for valid formats. `, Default: fs.Time{}, Advanced: true, }, { Name: "version_deleted", Help: `Show deleted file markers when using versions. This shows deleted file markers in the listing when using versions. These will appear as 0 size files. The only operation which can be performed on them is deletion. Deleting a delete marker will reveal the previous version. Deleted files will always show with a timestamp. `, Default: false, Advanced: true, }, { Name: "decompress", Help: `If set this will decompress gzip encoded objects. It is possible to upload objects to S3 with "Content-Encoding: gzip" set. Normally rclone will download these files as compressed objects. If this flag is set then rclone will decompress these files with "Content-Encoding: gzip" as they are received. This means that rclone can't check the size and hash but the file contents will be decompressed. `, Advanced: true, Default: false, }, { Name: "might_gzip", Help: strings.ReplaceAll(`Set this if the backend might gzip objects. Normally providers will not alter objects when they are downloaded. If an object was not uploaded with |Content-Encoding: gzip| then it won't be set on download. However some providers may gzip objects even if they weren't uploaded with |Content-Encoding: gzip| (eg Cloudflare). A symptom of this would be receiving errors like ERROR corrupted on transfer: sizes differ NNN vs MMM If you set this flag and rclone downloads an object with Content-Encoding: gzip set and chunked transfer encoding, then rclone will decompress the object on the fly. If this is set to unset (the default) then rclone will choose according to the provider setting what to apply, but you can override rclone's choice here. 
`, "|", "`"), Default: fs.Tristate{}, Advanced: true, }, { Name: "use_accept_encoding_gzip", Help: strings.ReplaceAll(`Whether to send |Accept-Encoding: gzip| header. By default, rclone will append |Accept-Encoding: gzip| to the request to download compressed objects whenever possible. However some providers such as Google Cloud Storage may alter the HTTP headers, breaking the signature of the request. A symptom of this would be receiving errors like SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. In this case, you might want to try disabling this option. `, "|", "`"), Default: fs.Tristate{}, Advanced: true, }, { Name: "no_system_metadata", Help: `Suppress setting and reading of system metadata`, Advanced: true, Default: false, }, { Name: "sts_endpoint", Help: "Endpoint for STS (deprecated).\n\nLeave blank if using AWS to use the default endpoint for the region.", Advanced: true, Hide: fs.OptionHideBoth, }, { Name: "use_already_exists", Help: strings.ReplaceAll(`Set if rclone should report BucketAlreadyExists errors on bucket creation. At some point during the evolution of the s3 protocol, AWS started returning an |AlreadyOwnedByYou| error when attempting to create a bucket that the user already owned, rather than a |BucketAlreadyExists| error. Unfortunately exactly what has been implemented by s3 clones is a little inconsistent, some return |AlreadyOwnedByYou|, some return |BucketAlreadyExists| and some return no error at all. This is important to rclone because it ensures the bucket exists by creating it on quite a lot of operations (unless |--s3-no-check-bucket| is used). If rclone knows the provider can return |AlreadyOwnedByYou| or returns no error then it can report |BucketAlreadyExists| errors when the user attempts to create a bucket not owned by them. Otherwise rclone ignores the |BucketAlreadyExists| error which can lead to confusion. 
This should be automatically set correctly for all providers rclone knows about - please make a bug report if not. `, "|", "`"), Default: fs.Tristate{}, Advanced: true, }, { Name: "use_multipart_uploads", Help: `Set if rclone should use multipart uploads. You can change this if you want to disable the use of multipart uploads. This shouldn't be necessary in normal operation. This should be automatically set correctly for all providers rclone knows about - please make a bug report if not. `, Default: fs.Tristate{}, Advanced: true, }, { Name: "use_x_id", Help: `Set if rclone should add x-id URL parameters. You can change this if you want to disable the AWS SDK from adding x-id URL parameters. This shouldn't be necessary in normal operation. This should be automatically set correctly for all providers rclone knows about - please make a bug report if not. `, Default: fs.Tristate{}, Advanced: true, }, { Name: "sign_accept_encoding", Help: `Set if rclone should include Accept-Encoding as part of the signature. You can change this if you want to stop rclone including Accept-Encoding as part of the signature. This shouldn't be necessary in normal operation. This should be automatically set correctly for all providers rclone knows about - please make a bug report if not. `, Default: fs.Tristate{}, Advanced: true, }, { Name: "directory_bucket", Help: strings.ReplaceAll(`Set to use AWS Directory Buckets If you are using an AWS Directory Bucket then set this flag. This will ensure no |Content-Md5| headers are sent and ensure |ETag| headers are not interpreted as MD5 sums. |X-Amz-Meta-Md5chksum| will be set on all objects whether single or multipart uploaded. This also sets |no_check_bucket = true|. Note that Directory Buckets do not support: - Versioning - |Content-Encoding: gzip| Rclone limitations with Directory Buckets: - rclone does not support creating Directory Buckets with |rclone mkdir| - ... 
or removing them with |rclone rmdir| yet - Directory Buckets do not appear when doing |rclone lsf| at the top level. - Rclone can't remove auto created directories yet. In theory this should work with |directory_markers = true| but it doesn't. - Directories don't seem to appear in recursive (ListR) listings. `, "|", "`"), Default: false, Advanced: true, }, { Name: "sdk_log_mode", Help: strings.ReplaceAll(`Set to debug the SDK This can be set to a comma separated list of the following functions: - |Signing| - |Retries| - |Request| - |RequestWithBody| - |Response| - |ResponseWithBody| - |DeprecatedUsage| - |RequestEventMessage| - |ResponseEventMessage| Use |Off| to disable and |All| to set all log levels. You will need to use |-vv| to see the debug level logs. `, "|", "`"), Default: sdkLogMode(0), Advanced: true, }, { Name: "ibm_api_key", Help: "IBM API Key to be used to obtain IAM token", }, { Name: "ibm_resource_instance_id", Help: "IBM service instance id", }, }})) } // Constants const ( metaMtime = "mtime" // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime metaMD5Hash = "md5chksum" // the meta key to store md5hash in // The maximum size of object we can COPY - this should be 5 GiB but is < 5 GB for b2 compatibility // See https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76 maxSizeForCopy = 4768 * 1024 * 1024 maxUploadParts = 10000 // maximum allowed number of parts in a multi-part upload minChunkSize = fs.SizeSuffix(1024 * 1024 * 5) defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024) maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024) minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep. 
maxExpireDuration = fs.Duration(7 * 24 * time.Hour) // max expiry is 1 week ) type ( sdkLogMode = fs.Bits[sdkLogModeChoices] sdkLogModeChoices struct{} ) func (sdkLogModeChoices) Choices() []fs.BitsChoicesInfo { return []fs.BitsChoicesInfo{ {Bit: uint64(0), Name: "Off"}, {Bit: uint64(aws.LogSigning), Name: "Signing"}, {Bit: uint64(aws.LogRetries), Name: "Retries"}, {Bit: uint64(aws.LogRequest), Name: "Request"}, {Bit: uint64(aws.LogRequestWithBody), Name: "RequestWithBody"}, {Bit: uint64(aws.LogResponse), Name: "Response"}, {Bit: uint64(aws.LogResponseWithBody), Name: "ResponseWithBody"}, {Bit: uint64(aws.LogDeprecatedUsage), Name: "DeprecatedUsage"}, {Bit: uint64(aws.LogRequestEventMessage), Name: "RequestEventMessage"}, {Bit: uint64(aws.LogResponseEventMessage), Name: "ResponseEventMessage"}, {Bit: math.MaxUint64, Name: "All"}, } } // globals var ( errNotWithVersionAt = errors.New("can't modify or delete files in --s3-version-at mode") ) // system metadata keys which this backend owns var systemMetadataInfo = map[string]fs.MetadataHelp{ "cache-control": { Help: "Cache-Control header", Type: "string", Example: "no-cache", }, "content-disposition": { Help: "Content-Disposition header", Type: "string", Example: "inline", }, "content-encoding": { Help: "Content-Encoding header", Type: "string", Example: "gzip", }, "content-language": { Help: "Content-Language header", Type: "string", Example: "en-US", }, "content-type": { Help: "Content-Type header", Type: "string", Example: "text/plain", }, // "tagging": { // Help: "x-amz-tagging header", // Type: "string", // Example: "tag1=value1&tag2=value2", // }, "tier": { Help: "Tier of the object", Type: "string", Example: "GLACIER", ReadOnly: true, }, "mtime": { Help: "Time of last modification, read from rclone metadata", Type: "RFC 3339", Example: "2006-01-02T15:04:05.999999999Z07:00", }, "btime": { Help: "Time of file birth (creation) read from Last-Modified header", Type: "RFC 3339", Example: 
"2006-01-02T15:04:05.999999999Z07:00", ReadOnly: true, }, } // Options defines the configuration for this backend type Options struct { Provider string `config:"provider"` EnvAuth bool `config:"env_auth"` AccessKeyID string `config:"access_key_id"` SecretAccessKey string `config:"secret_access_key"` Region string `config:"region"` Endpoint string `config:"endpoint"` STSEndpoint string `config:"sts_endpoint"` UseDualStack bool `config:"use_dual_stack"` LocationConstraint string `config:"location_constraint"` ACL string `config:"acl"` BucketACL string `config:"bucket_acl"` RequesterPays bool `config:"requester_pays"` ServerSideEncryption string `config:"server_side_encryption"` SSEKMSKeyID string `config:"sse_kms_key_id"` SSECustomerAlgorithm string `config:"sse_customer_algorithm"` SSECustomerKey string `config:"sse_customer_key"` SSECustomerKeyBase64 string `config:"sse_customer_key_base64"` SSECustomerKeyMD5 string `config:"sse_customer_key_md5"` StorageClass string `config:"storage_class"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` CopyCutoff fs.SizeSuffix `config:"copy_cutoff"` ChunkSize fs.SizeSuffix `config:"chunk_size"` MaxUploadParts int `config:"max_upload_parts"` DisableChecksum bool `config:"disable_checksum"` SharedCredentialsFile string `config:"shared_credentials_file"` Profile string `config:"profile"`
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/s3/setfrom.go
backend/s3/setfrom.go
// Code generated by "go run gen_setfrom.go"; DO NOT EDIT. package s3 import ( "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" ) // setFrom_s3ListObjectsInput_s3ListObjectsV2Input copies matching elements from a to b func setFrom_s3ListObjectsInput_s3ListObjectsV2Input(a *s3.ListObjectsInput, b *s3.ListObjectsV2Input) { a.Bucket = b.Bucket a.Delimiter = b.Delimiter a.EncodingType = b.EncodingType a.ExpectedBucketOwner = b.ExpectedBucketOwner a.MaxKeys = b.MaxKeys a.OptionalObjectAttributes = b.OptionalObjectAttributes a.Prefix = b.Prefix a.RequestPayer = b.RequestPayer } // setFrom_s3ListObjectsV2Output_s3ListObjectsOutput copies matching elements from a to b func setFrom_s3ListObjectsV2Output_s3ListObjectsOutput(a *s3.ListObjectsV2Output, b *s3.ListObjectsOutput) { a.CommonPrefixes = b.CommonPrefixes a.Contents = b.Contents a.Delimiter = b.Delimiter a.EncodingType = b.EncodingType a.IsTruncated = b.IsTruncated a.MaxKeys = b.MaxKeys a.Name = b.Name a.Prefix = b.Prefix a.RequestCharged = b.RequestCharged a.ResultMetadata = b.ResultMetadata } // setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input copies matching elements from a to b func setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input(a *s3.ListObjectVersionsInput, b *s3.ListObjectsV2Input) { a.Bucket = b.Bucket a.Delimiter = b.Delimiter a.EncodingType = b.EncodingType a.ExpectedBucketOwner = b.ExpectedBucketOwner a.MaxKeys = b.MaxKeys a.OptionalObjectAttributes = b.OptionalObjectAttributes a.Prefix = b.Prefix a.RequestPayer = b.RequestPayer } // setFrom_typesObjectVersion_typesDeleteMarkerEntry copies matching elements from a to b func setFrom_typesObjectVersion_typesDeleteMarkerEntry(a *types.ObjectVersion, b *types.DeleteMarkerEntry) { a.IsLatest = b.IsLatest a.Key = b.Key a.LastModified = b.LastModified a.Owner = b.Owner a.VersionId = b.VersionId } // setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput copies matching elements from a to b func 
setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(a *s3.ListObjectsV2Output, b *s3.ListObjectVersionsOutput) { a.CommonPrefixes = b.CommonPrefixes a.Delimiter = b.Delimiter a.EncodingType = b.EncodingType a.IsTruncated = b.IsTruncated a.MaxKeys = b.MaxKeys a.Name = b.Name a.Prefix = b.Prefix a.RequestCharged = b.RequestCharged a.ResultMetadata = b.ResultMetadata } // setFrom_typesObject_typesObjectVersion copies matching elements from a to b func setFrom_typesObject_typesObjectVersion(a *types.Object, b *types.ObjectVersion) { a.ChecksumAlgorithm = b.ChecksumAlgorithm a.ChecksumType = b.ChecksumType a.ETag = b.ETag a.Key = b.Key a.LastModified = b.LastModified a.Owner = b.Owner a.RestoreStatus = b.RestoreStatus a.Size = b.Size } // setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput copies matching elements from a to b func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipartUploadInput, b *s3.HeadObjectOutput) { a.BucketKeyEnabled = b.BucketKeyEnabled a.CacheControl = b.CacheControl a.ChecksumType = b.ChecksumType a.ContentDisposition = b.ContentDisposition a.ContentEncoding = b.ContentEncoding a.ContentLanguage = b.ContentLanguage a.ContentType = b.ContentType a.Expires = b.Expires a.Metadata = b.Metadata a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus a.ObjectLockMode = b.ObjectLockMode a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate a.SSECustomerAlgorithm = b.SSECustomerAlgorithm a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5 a.SSEKMSKeyId = b.SSEKMSKeyId a.ServerSideEncryption = b.ServerSideEncryption a.StorageClass = b.StorageClass a.WebsiteRedirectLocation = b.WebsiteRedirectLocation } // setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput copies matching elements from a to b func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipartUploadInput, b *s3.CopyObjectInput) { a.Bucket = b.Bucket a.Key = b.Key a.ACL = b.ACL a.BucketKeyEnabled = b.BucketKeyEnabled a.CacheControl = 
b.CacheControl a.ChecksumAlgorithm = b.ChecksumAlgorithm a.ContentDisposition = b.ContentDisposition a.ContentEncoding = b.ContentEncoding a.ContentLanguage = b.ContentLanguage a.ContentType = b.ContentType a.ExpectedBucketOwner = b.ExpectedBucketOwner a.Expires = b.Expires a.GrantFullControl = b.GrantFullControl a.GrantRead = b.GrantRead a.GrantReadACP = b.GrantReadACP a.GrantWriteACP = b.GrantWriteACP a.Metadata = b.Metadata a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus a.ObjectLockMode = b.ObjectLockMode a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate a.RequestPayer = b.RequestPayer a.SSECustomerAlgorithm = b.SSECustomerAlgorithm a.SSECustomerKey = b.SSECustomerKey a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5 a.SSEKMSEncryptionContext = b.SSEKMSEncryptionContext a.SSEKMSKeyId = b.SSEKMSKeyId a.ServerSideEncryption = b.ServerSideEncryption a.StorageClass = b.StorageClass a.Tagging = b.Tagging a.WebsiteRedirectLocation = b.WebsiteRedirectLocation } // setFrom_s3UploadPartCopyInput_s3CopyObjectInput copies matching elements from a to b func setFrom_s3UploadPartCopyInput_s3CopyObjectInput(a *s3.UploadPartCopyInput, b *s3.CopyObjectInput) { a.Bucket = b.Bucket a.CopySource = b.CopySource a.Key = b.Key a.CopySourceIfMatch = b.CopySourceIfMatch a.CopySourceIfModifiedSince = b.CopySourceIfModifiedSince a.CopySourceIfNoneMatch = b.CopySourceIfNoneMatch a.CopySourceIfUnmodifiedSince = b.CopySourceIfUnmodifiedSince a.CopySourceSSECustomerAlgorithm = b.CopySourceSSECustomerAlgorithm a.CopySourceSSECustomerKey = b.CopySourceSSECustomerKey a.CopySourceSSECustomerKeyMD5 = b.CopySourceSSECustomerKeyMD5 a.ExpectedBucketOwner = b.ExpectedBucketOwner a.ExpectedSourceBucketOwner = b.ExpectedSourceBucketOwner a.RequestPayer = b.RequestPayer a.SSECustomerAlgorithm = b.SSECustomerAlgorithm a.SSECustomerKey = b.SSECustomerKey a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5 } // setFrom_s3HeadObjectOutput_s3GetObjectOutput copies matching elements from a to b func 
setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.GetObjectOutput) { a.AcceptRanges = b.AcceptRanges a.BucketKeyEnabled = b.BucketKeyEnabled a.CacheControl = b.CacheControl a.ChecksumCRC32 = b.ChecksumCRC32 a.ChecksumCRC32C = b.ChecksumCRC32C a.ChecksumCRC64NVME = b.ChecksumCRC64NVME a.ChecksumSHA1 = b.ChecksumSHA1 a.ChecksumSHA256 = b.ChecksumSHA256 a.ChecksumType = b.ChecksumType a.ContentDisposition = b.ContentDisposition a.ContentEncoding = b.ContentEncoding a.ContentLanguage = b.ContentLanguage a.ContentLength = b.ContentLength a.ContentRange = b.ContentRange a.ContentType = b.ContentType a.DeleteMarker = b.DeleteMarker a.ETag = b.ETag a.Expiration = b.Expiration a.Expires = b.Expires a.ExpiresString = b.ExpiresString a.LastModified = b.LastModified a.Metadata = b.Metadata a.MissingMeta = b.MissingMeta a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus a.ObjectLockMode = b.ObjectLockMode a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate a.PartsCount = b.PartsCount a.ReplicationStatus = b.ReplicationStatus a.RequestCharged = b.RequestCharged a.Restore = b.Restore a.SSECustomerAlgorithm = b.SSECustomerAlgorithm a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5 a.SSEKMSKeyId = b.SSEKMSKeyId a.ServerSideEncryption = b.ServerSideEncryption a.StorageClass = b.StorageClass a.TagCount = b.TagCount a.VersionId = b.VersionId a.WebsiteRedirectLocation = b.WebsiteRedirectLocation a.ResultMetadata = b.ResultMetadata } // setFrom_s3CreateMultipartUploadInput_s3PutObjectInput copies matching elements from a to b func setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(a *s3.CreateMultipartUploadInput, b *s3.PutObjectInput) { a.Bucket = b.Bucket a.Key = b.Key a.ACL = b.ACL a.BucketKeyEnabled = b.BucketKeyEnabled a.CacheControl = b.CacheControl a.ChecksumAlgorithm = b.ChecksumAlgorithm a.ContentDisposition = b.ContentDisposition a.ContentEncoding = b.ContentEncoding a.ContentLanguage = b.ContentLanguage a.ContentType = b.ContentType 
a.ExpectedBucketOwner = b.ExpectedBucketOwner a.Expires = b.Expires a.GrantFullControl = b.GrantFullControl a.GrantRead = b.GrantRead a.GrantReadACP = b.GrantReadACP a.GrantWriteACP = b.GrantWriteACP a.Metadata = b.Metadata a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus a.ObjectLockMode = b.ObjectLockMode a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate a.RequestPayer = b.RequestPayer a.SSECustomerAlgorithm = b.SSECustomerAlgorithm a.SSECustomerKey = b.SSECustomerKey a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5 a.SSEKMSEncryptionContext = b.SSEKMSEncryptionContext a.SSEKMSKeyId = b.SSEKMSKeyId a.ServerSideEncryption = b.ServerSideEncryption a.StorageClass = b.StorageClass a.Tagging = b.Tagging a.WebsiteRedirectLocation = b.WebsiteRedirectLocation } // setFrom_s3HeadObjectOutput_s3PutObjectInput copies matching elements from a to b func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.PutObjectInput) { a.BucketKeyEnabled = b.BucketKeyEnabled a.CacheControl = b.CacheControl a.ChecksumCRC32 = b.ChecksumCRC32 a.ChecksumCRC32C = b.ChecksumCRC32C a.ChecksumCRC64NVME = b.ChecksumCRC64NVME a.ChecksumSHA1 = b.ChecksumSHA1 a.ChecksumSHA256 = b.ChecksumSHA256 a.ContentDisposition = b.ContentDisposition a.ContentEncoding = b.ContentEncoding a.ContentLanguage = b.ContentLanguage a.ContentLength = b.ContentLength a.ContentType = b.ContentType a.Expires = b.Expires a.Metadata = b.Metadata a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus a.ObjectLockMode = b.ObjectLockMode a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate a.SSECustomerAlgorithm = b.SSECustomerAlgorithm a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5 a.SSEKMSKeyId = b.SSEKMSKeyId a.ServerSideEncryption = b.ServerSideEncryption a.StorageClass = b.StorageClass a.WebsiteRedirectLocation = b.WebsiteRedirectLocation } // setFrom_s3CopyObjectInput_s3PutObjectInput copies matching elements from a to b func setFrom_s3CopyObjectInput_s3PutObjectInput(a 
*s3.CopyObjectInput, b *s3.PutObjectInput) { a.Bucket = b.Bucket a.Key = b.Key a.ACL = b.ACL a.BucketKeyEnabled = b.BucketKeyEnabled a.CacheControl = b.CacheControl a.ChecksumAlgorithm = b.ChecksumAlgorithm a.ContentDisposition = b.ContentDisposition a.ContentEncoding = b.ContentEncoding a.ContentLanguage = b.ContentLanguage a.ContentType = b.ContentType a.ExpectedBucketOwner = b.ExpectedBucketOwner a.Expires = b.Expires a.GrantFullControl = b.GrantFullControl a.GrantRead = b.GrantRead a.GrantReadACP = b.GrantReadACP a.GrantWriteACP = b.GrantWriteACP a.IfMatch = b.IfMatch a.IfNoneMatch = b.IfNoneMatch a.Metadata = b.Metadata a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus a.ObjectLockMode = b.ObjectLockMode a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate a.RequestPayer = b.RequestPayer a.SSECustomerAlgorithm = b.SSECustomerAlgorithm a.SSECustomerKey = b.SSECustomerKey a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5 a.SSEKMSEncryptionContext = b.SSEKMSEncryptionContext a.SSEKMSKeyId = b.SSEKMSKeyId a.ServerSideEncryption = b.ServerSideEncryption a.StorageClass = b.StorageClass a.Tagging = b.Tagging a.WebsiteRedirectLocation = b.WebsiteRedirectLocation }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/s3/ibm_signer_test.go
backend/s3/ibm_signer_test.go
package s3 import ( "context" "net/http" "testing" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/stretchr/testify/assert" ) type MockAuthenticator struct { Token string Error error } func (m *MockAuthenticator) GetToken() (string, error) { return m.Token, m.Error } func TestSignHTTP(t *testing.T) { apiKey := "mock-api-key" instanceID := "mock-instance-id" token := "mock-iam-token" mockAuth := &MockAuthenticator{ Token: token, Error: nil, } signer := &IbmIamSigner{ APIKey: apiKey, InstanceID: instanceID, Auth: mockAuth, } req, err := http.NewRequest("GET", "https://example.com", nil) if err != nil { t.Fatalf("Failed to create HTTP request: %v", err) } credentials := aws.Credentials{ AccessKeyID: "mock-access-key", SecretAccessKey: "mock-secret-key", } err = signer.SignHTTP(context.TODO(), credentials, req, "payload-hash", "service", "region", time.Now()) assert.NoError(t, err, "Expected no error") assert.Equal(t, "Bearer "+token, req.Header.Get("Authorization"), "Authorization header should be set correctly") assert.Equal(t, instanceID, req.Header.Get("ibm-service-instance-id"), "ibm-service-instance-id header should be set correctly") }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/s3/s3_internal_test.go
backend/s3/s3_internal_test.go
package s3 import ( "bytes" "compress/gzip" "context" "crypto/md5" "errors" "fmt" "path" "strings" "testing" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/version" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func gz(t *testing.T, s string) string { var buf bytes.Buffer zw := gzip.NewWriter(&buf) _, err := zw.Write([]byte(s)) require.NoError(t, err) err = zw.Close() require.NoError(t, err) return buf.String() } func md5sum(t *testing.T, s string) string { hash := md5.Sum([]byte(s)) return fmt.Sprintf("%x", hash) } func (f *Fs) InternalTestMetadata(t *testing.T) { ctx := context.Background() original := random.String(1000) contents := gz(t, original) item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) btime := time.Now() metadata := fs.Metadata{ "cache-control": "no-cache", "content-disposition": "inline", "content-encoding": "gzip", "content-language": "en-US", "content-type": "text/plain", "mtime": "2009-05-06T04:05:06.499999999Z", // "tier" - read only // "btime" - read only } // Cloudflare insists on decompressing `Content-Encoding: gzip` unless // `Cache-Control: no-transform` is supplied. This is a deviation from // AWS but we fudge the tests here rather than breaking peoples // expectations of what Cloudflare does. 
// // This can always be overridden by using // `--header-upload "Cache-Control: no-transform"` if f.opt.Provider == "Cloudflare" { metadata["cache-control"] = "no-transform" } obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", metadata) defer func() { assert.NoError(t, obj.Remove(ctx)) }() o := obj.(*Object) gotMetadata, err := o.Metadata(ctx) require.NoError(t, err) for k, v := range metadata { got := gotMetadata[k] switch k { case "mtime": assert.True(t, fstest.Time(v).Equal(fstest.Time(got))) case "btime": gotBtime := fstest.Time(got) dt := gotBtime.Sub(btime) assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt)) assert.True(t, fstest.Time(v).Equal(fstest.Time(got))) case "tier": assert.NotEqual(t, "", got) default: assert.Equal(t, v, got, k) } } t.Run("GzipEncoding", func(t *testing.T) { // Test that the gzipped file we uploaded can be // downloaded with and without decompression checkDownload := func(wantContents string, wantSize int64, wantHash string) { gotContents := fstests.ReadObject(ctx, t, o, -1) assert.Equal(t, wantContents, gotContents) assert.Equal(t, wantSize, o.Size()) gotHash, err := o.Hash(ctx, hash.MD5) require.NoError(t, err) assert.Equal(t, wantHash, gotHash) } t.Run("NoDecompress", func(t *testing.T) { checkDownload(contents, int64(len(contents)), md5sum(t, contents)) }) t.Run("Decompress", func(t *testing.T) { f.opt.Decompress = true defer func() { f.opt.Decompress = false }() checkDownload(original, -1, "") }) }) } func (f *Fs) InternalTestNoHead(t *testing.T) { ctx := context.Background() // Set NoHead for this test f.opt.NoHead = true defer func() { f.opt.NoHead = false }() contents := random.String(1000) item := fstest.NewItem("test-no-head", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) obj := fstests.PutTestContents(ctx, t, f, &item, contents, true) defer func() { assert.NoError(t, 
obj.Remove(ctx)) }() // PutTestcontents checks the received object } func TestVersionLess(t *testing.T) { key1 := "key1" key2 := "key2" t1 := fstest.Time("2022-01-21T12:00:00+01:00") t2 := fstest.Time("2022-01-21T12:00:01+01:00") for n, test := range []struct { a, b *types.ObjectVersion want bool }{ {a: nil, b: nil, want: true}, {a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: nil, want: false}, {a: nil, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true}, {a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false}, {a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t2}, want: false}, {a: &types.ObjectVersion{Key: &key1, LastModified: &t2}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true}, {a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key2, LastModified: &t1}, want: true}, {a: &types.ObjectVersion{Key: &key2, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false}, {a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false}, {a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true}, {a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, want: false}, } { got := versionLess(test.a, test.b) assert.Equal(t, test.want, got, fmt.Sprintf("%d: %+v", n, test)) } } func TestMergeDeleteMarkers(t *testing.T) { key1 := "key1" key2 := "key2" t1 := fstest.Time("2022-01-21T12:00:00+01:00") t2 := fstest.Time("2022-01-21T12:00:01+01:00") for n, test := range []struct { versions []types.ObjectVersion markers []types.DeleteMarkerEntry want 
[]types.ObjectVersion }{ { versions: []types.ObjectVersion{}, markers: []types.DeleteMarkerEntry{}, want: []types.ObjectVersion{}, }, { versions: []types.ObjectVersion{ { Key: &key1, LastModified: &t1, }, }, markers: []types.DeleteMarkerEntry{}, want: []types.ObjectVersion{ { Key: &key1, LastModified: &t1, }, }, }, { versions: []types.ObjectVersion{}, markers: []types.DeleteMarkerEntry{ { Key: &key1, LastModified: &t1, }, }, want: []types.ObjectVersion{ { Key: &key1, LastModified: &t1, Size: isDeleteMarker, }, }, }, { versions: []types.ObjectVersion{ { Key: &key1, LastModified: &t2, }, { Key: &key2, LastModified: &t2, }, }, markers: []types.DeleteMarkerEntry{ { Key: &key1, LastModified: &t1, }, }, want: []types.ObjectVersion{ { Key: &key1, LastModified: &t2, }, { Key: &key1, LastModified: &t1, Size: isDeleteMarker, }, { Key: &key2, LastModified: &t2, }, }, }, } { got := mergeDeleteMarkers(test.versions, test.markers) assert.Equal(t, test.want, got, fmt.Sprintf("%d: %+v", n, test)) } } func TestRemoveAWSChunked(t *testing.T) { ps := func(s string) *string { return &s } tests := []struct { name string in *string want *string }{ {"nil", nil, nil}, {"empty", ps(""), nil}, {"only aws", ps("aws-chunked"), nil}, {"leading aws", ps("aws-chunked, gzip"), ps("gzip")}, {"trailing aws", ps("gzip, aws-chunked"), ps("gzip")}, {"middle aws", ps("gzip, aws-chunked, br"), ps("gzip,br")}, {"case insensitive", ps("GZip, AwS-ChUnKeD, Br"), ps("GZip,Br")}, {"duplicates", ps("aws-chunked , aws-chunked"), nil}, {"no aws normalize spaces", ps(" gzip , br "), ps(" gzip , br ")}, {"surrounding spaces", ps(" aws-chunked "), nil}, {"no change", ps("gzip, br"), ps("gzip, br")}, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { got := removeAWSChunked(tc.in) check := func(want, got *string) { t.Helper() if tc.want == nil { assert.Nil(t, got) } else { require.NotNil(t, got) assert.Equal(t, *tc.want, *got) } } check(tc.want, got) // Idempotent got2 := removeAWSChunked(got) 
check(got, got2) }) } } func (f *Fs) InternalTestVersions(t *testing.T) { ctx := context.Background() // Enable versioning for this bucket during this test _, err := f.setGetVersioning(ctx, "Enabled") if err != nil { t.Skipf("Couldn't enable versioning: %v", err) } defer func() { // Disable versioning for this bucket _, err := f.setGetVersioning(ctx, "Suspended") assert.NoError(t, err) }() // Small pause to make the LastModified different since AWS // only seems to track them to 1 second granularity time.Sleep(2 * time.Second) // Create an object const dirName = "versions" const fileName = dirName + "/" + "test-versions.txt" contents := random.String(100) item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) obj := fstests.PutTestContents(ctx, t, f, &item, contents, true) defer func() { assert.NoError(t, obj.Remove(ctx)) }() // Small pause time.Sleep(2 * time.Second) // Remove it assert.NoError(t, obj.Remove(ctx)) // Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity time.Sleep(2 * time.Second) // And create it with different size and contents newContents := random.String(101) newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z")) newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true) t.Run("Versions", func(t *testing.T) { // Set --s3-versions for this test f.opt.Versions = true defer func() { f.opt.Versions = false }() // Read the contents entries, err := f.List(ctx, dirName) require.NoError(t, err) tests := 0 var fileNameVersion string for _, entry := range entries { t.Log(entry) remote := entry.Remote() if remote == fileName { t.Run("ReadCurrent", func(t *testing.T) { assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1)) }) tests++ } else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName { t.Run("ReadVersion", func(t *testing.T) { assert.Equal(t, 
contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1)) }) assert.WithinDuration(t, obj.(*Object).lastModified, versionTime, time.Second, "object time must be with 1 second of version time") fileNameVersion = remote tests++ } } assert.Equal(t, 2, tests, "object missing from listing") // Check we can read the object with a version suffix t.Run("NewObject", func(t *testing.T) { o, err := f.NewObject(ctx, fileNameVersion) require.NoError(t, err) require.NotNil(t, o) assert.Equal(t, int64(100), o.Size(), o.Remote()) }) // Check we can make a NewFs from that object with a version suffix t.Run("NewFs", func(t *testing.T) { newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion) // Make sure --s3-versions is set in the config of the new remote fs.Debugf(nil, "oldPath = %q", newPath) lastColon := strings.LastIndex(newPath, ":") require.True(t, lastColon >= 0) newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:] fs.Debugf(nil, "newPath = %q", newPath) fNew, err := cache.Get(ctx, newPath) // This should return pointing to a file require.Equal(t, fs.ErrorIsFile, err) require.NotNil(t, fNew) // With the directory the directory above assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew))) }) }) t.Run("VersionAt", func(t *testing.T) { // We set --s3-version-at for this test so make sure we reset it at the end defer func() { f.opt.VersionAt = fs.Time{} }() var ( firstObjectTime = obj.(*Object).lastModified secondObjectTime = newObj.(*Object).lastModified ) for _, test := range []struct { what string at time.Time want []fstest.Item wantErr error wantSize int64 }{ { what: "Before", at: firstObjectTime.Add(-time.Second), want: fstests.InternalTestFiles, wantErr: fs.ErrorObjectNotFound, }, { what: "AfterOne", at: firstObjectTime.Add(time.Second), want: append([]fstest.Item{item}, fstests.InternalTestFiles...), wantSize: 100, }, { what: "AfterDelete", at: secondObjectTime.Add(-time.Second), want: fstests.InternalTestFiles, wantErr: 
fs.ErrorObjectNotFound, }, { what: "AfterTwo", at: secondObjectTime.Add(time.Second), want: append([]fstest.Item{newItem}, fstests.InternalTestFiles...), wantSize: 101, }, } { t.Run(test.what, func(t *testing.T) { f.opt.VersionAt = fs.Time(test.at) t.Run("List", func(t *testing.T) { fstest.CheckListing(t, f, test.want) }) t.Run("NewObject", func(t *testing.T) { gotObj, gotErr := f.NewObject(ctx, fileName) assert.Equal(t, test.wantErr, gotErr) if gotErr == nil { assert.Equal(t, test.wantSize, gotObj.Size()) } }) }) } }) t.Run("Mkdir", func(t *testing.T) { // Test what happens when we create a bucket we already own and see whether the // quirk is set correctly req := s3.CreateBucketInput{ Bucket: &f.rootBucket, ACL: types.BucketCannedACL(f.opt.BucketACL), } if f.opt.LocationConstraint != "" { req.CreateBucketConfiguration = &types.CreateBucketConfiguration{ LocationConstraint: types.BucketLocationConstraint(f.opt.LocationConstraint), } } err := f.pacer.Call(func() (bool, error) { _, err := f.c.CreateBucket(ctx, &req) return f.shouldRetry(ctx, err) }) var errString string var awsError smithy.APIError if err == nil { errString = "No Error" } else if errors.As(err, &awsError) { errString = awsError.ErrorCode() } else { assert.Fail(t, "Unknown error %T %v", err, err) } t.Logf("Creating a bucket we already have created returned code: %s", errString) switch errString { case "BucketAlreadyExists": assert.False(t, f.opt.UseAlreadyExists.Value, "Need to clear UseAlreadyExists quirk") case "No Error", "BucketAlreadyOwnedByYou": assert.True(t, f.opt.UseAlreadyExists.Value, "Need to set UseAlreadyExists quirk") default: assert.Fail(t, "Unknown error string %q", errString) } }) t.Run("Cleanup", func(t *testing.T) { require.NoError(t, f.CleanUpHidden(ctx)) items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...) 
fstest.CheckListing(t, f, items) // Set --s3-versions for this test f.opt.Versions = true defer func() { f.opt.Versions = false }() fstest.CheckListing(t, f, items) }) // Purge gets tested later } func (f *Fs) InternalTest(t *testing.T) { t.Run("Metadata", f.InternalTestMetadata) t.Run("NoHead", f.InternalTestNoHead) t.Run("Versions", f.InternalTestVersions) } var _ fstests.InternalTester = (*Fs)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/s3/ibm_signer.go
backend/s3/ibm_signer.go
package s3

import (
	"context"
	"net/http"
	"time"

	"github.com/IBM/go-sdk-core/v5/core"
	"github.com/aws/aws-sdk-go-v2/aws"
	v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

// Authenticator defines an interface for obtaining an IAM token.
type Authenticator interface {
	GetToken() (string, error)
}

// IbmIamSigner is a structure for signing requests using IBM IAM.
// Requires APIKey and resource InstanceID.
type IbmIamSigner struct {
	APIKey     string
	InstanceID string
	Auth       Authenticator // optional: if nil, a core.IamAuthenticator is built from APIKey
}

// SignHTTP signs requests using an IBM IAM token.
//
// It satisfies the AWS SDK's v4 HTTPSigner interface, but instead of
// producing a SigV4 signature it sets a "Bearer" Authorization header
// and the "ibm-service-instance-id" header. The credentials,
// payloadHash, service, region, signingTime and optFns arguments are
// accepted only to match the interface and are ignored.
func (signer *IbmIamSigner) SignHTTP(ctx context.Context, credentials aws.Credentials, req *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4signer.SignerOptions)) error {
	var authenticator Authenticator
	if signer.Auth != nil {
		// Injected authenticator (e.g. a mock in tests)
		authenticator = signer.Auth
	} else {
		// Default: exchange the API key for an IAM token via the IBM SDK
		authenticator = &core.IamAuthenticator{ApiKey: signer.APIKey}
	}
	token, err := authenticator.GetToken()
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("ibm-service-instance-id", signer.InstanceID)
	return nil
}

// NoOpCredentialsProvider is needed since S3 SDK requires having credentials, even though authentication is happening via IBM IAM.
type NoOpCredentialsProvider struct{}

// Retrieve returns mock credentials for the NoOpCredentialsProvider.
// They are placeholders only and are never used to sign anything.
func (n *NoOpCredentialsProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
	return aws.Credentials{
		AccessKeyID:     "NoOpAccessKey",
		SecretAccessKey: "NoOpSecretKey",
		SessionToken:    "",
		Source:          "NoOpCredentialsProvider",
	}, nil
}

// IsExpired always returns false
func (n *NoOpCredentialsProvider) IsExpired() bool {
	return false
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/s3/gen_setfrom.go
backend/s3/gen_setfrom.go
// Generate boilerplate code for setting similar structs from each other //go:build ignore package main import ( "flag" "fmt" "io" "log" "os" "reflect" "strings" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" ) // flags var ( outputFile = flag.String("o", "", "Output file name, stdout if unset") ) // globals var ( out io.Writer = os.Stdout ) // genSetFrom generates code to set the public members of a from b // // a and b should be pointers to structs // // a can be a different type from b // // Only the Fields which have the same name and assignable type on a // and b will be set. // // This is useful for copying between almost identical structures that // are frequently present in auto-generated code for cloud storage // interfaces. func genSetFrom(a, b interface{}) { name := fmt.Sprintf("setFrom_%T_%T", a, b) name = strings.Replace(name, ".", "", -1) name = strings.Replace(name, "*", "", -1) fmt.Fprintf(out, "\n// %s copies matching elements from a to b\n", name) fmt.Fprintf(out, "func %s(a %T, b %T) {\n", name, a, b) ta := reflect.TypeOf(a).Elem() tb := reflect.TypeOf(b).Elem() va := reflect.ValueOf(a).Elem() vb := reflect.ValueOf(b).Elem() for i := 0; i < tb.NumField(); i++ { bField := vb.Field(i) tbField := tb.Field(i) name := tbField.Name aField := va.FieldByName(name) taField, found := ta.FieldByName(name) if found && aField.IsValid() && bField.IsValid() && aField.CanSet() && tbField.Type.AssignableTo(taField.Type) { fmt.Fprintf(out, "\ta.%s = b.%s\n", name, name) } } fmt.Fprintf(out, "}\n") } func main() { flag.Parse() if *outputFile != "" { fd, err := os.Create(*outputFile) if err != nil { log.Fatal(err) } defer func() { err := fd.Close() if err != nil { log.Fatal(err) } }() out = fd } fmt.Fprintf(out, `// Code generated by "go run gen_setfrom.go"; DO NOT EDIT. 
package s3 import ( "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" ) `) genSetFrom(new(s3.ListObjectsInput), new(s3.ListObjectsV2Input)) genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectsOutput)) genSetFrom(new(s3.ListObjectVersionsInput), new(s3.ListObjectsV2Input)) genSetFrom(new(types.ObjectVersion), new(types.DeleteMarkerEntry)) genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectVersionsOutput)) genSetFrom(new(types.Object), new(types.ObjectVersion)) genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.HeadObjectOutput)) genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.CopyObjectInput)) genSetFrom(new(s3.UploadPartCopyInput), new(s3.CopyObjectInput)) genSetFrom(new(s3.HeadObjectOutput), new(s3.GetObjectOutput)) genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.PutObjectInput)) genSetFrom(new(s3.HeadObjectOutput), new(s3.PutObjectInput)) genSetFrom(new(s3.CopyObjectInput), new(s3.PutObjectInput)) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/s3/v2sign.go
backend/s3/v2sign.go
// v2 signing package s3 import ( "context" "crypto/hmac" "crypto/sha1" "encoding/base64" "net/http" "sort" "strings" "time" "github.com/aws/aws-sdk-go-v2/aws" v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4" ) // URL parameters that need to be added to the signature var s3ParamsToSign = map[string]struct{}{ "delete": {}, "acl": {}, "location": {}, "logging": {}, "notification": {}, "partNumber": {}, "policy": {}, "requestPayment": {}, "torrent": {}, "uploadId": {}, "uploads": {}, "versionId": {}, "versioning": {}, "versions": {}, "response-content-type": {}, "response-content-language": {}, "response-expires": {}, "response-cache-control": {}, "response-content-disposition": {}, "response-content-encoding": {}, } // Implement HTTPSignerV4 interface type v2Signer struct { opt *Options } // SignHTTP signs requests using v2 auth. // // Cobbled together from goamz and aws-sdk-go. // // Bodged up to compile with AWS SDK v2 func (v2 *v2Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, req *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4signer.SignerOptions)) error { // Set date date := time.Now().UTC().Format(time.RFC1123) req.Header.Set("Date", date) // Sort out URI uri := req.URL.EscapedPath() if uri == "" { uri = "/" } // Look through headers of interest var md5 string var contentType string var headersToSign []string tmpHeadersToSign := make(map[string][]string) for k, v := range req.Header { k = strings.ToLower(k) switch k { case "content-md5": md5 = v[0] case "content-type": contentType = v[0] default: if strings.HasPrefix(k, "x-amz-") { tmpHeadersToSign[k] = v } } } var keys []string for k := range tmpHeadersToSign { keys = append(keys, k) } // https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html sort.Strings(keys) for _, key := range keys { vall := strings.Join(tmpHeadersToSign[key], ",") headersToSign = append(headersToSign, key+":"+vall) } // Make headers of 
interest into canonical string var joinedHeadersToSign string if len(headersToSign) > 0 { joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n" } // Look for query parameters which need to be added to the signature params := req.URL.Query() var queriesToSign []string for k, vs := range params { if _, ok := s3ParamsToSign[k]; ok { for _, v := range vs { if v == "" { queriesToSign = append(queriesToSign, k) } else { queriesToSign = append(queriesToSign, k+"="+v) } } } } // Add query parameters to URI if len(queriesToSign) > 0 { sort.StringSlice(queriesToSign).Sort() uri += "?" + strings.Join(queriesToSign, "&") } // Make signature payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri hash := hmac.New(sha1.New, []byte(v2.opt.SecretAccessKey)) _, _ = hash.Write([]byte(payload)) signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size())) base64.StdEncoding.Encode(signature, hash.Sum(nil)) // Set signature in request req.Header.Set("Authorization", "AWS "+v2.opt.AccessKeyID+":"+string(signature)) return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/filescom/filescom.go
backend/filescom/filescom.go
// Package filescom provides an interface to the Files.com // object storage system. package filescom import ( "context" "errors" "fmt" "io" "net/http" "net/url" "path" "slices" "strings" "time" files_sdk "github.com/Files-com/files-sdk-go/v3" "github.com/Files-com/files-sdk-go/v3/bundle" "github.com/Files-com/files-sdk-go/v3/file" file_migration "github.com/Files-com/files-sdk-go/v3/filemigration" "github.com/Files-com/files-sdk-go/v3/folder" "github.com/Files-com/files-sdk-go/v3/session" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" ) /* Run of rclone info stringNeedsEscaping = []rune{ '/', '\x00' } maxFileLength = 512 // for 1 byte unicode characters maxFileLength = 512 // for 2 byte unicode characters maxFileLength = 512 // for 3 byte unicode characters maxFileLength = 512 // for 4 byte unicode characters canWriteUnnormalized = true canReadUnnormalized = true canReadRenormalized = true canStream = true */ const ( minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential folderNotEmpty = "processing-failure/folder-not-empty" ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "filescom", Description: "Files.com", NewFs: NewFs, Options: []fs.Option{ { Name: "site", Help: "Your site subdomain (e.g. mysite) or custom domain (e.g. 
myfiles.customdomain.com).", }, { Name: "username", Help: "The username used to authenticate with Files.com.", }, { Name: "password", Help: "The password used to authenticate with Files.com.", IsPassword: true, }, { Name: "api_key", Help: "The API key used to authenticate with Files.com.", Advanced: true, Sensitive: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.Display | encoder.EncodeBackSlash | encoder.EncodeRightSpace | encoder.EncodeRightCrLfHtVt | encoder.EncodeInvalidUtf8), }}, }) } // Options defines the configuration for this backend type Options struct { Site string `config:"site"` Username string `config:"username"` Password string `config:"password"` APIKey string `config:"api_key"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs represents a remote files.com server type Fs struct { name string // name of this remote root string // the path we are working on opt Options // parsed options features *fs.Features // optional features fileClient *file.Client // the connection to the file API folderClient *folder.Client // the connection to the folder API migrationClient *file_migration.Client // the connection to the file migration API bundleClient *bundle.Client // the connection to the bundle API pacer *fs.Pacer // pacer for API calls } // Object describes a files object // // Will definitely have info but maybe not meta type Object struct { fs *Fs // what this object is part of remote string // The remote path size int64 // size of the object crc32 string // CRC32 of the object content md5 string // MD5 of the object content mimeType string // Content-Type of the object modTime time.Time // modification time of the object } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs 
to a string func (f *Fs) String() string { return fmt.Sprintf("files root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // Encode remote and turn it into an absolute path in the share func (f *Fs) absPath(remote string) string { return f.opt.Enc.FromStandardPath(path.Join(f.root, remote)) } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this err deserves to be // retried. It returns the err as a convenience func shouldRetry(ctx context.Context, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } if apiErr, ok := err.(files_sdk.ResponseError); ok { if slices.Contains(retryErrorCodes, apiErr.HttpCode) { fs.Debugf(nil, "Retrying API error %v", err) return true, err } } return fserrors.ShouldRetry(err), err } // readMetaDataForPath reads the metadata from the path func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *files_sdk.File, err error) { params := files_sdk.FileFindParams{ Path: f.absPath(path), } var file files_sdk.File err = f.pacer.Call(func() (bool, error) { file, err = f.fileClient.Find(params, files_sdk.WithContext(ctx)) return shouldRetry(ctx, err) }) if err != nil { return nil, err } return &file, nil } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } root = strings.Trim(root, "/") config, err := newClientConfig(ctx, opt) if err != nil { return nil, err } f := &Fs{ name: name, root: root, opt: *opt, fileClient: &file.Client{Config: config}, folderClient: 
&folder.Client{Config: config}, migrationClient: &file_migration.Client{Config: config}, bundleClient: &bundle.Client{Config: config}, pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } f.features = (&fs.Features{ CaseInsensitive: true, CanHaveEmptyDirectories: true, ReadMimeType: true, DirModTimeUpdatesOnWrite: true, }).Fill(ctx, f) if f.root != "" { info, err := f.readMetaDataForPath(ctx, "") if err == nil && !info.IsDir() { f.root = path.Dir(f.root) if f.root == "." { f.root = "" } return f, fs.ErrorIsFile } } return f, err } func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) { if opt.Site != "" { if strings.Contains(opt.Site, ".") { config.EndpointOverride = opt.Site } else { config.Subdomain = opt.Site } _, err = url.ParseRequestURI(config.Endpoint()) if err != nil { err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site) return } } config = config.Init().SetCustomClient(fshttp.NewClient(ctx)) if opt.APIKey != "" { config.APIKey = opt.APIKey return } if opt.Username == "" { err = errors.New("username not found") return } if opt.Password == "" { err = errors.New("password not found") return } opt.Password, err = obscure.Reveal(opt.Password) if err != nil { return } sessionClient := session.Client{Config: config} params := files_sdk.SessionCreateParams{ Username: opt.Username, Password: opt.Password, } thisSession, err := sessionClient.Create(params, files_sdk.WithContext(ctx)) if err != nil { err = fmt.Errorf("couldn't create session: %w", err) return } config.SessionId = thisSession.Id return } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *files_sdk.File) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } var err error if file != nil { err = o.setMetaData(file) } else { err = o.readMetaData(ctx) // reads info and meta, returning an error } if err != nil { return nil, err } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(ctx, remote, nil) } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { var it *folder.Iter params := files_sdk.FolderListForParams{ Path: f.absPath(dir), } err = f.pacer.Call(func() (bool, error) { it, err = f.folderClient.ListFor(params, files_sdk.WithContext(ctx)) return shouldRetry(ctx, err) }) if err != nil { return nil, fmt.Errorf("couldn't list files: %w", err) } for it.Next() { item := ptr(it.File()) remote := f.opt.Enc.ToStandardPath(item.DisplayName) remote = path.Join(dir, remote) if remote == dir { continue } if item.IsDir() { d := fs.NewDir(remote, item.ModTime()) entries = append(entries, d) } else { o, err := f.newObjectWithInfo(ctx, remote, item) if err != nil { return nil, err } entries = append(entries, o) } } err = it.Err() if files_sdk.IsNotExist(err) { return nil, fs.ErrorDirNotFound } return } // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // // Returns the object and error. 
// // Used to create new objects func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, err error) { // Create the directory for the object if it doesn't exist err = f.mkParentDir(ctx, remote) if err != nil { return } // Temporary Object under construction o = &Object{ fs: f, remote: remote, } return o, nil } // Put the object // // Copy the reader in to the new object which is returned. // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // Temporary Object under construction fs := &Object{ fs: f, remote: src.Remote(), } return fs, fs.Update(ctx, in, src, options...) } // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.Put(ctx, in, src, options...) } func (f *Fs) mkdir(ctx context.Context, path string) error { if path == "" || path == "." 
{ return nil } params := files_sdk.FolderCreateParams{ Path: path, MkdirParents: ptr(true), } err := f.pacer.Call(func() (bool, error) { _, err := f.folderClient.Create(params, files_sdk.WithContext(ctx)) return shouldRetry(ctx, err) }) if files_sdk.IsExist(err) { return nil } return err } // Make the parent directory of remote func (f *Fs) mkParentDir(ctx context.Context, remote string) error { return f.mkdir(ctx, path.Dir(f.absPath(remote))) } // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { return f.mkdir(ctx, f.absPath(dir)) } // DirSetModTime sets the directory modtime for dir func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error { o := Object{ fs: f, remote: dir, } return o.SetModTime(ctx, modTime) } // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { path := f.absPath(dir) if path == "" || path == "." { return errors.New("can't purge root directory") } params := files_sdk.FileDeleteParams{ Path: path, Recursive: ptr(!check), } err := f.pacer.Call(func() (bool, error) { err := f.fileClient.Delete(params, files_sdk.WithContext(ctx)) // Allow for eventual consistency deletion of child objects. if isFolderNotEmpty(err) { return true, err } return shouldRetry(ctx, err) }) if err != nil { if files_sdk.IsNotExist(err) { return fs.ErrorDirNotFound } else if isFolderNotEmpty(err) { return fs.ErrorDirectoryNotEmpty } return fmt.Errorf("rmdir failed: %w", err) } return nil } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return time.Second } // Copy src to this remote using server-side copy operations. 
// // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dstObj fs.Object, err error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } err = srcObj.readMetaData(ctx) if err != nil { return } srcPath := srcObj.fs.absPath(srcObj.remote) dstPath := f.absPath(remote) if strings.EqualFold(srcPath, dstPath) { return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath) } // Create temporary object dstObj, err = f.createObject(ctx, remote) if err != nil { return } // Copy the object params := files_sdk.FileCopyParams{ Path: srcPath, Destination: dstPath, Overwrite: ptr(true), } var action files_sdk.FileAction err = f.pacer.Call(func() (bool, error) { action, err = f.fileClient.Copy(params, files_sdk.WithContext(ctx)) return shouldRetry(ctx, err) }) if err != nil { return } err = f.waitForAction(ctx, action, "copy") if err != nil { return } err = dstObj.SetModTime(ctx, srcObj.modTime) return } // Purge deletes all the files and the container // // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, false) } // move a file or folder func (f *Fs) move(ctx context.Context, src *Fs, srcRemote string, dstRemote string) (info *files_sdk.File, err error) { // Move the object params := files_sdk.FileMoveParams{ Path: src.absPath(srcRemote), Destination: f.absPath(dstRemote), } var action files_sdk.FileAction err = f.pacer.Call(func() (bool, error) { action, err = f.fileClient.Move(params, files_sdk.WithContext(ctx)) return shouldRetry(ctx, err) }) if err != 
nil { return nil, err } err = f.waitForAction(ctx, action, "move") if err != nil { return nil, err } info, err = f.readMetaDataForPath(ctx, dstRemote) return } func (f *Fs) waitForAction(ctx context.Context, action files_sdk.FileAction, operation string) (err error) { var migration files_sdk.FileMigration err = f.pacer.Call(func() (bool, error) { migration, err = f.migrationClient.Wait(action, func(migration files_sdk.FileMigration) { // noop }, files_sdk.WithContext(ctx)) return shouldRetry(ctx, err) }) if err == nil && migration.Status != "completed" { return fmt.Errorf("%v did not complete successfully: %v", operation, migration.Status) } return } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } // Create temporary object dstObj, err := f.createObject(ctx, remote) if err != nil { return nil, err } // Do the move info, err := f.move(ctx, srcObj.fs, srcObj.remote, dstObj.remote) if err != nil { return nil, err } err = dstObj.setMetaData(info) if err != nil { return nil, err } return dstObj, nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } // Check if destination exists _, err = f.readMetaDataForPath(ctx, dstRemote) if err == nil { return fs.ErrorDirExists } // Create temporary object dstObj, err := f.createObject(ctx, dstRemote) if err != nil { return } // Do the move _, err = f.move(ctx, srcFs, srcRemote, dstObj.remote) return } // PublicLink adds a "readable by anyone with link" permission on the given file or folder. func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (url string, err error) { params := files_sdk.BundleCreateParams{ Paths: []string{f.absPath(remote)}, } if expire < fs.DurationOff { params.ExpiresAt = ptr(time.Now().Add(time.Duration(expire))) } var bundle files_sdk.Bundle err = f.pacer.Call(func() (bool, error) { bundle, err = f.bundleClient.Create(params, files_sdk.WithContext(ctx)) return shouldRetry(ctx, err) }) url = bundle.Url return } // Hashes returns the supported hash sets. 
func (f *Fs) Hashes() hash.Set { return hash.NewHashSet(hash.CRC32, hash.MD5) } // ------------------------------------------------------------ // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Hash returns the MD5 of an object returning a lowercase hex string func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { switch t { case hash.CRC32: if o.crc32 == "" { return "", nil } return fmt.Sprintf("%08s", o.crc32), nil case hash.MD5: return o.md5, nil } return "", hash.ErrUnsupported } // Size returns the size of an object in bytes func (o *Object) Size() int64 { return o.size } // setMetaData sets the metadata from info func (o *Object) setMetaData(file *files_sdk.File) error { o.modTime = file.ModTime() if !file.IsDir() { o.size = file.Size o.crc32 = file.Crc32 o.md5 = file.Md5 o.mimeType = file.MimeType } return nil } // readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info func (o *Object) readMetaData(ctx context.Context) (err error) { file, err := o.fs.readMetaDataForPath(ctx, o.remote) if err != nil { if files_sdk.IsNotExist(err) { return fs.ErrorObjectNotFound } return err } if file.IsDir() { return fs.ErrorIsDir } return o.setMetaData(file) } // ModTime returns the modification time of the object // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) ModTime(ctx context.Context) time.Time { return o.modTime } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) { params := files_sdk.FileUpdateParams{ Path: o.fs.absPath(o.remote), ProvidedMtime: &modTime, } var file files_sdk.File err = o.fs.pacer.Call(func() 
(bool, error) { file, err = o.fs.fileClient.Update(params, files_sdk.WithContext(ctx)) return shouldRetry(ctx, err) }) if err != nil { return err } return o.setMetaData(&file) } // Storable returns a boolean showing whether this object storable func (o *Object) Storable() bool { return true } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { // Offset and Count for range download var offset, count int64 fs.FixRangeOption(options, o.size) for _, option := range options { switch x := option.(type) { case *fs.RangeOption: offset, count = x.Decode(o.size) if count < 0 { count = o.size - offset } case *fs.SeekOption: offset = x.Offset count = o.size - offset default: if option.Mandatory() { fs.Logf(o, "Unsupported mandatory option: %v", option) } } } params := files_sdk.FileDownloadParams{ Path: o.fs.absPath(o.remote), } headers := &http.Header{} headers.Set("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+count-1)) err = o.fs.pacer.Call(func() (bool, error) { _, err = o.fs.fileClient.Download( params, files_sdk.WithContext(ctx), files_sdk.RequestHeadersOption(headers), files_sdk.ResponseBodyOption(func(closer io.ReadCloser) error { in = closer return err }), ) return shouldRetry(ctx, err) }) return } // Returns a pointer to t - useful for returning pointers to constants func ptr[T any](t T) *T { return &t } func isFolderNotEmpty(err error) bool { var re files_sdk.ResponseError ok := errors.As(err, &re) return ok && re.Type == folderNotEmpty } // Update the object with the contents of the io.Reader, modTime and size // // If existing is set then it updates the object rather than creating a new one. // // The new object may have been created if an error is returned. 
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { uploadOpts := []file.UploadOption{ file.UploadWithContext(ctx), file.UploadWithReader(in), file.UploadWithDestinationPath(o.fs.absPath(o.remote)), file.UploadWithProvidedMtime(src.ModTime(ctx)), } err := o.fs.pacer.Call(func() (bool, error) { err := o.fs.fileClient.Upload(uploadOpts...) return shouldRetry(ctx, err) }) if err != nil { return err } return o.readMetaData(ctx) } // Remove an object func (o *Object) Remove(ctx context.Context) error { params := files_sdk.FileDeleteParams{ Path: o.fs.absPath(o.remote), } return o.fs.pacer.Call(func() (bool, error) { err := o.fs.fileClient.Delete(params, files_sdk.WithContext(ctx)) return shouldRetry(ctx, err) }) } // MimeType of an Object if known, "" otherwise func (o *Object) MimeType(ctx context.Context) string { return o.mimeType } // Check the interfaces are satisfied var ( _ fs.Fs = (*Fs)(nil) _ fs.Purger = (*Fs)(nil) _ fs.PutStreamer = (*Fs)(nil) _ fs.Copier = (*Fs)(nil) _ fs.Mover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil) _ fs.Object = (*Object)(nil) _ fs.MimeTyper = (*Object)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/filescom/filescom_test.go
backend/filescom/filescom_test.go
// Test Files filesystem interface package filescom_test import ( "testing" "github.com/rclone/rclone/backend/filescom" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestFilesCom:", NilObject: (*filescom.Object)(nil), }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/googlephotos/albums_test.go
backend/googlephotos/albums_test.go
package googlephotos import ( "testing" "github.com/rclone/rclone/backend/googlephotos/api" "github.com/stretchr/testify/assert" ) func TestNewAlbums(t *testing.T) { albums := newAlbums() assert.NotNil(t, albums.dupes) assert.NotNil(t, albums.byID) assert.NotNil(t, albums.byTitle) assert.NotNil(t, albums.path) } func TestAlbumsAdd(t *testing.T) { albums := newAlbums() assert.Equal(t, map[string][]*api.Album{}, albums.dupes) assert.Equal(t, map[string]*api.Album{}, albums.byID) assert.Equal(t, map[string]*api.Album{}, albums.byTitle) assert.Equal(t, map[string][]string{}, albums.path) a1 := &api.Album{ Title: "one", ID: "1", } albums.add(a1) assert.Equal(t, map[string][]*api.Album{ "one": {a1}, }, albums.dupes) assert.Equal(t, map[string]*api.Album{ "1": a1, }, albums.byID) assert.Equal(t, map[string]*api.Album{ "one": a1, }, albums.byTitle) assert.Equal(t, map[string][]string{ "": {"one"}, }, albums.path) a2 := &api.Album{ Title: "two", ID: "2", } albums.add(a2) assert.Equal(t, map[string][]*api.Album{ "one": {a1}, "two": {a2}, }, albums.dupes) assert.Equal(t, map[string]*api.Album{ "1": a1, "2": a2, }, albums.byID) assert.Equal(t, map[string]*api.Album{ "one": a1, "two": a2, }, albums.byTitle) assert.Equal(t, map[string][]string{ "": {"one", "two"}, }, albums.path) // Add a duplicate a2a := &api.Album{ Title: "two", ID: "2a", } albums.add(a2a) assert.Equal(t, map[string][]*api.Album{ "one": {a1}, "two": {a2, a2a}, }, albums.dupes) assert.Equal(t, map[string]*api.Album{ "1": a1, "2": a2, "2a": a2a, }, albums.byID) assert.Equal(t, map[string]*api.Album{ "one": a1, "two {2}": a2, "two {2a}": a2a, }, albums.byTitle) assert.Equal(t, map[string][]string{ "": {"one", "two {2}", "two {2a}"}, }, albums.path) // Add a sub directory a1sub := &api.Album{ Title: "one/sub", ID: "1sub", } albums.add(a1sub) assert.Equal(t, map[string][]*api.Album{ "one": {a1}, "two": {a2, a2a}, "one/sub": {a1sub}, }, albums.dupes) assert.Equal(t, map[string]*api.Album{ "1": a1, "2": a2, "2a": 
a2a, "1sub": a1sub, }, albums.byID) assert.Equal(t, map[string]*api.Album{ "one": a1, "one/sub": a1sub, "two {2}": a2, "two {2a}": a2a, }, albums.byTitle) assert.Equal(t, map[string][]string{ "": {"one", "two {2}", "two {2a}"}, "one": {"sub"}, }, albums.path) // Add a weird path a0 := &api.Album{ Title: "/../././..////.", ID: "0", } albums.add(a0) assert.Equal(t, map[string][]*api.Album{ "{0}": {a0}, "one": {a1}, "two": {a2, a2a}, "one/sub": {a1sub}, }, albums.dupes) assert.Equal(t, map[string]*api.Album{ "0": a0, "1": a1, "2": a2, "2a": a2a, "1sub": a1sub, }, albums.byID) assert.Equal(t, map[string]*api.Album{ "{0}": a0, "one": a1, "one/sub": a1sub, "two {2}": a2, "two {2a}": a2a, }, albums.byTitle) assert.Equal(t, map[string][]string{ "": {"one", "two {2}", "two {2a}", "{0}"}, "one": {"sub"}, }, albums.path) } func TestAlbumsDel(t *testing.T) { albums := newAlbums() a1 := &api.Album{ Title: "one", ID: "1", } albums.add(a1) a2 := &api.Album{ Title: "two", ID: "2", } albums.add(a2) // Add a duplicate a2a := &api.Album{ Title: "two", ID: "2a", } albums.add(a2a) // Add a sub directory a1sub := &api.Album{ Title: "one/sub", ID: "1sub", } albums.add(a1sub) assert.Equal(t, map[string][]*api.Album{ "one": {a1}, "two": {a2, a2a}, "one/sub": {a1sub}, }, albums.dupes) assert.Equal(t, map[string]*api.Album{ "1": a1, "2": a2, "2a": a2a, "1sub": a1sub, }, albums.byID) assert.Equal(t, map[string]*api.Album{ "one": a1, "one/sub": a1sub, "two {2}": a2, "two {2a}": a2a, }, albums.byTitle) assert.Equal(t, map[string][]string{ "": {"one", "two {2}", "two {2a}"}, "one": {"sub"}, }, albums.path) albums.del(a1) assert.Equal(t, map[string][]*api.Album{ "one": {a1}, "two": {a2, a2a}, "one/sub": {a1sub}, }, albums.dupes) assert.Equal(t, map[string]*api.Album{ "2": a2, "2a": a2a, "1sub": a1sub, }, albums.byID) assert.Equal(t, map[string]*api.Album{ "one/sub": a1sub, "two {2}": a2, "two {2a}": a2a, }, albums.byTitle) assert.Equal(t, map[string][]string{ "": {"one", "two {2}", "two {2a}"}, 
"one": {"sub"}, }, albums.path) albums.del(a2) assert.Equal(t, map[string][]*api.Album{ "one": {a1}, "two": {a2, a2a}, "one/sub": {a1sub}, }, albums.dupes) assert.Equal(t, map[string]*api.Album{ "2a": a2a, "1sub": a1sub, }, albums.byID) assert.Equal(t, map[string]*api.Album{ "one/sub": a1sub, "two {2a}": a2a, }, albums.byTitle) assert.Equal(t, map[string][]string{ "": {"one", "two {2a}"}, "one": {"sub"}, }, albums.path) albums.del(a2a) assert.Equal(t, map[string][]*api.Album{ "one": {a1}, "two": {a2, a2a}, "one/sub": {a1sub}, }, albums.dupes) assert.Equal(t, map[string]*api.Album{ "1sub": a1sub, }, albums.byID) assert.Equal(t, map[string]*api.Album{ "one/sub": a1sub, }, albums.byTitle) assert.Equal(t, map[string][]string{ "": {"one"}, "one": {"sub"}, }, albums.path) albums.del(a1sub) assert.Equal(t, map[string][]*api.Album{ "one": {a1}, "two": {a2, a2a}, "one/sub": {a1sub}, }, albums.dupes) assert.Equal(t, map[string]*api.Album{}, albums.byID) assert.Equal(t, map[string]*api.Album{}, albums.byTitle) assert.Equal(t, map[string][]string{}, albums.path) } func TestAlbumsGet(t *testing.T) { albums := newAlbums() a1 := &api.Album{ Title: "one", ID: "1", } albums.add(a1) album, ok := albums.get("one") assert.Equal(t, true, ok) assert.Equal(t, a1, album) album, ok = albums.get("notfound") assert.Equal(t, false, ok) assert.Nil(t, album) } func TestAlbumsGetDirs(t *testing.T) { albums := newAlbums() a1 := &api.Album{ Title: "one", ID: "1", } albums.add(a1) dirs, ok := albums.getDirs("") assert.Equal(t, true, ok) assert.Equal(t, []string{"one"}, dirs) dirs, ok = albums.getDirs("notfound") assert.Equal(t, false, ok) assert.Nil(t, dirs) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/googlephotos/albums.go
backend/googlephotos/albums.go
// This file contains the albums abstraction package googlephotos import ( "path" "slices" "strings" "sync" "github.com/rclone/rclone/backend/googlephotos/api" ) // All the albums type albums struct { mu sync.Mutex dupes map[string][]*api.Album // duplicated names byID map[string]*api.Album //..indexed by ID byTitle map[string]*api.Album //..indexed by Title path map[string][]string // partial album names to directory } // Create a new album func newAlbums() *albums { return &albums{ dupes: map[string][]*api.Album{}, byID: map[string]*api.Album{}, byTitle: map[string]*api.Album{}, path: map[string][]string{}, } } // add an album func (as *albums) add(album *api.Album) { // Munge the name of the album into a sensible path name album.Title = path.Clean(album.Title) if album.Title == "." || album.Title == "/" { album.Title = addID("", album.ID) } as.mu.Lock() as._add(album) as.mu.Unlock() } // _add an album - call with lock held func (as *albums) _add(album *api.Album) { // update dupes by title dupes := as.dupes[album.Title] dupes = append(dupes, album) as.dupes[album.Title] = dupes // Dedupe the album name if necessary if len(dupes) >= 2 { // If this is the first dupe, then need to adjust the first one if len(dupes) == 2 { firstAlbum := dupes[0] as._del(firstAlbum) as._add(firstAlbum) // undo add of firstAlbum to dupes as.dupes[album.Title] = dupes } album.Title = addID(album.Title, album.ID) } // Store the new album as.byID[album.ID] = album as.byTitle[album.Title] = album // Store the partial paths dir, leaf := album.Title, "" for dir != "" { i := strings.LastIndex(dir, "/") if i >= 0 { dir, leaf = dir[:i], dir[i+1:] } else { dir, leaf = "", dir } dirs := as.path[dir] found := false for _, dir := range dirs { if dir == leaf { found = true } } if !found { as.path[dir] = append(as.path[dir], leaf) } } } // del an album func (as *albums) del(album *api.Album) { as.mu.Lock() as._del(album) as.mu.Unlock() } // _del an album - call with lock held func (as *albums) 
_del(album *api.Album) { // We leave in dupes so it doesn't cause albums to get renamed // Remove from byID and byTitle delete(as.byID, album.ID) delete(as.byTitle, album.Title) // Remove from paths dir, leaf := album.Title, "" for dir != "" { // Can't delete if this dir exists anywhere in the path structure if _, found := as.path[dir]; found { break } i := strings.LastIndex(dir, "/") if i >= 0 { dir, leaf = dir[:i], dir[i+1:] } else { dir, leaf = "", dir } dirs := as.path[dir] for i, dir := range dirs { if dir == leaf { dirs = slices.Delete(dirs, i, i+1) break } } if len(dirs) == 0 { delete(as.path, dir) } else { as.path[dir] = dirs } } } // get an album by title func (as *albums) get(title string) (album *api.Album, ok bool) { as.mu.Lock() defer as.mu.Unlock() album, ok = as.byTitle[title] return album, ok } // getDirs gets directories below an album path func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) { as.mu.Lock() defer as.mu.Unlock() dirs, ok = as.path[albumPath] return dirs, ok }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/googlephotos/googlephotos_test.go
backend/googlephotos/googlephotos_test.go
package googlephotos import ( "context" "errors" "fmt" "io" "net/http" "path" "testing" "time" _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/lib/random" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( // We have two different files here as Google Photos will uniq // them otherwise which confuses the tests as the filename is // unexpected. fileNameAlbum = "rclone-test-image1.jpg" fileNameUpload = "rclone-test-image2.jpg" ) func TestIntegration(t *testing.T) { ctx := context.Background() fstest.Initialise() // Create Fs if *fstest.RemoteName == "" { *fstest.RemoteName = "TestGooglePhotos:" } f, err := fs.NewFs(ctx, *fstest.RemoteName) if errors.Is(err, fs.ErrorNotFoundInConfigFile) { t.Skipf("Couldn't create google photos backend - skipping tests: %v", err) } require.NoError(t, err) // Create local Fs pointing at testfiles localFs, err := fs.NewFs(ctx, "testfiles") require.NoError(t, err) t.Run("CreateAlbum", func(t *testing.T) { albumName := "album/rclone-test-" + random.String(24) err = f.Mkdir(ctx, albumName) require.NoError(t, err) remote := albumName + "/" + fileNameAlbum t.Run("PutFile", func(t *testing.T) { srcObj, err := localFs.NewObject(ctx, fileNameAlbum) require.NoError(t, err) in, err := srcObj.Open(ctx) require.NoError(t, err) dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote)) require.NoError(t, err) assert.Equal(t, remote, dstObj.Remote()) _ = in.Close() remoteWithID := addFileID(remote, dstObj.(*Object).id) t.Run("ObjectFs", func(t *testing.T) { assert.Equal(t, f, dstObj.Fs()) }) t.Run("ObjectString", func(t *testing.T) { assert.Equal(t, remote, dstObj.String()) assert.Equal(t, "<nil>", (*Object)(nil).String()) }) t.Run("ObjectHash", func(t *testing.T) { h, err := dstObj.Hash(ctx, hash.MD5) assert.Equal(t, "", h) assert.Equal(t, hash.ErrUnsupported, err) }) t.Run("ObjectSize", 
func(t *testing.T) { assert.Equal(t, int64(-1), dstObj.Size()) f.(*Fs).opt.ReadSize = true defer func() { f.(*Fs).opt.ReadSize = false }() size := dstObj.Size() assert.True(t, size > 1000, fmt.Sprintf("Size too small %d", size)) }) t.Run("ObjectSetModTime", func(t *testing.T) { err := dstObj.SetModTime(ctx, time.Now()) assert.Equal(t, fs.ErrorCantSetModTime, err) }) t.Run("ObjectStorable", func(t *testing.T) { assert.True(t, dstObj.Storable()) }) t.Run("ObjectOpen", func(t *testing.T) { in, err := dstObj.Open(ctx) require.NoError(t, err) buf, err := io.ReadAll(in) require.NoError(t, err) require.NoError(t, in.Close()) assert.True(t, len(buf) > 1000) contentType := http.DetectContentType(buf[:512]) assert.Equal(t, "image/jpeg", contentType) }) t.Run("CheckFileInAlbum", func(t *testing.T) { entries, err := f.List(ctx, albumName) require.NoError(t, err) assert.Equal(t, 1, len(entries)) assert.Equal(t, remote, entries[0].Remote()) assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String()) }) // Check it is there in the date/month/year hierarchy // 2013-07-13 is the creation date of the folder checkPresent := func(t *testing.T, objPath string) { entries, err := f.List(ctx, objPath) require.NoError(t, err) found := false for _, entry := range entries { leaf := path.Base(entry.Remote()) if leaf == fileNameAlbum || leaf == remoteWithID { found = true } } assert.True(t, found, fmt.Sprintf("didn't find %q in %q", fileNameAlbum, objPath)) } t.Run("CheckInByYear", func(t *testing.T) { checkPresent(t, "media/by-year/2013") }) t.Run("CheckInByMonth", func(t *testing.T) { checkPresent(t, "media/by-month/2013/2013-07") }) t.Run("CheckInByDay", func(t *testing.T) { checkPresent(t, "media/by-day/2013/2013-07-26") }) t.Run("NewObject", func(t *testing.T) { o, err := f.NewObject(ctx, remote) require.NoError(t, err) require.Equal(t, remote, o.Remote()) }) t.Run("NewObjectWithID", func(t *testing.T) { o, err := f.NewObject(ctx, remoteWithID) require.NoError(t, 
err) require.Equal(t, remoteWithID, o.Remote()) }) t.Run("NewFsIsFile", func(t *testing.T) { fNew, err := fs.NewFs(ctx, *fstest.RemoteName+remote) assert.Equal(t, fs.ErrorIsFile, err) leaf := path.Base(remote) o, err := fNew.NewObject(ctx, leaf) require.NoError(t, err) require.Equal(t, leaf, o.Remote()) }) t.Run("RemoveFileFromAlbum", func(t *testing.T) { err = dstObj.Remove(ctx) require.NoError(t, err) time.Sleep(time.Second) // Check album empty entries, err := f.List(ctx, albumName) require.NoError(t, err) assert.Equal(t, 0, len(entries)) }) }) // remove the album err = f.Rmdir(ctx, albumName) require.Error(t, err) // FIXME doesn't work yet }) t.Run("UploadMkdir", func(t *testing.T) { assert.NoError(t, f.Mkdir(ctx, "upload/dir")) assert.NoError(t, f.Mkdir(ctx, "upload/dir/subdir")) t.Run("List", func(t *testing.T) { entries, err := f.List(ctx, "upload") require.NoError(t, err) assert.Equal(t, 1, len(entries)) assert.Equal(t, "upload/dir", entries[0].Remote()) entries, err = f.List(ctx, "upload/dir") require.NoError(t, err) assert.Equal(t, 1, len(entries)) assert.Equal(t, "upload/dir/subdir", entries[0].Remote()) }) t.Run("Rmdir", func(t *testing.T) { assert.NoError(t, f.Rmdir(ctx, "upload/dir/subdir")) assert.NoError(t, f.Rmdir(ctx, "upload/dir")) }) t.Run("ListEmpty", func(t *testing.T) { entries, err := f.List(ctx, "upload") require.NoError(t, err) assert.Equal(t, 0, len(entries)) _, err = f.List(ctx, "upload/dir") assert.Equal(t, fs.ErrorDirNotFound, err) }) }) t.Run("Upload", func(t *testing.T) { uploadDir := "upload/dir/subdir" remote := path.Join(uploadDir, fileNameUpload) srcObj, err := localFs.NewObject(ctx, fileNameUpload) require.NoError(t, err) in, err := srcObj.Open(ctx) require.NoError(t, err) dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote)) require.NoError(t, err) assert.Equal(t, remote, dstObj.Remote()) _ = in.Close() remoteWithID := addFileID(remote, dstObj.(*Object).id) t.Run("List", func(t *testing.T) { entries, err := 
f.List(ctx, uploadDir) require.NoError(t, err) require.Equal(t, 1, len(entries)) assert.Equal(t, remote, entries[0].Remote()) assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String()) }) t.Run("NewObject", func(t *testing.T) { o, err := f.NewObject(ctx, remote) require.NoError(t, err) require.Equal(t, remote, o.Remote()) }) t.Run("NewObjectWithID", func(t *testing.T) { o, err := f.NewObject(ctx, remoteWithID) require.NoError(t, err) require.Equal(t, remoteWithID, o.Remote()) }) }) t.Run("Name", func(t *testing.T) { assert.Equal(t, (*fstest.RemoteName)[:len(*fstest.RemoteName)-1], f.Name()) }) t.Run("Root", func(t *testing.T) { assert.Equal(t, "", f.Root()) }) t.Run("String", func(t *testing.T) { assert.Equal(t, `Google Photos path ""`, f.String()) }) t.Run("Features", func(t *testing.T) { features := f.Features() assert.False(t, features.CaseInsensitive) assert.True(t, features.ReadMimeType) }) t.Run("Precision", func(t *testing.T) { assert.Equal(t, fs.ModTimeNotSupported, f.Precision()) }) t.Run("Hashes", func(t *testing.T) { assert.Equal(t, hash.Set(hash.None), f.Hashes()) }) } func TestAddID(t *testing.T) { assert.Equal(t, "potato {123}", addID("potato", "123")) assert.Equal(t, "{123}", addID("", "123")) } func TestFileAddID(t *testing.T) { assert.Equal(t, "potato {123}.txt", addFileID("potato.txt", "123")) assert.Equal(t, "potato {123}", addFileID("potato", "123")) assert.Equal(t, "{123}", addFileID("", "123")) } func TestFindID(t *testing.T) { assert.Equal(t, "", findID("potato")) ID := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" assert.Equal(t, ID, findID("potato {"+ID+"}.txt")) ID = ID[1:] assert.Equal(t, "", findID("potato {"+ID+"}.txt")) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/googlephotos/googlephotos.go
backend/googlephotos/googlephotos.go
// Package googlephotos provides an interface to Google Photos package googlephotos // FIXME Resumable uploads not implemented - rclone can't resume uploads in general import ( "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "path" "regexp" "strconv" "strings" "sync" "time" "github.com/rclone/rclone/backend/googlephotos/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/dirtree" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/batcher" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" "golang.org/x/oauth2/google" ) var ( errCantUpload = errors.New("can't upload files here") errCantMkdir = errors.New("can't make directories here") errCantRmdir = errors.New("can't remove this directory") errAlbumDelete = errors.New("google photos API does not implement deleting albums") errRemove = errors.New("google photos API only implements removing files from albums") errOwnAlbums = errors.New("google photos API only allows uploading to albums rclone created") errReadOnly = errors.New("can't upload files in read only mode") ) const ( rcloneClientID = "202264815644-rt1o1c9evjaotbpbab10m83i8cnjk077.apps.googleusercontent.com" rcloneEncryptedClientSecret = "kLJLretPefBgrDHosdml_nlF64HZ9mUcO85X5rdjYBPP8ChA-jr3Ow" rootURL = "https://photoslibrary.googleapis.com/v1" listChunks = 100 // chunk size to read directory listings albumChunks = 50 // chunk size to read album listings minSleep = 10 * time.Millisecond scopeAppendOnly = "https://www.googleapis.com/auth/photoslibrary.appendonly" scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly.appcreateddata" scopeReadWrite = 
"https://www.googleapis.com/auth/photoslibrary.edit.appcreateddata" ) var ( // scopes needed for read write access scopesReadWrite = []string{ "openid", "profile", scopeAppendOnly, scopeReadOnly, scopeReadWrite, } // scopes needed for read only access scopesReadOnly = []string{ "openid", "profile", scopeReadOnly, } // Description of how to auth for this app oauthConfig = &oauthutil.Config{ Scopes: scopesReadWrite, AuthURL: google.Endpoint.AuthURL, TokenURL: google.Endpoint.TokenURL, ClientID: rcloneClientID, ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), RedirectURL: oauthutil.RedirectURL, } // Configure the batcher defaultBatcherOptions = batcher.Options{ MaxBatchSize: 50, DefaultTimeoutSync: 1000 * time.Millisecond, DefaultTimeoutAsync: 10 * time.Second, DefaultBatchSizeAsync: 50, } ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "google photos", Prefix: "gphotos", Description: "Google Photos", NewFs: NewFs, Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, fmt.Errorf("couldn't parse config into struct: %w", err) } switch config.State { case "": // Fill in the scopes if opt.ReadOnly { oauthConfig.Scopes = scopesReadOnly } else { oauthConfig.Scopes = scopesReadWrite } return oauthutil.ConfigOut("warning1", &oauthutil.Options{ OAuth2Config: oauthConfig, }) case "warning1": // Warn the user as required by google photos integration return fs.ConfigConfirm("warning2", true, "config_warning", `Warning IMPORTANT: All media items uploaded to Google Photos with rclone are stored in full resolution at original quality. 
These uploads will count towards storage in your Google Account.`) case "warning2": // Warn the user that rclone can no longer download photos it didnt upload from google photos return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning IMPORTANT: Due to Google policy changes rclone can now only download photos it uploaded.`) case "warning_done": return nil, nil } return nil, fmt.Errorf("unknown state %q", config.State) }, Options: append(append(oauthutil.SharedOptions, []fs.Option{{ Name: "read_only", Default: false, Help: `Set to make the Google Photos backend read only. If you choose read only then rclone will only request read only access to your photos, otherwise rclone will request full access.`, }, { Name: "read_size", Default: false, Help: `Set to read the size of media items. Normally rclone does not read the size of media items since this takes another transaction. This isn't necessary for syncing. However rclone mount needs to know the size of files in advance of reading them, so setting this flag when using rclone mount is recommended if you want to read the media.`, Advanced: true, }, { Name: "start_year", Default: 2000, Help: `Year limits the photos to be downloaded to those which are uploaded after the given year.`, Advanced: true, }, { Name: "include_archived", Default: false, Help: `Also view and download archived media. By default, rclone does not request archived media. Thus, when syncing, archived media is not visible in directory listings or transferred. Note that media in albums is always visible and synced, no matter their archive status. With this flag, archived media are always visible in directory listings and transferred. 
Without this flag, archived media will not be visible in directory listings and won't be transferred.`, Advanced: true, }, { Name: "proxy", Default: "", Help: strings.ReplaceAll(`Use the gphotosdl proxy for downloading the full resolution images The Google API will deliver images and video which aren't full resolution, and/or have EXIF data missing. However if you use the gphotosdl proxy then you can download original, unchanged images. This runs a headless browser in the background. Download the software from [gphotosdl](https://github.com/rclone/gphotosdl) First run with gphotosdl -login Then once you have logged into google photos close the browser window and run gphotosdl Then supply the parameter |--gphotos-proxy "http://localhost:8282"| to make rclone use the proxy. `, "|", "`"), Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: (encoder.Base | encoder.EncodeCrLf | encoder.EncodeInvalidUtf8), }}...), defaultBatcherOptions.FsOptions("")...), }) } // Options defines the configuration for this backend type Options struct { ReadOnly bool `config:"read_only"` ReadSize bool `config:"read_size"` StartYear int `config:"start_year"` IncludeArchived bool `config:"include_archived"` Enc encoder.MultiEncoder `config:"encoding"` BatchMode string `config:"batch_mode"` BatchSize int `config:"batch_size"` BatchTimeout fs.Duration `config:"batch_timeout"` Proxy string `config:"proxy"` } // Fs represents a remote storage server type Fs struct { name string // name of this remote root string // the path we are working on if any opt Options // parsed options features *fs.Features // optional features unAuth *rest.Client // unauthenticated http client srv *rest.Client // the connection to the server ts *oauthutil.TokenSource // token source for oauth2 pacer *fs.Pacer // To pace the API calls startTime time.Time // time Fs was started - used for datestamps albumsMu sync.Mutex // protect albums (but not contents) albums 
map[bool]*albums // albums, shared or not uploadedMu sync.Mutex // to protect the below uploaded dirtree.DirTree // record of uploaded items createMu sync.Mutex // held when creating albums to prevent dupes batcher *batcher.Batcher[uploadedItem, *api.MediaItem] } // Object describes a storage object // // Will definitely have info but maybe not meta type Object struct { fs *Fs // what this object is part of remote string // The remote path url string // download path id string // ID of this object bytes int64 // Bytes in the object modTime time.Time // Modified time of the object mimeType string } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("Google Photos path %q", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // dirTime returns the time to set a directory to func (f *Fs) dirTime() time.Time { return f.startTime } // startYear returns the start year func (f *Fs) startYear() int { return f.opt.StartYear } func (f *Fs) includeArchived() bool { return f.opt.IncludeArchived } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. 
It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // errorHandler parses a non 2xx error response into an error func errorHandler(resp *http.Response) error { body, err := rest.ReadBody(resp) if err != nil { body = nil } // Google sends 404 messages as images so be prepared for that if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") { body = []byte("Image not found or broken") } e := api.Error{ Details: api.ErrorDetails{ Code: resp.StatusCode, Message: string(body), Status: resp.Status, }, } if body != nil { _ = json.Unmarshal(body, &e) } return &e } // NewFs constructs an Fs from the path, bucket:path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } baseClient := fshttp.NewClient(ctx) oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient) if err != nil { return nil, fmt.Errorf("failed to configure google photos: %w", err) } root = strings.Trim(path.Clean(root), "/") if root == "." 
|| root == "/" { root = "" } f := &Fs{ name: name, root: root, opt: *opt, unAuth: rest.NewClient(baseClient), srv: rest.NewClient(oAuthClient).SetRoot(rootURL), ts: ts, pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))), startTime: time.Now(), albums: map[bool]*albums{}, uploaded: dirtree.New(), } batcherOptions := defaultBatcherOptions batcherOptions.Mode = f.opt.BatchMode batcherOptions.Size = f.opt.BatchSize batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout) f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions) if err != nil { return nil, err } f.features = (&fs.Features{ ReadMimeType: true, }).Fill(ctx, f) f.srv.SetErrorHandler(errorHandler) _, _, pattern := patterns.match(f.root, "", true) if pattern != nil && pattern.isFile { oldRoot := f.root var leaf string f.root, leaf = path.Split(f.root) f.root = strings.TrimRight(f.root, "/") _, err := f.NewObject(ctx, leaf) if err == nil { return f, fs.ErrorIsFile } f.root = oldRoot } return f, nil } // fetchEndpoint gets the openid endpoint named from the Google config func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, err error) { // Get openID config without auth opts := rest.Opts{ Method: "GET", RootURL: "https://accounts.google.com/.well-known/openid-configuration", } var openIDconfig map[string]any err = f.pacer.Call(func() (bool, error) { resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig) return shouldRetry(ctx, resp, err) }) if err != nil { return "", fmt.Errorf("couldn't read openID config: %w", err) } // Find userinfo endpoint endpoint, ok := openIDconfig[name].(string) if !ok { return "", fmt.Errorf("couldn't find %q from openID config", name) } return endpoint, nil } // UserInfo fetches info about the current user with oauth2 func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err error) { endpoint, err := f.fetchEndpoint(ctx, "userinfo_endpoint") if err != nil { return nil, err } // Fetch the user info 
with auth opts := rest.Opts{ Method: "GET", RootURL: endpoint, } err = f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't read user info: %w", err) } return userInfo, nil } // Disconnect kills the token and refresh token func (f *Fs) Disconnect(ctx context.Context) (err error) { endpoint, err := f.fetchEndpoint(ctx, "revocation_endpoint") if err != nil { return err } token, err := f.ts.Token() if err != nil { return err } // Revoke the token and the refresh token opts := rest.Opts{ Method: "POST", RootURL: endpoint, MultipartParams: url.Values{ "token": []string{token.AccessToken}, "token_type_hint": []string{"access_token"}, }, } var res any err = f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, nil, &res) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("couldn't revoke token: %w", err) } fs.Infof(f, "res = %+v", res) return nil } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.MediaItem) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } if info != nil { o.setMetaData(info) } else { err := o.readMetaData(ctx) // reads info and meta, returning an error if err != nil { return nil, err } } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { // defer log.Trace(f, "remote=%q", remote)("") return f.newObjectWithInfo(ctx, remote, nil) } // addID adds the ID to name func addID(name string, ID string) string { idStr := "{" + ID + "}" if name == "" { return idStr } return name + " " + idStr } // addFileID adds the ID to the fileName passed in func addFileID(fileName string, ID string) string { ext := path.Ext(fileName) base := fileName[:len(fileName)-len(ext)] return addID(base, ID) + ext } var idRe = regexp.MustCompile(`\{([A-Za-z0-9_-]{55,})\}`) // findID finds an ID in string if one is there or "" func findID(name string) string { match := idRe.FindStringSubmatch(name) if match == nil { return "" } return match[1] } // list the albums into an internal cache // FIXME cache invalidation func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err error) { f.albumsMu.Lock() defer f.albumsMu.Unlock() all, ok := f.albums[shared] if ok && all != nil { return all, nil } opts := rest.Opts{ Method: "GET", Path: "/albums", Parameters: url.Values{}, } if shared { opts.Path = "/sharedAlbums" } all = newAlbums() opts.Parameters.Set("pageSize", strconv.Itoa(albumChunks)) lastID := "" for { var result api.ListAlbums var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't list albums: %w", err) } newAlbums := result.Albums if shared { newAlbums = result.SharedAlbums } if len(newAlbums) > 0 && newAlbums[0].ID == lastID { // skip first if ID duplicated from last page newAlbums = newAlbums[1:] } if len(newAlbums) > 0 { lastID = newAlbums[len(newAlbums)-1].ID } for i := range newAlbums { anAlbum := newAlbums[i] anAlbum.Title = f.opt.Enc.FromStandardPath(anAlbum.Title) all.add(&anAlbum) } if result.NextPageToken == "" { break } opts.Parameters.Set("pageToken", result.NextPageToken) } 
f.albums[shared] = all return all, nil } // listFn is called from list to handle an object. type listFn func(remote string, object *api.MediaItem, isDirectory bool) error // list the objects into the function supplied // // dir is the starting directory, "" for root // // Set recurse to read sub directories func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err error) { opts := rest.Opts{ Method: "POST", Path: "/mediaItems:search", } filter.PageSize = listChunks filter.PageToken = "" if filter.AlbumID == "" { // album ID and filters cannot be set together, else error 400 INVALID_ARGUMENT if filter.Filters == nil { filter.Filters = &api.Filters{} } filter.Filters.IncludeArchivedMedia = &f.opt.IncludeArchived } lastID := "" for { var result api.MediaItems var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("couldn't list files: %w", err) } items := result.MediaItems if len(items) > 0 && items[0].ID == lastID { // skip first if ID duplicated from last page items = items[1:] } if len(items) > 0 { lastID = items[len(items)-1].ID } for i := range items { item := &result.MediaItems[i] remote := item.Filename remote = strings.ReplaceAll(remote, "/", "/") err = fn(remote, item, false) if err != nil { return err } } if result.NextPageToken == "" { break } filter.PageToken = result.NextPageToken } return nil } // Convert a list item into a DirEntry func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *api.MediaItem, isDirectory bool) (fs.DirEntry, error) { if isDirectory { d := fs.NewDir(remote, f.dirTime()) return d, nil } o := &Object{ fs: f, remote: remote, } o.setMetaData(item) return o, nil } // listDir lists a single directory func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) { // List the objects err = f.list(ctx, filter, 
func(remote string, item *api.MediaItem, isDirectory bool) error { entry, err := f.itemToDirEntry(ctx, prefix+remote, item, isDirectory) if err != nil { return err } entries = append(entries, entry) return nil }) if err != nil { return nil, err } // Dedupe the file names dupes := map[string]int{} for _, entry := range entries { o, ok := entry.(*Object) if ok { dupes[o.remote]++ } } for _, entry := range entries { o, ok := entry.(*Object) if ok { duplicated := dupes[o.remote] > 1 if duplicated || o.remote == "" { o.remote = addFileID(o.remote, o.id) } } } return entries, err } // listUploads lists a single directory from the uploads func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) { f.uploadedMu.Lock() entries, ok := f.uploaded[dir] f.uploadedMu.Unlock() if !ok && dir != "" { return nil, fs.ErrorDirNotFound } return entries, nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { // defer log.Trace(f, "dir=%q", dir)("err=%v", &err) match, prefix, pattern := patterns.match(f.root, dir, false) if pattern == nil || pattern.isFile { return nil, fs.ErrorDirNotFound } if pattern.toEntries != nil { return pattern.toEntries(ctx, f, prefix, match) } return nil, fs.ErrorDirNotFound } // Put the object into the bucket // // Copy the reader in to the new object which is returned. 
// // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // defer log.Trace(f, "src=%+v", src)("") // Temporary Object under construction o := &Object{ fs: f, remote: src.Remote(), } return o, o.Update(ctx, in, src, options...) } // createAlbum creates the album func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) { opts := rest.Opts{ Method: "POST", Path: "/albums", Parameters: url.Values{}, } request := api.CreateAlbum{ Album: &api.Album{ Title: albumTitle, }, } var result api.Album var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, request, &result) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't create album: %w", err) } f.albums[false].add(&result) return &result, nil } // getOrCreateAlbum gets an existing album or creates a new one // // It does the creation with the lock held to avoid duplicates func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) { f.createMu.Lock() defer f.createMu.Unlock() albums, err := f.listAlbums(ctx, false) if err != nil { return nil, err } album, ok := albums.get(albumTitle) if ok { return album, nil } return f.createAlbum(ctx, albumTitle) } // Mkdir creates the album if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { // defer log.Trace(f, "dir=%q", dir)("err=%v", &err) match, prefix, pattern := patterns.match(f.root, dir, false) if pattern == nil { return fs.ErrorDirNotFound } if !pattern.canMkdir { return errCantMkdir } if pattern.isUpload { f.uploadedMu.Lock() d := fs.NewDir(strings.Trim(prefix, "/"), f.dirTime()) f.uploaded.AddEntry(d) f.uploadedMu.Unlock() return nil } albumTitle := match[1] _, err = f.getOrCreateAlbum(ctx, albumTitle) return err } // Rmdir deletes the bucket if the fs is 
at the root // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) { // defer log.Trace(f, "dir=%q")("err=%v", &err) match, _, pattern := patterns.match(f.root, dir, false) if pattern == nil { return fs.ErrorDirNotFound } if !pattern.canMkdir { return errCantRmdir } if pattern.isUpload { f.uploadedMu.Lock() err = f.uploaded.Prune(map[string]bool{ dir: true, }) f.uploadedMu.Unlock() return err } albumTitle := match[1] allAlbums, err := f.listAlbums(ctx, false) if err != nil { return err } album, ok := allAlbums.get(albumTitle) if !ok { return fs.ErrorDirNotFound } _ = album return errAlbumDelete } // Precision returns the precision func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } // Hashes returns the supported hash sets. func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } // Shutdown the backend, closing any background tasks and any // cached connections. func (f *Fs) Shutdown(ctx context.Context) error { f.batcher.Shutdown() return nil } // ------------------------------------------------------------ // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Hash returns the Md5sum of an object returning a lowercase hex string func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { return "", hash.ErrUnsupported } // Size returns the size of an object in bytes func (o *Object) Size() int64 { // defer log.Trace(o, "")("") if !o.fs.opt.ReadSize || o.bytes >= 0 { return o.bytes } ctx := context.TODO() err := o.readMetaData(ctx) if err != nil { fs.Debugf(o, "Size: Failed to read metadata: %v", err) return -1 } var resp *http.Response opts := rest.Opts{ Method: "HEAD", RootURL: o.downloadURL(), } err = o.fs.pacer.Call(func() (bool, error) { resp, err = 
o.fs.srv.Call(ctx, &opts) return shouldRetry(ctx, resp, err) }) if err != nil { fs.Debugf(o, "Reading size failed: %v", err) } else { lengthStr := resp.Header.Get("Content-Length") length, err := strconv.ParseInt(lengthStr, 10, 64) if err != nil { fs.Debugf(o, "Reading size failed to parse Content_length %q: %v", lengthStr, err) } else { o.bytes = length } } return o.bytes } // setMetaData sets the fs data from a storage.Object func (o *Object) setMetaData(info *api.MediaItem) { o.url = info.BaseURL o.id = info.ID o.bytes = -1 // FIXME o.mimeType = info.MimeType o.modTime = info.MediaMetadata.CreationTime } // readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info func (o *Object) readMetaData(ctx context.Context) (err error) { if !o.modTime.IsZero() && o.url != "" { return nil } dir, fileName := path.Split(o.remote) dir = strings.Trim(dir, "/") _, _, pattern := patterns.match(o.fs.root, o.remote, true) if pattern == nil { return fs.ErrorObjectNotFound } if !pattern.isFile { return fs.ErrorNotAFile } // If have ID fetch it directly if id := findID(fileName); id != "" { opts := rest.Opts{ Method: "GET", Path: "/mediaItems/" + id, } var item api.MediaItem var resp *http.Response err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item) return shouldRetry(ctx, resp, err) }) if err != nil { return fmt.Errorf("couldn't get media item: %w", err) } o.setMetaData(&item) return nil } // Otherwise list the directory the file is in entries, err := o.fs.List(ctx, dir) if err != nil { if err == fs.ErrorDirNotFound { return fs.ErrorObjectNotFound } return err } // and find the file in the directory for _, entry := range entries { if entry.Remote() == o.remote { if newO, ok := entry.(*Object); ok { *o = *newO return nil } } } return fs.ErrorObjectNotFound } // ModTime returns the modification time of the object // // It attempts to read the objects mtime and if that isn't present the // LastModified 
returned in the http headers func (o *Object) ModTime(ctx context.Context) time.Time { // defer log.Trace(o, "")("") err := o.readMetaData(ctx) if err != nil { fs.Debugf(o, "ModTime: Failed to read metadata: %v", err) return time.Now() } return o.modTime } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) { return fs.ErrorCantSetModTime } // Storable returns a boolean as to whether this object is storable func (o *Object) Storable() bool { return true } // downloadURL returns the URL for a full bytes download for the object func (o *Object) downloadURL() string { url := o.url + "=d" if strings.HasPrefix(o.mimeType, "video/") { url += "v" } return url } // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { // defer log.Trace(o, "")("") err = o.readMetaData(ctx) if err != nil { fs.Debugf(o, "Open: Failed to read metadata: %v", err) return nil, err } url := o.downloadURL() if o.fs.opt.Proxy != "" { url = strings.TrimRight(o.fs.opt.Proxy, "/") + "/id/" + o.id } var resp *http.Response opts := rest.Opts{ Method: "GET", RootURL: url, Options: options, } err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } return resp.Body, err } // input to the batcher type uploadedItem struct { AlbumID string // desired album UploadToken string // upload ID } // Commit a batch of items to albumID returning the errors in errors func (f *Fs) commitBatchAlbumID(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error, albumID string) { // Create the media item from an UploadToken, optionally adding to an album opts := rest.Opts{ Method: "POST", Path: "/mediaItems:batchCreate", } request := api.BatchCreateRequest{ AlbumID: albumID, } itemsInBatch := 0 for i := range items { if items[i].AlbumID == 
albumID { request.NewMediaItems = append(request.NewMediaItems, api.NewMediaItem{ SimpleMediaItem: api.SimpleMediaItem{ UploadToken: items[i].UploadToken, }, }) itemsInBatch++ } } var result api.BatchCreateResponse var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.CallJSON(ctx, &opts, request, &result) return shouldRetry(ctx, resp, err) }) if err != nil { err = fmt.Errorf("failed to create media item: %w", err) } if err == nil && len(result.NewMediaItemResults) != itemsInBatch { err = fmt.Errorf("bad response to BatchCreate expecting %d items but got %d", itemsInBatch, len(result.NewMediaItemResults)) } j := 0 for i := range items { if items[i].AlbumID == albumID { if err == nil { media := &result.NewMediaItemResults[j] if media.Status.Code != 0 { errors[i] = fmt.Errorf("upload failed: %s (%d)", media.Status.Message, media.Status.Code) } else { results[i] = &media.MediaItem } } else { errors[i] = err } j++ } } } // Called by the batcher to commit a batch func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error) (err error) { // Discover all the AlbumIDs as we have to upload these separately // // Should maybe have one batcher per AlbumID albumIDs := map[string]struct{}{} for i := range items { albumIDs[items[i].AlbumID] = struct{}{} } // batch the albums for albumID := range albumIDs { // errors returned in errors f.commitBatchAlbumID(ctx, items, results, errors, albumID) } return nil } // Update the object with the contents of the io.Reader, modTime and size // // The new object may have been created if an error is returned func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { // defer log.Trace(o, "src=%+v", src)("err=%v", &err) match, _, pattern := patterns.match(o.fs.root, o.remote, true) if pattern == nil || !pattern.isFile || !pattern.canUpload { return errCantUpload } var ( albumID string fileName 
string ) if pattern.isUpload { fileName = match[1] } else { var albumTitle string albumTitle, fileName = match[1], match[2] album, err := o.fs.getOrCreateAlbum(ctx, albumTitle) if err != nil { return err } if !album.IsWriteable { if o.fs.opt.ReadOnly { return errReadOnly } return errOwnAlbums } albumID = album.ID } // Upload the media item in exchange for an UploadToken opts := rest.Opts{ Method: "POST", Path: "/uploads", Options: options, ExtraHeaders: map[string]string{ "X-Goog-Upload-File-Name": fileName, "X-Goog-Upload-Protocol": "raw", }, Body: in, } var token []byte var resp *http.Response err = o.fs.pacer.CallNoRetry(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) if err != nil { return shouldRetry(ctx, resp, err) } token, err = rest.ReadBody(resp) return shouldRetry(ctx, resp, err) }) if err != nil {
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/googlephotos/pattern.go
backend/googlephotos/pattern.go
// Store the parsing of file patterns package googlephotos import ( "context" "fmt" "path" "regexp" "strconv" "strings" "time" "github.com/rclone/rclone/backend/googlephotos/api" "github.com/rclone/rclone/fs" ) // lister describes the subset of the interfaces on Fs needed for the // file pattern parsing type lister interface { listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) listAlbums(ctx context.Context, shared bool) (all *albums, err error) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) dirTime() time.Time startYear() int includeArchived() bool } // dirPattern describes a single directory pattern type dirPattern struct { re string // match for the path match *regexp.Regexp // compiled match canUpload bool // true if can upload here canMkdir bool // true if can make a directory here isFile bool // true if this is a file isUpload bool // true if this is the upload directory // function to turn a match into DirEntries toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) } // dirPatterns is a slice of all the directory patterns type dirPatterns []dirPattern // patterns describes the layout of the google photos backend file system. 
// // NB no trailing / on paths var patterns = dirPatterns{ { re: `^$`, toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) { return fs.DirEntries{ fs.NewDir(prefix+"media", f.dirTime()), fs.NewDir(prefix+"album", f.dirTime()), fs.NewDir(prefix+"shared-album", f.dirTime()), fs.NewDir(prefix+"upload", f.dirTime()), fs.NewDir(prefix+"feature", f.dirTime()), }, nil }, }, { re: `^upload(?:/(.*))?$`, toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) { return f.listUploads(ctx, match[0]) }, canUpload: true, canMkdir: true, isUpload: true, }, { re: `^upload/(.*)$`, isFile: true, canUpload: true, isUpload: true, }, { re: `^media$`, toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) { return fs.DirEntries{ fs.NewDir(prefix+"all", f.dirTime()), fs.NewDir(prefix+"by-year", f.dirTime()), fs.NewDir(prefix+"by-month", f.dirTime()), fs.NewDir(prefix+"by-day", f.dirTime()), }, nil }, }, { re: `^media/all$`, toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) { return f.listDir(ctx, prefix, api.SearchFilter{}) }, }, { re: `^media/all/([^/]+)$`, isFile: true, }, { re: `^media/by-year$`, toEntries: years, }, { re: `^media/by-year/(\d{4})$`, toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) { filter, err := yearMonthDayFilter(ctx, f, match) if err != nil { return nil, err } return f.listDir(ctx, prefix, filter) }, }, { re: `^media/by-year/(\d{4})/([^/]+)$`, isFile: true, }, { re: `^media/by-month$`, toEntries: years, }, { re: `^media/by-month/(\d{4})$`, toEntries: months, }, { re: `^media/by-month/\d{4}/(\d{4})-(\d{2})$`, toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) { filter, err := yearMonthDayFilter(ctx, f, match) if err != nil { return nil, err } return f.listDir(ctx, prefix, 
filter) }, }, { re: `^media/by-month/\d{4}/(\d{4})-(\d{2})/([^/]+)$`, isFile: true, }, { re: `^media/by-day$`, toEntries: years, }, { re: `^media/by-day/(\d{4})$`, toEntries: days, }, { re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`, toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) { filter, err := yearMonthDayFilter(ctx, f, match) if err != nil { return nil, err } return f.listDir(ctx, prefix, filter) }, }, { re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})/([^/]+)$`, isFile: true, }, { re: `^album$`, toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { return albumsToEntries(ctx, f, false, prefix, "") }, }, { re: `^album/(.+)$`, canMkdir: true, toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { return albumsToEntries(ctx, f, false, prefix, match[1]) }, }, { re: `^album/(.+?)/([^/]+)$`, canUpload: true, isFile: true, }, { re: `^shared-album$`, toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { return albumsToEntries(ctx, f, true, prefix, "") }, }, { re: `^shared-album/(.+)$`, toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { return albumsToEntries(ctx, f, true, prefix, match[1]) }, }, { re: `^shared-album/(.+?)/([^/]+)$`, isFile: true, }, { re: `^feature$`, toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { return fs.DirEntries{ fs.NewDir(prefix+"favorites", f.dirTime()), }, nil }, }, { re: `^feature/favorites$`, toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { filter := featureFilter(ctx, f, match) if err != nil { return nil, err } return f.listDir(ctx, prefix, filter) }, }, { re: `^feature/favorites/([^/]+)$`, isFile: true, }, 
}.mustCompile() // mustCompile compiles the regexps in the dirPatterns func (ds dirPatterns) mustCompile() dirPatterns { for i := range ds { pattern := &ds[i] pattern.match = regexp.MustCompile(pattern.re) } return ds } // match finds the path passed in the matching structure and // returns the parameters and a pointer to the match, or nil. func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) { itemPath = strings.Trim(itemPath, "/") absPath := path.Join(root, itemPath) prefix = strings.Trim(absPath[len(root):], "/") if prefix != "" { prefix += "/" } for i := range ds { pattern = &ds[i] if pattern.isFile != isFile { continue } match = pattern.match.FindStringSubmatch(absPath) if match != nil { return } } return nil, "", nil } // Return the years from startYear to today func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { currentYear := f.dirTime().Year() for year := f.startYear(); year <= currentYear; year++ { entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime())) } return entries, nil } // Return the months in a given year func months(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { year := match[1] for month := 1; month <= 12; month++ { entries = append(entries, fs.NewDir(fmt.Sprintf("%s%s-%02d", prefix, year, month), f.dirTime())) } return entries, nil } // Return the days in a given year func days(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { year := match[1] current, err := time.Parse("2006", year) if err != nil { return nil, fmt.Errorf("bad year %q", match[1]) } currentYear := current.Year() for current.Year() == currentYear { entries = append(entries, fs.NewDir(prefix+current.Format("2006-01-02"), f.dirTime())) current = current.AddDate(0, 0, 1) } return entries, nil } // This creates a search filter on 
year/month/day as provided func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) { year, err := strconv.Atoi(match[1]) if err != nil || year < 1000 || year > 3000 { return sf, fmt.Errorf("bad year %q", match[1]) } sf = api.SearchFilter{ Filters: &api.Filters{ DateFilter: &api.DateFilter{ Dates: []api.Date{ { Year: year, }, }, }, }, } if len(match) >= 3 { month, err := strconv.Atoi(match[2]) if err != nil || month < 1 || month > 12 { return sf, fmt.Errorf("bad month %q", match[2]) } sf.Filters.DateFilter.Dates[0].Month = month } if len(match) >= 4 { day, err := strconv.Atoi(match[3]) if err != nil || day < 1 || day > 31 { return sf, fmt.Errorf("bad day %q", match[3]) } sf.Filters.DateFilter.Dates[0].Day = day } return sf, nil } // featureFilter creates a filter for the Feature enum // // The API only supports one feature, FAVORITES, so hardcode that feature. // // https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) { sf = api.SearchFilter{ Filters: &api.Filters{ FeatureFilter: &api.FeatureFilter{ IncludedFeatures: []string{ "FAVORITES", }, }, }, } return sf } // Turns an albumPath into entries // // These can either be synthetic directory entries if the album path // is a prefix of another album, or actual files, or a combination of // the two. 
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) { albums, err := f.listAlbums(ctx, shared) if err != nil { return nil, err } // Put in the directories dirs, foundAlbumPath := albums.getDirs(albumPath) if foundAlbumPath { for _, dir := range dirs { d := fs.NewDir(prefix+dir, f.dirTime()) dirPath := path.Join(albumPath, dir) // if this dir is an album add more special stuff album, ok := albums.get(dirPath) if ok { count, err := strconv.ParseInt(album.MediaItemsCount, 10, 64) if err != nil { fs.Debugf(f, "Error reading media count: %v", err) } d.SetID(album.ID).SetItems(count) } entries = append(entries, d) } } // if this is an album then return a filter to list it album, foundAlbum := albums.get(albumPath) if foundAlbum { filter := api.SearchFilter{AlbumID: album.ID} newEntries, err := f.listDir(ctx, prefix, filter) if err != nil { return nil, err } entries = append(entries, newEntries...) } if !foundAlbumPath && !foundAlbum && albumPath != "" { return nil, fs.ErrorDirNotFound } return entries, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/googlephotos/pattern_test.go
backend/googlephotos/pattern_test.go
package googlephotos import ( "context" "fmt" "testing" "time" "github.com/rclone/rclone/backend/googlephotos/api" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/dirtree" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/mockobject" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // time for directories var startTime = fstest.Time("2019-06-24T15:53:05.999999999Z") // mock Fs for testing patterns type testLister struct { t *testing.T albums *albums names []string uploaded dirtree.DirTree } // newTestLister makes a mock for testing func newTestLister(t *testing.T) *testLister { return &testLister{ t: t, albums: newAlbums(), uploaded: dirtree.New(), } } // mock listDir for testing func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) { for _, name := range f.names { entries = append(entries, mockobject.New(prefix+name)) } return entries, nil } // mock listAlbums for testing func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums, err error) { return f.albums, nil } // mock listUploads for testing func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) { entries = f.uploaded[dir] return entries, nil } // mock dirTime for testing func (f *testLister) dirTime() time.Time { return startTime } // mock startYear for testing func (f *testLister) startYear() int { return 2000 } // mock includeArchived for testing func (f *testLister) includeArchived() bool { return false } func TestPatternMatch(t *testing.T) { for testNumber, test := range []struct { // input root string itemPath string isFile bool // expected output wantMatch []string wantPrefix string wantPattern *dirPattern }{ { root: "", itemPath: "", isFile: false, wantMatch: []string{""}, wantPrefix: "", wantPattern: &patterns[0], }, { root: "", itemPath: "", isFile: true, wantMatch: nil, wantPrefix: "", wantPattern: nil, }, { 
root: "upload", itemPath: "", isFile: false, wantMatch: []string{"upload", ""}, wantPrefix: "", wantPattern: &patterns[1], }, { root: "upload/dir", itemPath: "", isFile: false, wantMatch: []string{"upload/dir", "dir"}, wantPrefix: "", wantPattern: &patterns[1], }, { root: "upload/file.jpg", itemPath: "", isFile: true, wantMatch: []string{"upload/file.jpg", "file.jpg"}, wantPrefix: "", wantPattern: &patterns[2], }, { root: "media", itemPath: "", isFile: false, wantMatch: []string{"media"}, wantPrefix: "", wantPattern: &patterns[3], }, { root: "", itemPath: "media", isFile: false, wantMatch: []string{"media"}, wantPrefix: "media/", wantPattern: &patterns[3], }, { root: "media/all", itemPath: "", isFile: false, wantMatch: []string{"media/all"}, wantPrefix: "", wantPattern: &patterns[4], }, { root: "media", itemPath: "all", isFile: false, wantMatch: []string{"media/all"}, wantPrefix: "all/", wantPattern: &patterns[4], }, { root: "media/all", itemPath: "file.jpg", isFile: true, wantMatch: []string{"media/all/file.jpg", "file.jpg"}, wantPrefix: "file.jpg/", wantPattern: &patterns[5], }, { root: "", itemPath: "feature", isFile: false, wantMatch: []string{"feature"}, wantPrefix: "feature/", wantPattern: &patterns[23], }, { root: "feature/favorites", itemPath: "", isFile: false, wantMatch: []string{"feature/favorites"}, wantPrefix: "", wantPattern: &patterns[24], }, { root: "feature", itemPath: "favorites", isFile: false, wantMatch: []string{"feature/favorites"}, wantPrefix: "favorites/", wantPattern: &patterns[24], }, { root: "feature/favorites", itemPath: "file.jpg", isFile: true, wantMatch: []string{"feature/favorites/file.jpg", "file.jpg"}, wantPrefix: "file.jpg/", wantPattern: &patterns[25], }, } { t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) { gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile) assert.Equal(t, test.wantMatch, gotMatch) assert.Equal(t, 
test.wantPrefix, gotPrefix) assert.Equal(t, test.wantPattern, gotPattern) }) } } func TestPatternMatchToEntries(t *testing.T) { ctx := context.Background() f := newTestLister(t) f.names = []string{"file.jpg"} f.albums.add(&api.Album{ ID: "1", Title: "sub/one", }) f.albums.add(&api.Album{ ID: "2", Title: "sub", }) f.uploaded.AddEntry(mockobject.New("upload/file1.jpg")) f.uploaded.AddEntry(mockobject.New("upload/dir/file2.jpg")) for testNumber, test := range []struct { // input root string itemPath string // expected output wantMatch []string wantPrefix string remotes []string }{ { root: "", itemPath: "", wantMatch: []string{""}, wantPrefix: "", remotes: []string{"media/", "album/", "shared-album/", "upload/"}, }, { root: "upload", itemPath: "", wantMatch: []string{"upload", ""}, wantPrefix: "", remotes: []string{"upload/file1.jpg", "upload/dir/"}, }, { root: "upload", itemPath: "dir", wantMatch: []string{"upload/dir", "dir"}, wantPrefix: "dir/", remotes: []string{"upload/dir/file2.jpg"}, }, { root: "media", itemPath: "", wantMatch: []string{"media"}, wantPrefix: "", remotes: []string{"all/", "by-year/", "by-month/", "by-day/"}, }, { root: "media/all", itemPath: "", wantMatch: []string{"media/all"}, wantPrefix: "", remotes: []string{"file.jpg"}, }, { root: "media", itemPath: "all", wantMatch: []string{"media/all"}, wantPrefix: "all/", remotes: []string{"all/file.jpg"}, }, { root: "media/by-year", itemPath: "", wantMatch: []string{"media/by-year"}, wantPrefix: "", remotes: []string{"2000/", "2001/", "2002/", "2003/"}, }, { root: "media/by-year/2000", itemPath: "", wantMatch: []string{"media/by-year/2000", "2000"}, wantPrefix: "", remotes: []string{"file.jpg"}, }, { root: "media/by-month", itemPath: "", wantMatch: []string{"media/by-month"}, wantPrefix: "", remotes: []string{"2000/", "2001/", "2002/", "2003/"}, }, { root: "media/by-month/2001", itemPath: "", wantMatch: []string{"media/by-month/2001", "2001"}, wantPrefix: "", remotes: []string{"2001-01/", "2001-02/", 
"2001-03/", "2001-04/"}, }, { root: "media/by-month/2001/2001-01", itemPath: "", wantMatch: []string{"media/by-month/2001/2001-01", "2001", "01"}, wantPrefix: "", remotes: []string{"file.jpg"}, }, { root: "media/by-day", itemPath: "", wantMatch: []string{"media/by-day"}, wantPrefix: "", remotes: []string{"2000/", "2001/", "2002/", "2003/"}, }, { root: "media/by-day/2001", itemPath: "", wantMatch: []string{"media/by-day/2001", "2001"}, wantPrefix: "", remotes: []string{"2001-01-01/", "2001-01-02/", "2001-01-03/", "2001-01-04/"}, }, { root: "media/by-day/2001/2001-01-02", itemPath: "", wantMatch: []string{"media/by-day/2001/2001-01-02", "2001", "01", "02"}, wantPrefix: "", remotes: []string{"file.jpg"}, }, { root: "album", itemPath: "", wantMatch: []string{"album"}, wantPrefix: "", remotes: []string{"sub/"}, }, { root: "album/sub", itemPath: "", wantMatch: []string{"album/sub", "sub"}, wantPrefix: "", remotes: []string{"one/", "file.jpg"}, }, { root: "album/sub/one", itemPath: "", wantMatch: []string{"album/sub/one", "sub/one"}, wantPrefix: "", remotes: []string{"file.jpg"}, }, { root: "shared-album", itemPath: "", wantMatch: []string{"shared-album"}, wantPrefix: "", remotes: []string{"sub/"}, }, { root: "shared-album/sub", itemPath: "", wantMatch: []string{"shared-album/sub", "sub"}, wantPrefix: "", remotes: []string{"one/", "file.jpg"}, }, { root: "shared-album/sub/one", itemPath: "", wantMatch: []string{"shared-album/sub/one", "sub/one"}, wantPrefix: "", remotes: []string{"file.jpg"}, }, } { t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q", testNumber, test.root, test.itemPath), func(t *testing.T) { match, prefix, pattern := patterns.match(test.root, test.itemPath, false) assert.Equal(t, test.wantMatch, match) assert.Equal(t, test.wantPrefix, prefix) assert.NotNil(t, pattern) assert.NotNil(t, pattern.toEntries) entries, err := pattern.toEntries(ctx, f, prefix, match) assert.NoError(t, err) var remotes = []string{} for _, entry := range entries { remote := 
entry.Remote() if _, isDir := entry.(fs.Directory); isDir { remote += "/" } remotes = append(remotes, remote) if len(remotes) >= 4 { break // only test first 4 entries } } assert.Equal(t, test.remotes, remotes) }) } } func TestPatternYears(t *testing.T) { f := newTestLister(t) entries, err := years(context.Background(), f, "potato/", nil) require.NoError(t, err) year := 2000 for _, entry := range entries { assert.Equal(t, "potato/"+fmt.Sprint(year), entry.Remote()) year++ } } func TestPatternMonths(t *testing.T) { f := newTestLister(t) entries, err := months(context.Background(), f, "potato/", []string{"", "2020"}) require.NoError(t, err) assert.Equal(t, 12, len(entries)) for i, entry := range entries { assert.Equal(t, fmt.Sprintf("potato/2020-%02d", i+1), entry.Remote()) } } func TestPatternDays(t *testing.T) { f := newTestLister(t) entries, err := days(context.Background(), f, "potato/", []string{"", "2020"}) require.NoError(t, err) assert.Equal(t, 366, len(entries)) assert.Equal(t, "potato/2020-01-01", entries[0].Remote()) assert.Equal(t, "potato/2020-12-31", entries[len(entries)-1].Remote()) } func TestPatternYearMonthDayFilter(t *testing.T) { ctx := context.Background() f := newTestLister(t) // Years sf, err := yearMonthDayFilter(ctx, f, []string{"", "2000"}) require.NoError(t, err) assert.Equal(t, api.SearchFilter{ Filters: &api.Filters{ DateFilter: &api.DateFilter{ Dates: []api.Date{ { Year: 2000, }, }, }, }, }, sf) _, err = yearMonthDayFilter(ctx, f, []string{"", "potato"}) require.Error(t, err) _, err = yearMonthDayFilter(ctx, f, []string{"", "999"}) require.Error(t, err) _, err = yearMonthDayFilter(ctx, f, []string{"", "4000"}) require.Error(t, err) // Months sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01"}) require.NoError(t, err) assert.Equal(t, api.SearchFilter{ Filters: &api.Filters{ DateFilter: &api.DateFilter{ Dates: []api.Date{ { Month: 1, Year: 2000, }, }, }, }, }, sf) _, err = yearMonthDayFilter(ctx, f, []string{"", "2000", 
"potato"}) require.Error(t, err) _, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "0"}) require.Error(t, err) _, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "13"}) require.Error(t, err) // Days sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "02"}) require.NoError(t, err) assert.Equal(t, api.SearchFilter{ Filters: &api.Filters{ DateFilter: &api.DateFilter{ Dates: []api.Date{ { Day: 2, Month: 1, Year: 2000, }, }, }, }, }, sf) _, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "potato"}) require.Error(t, err) _, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "0"}) require.Error(t, err) _, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "32"}) require.Error(t, err) } func TestPatternAlbumsToEntries(t *testing.T) { f := newTestLister(t) ctx := context.Background() _, err := albumsToEntries(ctx, f, false, "potato/", "sub") assert.Equal(t, fs.ErrorDirNotFound, err) f.albums.add(&api.Album{ ID: "1", Title: "sub/one", }) entries, err := albumsToEntries(ctx, f, false, "potato/", "sub") assert.NoError(t, err) assert.Equal(t, 1, len(entries)) assert.Equal(t, "potato/one", entries[0].Remote()) _, ok := entries[0].(fs.Directory) assert.Equal(t, true, ok) f.albums.add(&api.Album{ ID: "1", Title: "sub", }) f.names = []string{"file.jpg"} entries, err = albumsToEntries(ctx, f, false, "potato/", "sub") assert.NoError(t, err) assert.Equal(t, 2, len(entries)) assert.Equal(t, "potato/one", entries[0].Remote()) _, ok = entries[0].(fs.Directory) assert.Equal(t, true, ok) assert.Equal(t, "potato/file.jpg", entries[1].Remote()) _, ok = entries[1].(fs.Object) assert.Equal(t, true, ok) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/googlephotos/api/types.go
backend/googlephotos/api/types.go
// Package api provides types used by the Google Photos API. package api import ( "fmt" "time" ) // ErrorDetails in the internals of the Error type type ErrorDetails struct { Code int `json:"code"` Message string `json:"message"` Status string `json:"status"` } // Error is returned on errors type Error struct { Details ErrorDetails `json:"error"` } // Error satisfies error interface func (e *Error) Error() string { return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status) } // Album of photos type Album struct { ID string `json:"id,omitempty"` Title string `json:"title"` ProductURL string `json:"productUrl,omitempty"` MediaItemsCount string `json:"mediaItemsCount,omitempty"` CoverPhotoBaseURL string `json:"coverPhotoBaseUrl,omitempty"` CoverPhotoMediaItemID string `json:"coverPhotoMediaItemId,omitempty"` IsWriteable bool `json:"isWriteable,omitempty"` } // ListAlbums is returned from albums.list and sharedAlbums.list type ListAlbums struct { Albums []Album `json:"albums"` SharedAlbums []Album `json:"sharedAlbums"` NextPageToken string `json:"nextPageToken"` } // CreateAlbum creates an Album type CreateAlbum struct { Album *Album `json:"album"` } // MediaItem is a photo or video type MediaItem struct { ID string `json:"id"` ProductURL string `json:"productUrl"` BaseURL string `json:"baseUrl"` MimeType string `json:"mimeType"` MediaMetadata struct { CreationTime time.Time `json:"creationTime"` Width string `json:"width"` Height string `json:"height"` Photo struct{} `json:"photo"` } `json:"mediaMetadata"` Filename string `json:"filename"` } // MediaItems is returned from mediaitems.list, mediaitems.search type MediaItems struct { MediaItems []MediaItem `json:"mediaItems"` NextPageToken string `json:"nextPageToken"` } // Content categories // NONE Default content category. This category is ignored when any other category is used in the filter. // LANDSCAPES Media items containing landscapes. // RECEIPTS Media items containing receipts. 
// CITYSCAPES Media items containing cityscapes. // LANDMARKS Media items containing landmarks. // SELFIES Media items that are selfies. // PEOPLE Media items containing people. // PETS Media items containing pets. // WEDDINGS Media items from weddings. // BIRTHDAYS Media items from birthdays. // DOCUMENTS Media items containing documents. // TRAVEL Media items taken during travel. // ANIMALS Media items containing animals. // FOOD Media items containing food. // SPORT Media items from sporting events. // NIGHT Media items taken at night. // PERFORMANCES Media items from performances. // WHITEBOARDS Media items containing whiteboards. // SCREENSHOTS Media items that are screenshots. // UTILITY Media items that are considered to be utility. These include, but aren't limited to documents, screenshots, whiteboards etc. // ARTS Media items containing art. // CRAFTS Media items containing crafts. // FASHION Media items related to fashion. // HOUSES Media items containing houses. // GARDENS Media items containing gardens. // FLOWERS Media items containing flowers. // HOLIDAYS Media items taken of holidays. // MediaTypes // ALL_MEDIA Treated as if no filters are applied. All media types are included. // VIDEO All media items that are considered videos. This also includes movies the user has created using the Google Photos app. // PHOTO All media items that are considered photos. This includes .bmp, .gif, .ico, .jpg (and other spellings), .tiff, .webp and special photo types such as iOS live photos, Android motion photos, panoramas, photospheres. // Features // NONE Treated as if no filters are applied. All features are included. // FAVORITES Media items that the user has marked as favorites in the Google Photos app. 
// Date is used as part of SearchFilter type Date struct { Year int `json:"year,omitempty"` Month int `json:"month,omitempty"` Day int `json:"day,omitempty"` } // DateFilter is uses to add date ranges to media item queries type DateFilter struct { Dates []Date `json:"dates,omitempty"` Ranges []struct { StartDate Date `json:"startDate,omitempty"` EndDate Date `json:"endDate,omitempty"` } `json:"ranges,omitempty"` } // ContentFilter is uses to add content categories to media item queries type ContentFilter struct { IncludedContentCategories []string `json:"includedContentCategories,omitempty"` ExcludedContentCategories []string `json:"excludedContentCategories,omitempty"` } // MediaTypeFilter is uses to add media types to media item queries type MediaTypeFilter struct { MediaTypes []string `json:"mediaTypes,omitempty"` } // FeatureFilter is uses to add features to media item queries type FeatureFilter struct { IncludedFeatures []string `json:"includedFeatures,omitempty"` } // Filters combines all the filter types for media item queries type Filters struct { DateFilter *DateFilter `json:"dateFilter,omitempty"` ContentFilter *ContentFilter `json:"contentFilter,omitempty"` MediaTypeFilter *MediaTypeFilter `json:"mediaTypeFilter,omitempty"` FeatureFilter *FeatureFilter `json:"featureFilter,omitempty"` IncludeArchivedMedia *bool `json:"includeArchivedMedia,omitempty"` ExcludeNonAppCreatedData *bool `json:"excludeNonAppCreatedData,omitempty"` } // SearchFilter is uses with mediaItems.search type SearchFilter struct { AlbumID string `json:"albumId,omitempty"` PageSize int `json:"pageSize"` PageToken string `json:"pageToken,omitempty"` Filters *Filters `json:"filters,omitempty"` } // SimpleMediaItem is part of NewMediaItem type SimpleMediaItem struct { UploadToken string `json:"uploadToken"` } // NewMediaItem is a single media item for upload type NewMediaItem struct { Description string `json:"description"` SimpleMediaItem SimpleMediaItem `json:"simpleMediaItem"` } // 
BatchCreateRequest creates media items from upload tokens type BatchCreateRequest struct { AlbumID string `json:"albumId,omitempty"` NewMediaItems []NewMediaItem `json:"newMediaItems"` } // BatchCreateResponse is returned from BatchCreateRequest type BatchCreateResponse struct { NewMediaItemResults []struct { UploadToken string `json:"uploadToken"` Status struct { Message string `json:"message"` Code int `json:"code"` } `json:"status"` MediaItem MediaItem `json:"mediaItem"` } `json:"newMediaItemResults"` } // BatchRemoveItems is for removing items from an album type BatchRemoveItems struct { MediaItemIDs []string `json:"mediaItemIds"` }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/crypt/crypt.go
backend/crypt/crypt.go
// Package crypt provides wrappers for Fs and Object which implement encryption package crypt import ( "context" "errors" "fmt" "io" "path" "strings" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/list" ) // Globals // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "crypt", Description: "Encrypt/Decrypt a remote", NewFs: NewFs, CommandHelp: commandHelp, MetadataInfo: &fs.MetadataInfo{ Help: `Any metadata supported by the underlying remote is read and written.`, }, Options: []fs.Option{{ Name: "remote", Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).", Required: true, }, { Name: "filename_encryption", Help: "How to encrypt the filenames.", Default: "standard", Examples: []fs.OptionExample{ { Value: "standard", Help: "Encrypt the filenames.\nSee the docs for the details.", }, { Value: "obfuscate", Help: "Very simple filename obfuscation.", }, { Value: "off", Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.", }, }, }, { Name: "directory_name_encryption", Help: `Option to either encrypt directory names or leave them intact. 
NB If filename_encryption is "off" then this option will do nothing.`, Default: true, Examples: []fs.OptionExample{ { Value: "true", Help: "Encrypt directory names.", }, { Value: "false", Help: "Don't encrypt directory names, leave them intact.", }, }, }, { Name: "password", Help: "Password or pass phrase for encryption.", IsPassword: true, Required: true, }, { Name: "password2", Help: "Password or pass phrase for salt.\n\nOptional but recommended.\nShould be different to the previous password.", IsPassword: true, }, { Name: "server_side_across_configs", Default: false, Help: `Deprecated: use --server-side-across-configs instead. Allow server-side operations (e.g. copy) to work across different crypt configs. Normally this option is not what you want, but if you have two crypts pointing to the same backend you can use it. This can be used, for example, to change file name encryption type without re-uploading all the data. Just make two crypt backends pointing to two different directories with the single changed parameter and use rclone move to move the files between the crypt remotes.`, Advanced: true, }, { Name: "show_mapping", Help: `For all files listed show how the names encrypt. If this flag is set then for each file that the remote is asked to list, it will log (at level INFO) a line stating the decrypted file name and the encrypted file name. This is so you can work out which encrypted names are which decrypted names just in case you need to do something with the encrypted file names, or for debugging purposes.`, Default: false, Hide: fs.OptionHideConfigurator, Advanced: true, }, { Name: "no_data_encryption", Help: "Option to either encrypt file data or leave it unencrypted.", Default: false, Advanced: true, Examples: []fs.OptionExample{ { Value: "true", Help: "Don't encrypt file data, leave it unencrypted.", }, { Value: "false", Help: "Encrypt file data.", }, }, }, { Name: "pass_bad_blocks", Help: `If set this will pass bad blocks through as all 0. 
This should not be set in normal operation, it should only be set if trying to recover an encrypted file with errors and it is desired to recover as much of the file as possible.`, Default: false, Advanced: true, }, { Name: "strict_names", Help: `If set, this will raise an error when crypt comes across a filename that can't be decrypted. (By default, rclone will just log a NOTICE and continue as normal.) This can happen if encrypted and unencrypted files are stored in the same directory (which is not recommended.) It may also indicate a more serious problem that should be investigated.`, Default: false, Advanced: true, }, { Name: "filename_encoding", Help: `How to encode the encrypted filename to text string. This option could help with shortening the encrypted filename. The suitable option would depend on the way your remote count the filename length and if it's case sensitive.`, Default: "base32", Examples: []fs.OptionExample{ { Value: "base32", Help: "Encode using base32. Suitable for all remote.", }, { Value: "base64", Help: "Encode using base64. Suitable for case sensitive remote.", }, { Value: "base32768", Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)", }, }, Advanced: true, }, { Name: "suffix", Help: `If this is set it will override the default suffix of ".bin". Setting suffix to "none" will result in an empty suffix. 
This may be useful when the path length is critical.`, Default: ".bin", Advanced: true, }}, }) } // newCipherForConfig constructs a Cipher for the given config name func newCipherForConfig(opt *Options) (*Cipher, error) { mode, err := NewNameEncryptionMode(opt.FilenameEncryption) if err != nil { return nil, err } if opt.Password == "" { return nil, errors.New("password not set in config file") } password, err := obscure.Reveal(opt.Password) if err != nil { return nil, fmt.Errorf("failed to decrypt password: %w", err) } var salt string if opt.Password2 != "" { salt, err = obscure.Reveal(opt.Password2) if err != nil { return nil, fmt.Errorf("failed to decrypt password2: %w", err) } } enc, err := NewNameEncoding(opt.FilenameEncoding) if err != nil { return nil, err } cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption, enc) if err != nil { return nil, fmt.Errorf("failed to make cipher: %w", err) } cipher.setEncryptedSuffix(opt.Suffix) cipher.setPassBadBlocks(opt.PassBadBlocks) return cipher, nil } // NewCipher constructs a Cipher for the given config func NewCipher(m configmap.Mapper) (*Cipher, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } return newCipherForConfig(opt) } // NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } cipher, err := newCipherForConfig(opt) if err != nil { return nil, err } remote := opt.Remote if strings.HasPrefix(remote, name+":") { return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting") } // Make sure to remove trailing . referring to the current dir if path.Base(rpath) == "." 
{ rpath = strings.TrimSuffix(rpath, ".") } // Look for a file first var wrappedFs fs.Fs if rpath == "" { wrappedFs, err = cache.Get(ctx, remote) } else { remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath)) wrappedFs, err = cache.Get(ctx, remotePath) // if that didn't produce a file, look for a directory if err != fs.ErrorIsFile { remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath)) wrappedFs, err = cache.Get(ctx, remotePath) } } if err != fs.ErrorIsFile && err != nil { return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err) } f := &Fs{ Fs: wrappedFs, name: name, root: rpath, opt: *opt, cipher: cipher, } cache.PinUntilFinalized(f.Fs, f) // Correct root if definitely pointing to a file if err == fs.ErrorIsFile { f.root = path.Dir(f.root) if f.root == "." || f.root == "/" { f.root = "" } } // the features here are ones we could support, and they are // ANDed with the ones from wrappedFs f.features = (&fs.Features{ CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff, DuplicateFiles: true, ReadMimeType: false, // MimeTypes not supported with crypt WriteMimeType: false, BucketBased: true, CanHaveEmptyDirectories: true, SetTier: true, GetTier: true, ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs, ReadMetadata: true, WriteMetadata: true, UserMetadata: true, ReadDirMetadata: true, WriteDirMetadata: true, WriteDirSetModTime: true, UserDirMetadata: true, DirModTimeUpdatesOnWrite: true, PartialUploads: true, }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) // Enable ListP always f.features.ListP = f.ListP return f, err } // Options defines the configuration for this backend type Options struct { Remote string `config:"remote"` FilenameEncryption string `config:"filename_encryption"` DirectoryNameEncryption bool `config:"directory_name_encryption"` NoDataEncryption bool `config:"no_data_encryption"` Password string `config:"password"` Password2 string 
`config:"password2"` ServerSideAcrossConfigs bool `config:"server_side_across_configs"` ShowMapping bool `config:"show_mapping"` PassBadBlocks bool `config:"pass_bad_blocks"` FilenameEncoding string `config:"filename_encoding"` Suffix string `config:"suffix"` StrictNames bool `config:"strict_names"` } // Fs represents a wrapped fs.Fs type Fs struct { fs.Fs wrapper fs.Fs name string root string opt Options features *fs.Features // optional features cipher *Cipher } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // String returns a description of the FS func (f *Fs) String() string { return fmt.Sprintf("Encrypted drive '%s:%s'", f.name, f.root) } // Encrypt an object file name to entries. func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) error { remote := obj.Remote() decryptedRemote, err := f.cipher.DecryptFileName(remote) if err != nil { if f.opt.StrictNames { return fmt.Errorf("%s: undecryptable file name detected: %v", remote, err) } fs.Logf(remote, "Skipping undecryptable file name: %v", err) return nil } if f.opt.ShowMapping { fs.Logf(decryptedRemote, "Encrypts to %q", remote) } *entries = append(*entries, f.newObject(obj)) return nil } // Encrypt a directory file name to entries. func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) error { remote := dir.Remote() decryptedRemote, err := f.cipher.DecryptDirName(remote) if err != nil { if f.opt.StrictNames { return fmt.Errorf("%s: undecryptable dir name detected: %v", remote, err) } fs.Logf(remote, "Skipping undecryptable dir name: %v", err) return nil } if f.opt.ShowMapping { fs.Logf(decryptedRemote, "Encrypts to %q", remote) } *entries = append(*entries, f.newDir(ctx, dir)) return nil } // Encrypt some directory entries. 
This alters entries returning it as newEntries. func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) { newEntries = entries[:0] // in place filter errors := 0 var firsterr error for _, entry := range entries { switch x := entry.(type) { case fs.Object: err = f.add(&newEntries, x) case fs.Directory: err = f.addDir(ctx, &newEntries, x) default: return nil, fmt.Errorf("unknown object type %T", entry) } if err != nil { errors++ if firsterr == nil { firsterr = err } } } if firsterr != nil { return nil, fmt.Errorf("there were %v undecryptable name errors. first error: %v", errors, firsterr) } return newEntries, nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { return list.WithListP(ctx, dir, f) } // ListP lists the objects and directories of the Fs starting // from dir non recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. 
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error { wrappedCallback := func(entries fs.DirEntries) error { entries, err := f.encryptEntries(ctx, entries) if err != nil { return err } return callback(entries) } listP := f.Fs.Features().ListP encryptedDir := f.cipher.EncryptDirName(dir) if listP == nil { entries, err := f.Fs.List(ctx, encryptedDir) if err != nil { return err } return wrappedCallback(entries) } return listP(ctx, encryptedDir, wrappedCallback) } // ListR lists the objects and directories of the Fs starting // from dir recursively into out. // // dir should be "" to start from the root, and should not // have trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. // // It should call callback for each tranche of entries read. // These need not be returned in any particular order. If // callback returns an error then the listing will stop // immediately. // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error { newEntries, err := f.encryptEntries(ctx, entries) if err != nil { return err } return callback(newEntries) }) } // NewObject finds the Object at remote. 
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote)) if err != nil { return nil, err } return f.newObject(o), nil } type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) // put implements Put or PutStream func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) { ci := fs.GetConfig(ctx) if f.opt.NoDataEncryption { o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...) if err == nil && o != nil { o = f.newObject(o) } return o, err } // Encrypt the data into wrappedIn wrappedIn, encrypter, err := f.cipher.encryptData(in) if err != nil { return nil, err } // Find a hash the destination supports to compute a hash of // the encrypted data ht := f.Fs.Hashes().GetOne() if ci.IgnoreChecksum { ht = hash.None } var hasher *hash.MultiHasher if ht != hash.None { hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht)) if err != nil { return nil, err } // unwrap the accounting var wrap accounting.WrapFn wrappedIn, wrap = accounting.UnWrap(wrappedIn) // add the hasher wrappedIn = io.TeeReader(wrappedIn, hasher) // wrap the accounting back on wrappedIn = wrap(wrappedIn) } // Transfer the data o, err := put(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce), options...) 
if err != nil { return nil, err } // Check the hashes of the encrypted data if we were comparing them if ht != hash.None && hasher != nil { srcHash := hasher.Sums()[ht] var dstHash string dstHash, err = o.Hash(ctx, ht) if err != nil { return nil, fmt.Errorf("failed to read destination hash: %w", err) } if srcHash != "" && dstHash != "" { if srcHash != dstHash { // remove object err = o.Remove(ctx) if err != nil { fs.Errorf(o, "Failed to remove corrupted object: %v", err) } return nil, fmt.Errorf("corrupted on transfer: %v encrypted hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash) } fs.Debugf(src, "%v = %s OK", ht, srcHash) } } return f.newObject(o), nil } // Put in to the remote path with the modTime given of the given size // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.put(ctx, in, src, options, f.Fs.Put) } // PutStream uploads to the remote path with the modTime given of indeterminate size func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.put(ctx, in, src, options, f.Fs.Features().PutStream) } // Hashes returns the supported hash sets. 
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.None) } // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists func (f *Fs) Mkdir(ctx context.Context, dir string) error { return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir)) } // MkdirMetadata makes the root directory of the Fs object func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) { do := f.Fs.Features().MkdirMetadata if do == nil { return nil, fs.ErrorNotImplemented } newDir, err := do(ctx, f.cipher.EncryptDirName(dir), metadata) if err != nil { return nil, err } var entries = make(fs.DirEntries, 0, 1) err = f.addDir(ctx, &entries, newDir) if err != nil { return nil, err } newDir, ok := entries[0].(fs.Directory) if !ok { return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0]) } return newDir, nil } // DirSetModTime sets the directory modtime for dir func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error { do := f.Fs.Features().DirSetModTime if do == nil { return fs.ErrorNotImplemented } return do(ctx, f.cipher.EncryptDirName(dir), modTime) } // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir)) } // Purge all files in the directory specified // // Implement this if you have a way of deleting all the files // quicker than just running Remove() on the result of List() // // Return an error if it doesn't exist func (f *Fs) Purge(ctx context.Context, dir string) error { do := f.Fs.Features().Purge if do == nil { return fs.ErrorCantPurge } return do(ctx, f.cipher.EncryptDirName(dir)) } // Copy src to this remote using server-side copy operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { do := f.Fs.Features().Copy if do == nil { return nil, fs.ErrorCantCopy } o, ok := src.(*Object) if !ok { return nil, fs.ErrorCantCopy } oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote)) if err != nil { return nil, err } return f.newObject(oResult), nil } // Move src to this remote using server-side move operations. // // This is stored with the remote path given. // // It returns the destination Object and a possible error. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { do := f.Fs.Features().Move if do == nil { return nil, fs.ErrorCantMove } o, ok := src.(*Object) if !ok { return nil, fs.ErrorCantMove } oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote)) if err != nil { return nil, err } return f.newObject(oResult), nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { do := f.Fs.Features().DirMove if do == nil { return fs.ErrorCantDirMove } srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote)) } // PutUnchecked uploads the object // // This will create a duplicate if we upload a new file without // checking to see if there is one already - use Put() for that. 
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { do := f.Fs.Features().PutUnchecked if do == nil { return nil, errors.New("can't PutUnchecked") } wrappedIn, encrypter, err := f.cipher.encryptData(in) if err != nil { return nil, err } o, err := do(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce)) if err != nil { return nil, err } return f.newObject(o), nil } // CleanUp the trash in the Fs // // Implement this if you have a way of emptying the trash or // otherwise cleaning up old versions of files. func (f *Fs) CleanUp(ctx context.Context) error { do := f.Fs.Features().CleanUp if do == nil { return errors.New("not supported by underlying remote") } return do(ctx) } // About gets quota information from the Fs func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { do := f.Fs.Features().About if do == nil { return nil, errors.New("not supported by underlying remote") } return do(ctx) } // UnWrap returns the Fs that this Fs is wrapping func (f *Fs) UnWrap() fs.Fs { return f.Fs } // WrapFs returns the Fs that is wrapping this Fs func (f *Fs) WrapFs() fs.Fs { return f.wrapper } // SetWrapper sets the Fs that is wrapping this Fs func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper } // EncryptFileName returns an encrypted file name func (f *Fs) EncryptFileName(fileName string) string { return f.cipher.EncryptFileName(fileName) } // DecryptFileName returns a decrypted file name func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) { return f.cipher.DecryptFileName(encryptedFileName) } // computeHashWithNonce takes the nonce and encrypts the contents of // src with it, and calculates the hash given by HashType on the fly // // Note that we break lots of encapsulation in this function. 
func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Object, hashType hash.Type) (hashStr string, err error) { // Open the src for input in, err := src.Open(ctx) if err != nil { return "", fmt.Errorf("failed to open src: %w", err) } defer fs.CheckClose(in, &err) // Now encrypt the src with the nonce out, err := f.cipher.newEncrypter(in, &nonce) if err != nil { return "", fmt.Errorf("failed to make encrypter: %w", err) } // pipe into hash m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType)) if err != nil { return "", fmt.Errorf("failed to make hasher: %w", err) } _, err = io.Copy(m, out) if err != nil { return "", fmt.Errorf("failed to hash data: %w", err) } return m.Sums()[hashType], nil } // ComputeHash takes the nonce from o, and encrypts the contents of // src with it, and calculates the hash given by HashType on the fly // // Note that we break lots of encapsulation in this function. func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) { if f.opt.NoDataEncryption { return src.Hash(ctx, hashType) } // Read the nonce - opening the file is sufficient to read the nonce in // use a limited read so we only read the header in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1}) if err != nil { return "", fmt.Errorf("failed to open object to read nonce: %w", err) } d, err := f.cipher.newDecrypter(in) if err != nil { _ = in.Close() return "", fmt.Errorf("failed to open object to read nonce: %w", err) } nonce := d.nonce // fs.Debugf(o, "Read nonce % 2x", nonce) // Check nonce isn't all zeros isZero := true for i := range nonce { if nonce[i] != 0 { isZero = false } } if isZero { fs.Errorf(o, "empty nonce read") } // Close d (and hence in) once we have read the nonce err = d.Close() if err != nil { return "", fmt.Errorf("failed to close nonce read: %w", err) } return f.computeHashWithNonce(ctx, nonce, src, hashType) } // MergeDirs merges the 
contents of all the directories passed // in into the first one and rmdirs the other directories. func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { do := f.Fs.Features().MergeDirs if do == nil { return errors.New("MergeDirs not supported") } out := make([]fs.Directory, len(dirs)) for i, dir := range dirs { out[i] = fs.NewDirWrapper(f.cipher.EncryptDirName(dir.Remote()), dir) } return do(ctx, out) } // DirCacheFlush resets the directory cache - used in testing // as an optional interface func (f *Fs) DirCacheFlush() { do := f.Fs.Features().DirCacheFlush if do != nil { do() } } // PublicLink generates a public link to the remote path (usually readable by anyone) func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) { do := f.Fs.Features().PublicLink if do == nil { return "", errors.New("PublicLink not supported") } o, err := f.NewObject(ctx, remote) if err != nil { // assume it is a directory return do(ctx, f.cipher.EncryptDirName(remote), expire, unlink) } return do(ctx, o.(*Object).Object.Remote(), expire, unlink) } // ChangeNotify calls the passed function with a path // that has had changes. If the implementation // uses polling, it should adhere to the given interval. 
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { do := f.Fs.Features().ChangeNotify if do == nil { return } wrappedNotifyFunc := func(path string, entryType fs.EntryType) { // fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType) var ( err error decrypted string ) switch entryType { case fs.EntryDirectory: decrypted, err = f.cipher.DecryptDirName(path) case fs.EntryObject: decrypted, err = f.cipher.DecryptFileName(path) default: fs.Errorf(path, "crypt ChangeNotify: ignoring unknown EntryType %d", entryType) return } if err != nil { fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err) return } notifyFunc(decrypted, entryType) } do(ctx, wrappedNotifyFunc, pollIntervalChan) } var commandHelp = []fs.CommandHelp{ { Name: "encode", Short: "Encode the given filename(s).", Long: `This encodes the filenames given as arguments returning a list of strings of the encoded results. Usage examples: ` + "```console" + ` rclone backend encode crypt: file1 [file2...] rclone rc backend/command command=encode fs=crypt: file1 [file2...] ` + "```", }, { Name: "decode", Short: "Decode the given filename(s).", Long: `This decodes the filenames given as arguments returning a list of strings of the decoded results. It will return an error if any of the inputs are invalid. Usage examples: ` + "```console" + ` rclone backend decode crypt: encryptedfile1 [encryptedfile2...] rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...] 
` + "```", }, } // Command the backend to run a named command // // The command run is name // args may be used to read arguments from // opts may be used to read optional arguments from // // The result should be capable of being JSON encoded // If it is a string or a []string it will be shown to the user // otherwise it will be JSON encoded and shown to the user like that func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { switch name { case "decode": out := make([]string, 0, len(arg)) for _, encryptedFileName := range arg { fileName, err := f.DecryptFileName(encryptedFileName) if err != nil { return out, fmt.Errorf("failed to decrypt: %s: %w", encryptedFileName, err) } out = append(out, fileName) } return out, nil case "encode": out := make([]string, 0, len(arg)) for _, fileName := range arg { encryptedFileName := f.EncryptFileName(fileName) out = append(out, encryptedFileName) } return out, nil default: return nil, fs.ErrorCommandNotFound } } // Object describes a wrapped for being read from the Fs // // This decrypts the remote name and decrypts the data type Object struct { fs.Object f *Fs } func (f *Fs) newObject(o fs.Object) *Object { return &Object{ Object: o, f: f, } } // Fs returns read only access to the Fs that this object is part of func (o *Object) Fs() fs.Info { return o.f } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.Remote() } // Remote returns the remote path func (o *Object) Remote() string { remote := o.Object.Remote() decryptedName, err := o.f.cipher.DecryptFileName(remote) if err != nil { fs.Debugf(remote, "Undecryptable file name: %v", err) return remote } return decryptedName } // Size returns the size of the file func (o *Object) Size() int64 { size := o.Object.Size() if !o.f.opt.NoDataEncryption { var err error size, err = o.f.cipher.DecryptedSize(size) if err != nil { fs.Debugf(o, "Bad size for decrypt: %v", err) } } 
return size } // Hash returns the selected checksum of the file // If no checksum is available it returns "" func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { return "", hash.ErrUnsupported } // UnWrap returns the wrapped Object func (o *Object) UnWrap() fs.Object { return o.Object } // Open opens the file for read. Call Close() on the returned io.ReadCloser func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) { if o.f.opt.NoDataEncryption { return o.Object.Open(ctx, options...) } var openOptions []fs.OpenOption var offset, limit int64 = 0, -1 for _, option := range options { switch x := option.(type) { case *fs.SeekOption: offset = x.Offset case *fs.RangeOption: offset, limit = x.Decode(o.Size()) default: // pass on Options to underlying open if appropriate openOptions = append(openOptions, option) } } rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { if underlyingOffset == 0 && underlyingLimit < 0 { // Open with no seek return o.Object.Open(ctx, openOptions...) } // Open stream with a range of underlyingOffset, underlyingLimit end := int64(-1) if underlyingLimit >= 0 { end = underlyingOffset + underlyingLimit - 1 if end >= o.Object.Size() { end = -1 } } newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end}) return o.Object.Open(ctx, newOpenOptions...) }, offset, limit) if err != nil { return nil, err } return rc, nil } // Update in to the object with the modTime given of the given size func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/crypt/cipher.go
backend/crypt/cipher.go
package crypt import ( "bytes" "context" "crypto/aes" gocipher "crypto/cipher" "crypto/rand" "encoding/base32" "encoding/base64" "errors" "fmt" "io" "strconv" "strings" "sync" "time" "unicode/utf8" "github.com/Max-Sum/base32768" "github.com/rclone/rclone/backend/crypt/pkcs7" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/version" "github.com/rfjakob/eme" "golang.org/x/crypto/nacl/secretbox" "golang.org/x/crypto/scrypt" ) // Constants const ( nameCipherBlockSize = aes.BlockSize fileMagic = "RCLONE\x00\x00" fileMagicSize = len(fileMagic) fileNonceSize = 24 fileHeaderSize = fileMagicSize + fileNonceSize blockHeaderSize = secretbox.Overhead blockDataSize = 64 * 1024 blockSize = blockHeaderSize + blockDataSize ) // Errors returned by cipher var ( ErrorBadDecryptUTF8 = errors.New("bad decryption - utf-8 invalid") ErrorBadDecryptControlChar = errors.New("bad decryption - contains control chars") ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize") ErrorTooShortAfterDecode = errors.New("too short after base32 decode") ErrorTooLongAfterDecode = errors.New("too long after base32 decode") ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted") ErrorEncryptedFileBadHeader = errors.New("file has truncated block header") ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string") ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?") ErrorBadBase32Encoding = errors.New("bad base32 filename encoding") ErrorFileClosed = errors.New("file already closed") ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix") ErrorBadSeek = errors.New("Seek beyond end of file") ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'") defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1} 
obfuscQuoteRune = '!' ) // Global variables var ( fileMagicBytes = []byte(fileMagic) ) // ReadSeekCloser is the interface of the read handles type ReadSeekCloser interface { io.Reader io.Seeker io.Closer fs.RangeSeeker } // OpenRangeSeek opens the file handle at the offset with the limit given type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error) // NameEncryptionMode is the type of file name encryption in use type NameEncryptionMode int // NameEncryptionMode levels const ( NameEncryptionOff NameEncryptionMode = iota NameEncryptionStandard NameEncryptionObfuscated ) // NewNameEncryptionMode turns a string into a NameEncryptionMode func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) { s = strings.ToLower(s) switch s { case "off": mode = NameEncryptionOff case "standard": mode = NameEncryptionStandard case "obfuscate": mode = NameEncryptionObfuscated default: err = fmt.Errorf("unknown file name encryption mode %q", s) } return mode, err } // String turns mode into a human-readable string func (mode NameEncryptionMode) String() (out string) { switch mode { case NameEncryptionOff: out = "off" case NameEncryptionStandard: out = "standard" case NameEncryptionObfuscated: out = "obfuscate" default: out = fmt.Sprintf("Unknown mode #%d", mode) } return out } // fileNameEncoding are the encoding methods dealing with encrypted file names type fileNameEncoding interface { EncodeToString(src []byte) string DecodeString(s string) ([]byte, error) } // caseInsensitiveBase32Encoding defines a file name encoding // using a modified version of standard base32 as described in // RFC4648 // // The standard encoding is modified in two ways // - it becomes lower case (no-one likes upper case filenames!) // - we strip the padding character `=` type caseInsensitiveBase32Encoding struct{} // EncodeToString encodes a string using the modified version of // base32 encoding. 
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string { encoded := base32.HexEncoding.EncodeToString(src) encoded = strings.TrimRight(encoded, "=") return strings.ToLower(encoded) } // DecodeString decodes a string as encoded by EncodeToString func (caseInsensitiveBase32Encoding) DecodeString(s string) ([]byte, error) { if strings.HasSuffix(s, "=") { return nil, ErrorBadBase32Encoding } // First figure out how many padding characters to add roundUpToMultipleOf8 := (len(s) + 7) &^ 7 equals := roundUpToMultipleOf8 - len(s) s = strings.ToUpper(s) + "========"[:equals] return base32.HexEncoding.DecodeString(s) } // NewNameEncoding creates a NameEncoding from a string func NewNameEncoding(s string) (enc fileNameEncoding, err error) { s = strings.ToLower(s) switch s { case "base32": enc = caseInsensitiveBase32Encoding{} case "base64": enc = base64.RawURLEncoding case "base32768": enc = base32768.SafeEncoding default: err = fmt.Errorf("unknown file name encoding mode %q", s) } return enc, err } // Cipher defines an encoding and decoding cipher for the crypt backend type Cipher struct { dataKey [32]byte // Key for secretbox nameKey [32]byte // 16,24 or 32 bytes nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto block gocipher.Block mode NameEncryptionMode fileNameEnc fileNameEncoding buffers sync.Pool // encrypt/decrypt buffers cryptoRand io.Reader // read crypto random numbers from here dirNameEncrypt bool passBadBlocks bool // if set passed bad blocks as zeroed blocks encryptedSuffix string } // newCipher initialises the cipher. 
If salt is "" then it uses a built in salt val func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) { c := &Cipher{ mode: mode, fileNameEnc: enc, cryptoRand: rand.Reader, dirNameEncrypt: dirNameEncrypt, encryptedSuffix: ".bin", } c.buffers.New = func() any { return new([blockSize]byte) } err := c.Key(password, salt) if err != nil { return nil, err } return c, nil } // setEncryptedSuffix set suffix, or an empty string func (c *Cipher) setEncryptedSuffix(suffix string) { if strings.EqualFold(suffix, "none") { c.encryptedSuffix = "" return } if !strings.HasPrefix(suffix, ".") { fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot) suffix = "." + suffix } c.encryptedSuffix = suffix } // Call to set bad block pass through func (c *Cipher) setPassBadBlocks(passBadBlocks bool) { c.passBadBlocks = passBadBlocks } // Key creates all the internal keys from the password passed in using // scrypt. // // If salt is "" we use a fixed salt just to make attackers lives // slightly harder than using no salt. // // Note that empty password makes all 0x00 keys which is used in the // tests. 
func (c *Cipher) Key(password, salt string) (err error) { const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak) var saltBytes = defaultSalt if salt != "" { saltBytes = []byte(salt) } var key []byte if password == "" { key = make([]byte, keySize) } else { key, err = scrypt.Key([]byte(password), saltBytes, 16384, 8, 1, keySize) if err != nil { return err } } copy(c.dataKey[:], key) copy(c.nameKey[:], key[len(c.dataKey):]) copy(c.nameTweak[:], key[len(c.dataKey)+len(c.nameKey):]) // Key the name cipher c.block, err = aes.NewCipher(c.nameKey[:]) return err } // getBlock gets a block from the pool of size blockSize func (c *Cipher) getBlock() *[blockSize]byte { return c.buffers.Get().(*[blockSize]byte) } // putBlock returns a block to the pool of size blockSize func (c *Cipher) putBlock(buf *[blockSize]byte) { c.buffers.Put(buf) } // encryptSegment encrypts a path segment // // This uses EME with AES. // // EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the // 2003 paper "A Parallelizable Enciphering Mode" by Halevi and // Rogaway. // // This makes for deterministic encryption which is what we want - the // same filename must encrypt to the same thing. 
// // This means that // - filenames with the same name will encrypt the same // - filenames which start the same won't have a common prefix func (c *Cipher) encryptSegment(plaintext string) string { if plaintext == "" { return "" } paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext)) ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt) return c.fileNameEnc.EncodeToString(ciphertext) } // decryptSegment decrypts a path segment func (c *Cipher) decryptSegment(ciphertext string) (string, error) { if ciphertext == "" { return "", nil } rawCiphertext, err := c.fileNameEnc.DecodeString(ciphertext) if err != nil { return "", err } if len(rawCiphertext)%nameCipherBlockSize != 0 { return "", ErrorNotAMultipleOfBlocksize } if len(rawCiphertext) == 0 { // not possible if decodeFilename() working correctly return "", ErrorTooShortAfterDecode } if len(rawCiphertext) > 2048 { return "", ErrorTooLongAfterDecode } paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt) plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext) if err != nil { return "", err } return string(plaintext), err } // Simple obfuscation routines func (c *Cipher) obfuscateSegment(plaintext string) string { if plaintext == "" { return "" } // If the string isn't valid UTF8 then don't rotate; just // prepend a !. if !utf8.ValidString(plaintext) { return "!." + plaintext } // Calculate a simple rotation based on the filename and // the nameKey var dir int for _, runeValue := range plaintext { dir += int(runeValue) } dir %= 256 // We'll use this number to store in the result filename... 
var result bytes.Buffer _, _ = result.WriteString(strconv.Itoa(dir) + ".") // but we'll augment it with the nameKey for real calculation for i := range len(c.nameKey) { dir += int(c.nameKey[i]) } // Now for each character, depending on the range it is in // we will actually rotate a different amount for _, runeValue := range plaintext { switch { case runeValue == obfuscQuoteRune: // Quote the Quote character _, _ = result.WriteRune(obfuscQuoteRune) _, _ = result.WriteRune(obfuscQuoteRune) case runeValue >= '0' && runeValue <= '9': // Number thisdir := (dir % 9) + 1 newRune := '0' + (int(runeValue)-'0'+thisdir)%10 _, _ = result.WriteRune(rune(newRune)) case (runeValue >= 'A' && runeValue <= 'Z') || (runeValue >= 'a' && runeValue <= 'z'): // ASCII letter. Try to avoid trivial A->a mappings thisdir := dir%25 + 1 // Calculate the offset of this character in A-Za-z pos := int(runeValue - 'A') if pos >= 26 { pos -= 6 // It's lower case } // Rotate the character to the new location pos = (pos + thisdir) % 52 if pos >= 26 { pos += 6 // and handle lower case offset again } _, _ = result.WriteRune(rune('A' + pos)) case runeValue >= 0xA0 && runeValue <= 0xFF: // Latin 1 supplement thisdir := (dir % 95) + 1 newRune := 0xA0 + (int(runeValue)-0xA0+thisdir)%96 _, _ = result.WriteRune(rune(newRune)) case runeValue >= 0x100: // Some random Unicode range; we have no good rules here thisdir := (dir % 127) + 1 base := int(runeValue - runeValue%256) newRune := rune(base + (int(runeValue)-base+thisdir)%256) // If the new character isn't a valid UTF8 char // then don't rotate it. 
Quote it instead if !utf8.ValidRune(newRune) { _, _ = result.WriteRune(obfuscQuoteRune) _, _ = result.WriteRune(runeValue) } else { _, _ = result.WriteRune(newRune) } default: // Leave character untouched _, _ = result.WriteRune(runeValue) } } return result.String() } func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) { if ciphertext == "" { return "", nil } before, after, ok := strings.Cut(ciphertext, ".") if !ok { return "", ErrorNotAnEncryptedFile } // No . num := before if num == "!" { // No rotation; probably original was not valid unicode return after, nil } dir, err := strconv.Atoi(num) if err != nil { return "", ErrorNotAnEncryptedFile // Not a number } // add the nameKey to get the real rotate distance for i := range len(c.nameKey) { dir += int(c.nameKey[i]) } var result bytes.Buffer inQuote := false for _, runeValue := range after { switch { case inQuote: _, _ = result.WriteRune(runeValue) inQuote = false case runeValue == obfuscQuoteRune: inQuote = true case runeValue >= '0' && runeValue <= '9': // Number thisdir := (dir % 9) + 1 newRune := '0' + int(runeValue) - '0' - thisdir if newRune < '0' { newRune += 10 } _, _ = result.WriteRune(rune(newRune)) case (runeValue >= 'A' && runeValue <= 'Z') || (runeValue >= 'a' && runeValue <= 'z'): thisdir := dir%25 + 1 pos := int(runeValue - 'A') if pos >= 26 { pos -= 6 } pos -= thisdir if pos < 0 { pos += 52 } if pos >= 26 { pos += 6 } _, _ = result.WriteRune(rune('A' + pos)) case runeValue >= 0xA0 && runeValue <= 0xFF: thisdir := (dir % 95) + 1 newRune := 0xA0 + int(runeValue) - 0xA0 - thisdir if newRune < 0xA0 { newRune += 96 } _, _ = result.WriteRune(rune(newRune)) case runeValue >= 0x100: thisdir := (dir % 127) + 1 base := int(runeValue - runeValue%256) newRune := rune(base + (int(runeValue) - base - thisdir)) if int(newRune) < base { newRune += 256 } _, _ = result.WriteRune(newRune) default: _, _ = result.WriteRune(runeValue) } } return result.String(), nil } // encryptFileName encrypts a 
file path func (c *Cipher) encryptFileName(in string) string { segments := strings.Split(in, "/") for i := range segments { // Skip directory name encryption if the user chose to // leave them intact if !c.dirNameEncrypt && i != (len(segments)-1) { continue } // Strip version string so that only the non-versioned part // of the file name gets encrypted/obfuscated hasVersion := false var t time.Time if i == (len(segments)-1) && version.Match(segments[i]) { var s string t, s = version.Remove(segments[i]) // version.Remove can fail, in which case it returns segments[i] if s != segments[i] { segments[i] = s hasVersion = true } } if c.mode == NameEncryptionStandard { segments[i] = c.encryptSegment(segments[i]) } else { segments[i] = c.obfuscateSegment(segments[i]) } // Add back a version to the encrypted/obfuscated // file name, if we stripped it off earlier if hasVersion { segments[i] = version.Add(segments[i], t) } } return strings.Join(segments, "/") } // EncryptFileName encrypts a file path func (c *Cipher) EncryptFileName(in string) string { if c.mode == NameEncryptionOff { return in + c.encryptedSuffix } return c.encryptFileName(in) } // EncryptDirName encrypts a directory path func (c *Cipher) EncryptDirName(in string) string { if c.mode == NameEncryptionOff || !c.dirNameEncrypt { return in } return c.encryptFileName(in) } // decryptFileName decrypts a file path func (c *Cipher) decryptFileName(in string) (string, error) { segments := strings.Split(in, "/") for i := range segments { var err error // Skip directory name decryption if the user chose to // leave them intact if !c.dirNameEncrypt && i != (len(segments)-1) { continue } // Strip version string so that only the non-versioned part // of the file name gets decrypted/deobfuscated hasVersion := false var t time.Time if i == (len(segments)-1) && version.Match(segments[i]) { var s string t, s = version.Remove(segments[i]) // version.Remove can fail, in which case it returns segments[i] if s != segments[i] { 
segments[i] = s hasVersion = true } } if c.mode == NameEncryptionStandard { segments[i], err = c.decryptSegment(segments[i]) } else { segments[i], err = c.deobfuscateSegment(segments[i]) } if err != nil { return "", err } // Add back a version to the decrypted/deobfuscated // file name, if we stripped it off earlier if hasVersion { segments[i] = version.Add(segments[i], t) } } return strings.Join(segments, "/"), nil } // DecryptFileName decrypts a file path func (c *Cipher) DecryptFileName(in string) (string, error) { if c.mode == NameEncryptionOff { remainingLength := len(in) - len(c.encryptedSuffix) if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) { return "", ErrorNotAnEncryptedFile } decrypted := in[:remainingLength] if version.Match(decrypted) { _, unversioned := version.Remove(decrypted) if unversioned == "" { return "", ErrorNotAnEncryptedFile } } // Leave the version string on, if it was there return decrypted, nil } return c.decryptFileName(in) } // DecryptDirName decrypts a directory path func (c *Cipher) DecryptDirName(in string) (string, error) { if c.mode == NameEncryptionOff || !c.dirNameEncrypt { return in, nil } return c.decryptFileName(in) } // NameEncryptionMode returns the encryption mode in use for names func (c *Cipher) NameEncryptionMode() NameEncryptionMode { return c.mode } // nonce is an NACL secretbox nonce type nonce [fileNonceSize]byte // pointer returns the nonce as a *[24]byte for secretbox func (n *nonce) pointer() *[fileNonceSize]byte { return (*[fileNonceSize]byte)(n) } // fromReader fills the nonce from an io.Reader - normally the OSes // crypto random number generator func (n *nonce) fromReader(in io.Reader) error { read, err := readers.ReadFill(in, (*n)[:]) if read != fileNonceSize { return fmt.Errorf("short read of nonce: %w", err) } return nil } // fromBuf fills the nonce from the buffer passed in func (n *nonce) fromBuf(buf []byte) { read := copy((*n)[:], buf) if read != fileNonceSize { panic("buffer to 
short to read nonce") } } // carry 1 up the nonce from position i func (n *nonce) carry(i int) { for ; i < len(*n); i++ { digit := (*n)[i] newDigit := digit + 1 (*n)[i] = newDigit if newDigit >= digit { // exit if no carry break } } } // increment to add 1 to the nonce func (n *nonce) increment() { n.carry(0) } // add a uint64 to the nonce func (n *nonce) add(x uint64) { carry := uint16(0) for i := range 8 { digit := (*n)[i] xDigit := byte(x) x >>= 8 carry += uint16(digit) + uint16(xDigit) (*n)[i] = byte(carry) carry >>= 8 } if carry != 0 { n.carry(8) } } // encrypter encrypts an io.Reader on the fly type encrypter struct { mu sync.Mutex in io.Reader c *Cipher nonce nonce buf *[blockSize]byte readBuf *[blockSize]byte bufIndex int bufSize int err error } // newEncrypter creates a new file handle encrypting on the fly func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) { fh := &encrypter{ in: in, c: c, buf: c.getBlock(), readBuf: c.getBlock(), bufSize: fileHeaderSize, } // Initialise nonce if nonce != nil { fh.nonce = *nonce } else { err := fh.nonce.fromReader(c.cryptoRand) if err != nil { return nil, err } } // Copy magic into buffer copy((*fh.buf)[:], fileMagicBytes) // Copy nonce into buffer copy((*fh.buf)[fileMagicSize:], fh.nonce[:]) return fh, nil } // Read as per io.Reader func (fh *encrypter) Read(p []byte) (n int, err error) { fh.mu.Lock() defer fh.mu.Unlock() if fh.err != nil { return 0, fh.err } if fh.bufIndex >= fh.bufSize { // Read data // FIXME should overlap the reads with a go-routine and 2 buffers? 
readBuf := (*fh.readBuf)[:blockDataSize] n, err = readers.ReadFill(fh.in, readBuf) if n == 0 { return fh.finish(err) } // possibly err != nil here, but we will process the // data and the next call to ReadFill will return 0, err // Encrypt the block using the nonce secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey) fh.bufIndex = 0 fh.bufSize = blockHeaderSize + n fh.nonce.increment() } n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize]) fh.bufIndex += n return n, nil } // finish sets the final error and tidies up func (fh *encrypter) finish(err error) (int, error) { if fh.err != nil { return 0, fh.err } fh.err = err fh.c.putBlock(fh.buf) fh.buf = nil fh.c.putBlock(fh.readBuf) fh.readBuf = nil return 0, err } // Encrypt data encrypts the data stream func (c *Cipher) encryptData(in io.Reader) (io.Reader, *encrypter, error) { in, wrap := accounting.UnWrap(in) // unwrap the accounting off the Reader out, err := c.newEncrypter(in, nil) if err != nil { return nil, nil, err } return wrap(out), out, nil // and wrap the accounting back on } // EncryptData encrypts the data stream func (c *Cipher) EncryptData(in io.Reader) (io.Reader, error) { out, _, err := c.encryptData(in) return out, err } // decrypter decrypts an io.ReaderCloser on the fly type decrypter struct { mu sync.Mutex rc io.ReadCloser nonce nonce initialNonce nonce c *Cipher buf *[blockSize]byte readBuf *[blockSize]byte bufIndex int bufSize int err error limit int64 // limit of bytes to read, -1 for unlimited open OpenRangeSeek } // newDecrypter creates a new file handle decrypting on the fly func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) { fh := &decrypter{ rc: rc, c: c, buf: c.getBlock(), readBuf: c.getBlock(), limit: -1, } // Read file header (magic + nonce) readBuf := (*fh.readBuf)[:fileHeaderSize] n, err := readers.ReadFill(fh.rc, readBuf) if n < fileHeaderSize && err == io.EOF { // This read from 0..fileHeaderSize-1 bytes return nil, 
fh.finishAndClose(ErrorEncryptedFileTooShort) } else if err != io.EOF && err != nil { return nil, fh.finishAndClose(err) } // check the magic if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) { return nil, fh.finishAndClose(ErrorEncryptedBadMagic) } // retrieve the nonce fh.nonce.fromBuf(readBuf[fileMagicSize:]) fh.initialNonce = fh.nonce return fh, nil } // newDecrypterSeek creates a new file handle decrypting on the fly func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) { var rc io.ReadCloser doRangeSeek := false setLimit := false // Open initially with no seek if offset == 0 && limit < 0 { // If no offset or limit then open whole file rc, err = open(ctx, 0, -1) } else if offset == 0 { // If no offset open the header + limit worth of the file _, underlyingLimit, _, _ := calculateUnderlying(offset, limit) rc, err = open(ctx, 0, int64(fileHeaderSize)+underlyingLimit) setLimit = true } else { // Otherwise just read the header to start with rc, err = open(ctx, 0, int64(fileHeaderSize)) doRangeSeek = true } if err != nil { return nil, err } // Open the stream which fills in the nonce fh, err = c.newDecrypter(rc) if err != nil { return nil, err } fh.open = open // will be called by fh.RangeSeek if doRangeSeek { _, err = fh.RangeSeek(ctx, offset, io.SeekStart, limit) if err != nil { _ = fh.Close() return nil, err } } if setLimit { fh.limit = limit } return fh, nil } // read data into internal buffer - call with fh.mu held func (fh *decrypter) fillBuffer() (err error) { // FIXME should overlap the reads with a go-routine and 2 buffers? 
readBuf := fh.readBuf n, err := readers.ReadFill(fh.rc, (*readBuf)[:]) if n == 0 { return err } // possibly err != nil here, but we will process the data and // the next call to ReadFull will return 0, err // Check header + 1 byte exists if n <= blockHeaderSize { if err != nil && err != io.EOF { return err // return pending error as it is likely more accurate } return ErrorEncryptedFileBadHeader } // Decrypt the block using the nonce _, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey) if !ok { if err != nil && err != io.EOF { return err // return pending error as it is likely more accurate } if !fh.c.passBadBlocks { return ErrorEncryptedBadBlock } fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock) // Zero out the bad block and continue for i := range (*fh.buf)[:n] { fh.buf[i] = 0 } } fh.bufIndex = 0 fh.bufSize = n - blockHeaderSize fh.nonce.increment() return nil } // Read as per io.Reader func (fh *decrypter) Read(p []byte) (n int, err error) { fh.mu.Lock() defer fh.mu.Unlock() if fh.err != nil { return 0, fh.err } if fh.bufIndex >= fh.bufSize { err = fh.fillBuffer() if err != nil { return 0, fh.finish(err) } } toCopy := fh.bufSize - fh.bufIndex if fh.limit >= 0 && fh.limit < int64(toCopy) { toCopy = int(fh.limit) } n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy]) fh.bufIndex += n if fh.limit >= 0 { fh.limit -= int64(n) if fh.limit == 0 { return n, fh.finish(io.EOF) } } return n, nil } // calculateUnderlying converts an (offset, limit) in an encrypted file // into an (underlyingOffset, underlyingLimit) for the underlying file. // // It also returns number of bytes to discard after reading the first // block and number of blocks this is from the start so the nonce can // be incremented. 
func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit, discard, blocks int64) { // blocks we need to seek, plus bytes we need to discard blocks, discard = offset/blockDataSize, offset%blockDataSize // Offset in underlying stream we need to seek underlyingOffset = int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize) // work out how many blocks we need to read underlyingLimit = int64(-1) if limit >= 0 { // bytes to read beyond the first block bytesToRead := limit - (blockDataSize - discard) // Read the first block blocksToRead := int64(1) if bytesToRead > 0 { // Blocks that need to be read plus left over blocks extraBlocksToRead, endBytes := bytesToRead/blockDataSize, bytesToRead%blockDataSize if endBytes != 0 { // If left over bytes must read another block extraBlocksToRead++ } blocksToRead += extraBlocksToRead } // Must read a whole number of blocks underlyingLimit = blocksToRead * (blockHeaderSize + blockDataSize) } return } // RangeSeek behaves like a call to Seek(offset int64, whence // int) with the output wrapped in an io.LimitedReader // limiting the total length to limit. // // RangeSeek with a limit of < 0 is equivalent to a regular Seek. func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, limit int64) (int64, error) { fh.mu.Lock() defer fh.mu.Unlock() if fh.open == nil { return 0, fh.finish(errors.New("can't seek - not initialised with newDecrypterSeek")) } if whence != io.SeekStart { return 0, fh.finish(errors.New("can only seek from the start")) } // Reset error or return it if not EOF if fh.err == io.EOF { fh.unFinish() } else if fh.err != nil { return 0, fh.err } underlyingOffset, underlyingLimit, discard, blocks := calculateUnderlying(offset, limit) // Move the nonce on the correct number of blocks from the start fh.nonce = fh.initialNonce fh.nonce.add(uint64(blocks)) // Can we seek underlying stream directly? 
if do, ok := fh.rc.(fs.RangeSeeker); ok { // Seek underlying stream directly _, err := do.RangeSeek(ctx, underlyingOffset, 0, underlyingLimit) if err != nil { return 0, fh.finish(err) } } else { // if not reopen with seek _ = fh.rc.Close() // close underlying file fh.rc = nil // Re-open the underlying object with the offset given rc, err := fh.open(ctx, underlyingOffset, underlyingLimit) if err != nil { return 0, fh.finish(fmt.Errorf("couldn't reopen file with offset and limit: %w", err)) } // Set the file handle fh.rc = rc } // Fill the buffer err := fh.fillBuffer() if err != nil { return 0, fh.finish(err) } // Discard bytes from the buffer if int(discard) > fh.bufSize { return 0, fh.finish(ErrorBadSeek) } fh.bufIndex = int(discard) // Set the limit fh.limit = limit return offset, nil } // Seek implements the io.Seeker interface func (fh *decrypter) Seek(offset int64, whence int) (int64, error) { return fh.RangeSeek(context.TODO(), offset, whence, -1) } // finish sets the final error and tidies up func (fh *decrypter) finish(err error) error { if fh.err != nil { return fh.err } fh.err = err fh.c.putBlock(fh.buf) fh.buf = nil fh.c.putBlock(fh.readBuf) fh.readBuf = nil return err } // unFinish undoes the effects of finish func (fh *decrypter) unFinish() { // Clear error fh.err = nil // reinstate the buffers fh.buf = fh.c.getBlock() fh.readBuf = fh.c.getBlock() // Empty the buffer fh.bufIndex = 0 fh.bufSize = 0 } // Close func (fh *decrypter) Close() error { fh.mu.Lock() defer fh.mu.Unlock() // Check already closed if fh.err == ErrorFileClosed { return fh.err } // Closed before reading EOF so not finish()ed yet if fh.err == nil { _ = fh.finish(io.EOF) } // Show file now closed fh.err = ErrorFileClosed if fh.rc == nil { return nil } return fh.rc.Close() } // finishAndClose does finish then Close() // // Used when we are returning a nil fh from new func (fh *decrypter) finishAndClose(err error) error { _ = fh.finish(err) _ = fh.Close() return err } // DecryptData 
decrypts the data stream func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) { out, err := c.newDecrypter(rc) if err != nil { return nil, err } return out, nil } // DecryptDataSeek decrypts the data stream from offset // // The open function must return a ReadCloser opened to the offset supplied. // // You must use this form of DecryptData if you might want to Seek the file handle func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) { out, err := c.newDecrypterSeek(ctx, open, offset, limit) if err != nil { return nil, err } return out, nil } // EncryptedSize calculates the size of the data when encrypted func (c *Cipher) EncryptedSize(size int64) int64 { blocks, residue := size/blockDataSize, size%blockDataSize encryptedSize := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize) if residue != 0 { encryptedSize += blockHeaderSize + residue } return encryptedSize } // DecryptedSize calculates the size of the data when decrypted func (c *Cipher) DecryptedSize(size int64) (int64, error) { size -= int64(fileHeaderSize) if size < 0 { return 0, ErrorEncryptedFileTooShort } blocks, residue := size/blockSize, size%blockSize decryptedSize := blocks * blockDataSize if residue != 0 { residue -= blockHeaderSize if residue <= 0 { return 0, ErrorEncryptedFileBadHeader } } decryptedSize += residue return decryptedSize, nil } // check interfaces var ( _ io.ReadCloser = (*decrypter)(nil) _ io.Seeker = (*decrypter)(nil) _ fs.RangeSeeker = (*decrypter)(nil) _ io.Reader = (*encrypter)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/crypt/crypt_test.go
backend/crypt/crypt_test.go
// Test Crypt filesystem interface package crypt_test import ( "os" "path/filepath" "runtime" "testing" "github.com/rclone/rclone/backend/crypt" _ "github.com/rclone/rclone/backend/drive" // for integration tests _ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/swift" // for integration tests "github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { if *fstest.RemoteName == "" { t.Skip("Skipping as -remote not set") } fstests.Run(t, &fstests.Opt{ RemoteName: *fstest.RemoteName, NilObject: (*crypt.Object)(nil), UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableObjectMethods: []string{"MimeType"}, }) } // TestStandard runs integration tests against the remote func TestStandardBase32(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("Skipping as -remote set") } tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard") name := "TestCrypt" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":", NilObject: (*crypt.Object)(nil), ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "type", Value: "crypt"}, {Name: name, Key: "remote", Value: tempdir}, {Name: name, Key: "password", Value: obscure.MustObscure("potato")}, {Name: name, Key: "filename_encryption", Value: "standard"}, }, UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableObjectMethods: []string{"MimeType"}, QuickTestOK: true, }) } func TestStandardBase64(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("Skipping as -remote set") } tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard") name := "TestCrypt" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":", NilObject: (*crypt.Object)(nil), ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "type", Value: "crypt"}, {Name: name, Key: "remote", Value: tempdir}, {Name: 
name, Key: "password", Value: obscure.MustObscure("potato")}, {Name: name, Key: "filename_encryption", Value: "standard"}, {Name: name, Key: "filename_encoding", Value: "base64"}, }, UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableObjectMethods: []string{"MimeType"}, QuickTestOK: true, }) } func TestStandardBase32768(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("Skipping as -remote set") } tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard") name := "TestCrypt" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":", NilObject: (*crypt.Object)(nil), ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "type", Value: "crypt"}, {Name: name, Key: "remote", Value: tempdir}, {Name: name, Key: "password", Value: obscure.MustObscure("potato")}, {Name: name, Key: "filename_encryption", Value: "standard"}, {Name: name, Key: "filename_encoding", Value: "base32768"}, }, UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableObjectMethods: []string{"MimeType"}, QuickTestOK: true, }) } // TestOff runs integration tests against the remote func TestOff(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("Skipping as -remote set") } tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off") name := "TestCrypt2" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":", NilObject: (*crypt.Object)(nil), ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "type", Value: "crypt"}, {Name: name, Key: "remote", Value: tempdir}, {Name: name, Key: "password", Value: obscure.MustObscure("potato2")}, {Name: name, Key: "filename_encryption", Value: "off"}, }, UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableObjectMethods: []string{"MimeType"}, QuickTestOK: true, }) } // TestObfuscate runs integration tests against the remote func TestObfuscate(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("Skipping as -remote set") } if runtime.GOOS == "darwin" { 
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with") } tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate") name := "TestCrypt3" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":", NilObject: (*crypt.Object)(nil), ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "type", Value: "crypt"}, {Name: name, Key: "remote", Value: tempdir}, {Name: name, Key: "password", Value: obscure.MustObscure("potato2")}, {Name: name, Key: "filename_encryption", Value: "obfuscate"}, }, SkipBadWindowsCharacters: true, UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableObjectMethods: []string{"MimeType"}, QuickTestOK: true, }) } // TestNoDataObfuscate runs integration tests against the remote func TestNoDataObfuscate(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("Skipping as -remote set") } if runtime.GOOS == "darwin" { t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with") } tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate") name := "TestCrypt4" fstests.Run(t, &fstests.Opt{ RemoteName: name + ":", NilObject: (*crypt.Object)(nil), ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "type", Value: "crypt"}, {Name: name, Key: "remote", Value: tempdir}, {Name: name, Key: "password", Value: obscure.MustObscure("potato2")}, {Name: name, Key: "filename_encryption", Value: "obfuscate"}, {Name: name, Key: "no_data_encryption", Value: "true"}, }, SkipBadWindowsCharacters: true, UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableObjectMethods: []string{"MimeType"}, QuickTestOK: true, }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/crypt/crypt_internal_test.go
backend/crypt/crypt_internal_test.go
package crypt import ( "bytes" "context" "crypto/md5" "fmt" "io" "testing" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/lib/random" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // Create a temporary local fs to upload things from func makeTempLocalFs(t *testing.T) (localFs fs.Fs) { localFs, err := fs.TemporaryLocalFs(context.Background()) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, localFs.Rmdir(context.Background(), "")) }) return localFs } // Upload a file to a remote func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) { inBuf := bytes.NewBufferString(contents) t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC) upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil) obj, err := f.Put(context.Background(), inBuf, upSrc) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, obj.Remove(context.Background())) }) return obj } // Test the ObjectInfo func testObjectInfo(t *testing.T, f *Fs, wrap bool) { var ( contents = random.String(100) path = "hash_test_object" ctx = context.Background() ) if wrap { path = "_wrap" } localFs := makeTempLocalFs(t) obj := uploadFile(t, localFs, path, contents) // encrypt the data inBuf := bytes.NewBufferString(contents) var outBuf bytes.Buffer enc, err := f.cipher.newEncrypter(inBuf, nil) require.NoError(t, err) nonce := enc.nonce // read the nonce at the start _, err = io.Copy(&outBuf, enc) require.NoError(t, err) var oi fs.ObjectInfo = obj if wrap { // wrap the object in an fs.ObjectUnwrapper if required oi = fs.NewOverrideRemote(oi, "new_remote") } // wrap the object in a crypt for upload using the nonce we // saved from the encrypter src := f.newObjectInfo(oi, nonce) // Test ObjectInfo methods if !f.opt.NoDataEncryption { assert.Equal(t, int64(outBuf.Len()), src.Size()) } assert.Equal(t, f, src.Fs()) 
assert.NotEqual(t, path, src.Remote()) // Test ObjectInfo.Hash wantHash := md5.Sum(outBuf.Bytes()) gotHash, err := src.Hash(ctx, hash.MD5) require.NoError(t, err) assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash) } func testComputeHash(t *testing.T, f *Fs) { var ( contents = random.String(100) path = "compute_hash_test" ctx = context.Background() hashType = f.Fs.Hashes().GetOne() ) if hashType == hash.None { t.Skipf("%v: does not support hashes", f.Fs) } localFs := makeTempLocalFs(t) // Upload a file to localFs as a test object localObj := uploadFile(t, localFs, path, contents) // Upload the same data to the remote Fs also remoteObj := uploadFile(t, f, path, contents) // Calculate the expected Hash of the remote object computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType) require.NoError(t, err) // Test computed hash matches remote object hash remoteObjHash, err := remoteObj.(*Object).Object.Hash(ctx, hashType) require.NoError(t, err) assert.Equal(t, remoteObjHash, computedHash) } // InternalTest is called by fstests.Run to extra tests func (f *Fs) InternalTest(t *testing.T) { t.Run("ObjectInfo", func(t *testing.T) { testObjectInfo(t, f, false) }) t.Run("ObjectInfoWrap", func(t *testing.T) { testObjectInfo(t, f, true) }) t.Run("ComputeHash", func(t *testing.T) { testComputeHash(t, f) }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/crypt/cipher_test.go
backend/crypt/cipher_test.go
package crypt import ( "bytes" "context" "encoding/base32" "encoding/base64" "errors" "fmt" "io" "strings" "testing" "github.com/Max-Sum/base32768" "github.com/rclone/rclone/backend/crypt/pkcs7" "github.com/rclone/rclone/lib/readers" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestNewNameEncryptionMode(t *testing.T) { for _, test := range []struct { in string expected NameEncryptionMode expectedErr string }{ {"off", NameEncryptionOff, ""}, {"standard", NameEncryptionStandard, ""}, {"obfuscate", NameEncryptionObfuscated, ""}, {"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""}, } { actual, actualErr := NewNameEncryptionMode(test.in) assert.Equal(t, actual, test.expected) if test.expectedErr == "" { assert.NoError(t, actualErr) } else { assert.EqualError(t, actualErr, test.expectedErr) } } } func TestNewNameEncryptionModeString(t *testing.T) { assert.Equal(t, NameEncryptionOff.String(), "off") assert.Equal(t, NameEncryptionStandard.String(), "standard") assert.Equal(t, NameEncryptionObfuscated.String(), "obfuscate") assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3") } type EncodingTestCase struct { in string expected string } func testEncodeFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) { for _, test := range testCases { enc, err := NewNameEncoding(encoding) assert.NoError(t, err, "There should be no error creating name encoder for base32.") actual := enc.EncodeToString([]byte(test.in)) assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in)) recovered, err := enc.DecodeString(test.expected) assert.NoError(t, err) assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected)) if caseInsensitive { in := strings.ToUpper(test.expected) recovered, err = enc.DecodeString(in) assert.NoError(t, err) assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in)) } } } func TestEncodeFileNameBase32(t 
*testing.T) { testEncodeFileName(t, "base32", []EncodingTestCase{ {"", ""}, {"1", "64"}, {"12", "64p0"}, {"123", "64p36"}, {"1234", "64p36d0"}, {"12345", "64p36d1l"}, {"123456", "64p36d1l6o"}, {"1234567", "64p36d1l6org"}, {"12345678", "64p36d1l6orjg"}, {"123456789", "64p36d1l6orjge8"}, {"1234567890", "64p36d1l6orjge9g"}, {"12345678901", "64p36d1l6orjge9g64"}, {"123456789012", "64p36d1l6orjge9g64p0"}, {"1234567890123", "64p36d1l6orjge9g64p36"}, {"12345678901234", "64p36d1l6orjge9g64p36d0"}, {"123456789012345", "64p36d1l6orjge9g64p36d1l"}, {"1234567890123456", "64p36d1l6orjge9g64p36d1l6o"}, }, true) } func TestEncodeFileNameBase64(t *testing.T) { testEncodeFileName(t, "base64", []EncodingTestCase{ {"", ""}, {"1", "MQ"}, {"12", "MTI"}, {"123", "MTIz"}, {"1234", "MTIzNA"}, {"12345", "MTIzNDU"}, {"123456", "MTIzNDU2"}, {"1234567", "MTIzNDU2Nw"}, {"12345678", "MTIzNDU2Nzg"}, {"123456789", "MTIzNDU2Nzg5"}, {"1234567890", "MTIzNDU2Nzg5MA"}, {"12345678901", "MTIzNDU2Nzg5MDE"}, {"123456789012", "MTIzNDU2Nzg5MDEy"}, {"1234567890123", "MTIzNDU2Nzg5MDEyMw"}, {"12345678901234", "MTIzNDU2Nzg5MDEyMzQ"}, {"123456789012345", "MTIzNDU2Nzg5MDEyMzQ1"}, {"1234567890123456", "MTIzNDU2Nzg5MDEyMzQ1Ng"}, }, false) } func TestEncodeFileNameBase32768(t *testing.T) { testEncodeFileName(t, "base32768", []EncodingTestCase{ {"", ""}, {"1", "㼿"}, {"12", "㻙ɟ"}, {"123", "㻙ⲿ"}, {"1234", "㻙ⲍƟ"}, {"12345", "㻙ⲍ⍟"}, {"123456", "㻙ⲍ⍆ʏ"}, {"1234567", "㻙ⲍ⍆觟"}, {"12345678", "㻙ⲍ⍆觓ɧ"}, {"123456789", "㻙ⲍ⍆觓栯"}, {"1234567890", "㻙ⲍ⍆觓栩ɣ"}, {"12345678901", "㻙ⲍ⍆觓栩朧"}, {"123456789012", "㻙ⲍ⍆觓栩朤ʅ"}, {"1234567890123", "㻙ⲍ⍆觓栩朤談"}, {"12345678901234", "㻙ⲍ⍆觓栩朤諆ɔ"}, {"123456789012345", "㻙ⲍ⍆觓栩朤諆媕"}, {"1234567890123456", "㻙ⲍ⍆觓栩朤諆媕䆿"}, }, false) } func TestDecodeFileNameBase32(t *testing.T) { enc, err := NewNameEncoding("base32") assert.NoError(t, err, "There should be no error creating name encoder for base32.") // We've tested decoding the valid ones above, now concentrate on the invalid ones for _, test := range []struct { in 
string expectedErr error }{ {"64=", ErrorBadBase32Encoding}, {"!", base32.CorruptInputError(0)}, {"hello=hello", base32.CorruptInputError(5)}, } { actual, actualErr := enc.DecodeString(test.in) assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr)) } } func TestDecodeFileNameBase64(t *testing.T) { enc, err := NewNameEncoding("base64") assert.NoError(t, err, "There should be no error creating name encoder for base32.") // We've tested decoding the valid ones above, now concentrate on the invalid ones for _, test := range []struct { in string expectedErr error }{ {"64=", base64.CorruptInputError(2)}, {"!", base64.CorruptInputError(0)}, {"Hello=Hello", base64.CorruptInputError(5)}, } { actual, actualErr := enc.DecodeString(test.in) assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr)) } } func TestDecodeFileNameBase32768(t *testing.T) { enc, err := NewNameEncoding("base32768") assert.NoError(t, err, "There should be no error creating name encoder for base32.") // We've tested decoding the valid ones above, now concentrate on the invalid ones for _, test := range []struct { in string expectedErr error }{ {"㼿c", base32768.CorruptInputError(1)}, {"!", base32768.CorruptInputError(0)}, {"㻙ⲿ=㻙ⲿ", base32768.CorruptInputError(2)}, } { actual, actualErr := enc.DecodeString(test.in) assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr)) } } func testEncryptSegment(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) { enc, _ := NewNameEncoding(encoding) c, _ := newCipher(NameEncryptionStandard, "", "", true, enc) for _, test := range testCases { actual := c.encryptSegment(test.in) assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in)) recovered, err := c.decryptSegment(test.expected) 
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected)) assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected)) if caseInsensitive { in := strings.ToUpper(test.expected) recovered, err = c.decryptSegment(in) assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in)) assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in)) } } } func TestEncryptSegmentBase32(t *testing.T) { testEncryptSegment(t, "base32", []EncodingTestCase{ {"", ""}, {"1", "p0e52nreeaj0a5ea7s64m4j72s"}, {"12", "l42g6771hnv3an9cgc8cr2n1ng"}, {"123", "qgm4avr35m5loi1th53ato71v0"}, {"1234", "8ivr2e9plj3c3esisjpdisikos"}, {"12345", "rh9vu63q3o29eqmj4bg6gg7s44"}, {"123456", "bn717l3alepn75b2fb2ejmi4b4"}, {"1234567", "n6bo9jmb1qe3b1ogtj5qkf19k8"}, {"12345678", "u9t24j7uaq94dh5q53m3s4t9ok"}, {"123456789", "37hn305g6j12d1g0kkrl7ekbs4"}, {"1234567890", "ot8d91eplaglb62k2b1trm2qv0"}, {"12345678901", "h168vvrgb53qnrtvvmb378qrcs"}, {"123456789012", "s3hsdf9e29ithrqbjqu01t8q2s"}, {"1234567890123", "cf3jimlv1q2oc553mv7s3mh3eo"}, {"12345678901234", "moq0uqdlqrblrc5pa5u5c7hq9g"}, {"123456789012345", "eeam3li4rnommi3a762h5n7meg"}, {"1234567890123456", "mijbj0frqf6ms7frcr6bd9h0env53jv96pjaaoirk7forcgpt70g"}, }, true) } func TestEncryptSegmentBase64(t *testing.T) { testEncryptSegment(t, "base64", []EncodingTestCase{ {"", ""}, {"1", "yBxRX25ypgUVyj8MSxJnFw"}, {"12", "qQUDHOGN_jVdLIMQzYrhvA"}, {"123", "1CxFf2Mti1xIPYlGruDh-A"}, {"1234", "RL-xOTmsxsG7kuTy2XJUxw"}, {"12345", "3FP_GHoeBJdq0yLgaED8IQ"}, {"123456", "Xc4T1Gqrs3OVYnrE6dpEWQ"}, {"1234567", "uZeEzssOnDWHEOzLqjwpog"}, {"12345678", "8noiTP5WkkbEuijsPhOpxQ"}, {"123456789", "GeNxgLA0wiaGAKU3U7qL4Q"}, {"1234567890", "x1DUhdmqoVWYVBLD3dha-A"}, {"12345678901", "iEyP_3BZR6vvv_2WM6NbZw"}, {"123456789012", "4OPGvS4SZdjvS568APUaFw"}, {"1234567890123", "Y8c5Wr8OhYYUo7fPwdojdg"}, {"12345678901234", "tjQPabXW112wuVF8Vh46TA"}, {"123456789012345", "c5Vh1kTd8WtIajmFEtz2dA"}, {"1234567890123456", 
"tKa5gfvTzW4d-2bMtqYgdf5Rz-k2ZqViW6HfjbIZ6cE"}, }, false) } func TestEncryptSegmentBase32768(t *testing.T) { testEncryptSegment(t, "base32768", []EncodingTestCase{ {"", ""}, {"1", "詮㪗鐮僀伎作㻖㢧⪟"}, {"12", "竢朧䉱虃光塬䟛⣡蓟"}, {"123", "遶㞟鋅缕袡鲅ⵝ蝁ꌟ"}, {"1234", "䢟銮䵵狌㐜燳谒颴詟"}, {"12345", "钉Ꞇ㖃蚩憶狫朰杜㜿"}, {"123456", "啇ᚵⵕ憗䋫➫➓肤卟"}, {"1234567", "茫螓翁連劘樓㶔抉矟"}, {"12345678", "龝☳䘊辄岅較络㧩襟"}, {"123456789", "ⲱ苀㱆犂媐Ꮤ锇惫靟"}, {"1234567890", "計宁憕偵匢皫╛纺ꌟ"}, {"12345678901", "檆䨿鑫㪺藝ꡖ勇䦛婟"}, {"123456789012", "雑頏䰂䲝淚哚鹡魺⪟"}, {"1234567890123", "塃璶繁躸圅㔟䗃肃懟"}, {"12345678901234", "腺ᕚ崚鏕鏥讥鼌䑺䲿"}, {"123456789012345", "怪绕滻蕶肣但⠥荖惟"}, {"1234567890123456", "肳哀旚挶靏鏻㾭䱠慟㪳ꏆ賊兲铧敻塹魀ʟ"}, }, false) } func TestDecryptSegmentBase32(t *testing.T) { // We've tested the forwards above, now concentrate on the errors longName := make([]byte, 3328) for i := range longName { longName[i] = 'a' } enc, _ := NewNameEncoding("base32") c, _ := newCipher(NameEncryptionStandard, "", "", true, enc) for _, test := range []struct { in string expectedErr error }{ {"64=", ErrorBadBase32Encoding}, {"!", base32.CorruptInputError(0)}, {string(longName), ErrorTooLongAfterDecode}, {enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize}, {enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize}, {enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong}, } { actual, actualErr := c.decryptSegment(test.in) assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr)) } } func TestDecryptSegmentBase64(t *testing.T) { // We've tested the forwards above, now concentrate on the errors longName := make([]byte, 2816) for i := range longName { longName[i] = 'a' } enc, _ := NewNameEncoding("base64") c, _ := newCipher(NameEncryptionStandard, "", "", true, enc) for _, test := range []struct { in string expectedErr error }{ {"6H=", base64.CorruptInputError(2)}, {"!", base64.CorruptInputError(0)}, {string(longName), ErrorTooLongAfterDecode}, 
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize}, {enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize}, {enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong}, } { actual, actualErr := c.decryptSegment(test.in) assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr)) } } func TestDecryptSegmentBase32768(t *testing.T) { // We've tested the forwards above, now concentrate on the errors longName := strings.Repeat("怪", 1280) enc, _ := NewNameEncoding("base32768") c, _ := newCipher(NameEncryptionStandard, "", "", true, enc) for _, test := range []struct { in string expectedErr error }{ {"怪=", base32768.CorruptInputError(1)}, {"!", base32768.CorruptInputError(0)}, {longName, ErrorTooLongAfterDecode}, {enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize}, {enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize}, {enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong}, } { actual, actualErr := c.decryptSegment(test.in) assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr)) } } func testStandardEncryptFileName(t *testing.T, encoding string, testCasesEncryptDir []EncodingTestCase, testCasesNoEncryptDir []EncodingTestCase) { // First standard mode enc, _ := NewNameEncoding(encoding) c, _ := newCipher(NameEncryptionStandard, "", "", true, enc) for _, test := range testCasesEncryptDir { assert.Equal(t, test.expected, c.EncryptFileName(test.in)) } // Standard mode with directory name encryption off c, _ = newCipher(NameEncryptionStandard, "", "", false, enc) for _, test := range testCasesNoEncryptDir { assert.Equal(t, test.expected, c.EncryptFileName(test.in)) } } func TestStandardEncryptFileNameBase32(t *testing.T) { testStandardEncryptFileName(t, "base32", []EncodingTestCase{ {"1", "p0e52nreeaj0a5ea7s64m4j72s"}, 
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"}, {"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"}, {"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"}, {"1/12-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"}, }, []EncodingTestCase{ {"1", "p0e52nreeaj0a5ea7s64m4j72s"}, {"1/12", "1/l42g6771hnv3an9cgc8cr2n1ng"}, {"1/12/123", "1/12/qgm4avr35m5loi1th53ato71v0"}, {"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"}, {"1/12-v2001-02-03-040506-123", "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"}, }) } func TestStandardEncryptFileNameBase64(t *testing.T) { testStandardEncryptFileName(t, "base64", []EncodingTestCase{ {"1", "yBxRX25ypgUVyj8MSxJnFw"}, {"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"}, {"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"}, {"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"}, {"1/12-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"}, }, []EncodingTestCase{ {"1", "yBxRX25ypgUVyj8MSxJnFw"}, {"1/12", "1/qQUDHOGN_jVdLIMQzYrhvA"}, {"1/12/123", "1/12/1CxFf2Mti1xIPYlGruDh-A"}, {"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"}, {"1/12-v2001-02-03-040506-123", "1/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"}, }) } func TestStandardEncryptFileNameBase32768(t *testing.T) { testStandardEncryptFileName(t, "base32768", []EncodingTestCase{ {"1", "詮㪗鐮僀伎作㻖㢧⪟"}, {"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"}, {"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"}, {"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"}, {"1/12-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"}, }, []EncodingTestCase{ {"1", "詮㪗鐮僀伎作㻖㢧⪟"}, {"1/12", "1/竢朧䉱虃光塬䟛⣡蓟"}, {"1/12/123", "1/12/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"}, {"1-v2001-02-03-040506-123", 
"詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"}, {"1/12-v2001-02-03-040506-123", "1/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"}, }) } func TestNonStandardEncryptFileName(t *testing.T) { // Off mode c, _ := newCipher(NameEncryptionOff, "", "", true, nil) assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123")) // Off mode with custom suffix c, _ = newCipher(NameEncryptionOff, "", "", true, nil) c.setEncryptedSuffix(".jpg") assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123")) // Off mode with empty suffix c.setEncryptedSuffix("none") assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123")) // Obfuscation mode c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil) assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello")) assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123")) assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt")) assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1")) assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0")) // Obfuscation mode with directory name encryption off c, _ = newCipher(NameEncryptionObfuscated, "", "", false, nil) assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello")) assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123")) assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1")) assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0")) } func testStandardDecryptFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) { enc, _ := NewNameEncoding(encoding) for _, test := range testCases { // Test when dirNameEncrypt=true c, _ := newCipher(NameEncryptionStandard, "", "", true, enc) actual, actualErr := c.DecryptFileName(test.in) assert.NoError(t, actualErr) assert.Equal(t, test.expected, actual) if 
caseInsensitive { c, _ := newCipher(NameEncryptionStandard, "", "", true, enc) actual, actualErr := c.DecryptFileName(strings.ToUpper(test.in)) assert.NoError(t, actualErr) assert.Equal(t, test.expected, actual) } // Add a character should raise ErrorNotAMultipleOfBlocksize actual, actualErr = c.DecryptFileName(enc.EncodeToString([]byte("1")) + test.in) assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr) assert.Equal(t, "", actual) // Test when dirNameEncrypt=false noDirEncryptIn := test.in if strings.LastIndex(test.expected, "/") != -1 { noDirEncryptIn = test.expected[:strings.LastIndex(test.expected, "/")] + test.in[strings.LastIndex(test.in, "/"):] } c, _ = newCipher(NameEncryptionStandard, "", "", false, enc) actual, actualErr = c.DecryptFileName(noDirEncryptIn) assert.NoError(t, actualErr) assert.Equal(t, test.expected, actual) } } func TestStandardDecryptFileNameBase32(t *testing.T) { testStandardDecryptFileName(t, "base32", []EncodingTestCase{ {"p0e52nreeaj0a5ea7s64m4j72s", "1"}, {"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"}, {"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"}, }, true) } func TestStandardDecryptFileNameBase64(t *testing.T) { testStandardDecryptFileName(t, "base64", []EncodingTestCase{ {"yBxRX25ypgUVyj8MSxJnFw", "1"}, {"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"}, {"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"}, }, false) } func TestStandardDecryptFileNameBase32768(t *testing.T) { testStandardDecryptFileName(t, "base32768", []EncodingTestCase{ {"詮㪗鐮僀伎作㻖㢧⪟", "1"}, {"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"}, {"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"}, }, false) } func TestNonStandardDecryptFileName(t *testing.T) { for _, encoding := range []string{"base32", "base64", "base32768"} { enc, _ := NewNameEncoding(encoding) for _, test := range []struct { mode NameEncryptionMode dirNameEncrypt bool in string expected string expectedErr error 
customSuffix string }{ {NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""}, {NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""}, {NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""}, {NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""}, {NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""}, {NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""}, {NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"}, {NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"}, {NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""}, {NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""}, {NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""}, {NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""}, {NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""}, {NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""}, } { c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc) if test.customSuffix != "" { c.setEncryptedSuffix(test.customSuffix) } actual, actualErr := c.DecryptFileName(test.in) what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode) assert.Equal(t, test.expected, actual, what) assert.Equal(t, test.expectedErr, actualErr, what) } } } func TestEncDecMatches(t *testing.T) { for _, encoding := range []string{"base32", "base64", "base32768"} { enc, _ := NewNameEncoding(encoding) for _, test := range []struct { mode NameEncryptionMode in string }{ {NameEncryptionStandard, "1/2/3/4"}, {NameEncryptionOff, "1/2/3/4"}, {NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"}, {NameEncryptionObfuscated, 
"Avatar The Last Airbender"}, } { c, _ := newCipher(test.mode, "", "", true, enc) out, err := c.DecryptFileName(c.EncryptFileName(test.in)) what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode) assert.Equal(t, out, test.in, what) assert.Equal(t, err, nil, what) } } } func testStandardEncryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase) { enc, _ := NewNameEncoding(encoding) c, _ := newCipher(NameEncryptionStandard, "", "", true, enc) // First standard mode for _, test := range testCases { assert.Equal(t, test.expected, c.EncryptDirName(test.in)) } } func TestStandardEncryptDirNameBase32(t *testing.T) { testStandardEncryptDirName(t, "base32", []EncodingTestCase{ {"1", "p0e52nreeaj0a5ea7s64m4j72s"}, {"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"}, {"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"}, }) } func TestStandardEncryptDirNameBase64(t *testing.T) { testStandardEncryptDirName(t, "base64", []EncodingTestCase{ {"1", "yBxRX25ypgUVyj8MSxJnFw"}, {"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"}, {"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"}, }) } func TestStandardEncryptDirNameBase32768(t *testing.T) { testStandardEncryptDirName(t, "base32768", []EncodingTestCase{ {"1", "詮㪗鐮僀伎作㻖㢧⪟"}, {"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"}, {"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"}, }) } func TestNonStandardEncryptDirName(t *testing.T) { for _, encoding := range []string{"base32", "base64", "base32768"} { enc, _ := NewNameEncoding(encoding) c, _ := newCipher(NameEncryptionStandard, "", "", false, enc) assert.Equal(t, "1/12", c.EncryptDirName("1/12")) assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123")) // Now off mode c, _ = newCipher(NameEncryptionOff, "", "", true, enc) assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123")) } } func testStandardDecryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive 
bool) { enc, _ := NewNameEncoding(encoding) for _, test := range testCases { // Test dirNameEncrypt=true c, _ := newCipher(NameEncryptionStandard, "", "", true, enc) actual, actualErr := c.DecryptDirName(test.in) assert.Equal(t, test.expected, actual) assert.NoError(t, actualErr) if caseInsensitive { actual, actualErr := c.DecryptDirName(strings.ToUpper(test.in)) assert.Equal(t, actual, test.expected) assert.NoError(t, actualErr) } actual, actualErr = c.DecryptDirName(enc.EncodeToString([]byte("1")) + test.in) assert.Equal(t, "", actual) assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr) // Test dirNameEncrypt=false c, _ = newCipher(NameEncryptionStandard, "", "", false, enc) actual, actualErr = c.DecryptDirName(test.in) assert.Equal(t, test.in, actual) assert.NoError(t, actualErr) actual, actualErr = c.DecryptDirName(test.expected) assert.Equal(t, test.expected, actual) assert.NoError(t, actualErr) // Test dirNameEncrypt=false } } /* enc, _ := NewNameEncoding(encoding) for _, test := range []struct { mode NameEncryptionMode dirNameEncrypt bool in string expected string expectedErr error }{ {NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil}, {NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil}, {NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil}, {NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil}, {NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize}, {NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil}, {NameEncryptionStandard, false, "1/12/123", "1/12/123", nil}, } { c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc) actual, actualErr := c.DecryptDirName(test.in) 
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode) assert.Equal(t, test.expected, actual, what) assert.Equal(t, test.expectedErr, actualErr, what) } */ func TestStandardDecryptDirNameBase32(t *testing.T) { testStandardDecryptDirName(t, "base32", []EncodingTestCase{ {"p0e52nreeaj0a5ea7s64m4j72s", "1"}, {"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"}, {"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"}, }, true) } func TestStandardDecryptDirNameBase64(t *testing.T) { testStandardDecryptDirName(t, "base64", []EncodingTestCase{ {"yBxRX25ypgUVyj8MSxJnFw", "1"}, {"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"}, {"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"}, }, false) } func TestStandardDecryptDirNameBase32768(t *testing.T) { testStandardDecryptDirName(t, "base32768", []EncodingTestCase{ {"詮㪗鐮僀伎作㻖㢧⪟", "1"}, {"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"}, {"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"}, }, false) } func TestNonStandardDecryptDirName(t *testing.T) { for _, test := range []struct { mode NameEncryptionMode dirNameEncrypt bool in string expected string expectedErr error }{ {NameEncryptionOff, true, "1/12/123.bin", "1/12/123.bin", nil}, {NameEncryptionOff, true, "1/12/123", "1/12/123", nil}, {NameEncryptionOff, true, ".bin", ".bin", nil}, } { c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, nil) actual, actualErr := c.DecryptDirName(test.in) what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode) assert.Equal(t, test.expected, actual, what) assert.Equal(t, test.expectedErr, actualErr, what) } } func TestEncryptedSize(t *testing.T) { c, _ := newCipher(NameEncryptionStandard, "", "", true, nil) for _, test := range []struct { in int64 expected int64 }{ {0, 32}, {1, 32 + 16 + 1}, {65536, 32 + 16 + 65536}, {65537, 32 + 16 + 65536 + 16 + 1}, {1 << 20, 32 + 16*(16+65536)}, {(1 << 20) + 65535, 32 + 16*(16+65536) + 16 + 65535}, {1 << 30, 32 + 
16384*(16+65536)}, {(1 << 40) + 1, 32 + 16777216*(16+65536) + 16 + 1}, } { actual := c.EncryptedSize(test.in) assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %d", test.in)) recovered, err := c.DecryptedSize(test.expected) assert.NoError(t, err, fmt.Sprintf("Testing reverse %d", test.expected)) assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %d", test.expected)) } } func TestDecryptedSize(t *testing.T) { // Test the errors since we tested the reverse above c, _ := newCipher(NameEncryptionStandard, "", "", true, nil) for _, test := range []struct { in int64 expectedErr error }{ {0, ErrorEncryptedFileTooShort}, {0, ErrorEncryptedFileTooShort}, {1, ErrorEncryptedFileTooShort}, {7, ErrorEncryptedFileTooShort}, {32 + 1, ErrorEncryptedFileBadHeader}, {32 + 16, ErrorEncryptedFileBadHeader}, {32 + 16 + 65536 + 1, ErrorEncryptedFileBadHeader}, {32 + 16 + 65536 + 16, ErrorEncryptedFileBadHeader}, } { _, actualErr := c.DecryptedSize(test.in) assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("Testing %d", test.in)) } } func TestNoncePointer(t *testing.T) { var x nonce assert.Equal(t, (*[24]byte)(&x), x.pointer()) } func TestNonceFromReader(t *testing.T) { var x nonce buf := bytes.NewBufferString("123456789abcdefghijklmno") err := x.fromReader(buf) assert.NoError(t, err) assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x) buf = bytes.NewBufferString("123456789abcdefghijklmn") err = x.fromReader(buf) assert.EqualError(t, err, "short read of nonce: EOF") } func TestNonceFromBuf(t *testing.T) { var x nonce buf := []byte("123456789abcdefghijklmnoXXXXXXXX") x.fromBuf(buf) assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x) buf = []byte("0123456789abcdefghijklmn") x.fromBuf(buf) assert.Equal(t, nonce{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 
'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n'}, x) buf = []byte("0123456789abcdefghijklm") assert.Panics(t, func() { x.fromBuf(buf) }) } func TestNonceIncrement(t *testing.T) { for _, test := range []struct { in nonce out nonce }{ { nonce{0x00}, nonce{0x01}, }, { nonce{0xFF}, nonce{0x00, 0x01}, }, { nonce{0xFF, 0xFF}, nonce{0x00, 0x00, 0x01}, }, { nonce{0xFF, 0xFF, 0xFF}, nonce{0x00, 0x00, 0x00, 0x01}, }, { nonce{0xFF, 0xFF, 0xFF, 0xFF}, nonce{0x00, 0x00, 0x00, 0x00, 0x01}, }, { nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, nonce{0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, }, { nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, nonce{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, }, { nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, nonce{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, }, { nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, nonce{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, }, { nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, nonce{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, }, { nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, nonce{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, }, { nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, nonce{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, }, { nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, nonce{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, }, { nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, nonce{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, }, { nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, nonce{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, }, { nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, nonce{0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, }, { nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, nonce{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, }, {
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/crypt/pkcs7/pkcs7.go
backend/crypt/pkcs7/pkcs7.go
// Package pkcs7 implements PKCS#7 padding // // This is a standard way of encoding variable length buffers into // buffers which are a multiple of an underlying crypto block size. package pkcs7 import "errors" // Errors Unpad can return var ( ErrorPaddingNotFound = errors.New("bad PKCS#7 padding - not padded") ErrorPaddingNotAMultiple = errors.New("bad PKCS#7 padding - not a multiple of blocksize") ErrorPaddingTooLong = errors.New("bad PKCS#7 padding - too long") ErrorPaddingTooShort = errors.New("bad PKCS#7 padding - too short") ErrorPaddingNotAllTheSame = errors.New("bad PKCS#7 padding - not all the same") ) // Pad buf using PKCS#7 to a multiple of n. // // Appends the padding to buf - make a copy of it first if you don't // want it modified. func Pad(n int, buf []byte) []byte { if n <= 1 || n >= 256 { panic("bad multiple") } length := len(buf) padding := n - (length % n) for range padding { buf = append(buf, byte(padding)) } if (len(buf) % n) != 0 { panic("padding failed") } return buf } // Unpad buf using PKCS#7 from a multiple of n returning a slice of // buf or an error if malformed. func Unpad(n int, buf []byte) ([]byte, error) { if n <= 1 || n >= 256 { panic("bad multiple") } length := len(buf) if length == 0 { return nil, ErrorPaddingNotFound } if (length % n) != 0 { return nil, ErrorPaddingNotAMultiple } padding := int(buf[length-1]) if padding > n { return nil, ErrorPaddingTooLong } if padding == 0 { return nil, ErrorPaddingTooShort } for i := range padding { if buf[length-1-i] != byte(padding) { return nil, ErrorPaddingNotAllTheSame } } return buf[:length-padding], nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/crypt/pkcs7/pkcs7_test.go
backend/crypt/pkcs7/pkcs7_test.go
package pkcs7 import ( "fmt" "testing" "github.com/stretchr/testify/assert" ) func TestPad(t *testing.T) { for _, test := range []struct { n int in string expected string }{ {8, "", "\x08\x08\x08\x08\x08\x08\x08\x08"}, {8, "1", "1\x07\x07\x07\x07\x07\x07\x07"}, {8, "12", "12\x06\x06\x06\x06\x06\x06"}, {8, "123", "123\x05\x05\x05\x05\x05"}, {8, "1234", "1234\x04\x04\x04\x04"}, {8, "12345", "12345\x03\x03\x03"}, {8, "123456", "123456\x02\x02"}, {8, "1234567", "1234567\x01"}, {8, "abcdefgh", "abcdefgh\x08\x08\x08\x08\x08\x08\x08\x08"}, {8, "abcdefgh1", "abcdefgh1\x07\x07\x07\x07\x07\x07\x07"}, {8, "abcdefgh12", "abcdefgh12\x06\x06\x06\x06\x06\x06"}, {8, "abcdefgh123", "abcdefgh123\x05\x05\x05\x05\x05"}, {8, "abcdefgh1234", "abcdefgh1234\x04\x04\x04\x04"}, {8, "abcdefgh12345", "abcdefgh12345\x03\x03\x03"}, {8, "abcdefgh123456", "abcdefgh123456\x02\x02"}, {8, "abcdefgh1234567", "abcdefgh1234567\x01"}, {8, "abcdefgh12345678", "abcdefgh12345678\x08\x08\x08\x08\x08\x08\x08\x08"}, {16, "", "\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10"}, {16, "a", "a\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f"}, } { actual := Pad(test.n, []byte(test.in)) assert.Equal(t, test.expected, string(actual), fmt.Sprintf("Pad %d %q", test.n, test.in)) recovered, err := Unpad(test.n, actual) assert.NoError(t, err) assert.Equal(t, []byte(test.in), recovered, fmt.Sprintf("Unpad %d %q", test.n, test.in)) } assert.Panics(t, func() { Pad(1, []byte("")) }, "bad multiple") assert.Panics(t, func() { Pad(256, []byte("")) }, "bad multiple") } func TestUnpad(t *testing.T) { // We've tested the OK decoding in TestPad, now test the error cases for _, test := range []struct { n int in string err error }{ {8, "", ErrorPaddingNotFound}, {8, "1", ErrorPaddingNotAMultiple}, {8, "12", ErrorPaddingNotAMultiple}, {8, "123", ErrorPaddingNotAMultiple}, {8, "1234", ErrorPaddingNotAMultiple}, {8, "12345", ErrorPaddingNotAMultiple}, {8, "123456", ErrorPaddingNotAMultiple}, {8, "1234567", 
ErrorPaddingNotAMultiple}, {8, "1234567\xFF", ErrorPaddingTooLong}, {8, "1234567\x09", ErrorPaddingTooLong}, {8, "1234567\x00", ErrorPaddingTooShort}, {8, "123456\x01\x02", ErrorPaddingNotAllTheSame}, {8, "\x07\x08\x08\x08\x08\x08\x08\x08", ErrorPaddingNotAllTheSame}, } { result, actualErr := Unpad(test.n, []byte(test.in)) assert.Equal(t, test.err, actualErr, fmt.Sprintf("Unpad %d %q", test.n, test.in)) assert.Equal(t, result, []byte(nil)) } assert.Panics(t, func() { _, _ = Unpad(1, []byte("")) }, "bad multiple") assert.Panics(t, func() { _, _ = Unpad(256, []byte("")) }, "bad multiple") }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/fichier/fichier.go
backend/fichier/fichier.go
// Package fichier provides an interface to the 1Fichier storage system. package fichier import ( "context" "errors" "fmt" "io" "net/http" "strconv" "strings" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/rest" ) const ( rootID = "0" apiBaseURL = "https://api.1fichier.com/v1" minSleep = 400 * time.Millisecond // api is extremely rate limited now maxSleep = 5 * time.Second decayConstant = 2 // bigger for slower decay, exponential attackConstant = 0 // start with max sleep ) func init() { fs.Register(&fs.RegInfo{ Name: "fichier", Description: "1Fichier", NewFs: NewFs, Options: []fs.Option{{ Help: "Your API Key, get it from https://1fichier.com/console/params.pl.", Name: "api_key", Sensitive: true, }, { Help: "If you want to download a shared folder, add this parameter.", Name: "shared_folder", Advanced: true, }, { Help: "If you want to download a shared file that is password protected, add this parameter.", Name: "file_password", Advanced: true, IsPassword: true, }, { Help: "If you want to list the files in a shared folder that is password protected, add this parameter.", Name: "folder_password", Advanced: true, IsPassword: true, }, { Help: "Set if you wish to use CDN download links.", Name: "cdn", Default: false, Advanced: true, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, // Characters that need escaping // // '\\': '\', // FULLWIDTH REVERSE SOLIDUS // '<': '<', // FULLWIDTH LESS-THAN SIGN // '>': '>', // FULLWIDTH GREATER-THAN SIGN // '"': '"', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved // '\'': ''', // FULLWIDTH APOSTROPHE // '$': '$', // FULLWIDTH DOLLAR SIGN 
// '`': '`', // FULLWIDTH GRAVE ACCENT // // Leading space and trailing space Default: (encoder.Display | encoder.EncodeBackSlash | encoder.EncodeSingleQuote | encoder.EncodeBackQuote | encoder.EncodeDoubleQuote | encoder.EncodeLtGt | encoder.EncodeDollar | encoder.EncodeLeftSpace | encoder.EncodeRightSpace | encoder.EncodeInvalidUtf8), }}, }) } // Options defines the configuration for this backend type Options struct { APIKey string `config:"api_key"` SharedFolder string `config:"shared_folder"` FilePassword string `config:"file_password"` FolderPassword string `config:"folder_password"` CDN bool `config:"cdn"` Enc encoder.MultiEncoder `config:"encoding"` } // Fs is the interface a cloud storage system must provide type Fs struct { root string name string features *fs.Features opt Options dirCache *dircache.DirCache baseClient *http.Client pacer *fs.Pacer rest *rest.Client } // FindLeaf finds a directory of name leaf in the folder with ID pathID func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { folderID, err := strconv.Atoi(pathID) if err != nil { return "", false, err } folders, err := f.listFolders(ctx, folderID) if err != nil { return "", false, err } for _, folder := range folders.SubFolders { if folder.Name == leaf { pathIDOut := strconv.Itoa(folder.ID) return pathIDOut, true, nil } } return "", false, nil } // CreateDir makes a directory with pathID as parent and name leaf func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { folderID, err := strconv.Atoi(pathID) if err != nil { return "", err } resp, err := f.makeFolder(ctx, leaf, folderID) if err != nil { return "", err } return strconv.Itoa(resp.FolderID), err } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String returns a description of the FS func (f *Fs) String() string { 
return fmt.Sprintf("1Fichier root '%s'", f.root) } // Precision of the ModTimes in this Fs func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } // Hashes returns the supported hash types of the filesystem func (f *Fs) Hashes() hash.Set { return hash.Set(hash.Whirlpool) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // NewFs makes a new Fs object from the path // // The path is of the form remote:path // // Remotes are looked up in the config file. If the remote isn't // found then NotFoundInConfigFile will be returned. // // On Windows avoid single character remote names as they can be mixed // up with drive letters. func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) { opt := new(Options) err := configstruct.Set(config, opt) if err != nil { return nil, err } // If using a Shared Folder override root if opt.SharedFolder != "" { root = "" } //workaround for wonky parser root = strings.Trim(root, "/") f := &Fs{ name: name, root: root, opt: *opt, pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))), baseClient: &http.Client{}, } f.features = (&fs.Features{ DuplicateFiles: true, CanHaveEmptyDirectories: true, ReadMimeType: true, }).Fill(ctx, f) client := fshttp.NewClient(ctx) f.rest = rest.NewClient(client).SetRoot(apiBaseURL) f.rest.SetHeader("Authorization", "Bearer "+f.opt.APIKey) f.dirCache = dircache.New(root, rootID, f) // Find the current root err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) tempF := *f tempF.dirCache = dircache.New(newRoot, rootID, &tempF) tempF.root = newRoot // Make new Fs which is the parent err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } _, err := tempF.NewObject(ctx, 
remote) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f return f, nil } return nil, err } f.features.Fill(ctx, &tempF) // XXX: update the old f here instead of returning tempF, since // `features` were already filled with functions having *f as a receiver. // See https://github.com/rclone/rclone/issues/2182 f.dirCache = tempF.dirCache f.root = tempF.root // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { if f.opt.SharedFolder != "" { return f.listSharedFiles(ctx, f.opt.SharedFolder) } dirContent, err := f.listDir(ctx, dir) if err != nil { return nil, err } return dirContent, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound } return nil, err } folderID, err := strconv.Atoi(directoryID) if err != nil { return nil, err } files, err := f.listFiles(ctx, folderID) if err != nil { return nil, err } for _, file := range files.Items { if file.Filename == leaf { path, ok := f.dirCache.GetInv(directoryID) if !ok { return nil, errors.New("cannot find dir in dircache") } return f.newObjectFromFile(ctx, path, file), nil } } return nil, fs.ErrorObjectNotFound } // Put in to the remote path with the modTime given of the given size // // When called from outside an Fs by rclone, src.Size() will always be >= 0. 
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either // return an error or upload it properly (rather than e.g. calling panic). // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { existingObj, err := f.NewObject(ctx, src.Remote()) switch err { case nil: return existingObj, existingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // Not found so create it return f.PutUnchecked(ctx, in, src, options...) default: return nil, err } } // putUnchecked uploads the object with the given name and size // // This will create a duplicate if we upload a new file without // checking to see if there is one already - use Put() for that. func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) { if size > int64(300e9) { return nil, errors.New("File too big, can't upload") } else if size == 0 { return nil, fs.ErrorCantUploadEmptyFiles } nodeResponse, err := f.getUploadNode(ctx) if err != nil { return nil, err } leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true) if err != nil { return nil, err } _, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL, options...) 
if err != nil { return nil, err } fileUploadResponse, err := f.endUpload(ctx, nodeResponse.ID, nodeResponse.URL) if err != nil { return nil, err } if len(fileUploadResponse.Links) == 0 { return nil, errors.New("upload response not found") } else if len(fileUploadResponse.Links) > 1 { fs.Debugf(remote, "Multiple upload responses found, using the first") } link := fileUploadResponse.Links[0] fileSize, err := strconv.ParseInt(link.Size, 10, 64) if err != nil { return nil, err } return &Object{ fs: f, remote: remote, file: File{ CDN: 0, Checksum: link.Whirlpool, ContentType: "", Date: time.Now().Format("2006-01-02 15:04:05"), Filename: link.Filename, Pass: 0, Size: fileSize, URL: link.Download, }, }, nil } // PutUnchecked uploads the object // // This will create a duplicate if we upload a new file without // checking to see if there is one already - use Put() for that. func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...) } // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, err := f.dirCache.FindDir(ctx, dir, true) return err } // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return err } folderID, err := strconv.Atoi(directoryID) if err != nil { return err } _, err = f.removeFolder(ctx, dir, folderID) if err != nil { return err } f.dirCache.FlushDir(dir) return nil } // Move src to this remote using server side move operations. 
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } srcFs := srcObj.fs // Find current directory ID srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false) if err != nil { return nil, err } // Create temporary object dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote) if err != nil { return nil, err } // If it is in the correct directory, just rename it var url string if srcDirectoryID == dstDirectoryID { // No rename needed if srcLeaf == dstLeaf { return src, nil } resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf) if err != nil { return nil, fmt.Errorf("couldn't rename file: %w", err) } if resp.Status != "OK" { return nil, fmt.Errorf("couldn't rename file: %s", resp.Message) } url = resp.URLs[0].URL } else { dstFolderID, err := strconv.Atoi(dstDirectoryID) if err != nil { return nil, err } rename := dstLeaf // No rename needed if srcLeaf == dstLeaf { rename = "" } resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename) if err != nil { return nil, fmt.Errorf("couldn't move file: %w", err) } if resp.Status != "OK" { return nil, fmt.Errorf("couldn't move file: %s", resp.Message) } url = resp.URLs[0] } file, err := f.readFileInfo(ctx, url) if err != nil { return nil, errors.New("couldn't read file data") } dstObj.setMetaData(*file) return dstObj, nil } // DirMove moves src, srcRemote to this remote at dstRemote // using server-side move operations. // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove. // // If destination exists then return fs.ErrorDirExists. // // This is complicated by the fact that we can't use moveDir to move // to a different directory AND rename at the same time as it can // overwrite files in the source directory. 
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) if err != nil { return err } srcIDnumeric, err := strconv.Atoi(srcID) if err != nil { return err } dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID) if err != nil { return err } var resp *MoveDirResponse resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric) if err != nil { return fmt.Errorf("couldn't rename leaf: %w", err) } if resp.Status != "OK" { return fmt.Errorf("couldn't rename leaf: %s", resp.Message) } srcFs.dirCache.FlushDir(srcRemote) return nil } // Copy src to this remote using server side move operations. func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } // Create temporary object dstObj, leaf, directoryID, err := f.createObject(ctx, remote) if err != nil { return nil, err } folderID, err := strconv.Atoi(directoryID) if err != nil { return nil, err } resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf) if err != nil { return nil, fmt.Errorf("couldn't move file: %w", err) } if resp.Status != "OK" { return nil, fmt.Errorf("couldn't move file: %s", resp.Message) } file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL) if err != nil { return nil, errors.New("couldn't read file data") } dstObj.setMetaData(*file) return dstObj, nil } // About gets quota information func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { opts := rest.Opts{ Method: "POST", Path: "/user/info.cgi", ContentType: "application/json", } var accountInfo AccountInfo var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = 
f.rest.CallJSON(ctx, &opts, nil, &accountInfo) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("failed to read user info: %w", err) } // FIXME max upload size would be useful to use in Update usage = &fs.Usage{ Used: fs.NewUsageValue(accountInfo.ColdStorage), // bytes in use Total: fs.NewUsageValue(accountInfo.AvailableColdStorage), // bytes total Free: fs.NewUsageValue(accountInfo.AvailableColdStorage - accountInfo.ColdStorage), // bytes free } return usage, nil } // PublicLink adds a "readable by anyone with link" permission on the given file or folder. func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) { o, err := f.NewObject(ctx, remote) if err != nil { return "", err } return o.(*Object).file.URL, nil } // Check the interfaces are satisfied var ( _ fs.Fs = (*Fs)(nil) _ fs.Mover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil) _ fs.Copier = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil) _ fs.PutUncheckeder = (*Fs)(nil) _ dircache.DirCacher = (*Fs)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/fichier/fichier_test.go
backend/fichier/fichier_test.go
// Test 1Fichier filesystem interface package fichier import ( "testing" "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestFichier:", }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/fichier/api.go
backend/fichier/api.go
package fichier import ( "context" "errors" "fmt" "io" "net/http" "net/url" "regexp" "strconv" "strings" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/lib/rest" ) // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 429, // Too Many Requests. 403, // Forbidden (may happen when request limit is exceeded) 500, // Internal Server Error 502, // Bad Gateway 503, // Service Unavailable 504, // Gateway Timeout 509, // Bandwidth Limit Exceeded } var errorRegex = regexp.MustCompile(`#(\d{1,3})`) func parseFichierError(err error) int { matches := errorRegex.FindStringSubmatch(err.Error()) if len(matches) == 0 { return 0 } code, err := strconv.Atoi(matches[1]) if err != nil { fs.Debugf(nil, "failed parsing fichier error: %v", err) return 0 } return code } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } // 1Fichier uses HTTP error code 403 (Forbidden) for all kinds of errors with // responses looking like this: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}" // // We attempt to parse the actual 1Fichier error code from this body and handle it accordingly // Most importantly #374 (Flood detected: IP locked) which the integration tests provoke // The list below is far from complete and should be expanded if we see any more error codes. if err != nil { switch parseFichierError(err) { case 93: return false, err // No such user case 186: return false, err // IP blocked? 
case 374, 412: // Flood detected seems to be #412 now fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err) time.Sleep(30 * time.Second) default: } } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, leaf string, directoryID string, err error) { // Create the directory for the object if it doesn't exist leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true) if err != nil { return } // Temporary Object under construction o = &Object{ fs: f, remote: remote, } return o, leaf, directoryID, nil } func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) { request := FileInfoRequest{ URL: url, } opts := rest.Opts{ Method: "POST", Path: "/file/info.cgi", } var file File err := f.pacer.Call(func() (bool, error) { resp, err := f.rest.CallJSON(ctx, &opts, &request, &file) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't read file info: %w", err) } return &file, err } // maybe do some actual validation later if necessary func validToken(token *GetTokenResponse) bool { return token.Status == "OK" } func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) { request := DownloadRequest{ URL: url, Single: 1, Pass: f.opt.FilePassword, } if f.opt.CDN { request.CDN = 1 } opts := rest.Opts{ Method: "POST", Path: "/download/get_token.cgi", } var token GetTokenResponse err := f.pacer.Call(func() (bool, error) { resp, err := f.rest.CallJSON(ctx, &opts, &request, &token) doretry, err := shouldRetry(ctx, resp, err) return doretry || !validToken(&token), err }) if err != nil { return nil, fmt.Errorf("couldn't list files: %w", err) } return &token, nil } func fileFromSharedFile(file *SharedFile) File { return File{ URL: file.Link, Filename: file.Filename, Size: file.Size, } } func (f *Fs) listSharedFiles(ctx 
context.Context, id string) (entries fs.DirEntries, err error) { opts := rest.Opts{ Method: "GET", RootURL: "https://1fichier.com/dir/", Path: id, Parameters: map[string][]string{"json": {"1"}}, ContentType: "application/x-www-form-urlencoded", } if f.opt.FolderPassword != "" { opts.Method = "POST" opts.Parameters = nil opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword)) } var sharedFiles SharedFolderResponse err = f.pacer.Call(func() (bool, error) { resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't list files: %w", err) } entries = make([]fs.DirEntry, len(sharedFiles)) for i, sharedFile := range sharedFiles { entries[i] = f.newObjectFromFile(ctx, "", fileFromSharedFile(&sharedFile)) } return entries, nil } func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesList, err error) { // fs.Debugf(f, "Requesting files for dir `%s`", directoryID) request := ListFilesRequest{ FolderID: directoryID, } opts := rest.Opts{ Method: "POST", Path: "/file/ls.cgi", } filesList = &FilesList{} err = f.pacer.Call(func() (bool, error) { resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't list files: %w", err) } for i := range filesList.Items { item := &filesList.Items[i] item.Filename = f.opt.Enc.ToStandardName(item.Filename) } return filesList, nil } func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *FoldersList, err error) { // fs.Debugf(f, "Requesting folders for id `%s`", directoryID) request := ListFolderRequest{ FolderID: directoryID, } opts := rest.Opts{ Method: "POST", Path: "/folder/ls.cgi", } foldersList = &FoldersList{} err = f.pacer.Call(func() (bool, error) { resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, 
fmt.Errorf("couldn't list folders: %w", err) } foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name) for i := range foldersList.SubFolders { folder := &foldersList.SubFolders[i] folder.Name = f.opt.Enc.ToStandardName(folder.Name) } // fs.Debugf(f, "Got FoldersList for id `%s`", directoryID) return foldersList, err } func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) { directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } folderID, err := strconv.Atoi(directoryID) if err != nil { return nil, err } files, err := f.listFiles(ctx, folderID) if err != nil { return nil, err } folders, err := f.listFolders(ctx, folderID) if err != nil { return nil, err } entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders)) for i, item := range files.Items { entries[i] = f.newObjectFromFile(ctx, dir, item) } for i, folder := range folders.SubFolders { createDate, err := time.Parse("2006-01-02 15:04:05", folder.CreateDate) if err != nil { return nil, err } fullPath := getRemote(dir, folder.Name) folderID := strconv.Itoa(folder.ID) entries[len(files.Items)+i] = fs.NewDir(fullPath, createDate).SetID(folderID) // fs.Debugf(f, "Put Path `%s` for id `%d` into dircache", fullPath, folder.ID) f.dirCache.Put(fullPath, folderID) } return entries, nil } func (f *Fs) newObjectFromFile(ctx context.Context, dir string, item File) *Object { return &Object{ fs: f, remote: getRemote(dir, item.Filename), file: item, } } func getRemote(dir, fileName string) string { if dir == "" { return fileName } return dir + "/" + fileName } func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) { name := f.opt.Enc.FromStandardName(leaf) // fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID) request := MakeFolderRequest{ FolderID: folderID, Name: name, } opts := rest.Opts{ Method: "POST", Path: "/folder/mkdir.cgi", } response = &MakeFolderResponse{} 
err = f.pacer.Call(func() (bool, error) { resp, err := f.rest.CallJSON(ctx, &opts, &request, response) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't create folder: %w", err) } // fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID) return response, err } func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (response *GenericOKResponse, err error) { // fs.Debugf(f, "Removing folder with id `%s`", directoryID) request := &RemoveFolderRequest{ FolderID: folderID, } opts := rest.Opts{ Method: "POST", Path: "/folder/rm.cgi", } response = &GenericOKResponse{} var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = f.rest.CallJSON(ctx, &opts, request, response) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't remove folder: %w", err) } if response.Status != "OK" { return nil, fmt.Errorf("can't remove folder: %s", response.Message) } // fs.Debugf(f, "Removed Folder with id `%s`", directoryID) return response, nil } func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKResponse, err error) { request := &RemoveFileRequest{ Files: []RmFile{ {url}, }, } opts := rest.Opts{ Method: "POST", Path: "/file/rm.cgi", } response = &GenericOKResponse{} err = f.pacer.Call(func() (bool, error) { resp, err := f.rest.CallJSON(ctx, &opts, request, response) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't remove file: %w", err) } // fs.Debugf(f, "Removed file with url `%s`", url) return response, nil } func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename string) (response *MoveFileResponse, err error) { request := &MoveFileRequest{ URLs: []string{url}, FolderID: folderID, Rename: rename, } opts := rest.Opts{ Method: "POST", Path: "/file/mv.cgi", } response = &MoveFileResponse{} err = f.pacer.Call(func() (bool, error) { resp, err := f.rest.CallJSON(ctx, &opts, request, 
response) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't copy file: %w", err) } return response, nil } func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) { request := &MoveDirRequest{ FolderID: folderID, DestinationFolderID: destinationFolderID, Rename: newLeaf, // DestinationUser: destinationUser, } opts := rest.Opts{ Method: "POST", Path: "/folder/mv.cgi", } response = &MoveDirResponse{} err = f.pacer.Call(func() (bool, error) { resp, err := f.rest.CallJSON(ctx, &opts, request, response) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't move dir: %w", err) } return response, nil } func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) { request := &CopyFileRequest{ URLs: []string{url}, FolderID: folderID, Rename: rename, } opts := rest.Opts{ Method: "POST", Path: "/file/cp.cgi", } response = &CopyFileResponse{} err = f.pacer.Call(func() (bool, error) { resp, err := f.rest.CallJSON(ctx, &opts, request, response) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't copy file: %w", err) } return response, nil } func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) { request := &RenameFileRequest{ URLs: []RenameFileURL{ { URL: url, Filename: newName, }, }, } opts := rest.Opts{ Method: "POST", Path: "/file/rename.cgi", } response = &RenameFileResponse{} err = f.pacer.Call(func() (bool, error) { resp, err := f.rest.CallJSON(ctx, &opts, request, response) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't rename file: %w", err) } return response, nil } func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) { // fs.Debugf(f, "Requesting Upload node") opts := rest.Opts{ Method: 
"GET", ContentType: "application/json", // 1Fichier API is bad Path: "/upload/get_upload_server.cgi", } response = &GetUploadNodeResponse{} err = f.pacer.Call(func() (bool, error) { resp, err := f.rest.CallJSON(ctx, &opts, nil, response) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("didn't get an upload node: %w", err) } // fs.Debugf(f, "Got Upload node") return response, err } func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string, options ...fs.OpenOption) (response *http.Response, err error) { // fs.Debugf(f, "Uploading File `%s`", fileName) fileName = f.opt.Enc.FromStandardName(fileName) if len(uploadID) > 10 || !isAlphaNumeric(uploadID) { return nil, errors.New("invalid UploadID") } opts := rest.Opts{ Method: "POST", Path: "/upload.cgi", Parameters: map[string][]string{ "id": {uploadID}, }, NoResponse: true, Body: in, ContentLength: &size, Options: options, MultipartContentName: "file[]", MultipartFileName: fileName, MultipartParams: map[string][]string{ "did": {folderID}, }, } if node != "" { opts.RootURL = "https://" + node } err = f.pacer.CallNoRetry(func() (bool, error) { resp, err := f.rest.CallJSON(ctx, &opts, nil, nil) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't upload file: %w", err) } // fs.Debugf(f, "Uploaded File `%s`", fileName) return response, err } func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) { // fs.Debugf(f, "Ending File Upload `%s`", uploadID) if len(uploadID) > 10 || !isAlphaNumeric(uploadID) { return nil, errors.New("invalid UploadID") } opts := rest.Opts{ Method: "GET", Path: "/end.pl", RootURL: "https://" + nodeurl, Parameters: map[string][]string{ "xid": {uploadID}, }, ExtraHeaders: map[string]string{ "JSON": "1", }, } response = &EndFileUploadResponse{} err = f.pacer.Call(func() (bool, error) { resp, err := f.rest.CallJSON(ctx, 
&opts, nil, response) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, fmt.Errorf("couldn't finish file upload: %w", err) } return response, err }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/fichier/object.go
backend/fichier/object.go
package fichier import ( "context" "errors" "fmt" "io" "net/http" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/rest" ) // Object is a filesystem like object provided by an Fs type Object struct { fs *Fs remote string file File } // String returns a description of the Object func (o *Object) String() string { return o.file.Filename } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // ModTime returns the modification date of the file // It should return a best guess if one isn't available func (o *Object) ModTime(ctx context.Context) time.Time { modTime, err := time.Parse("2006-01-02 15:04:05", o.file.Date) if err != nil { return time.Now() } return modTime } // Size returns the size of the file func (o *Object) Size() int64 { return o.file.Size } // Fs returns read only access to the Fs that this object is part of func (o *Object) Fs() fs.Info { return o.fs } // Hash returns the selected checksum of the file // If no checksum is available it returns "" func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.Whirlpool { return "", hash.ErrUnsupported } return o.file.Checksum, nil } // Storable says whether this object can be stored func (o *Object) Storable() bool { return true } // SetModTime sets the metadata on the object to set the modification date func (o *Object) SetModTime(context.Context, time.Time) error { return fs.ErrorCantSetModTime //return errors.New("setting modtime is not supported for 1fichier remotes") } func (o *Object) setMetaData(file File) { o.file = file } // Open opens the file for read. 
Call Close() on the returned io.ReadCloser func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { fs.FixRangeOption(options, o.file.Size) downloadToken, err := o.fs.getDownloadToken(ctx, o.file.URL) if err != nil { return nil, err } var resp *http.Response opts := rest.Opts{ Method: "GET", RootURL: downloadToken.URL, Options: options, } err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.rest.Call(ctx, &opts) return shouldRetry(ctx, resp, err) }) if err != nil { return nil, err } return resp.Body, err } // Update in to the object with the modTime given of the given size // // When called from outside an Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either // return an error or update the object properly (rather than e.g. calling panic). func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { if src.Size() < 0 { return errors.New("refusing to update with unknown size") } // upload with new size but old name info, err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...) 
if err != nil { return err } // Delete duplicate after successful upload err = o.Remove(ctx) if err != nil { return fmt.Errorf("failed to remove old version: %w", err) } // Replace guts of old object with new one *o = *info.(*Object) return nil } // Remove removes this object func (o *Object) Remove(ctx context.Context) error { // fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL) _, err := o.fs.deleteFile(ctx, o.file.URL) if err != nil { return err } return nil } // MimeType of an Object if known, "" otherwise func (o *Object) MimeType(ctx context.Context) string { return o.file.ContentType } // ID returns the ID of the Object if known, or "" if not func (o *Object) ID() string { return o.file.URL } // Check the interfaces are satisfied var ( _ fs.Object = (*Object)(nil) _ fs.MimeTyper = (*Object)(nil) _ fs.IDer = (*Object)(nil) )
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/backend/fichier/structs.go
backend/fichier/structs.go
package fichier // FileInfoRequest is the request structure of the corresponding request type FileInfoRequest struct { URL string `json:"url"` } // ListFolderRequest is the request structure of the corresponding request type ListFolderRequest struct { FolderID int `json:"folder_id"` } // ListFilesRequest is the request structure of the corresponding request type ListFilesRequest struct { FolderID int `json:"folder_id"` } // DownloadRequest is the request structure of the corresponding request type DownloadRequest struct { URL string `json:"url"` Single int `json:"single"` Pass string `json:"pass,omitempty"` CDN int `json:"cdn,omitempty"` } // RemoveFolderRequest is the request structure of the corresponding request type RemoveFolderRequest struct { FolderID int `json:"folder_id"` } // RemoveFileRequest is the request structure of the corresponding request type RemoveFileRequest struct { Files []RmFile `json:"files"` } // RmFile is the request structure of the corresponding request type RmFile struct { URL string `json:"url"` } // GenericOKResponse is the response structure of the corresponding request type GenericOKResponse struct { Status string `json:"status"` Message string `json:"message"` } // MakeFolderRequest is the request structure of the corresponding request type MakeFolderRequest struct { Name string `json:"name"` FolderID int `json:"folder_id"` } // MakeFolderResponse is the response structure of the corresponding request type MakeFolderResponse struct { Name string `json:"name"` FolderID int `json:"folder_id"` } // MoveFileRequest is the request structure of the corresponding request type MoveFileRequest struct { URLs []string `json:"urls"` FolderID int `json:"destination_folder_id"` Rename string `json:"rename,omitempty"` } // MoveFileResponse is the response structure of the corresponding request type MoveFileResponse struct { Status string `json:"status"` Message string `json:"message"` URLs []string `json:"urls"` } // MoveDirRequest is the request 
structure of the corresponding request type MoveDirRequest struct { FolderID int `json:"folder_id"` DestinationFolderID int `json:"destination_folder_id,omitempty"` DestinationUser string `json:"destination_user"` Rename string `json:"rename,omitempty"` } // MoveDirResponse is the response structure of the corresponding request type MoveDirResponse struct { Status string `json:"status"` Message string `json:"message"` OldName string `json:"old_name"` NewName string `json:"new_name"` } // CopyFileRequest is the request structure of the corresponding request type CopyFileRequest struct { URLs []string `json:"urls"` FolderID int `json:"folder_id"` Rename string `json:"rename,omitempty"` } // CopyFileResponse is the response structure of the corresponding request type CopyFileResponse struct { Status string `json:"status"` Message string `json:"message"` Copied int `json:"copied"` URLs []FileCopy `json:"urls"` } // FileCopy is used in the CopyFileResponse type FileCopy struct { FromURL string `json:"from_url"` ToURL string `json:"to_url"` } // RenameFileURL is the data structure to rename a single file type RenameFileURL struct { URL string `json:"url"` Filename string `json:"filename"` } // RenameFileRequest is the request structure of the corresponding request type RenameFileRequest struct { URLs []RenameFileURL `json:"urls"` Pretty int `json:"pretty"` } // RenameFileResponse is the response structure of the corresponding request type RenameFileResponse struct { Status string `json:"status"` Message string `json:"message"` Renamed int `json:"renamed"` URLs []struct { URL string `json:"url"` OldFilename string `json:"old_filename"` NewFilename string `json:"new_filename"` } `json:"urls"` } // GetUploadNodeResponse is the response structure of the corresponding request type GetUploadNodeResponse struct { ID string `json:"id"` URL string `json:"url"` } // GetTokenResponse is the response structure of the corresponding request type GetTokenResponse struct { URL string 
`json:"url"` Status string `json:"Status"` Message string `json:"Message"` } // SharedFolderResponse is the response structure of the corresponding request type SharedFolderResponse []SharedFile // SharedFile is the structure how 1Fichier returns a shared File type SharedFile struct { Filename string `json:"filename"` Link string `json:"link"` Size int64 `json:"size"` } // EndFileUploadResponse is the response structure of the corresponding request type EndFileUploadResponse struct { Incoming int `json:"incoming"` Links []struct { Download string `json:"download"` Filename string `json:"filename"` Remove string `json:"remove"` Size string `json:"size"` Whirlpool string `json:"whirlpool"` } `json:"links"` } // File is the structure how 1Fichier returns a File type File struct { CDN int `json:"cdn"` Checksum string `json:"checksum"` ContentType string `json:"content-type"` Date string `json:"date"` Filename string `json:"filename"` Pass int `json:"pass"` Size int64 `json:"size"` URL string `json:"url"` } // FilesList is the structure how 1Fichier returns a list of files type FilesList struct { Items []File `json:"items"` Status string `json:"Status"` } // Folder is the structure how 1Fichier returns a Folder type Folder struct { CreateDate string `json:"create_date"` ID int `json:"id"` Name string `json:"name"` Pass int `json:"pass"` } // FoldersList is the structure how 1Fichier returns a list of Folders type FoldersList struct { FolderID int `json:"folder_id"` Name string `json:"name"` Status string `json:"Status"` SubFolders []Folder `json:"sub_folders"` } // AccountInfo is the structure how 1Fichier returns user info type AccountInfo struct { StatsDate string `json:"stats_date"` MailRM string `json:"mail_rm"` DefaultQuota int64 `json:"default_quota"` UploadForbidden string `json:"upload_forbidden"` PageLimit int `json:"page_limit"` ColdStorage int64 `json:"cold_storage"` Status string `json:"status"` UseCDN string `json:"use_cdn"` AvailableColdStorage int64 
`json:"available_cold_storage"` DefaultPort string `json:"default_port"` DefaultDomain int `json:"default_domain"` Email string `json:"email"` DownloadMenu string `json:"download_menu"` FTPDID int `json:"ftp_did"` DefaultPortFiles string `json:"default_port_files"` FTPReport string `json:"ftp_report"` OverQuota int64 `json:"overquota"` AvailableStorage int64 `json:"available_storage"` CDN string `json:"cdn"` Offer string `json:"offer"` SubscriptionEnd string `json:"subscription_end"` TFA string `json:"2fa"` AllowedColdStorage int64 `json:"allowed_cold_storage"` HotStorage int64 `json:"hot_storage"` DefaultColdStorageQuota int64 `json:"default_cold_storage_quota"` FTPMode string `json:"ftp_mode"` RUReport string `json:"ru_report"` }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false