repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/http/serve/dir_test.go | lib/http/serve/dir_test.go | package serve
import (
"context"
"errors"
"html/template"
"io"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
libhttp "github.com/rclone/rclone/lib/http"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// GetTemplate loads the golden test index template from the serve/http
// testdata directory, failing the test immediately on error.
func GetTemplate(t *testing.T) *template.Template {
	htmlTemplate, err := libhttp.GetTemplate("../../../cmd/serve/http/testdata/golden/testindex.html")
	require.NoError(t, err)
	return htmlTemplate
}
// TestNewDirectory checks NewDirectory fills in DirRemote and Title.
func TestNewDirectory(t *testing.T) {
	d := NewDirectory("z", GetTemplate(t))
	assert.Equal(t, "z", d.DirRemote)
	assert.Equal(t, "Directory listing of /z", d.Title)
}
// TestSetQuery checks SetQuery encodes non-empty query parameters with
// a leading "?" and resets Query to "" for empty parameters.
func TestSetQuery(t *testing.T) {
	d := NewDirectory("z", GetTemplate(t))
	assert.Equal(t, "", d.Query)
	d.SetQuery(url.Values{"potato": []string{"42"}})
	assert.Equal(t, "?potato=42", d.Query)
	d.SetQuery(url.Values{})
	assert.Equal(t, "", d.Query)
}
// TestAddHTMLEntry checks AddHTMLEntry's URL escaping, directory "/"
// suffixing, ZipURL generation for directories, and query propagation.
func TestAddHTMLEntry(t *testing.T) {
	var modtime = time.Now()
	var d = NewDirectory("z", GetTemplate(t))
	d.AddHTMLEntry("", true, 0, modtime)
	d.AddHTMLEntry("dir", true, 0, modtime)
	d.AddHTMLEntry("a/b/c/d.txt", false, 64, modtime)
	d.AddHTMLEntry("a/b/c/colon:colon.txt", false, 64, modtime)
	d.AddHTMLEntry("\"quotes\".txt", false, 64, modtime)
	assert.Equal(t, []DirEntry{
		{remote: "", URL: "/", ZipURL: "/?download=zip", Leaf: "/", IsDir: true, Size: 0, ModTime: modtime},
		{remote: "dir", URL: "dir/", ZipURL: "dir/?download=zip", Leaf: "dir/", IsDir: true, Size: 0, ModTime: modtime},
		{remote: "a/b/c/d.txt", URL: "d.txt", ZipURL: "", Leaf: "d.txt", IsDir: false, Size: 64, ModTime: modtime},
		{remote: "a/b/c/colon:colon.txt", URL: "./colon:colon.txt", ZipURL: "", Leaf: "colon:colon.txt", IsDir: false, Size: 64, ModTime: modtime},
		{remote: "\"quotes\".txt", URL: "%22quotes%22.txt", ZipURL: "", Leaf: "\"quotes\".txt", Size: 64, IsDir: false, ModTime: modtime},
	}, d.Entries)
	// Now test with a query parameter
	d = NewDirectory("z", GetTemplate(t)).SetQuery(url.Values{"potato": []string{"42"}})
	d.AddHTMLEntry("file", false, 64, modtime)
	d.AddHTMLEntry("dir", true, 0, modtime)
	assert.Equal(t, []DirEntry{
		{remote: "file", URL: "file?potato=42", ZipURL: "", Leaf: "file", IsDir: false, Size: 64, ModTime: modtime},
		{remote: "dir", URL: "dir/?potato=42", ZipURL: "dir/?download=zip", Leaf: "dir/", IsDir: true, Size: 0, ModTime: modtime},
	}, d.Entries)
}
// TestAddEntry checks AddEntry's URL escaping, directory "/" suffixing
// and query propagation (AddEntry never sets ZipURL).
func TestAddEntry(t *testing.T) {
	var d = NewDirectory("z", GetTemplate(t))
	d.AddEntry("", true)
	d.AddEntry("dir", true)
	d.AddEntry("a/b/c/d.txt", false)
	d.AddEntry("a/b/c/colon:colon.txt", false)
	d.AddEntry("\"quotes\".txt", false)
	assert.Equal(t, []DirEntry{
		{remote: "", URL: "/", Leaf: "/"},
		{remote: "dir", URL: "dir/", Leaf: "dir/"},
		{remote: "a/b/c/d.txt", URL: "d.txt", Leaf: "d.txt"},
		{remote: "a/b/c/colon:colon.txt", URL: "./colon:colon.txt", Leaf: "colon:colon.txt"},
		{remote: "\"quotes\".txt", URL: "%22quotes%22.txt", Leaf: "\"quotes\".txt"},
	}, d.Entries)
	// Now test with a query parameter
	d = NewDirectory("z", GetTemplate(t)).SetQuery(url.Values{"potato": []string{"42"}})
	d.AddEntry("file", false)
	d.AddEntry("dir", true)
	assert.Equal(t, []DirEntry{
		{remote: "file", URL: "file?potato=42", Leaf: "file"},
		{remote: "dir", URL: "dir/?potato=42", Leaf: "dir/"},
	}, d.Entries)
}
// TestError checks Error writes an HTTP 500 whose body is the text
// followed by ".".
func TestError(t *testing.T) {
	ctx := context.Background()
	w := httptest.NewRecorder()
	err := errors.New("help")
	Error(ctx, "potato", w, "sausage", err)
	resp := w.Result()
	assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
	body, _ := io.ReadAll(resp.Body)
	assert.Equal(t, "sausage.\n", string(body))
}
// TestServe checks Directory.Serve renders the golden template with
// the added entries as links.
func TestServe(t *testing.T) {
	d := NewDirectory("aDirectory", GetTemplate(t))
	d.AddEntry("file", false)
	d.AddEntry("dir", true)
	w := httptest.NewRecorder()
	r := httptest.NewRequest("GET", "http://example.com/aDirectory/", nil)
	d.Serve(w, r)
	resp := w.Result()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	body, _ := io.ReadAll(resp.Body)
	assert.Equal(t, `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing of /aDirectory</title>
</head>
<body>
<h1>Directory listing of /aDirectory</h1>
<a href="file">file</a><br />
<a href="dir/">dir/</a><br />
</body>
</html>
`, string(body))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/http/serve/serve.go | lib/http/serve/serve.go | // Package serve deals with serving objects over HTTP
package serve
import (
"fmt"
"io"
"net/http"
"path"
"strconv"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
)
// Object serves an fs.Object via HEAD or GET
//
// It sets Accept-Ranges, Content-Length, Content-Type, Last-Modified
// and any metadata-derived headers, honours a single Range header
// (responding with 206 Partial Content), and accounts the transfer.
// Other methods get 405 Method Not Allowed.
func Object(w http.ResponseWriter, r *http.Request, o fs.Object) {
	if r.Method != "HEAD" && r.Method != "GET" {
		http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
		return
	}
	// Show that we accept ranges
	w.Header().Set("Accept-Ranges", "bytes")
	// Set content length since we know how long the object is
	// (a negative size means the size is unknown so no header is set)
	if o.Size() >= 0 {
		w.Header().Set("Content-Length", strconv.FormatInt(o.Size(), 10))
	}
	// Set content type
	mimeType := fs.MimeType(r.Context(), o)
	if mimeType == "application/octet-stream" && path.Ext(o.Remote()) == "" {
		// Leave header blank so http server guesses
	} else {
		w.Header().Set("Content-Type", mimeType)
	}
	// Set last modified
	modTime := o.ModTime(r.Context())
	w.Header().Set("Last-Modified", modTime.UTC().Format(http.TimeFormat))
	// Set metadata headers if present
	// (metadata failures are logged but not fatal - serving continues)
	metadata, err := fs.GetMetadata(r.Context(), o)
	if err != nil {
		fs.Debugf(o, "Request get metadata error: %v", err)
	}
	if metadata != nil {
		if metadata["content-disposition"] != "" {
			w.Header().Set("Content-Disposition", metadata["content-disposition"])
		}
		if metadata["cache-control"] != "" {
			w.Header().Set("Cache-Control", metadata["cache-control"])
		}
		if metadata["content-language"] != "" {
			w.Header().Set("Content-Language", metadata["content-language"])
		}
		if metadata["content-encoding"] != "" {
			w.Header().Set("Content-Encoding", metadata["content-encoding"])
		}
	}
	// HEAD stops here - headers only, no body
	if r.Method == "HEAD" {
		return
	}
	// Decode Range request if present
	code := http.StatusOK
	size := o.Size()
	var options []fs.OpenOption
	if rangeRequest := r.Header.Get("Range"); rangeRequest != "" {
		//fs.Debugf(nil, "Range: request %q", rangeRequest)
		option, err := fs.ParseRangeOption(rangeRequest)
		if err != nil {
			fs.Debugf(o, "Get request parse range request error: %v", err)
			http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
			return
		}
		options = append(options, option)
		offset, limit := option.Decode(o.Size())
		end := o.Size() // exclusive
		if limit >= 0 {
			end = offset + limit
		}
		// clamp the range end to the object size
		if end > o.Size() {
			end = o.Size()
		}
		size = end - offset
		// fs.Debugf(nil, "Range: offset=%d, limit=%d, end=%d, size=%d (object size %d)", offset, limit, end, size, o.Size())
		// Content-Range: bytes 0-1023/146515
		w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", offset, end-1, o.Size()))
		// fs.Debugf(nil, "Range: Content-Range: %q", w.Header().Get("Content-Range"))
		code = http.StatusPartialContent
	}
	// Overwrite Content-Length with the (possibly ranged) body size
	w.Header().Set("Content-Length", strconv.FormatInt(size, 10))
	file, err := o.Open(r.Context(), options...)
	if err != nil {
		fs.Debugf(o, "Get request open error: %v", err)
		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
		return
	}
	// account the transfer (err is read by the deferred Done so the
	// transfer records any copy error below)
	tr := accounting.Stats(r.Context()).NewTransfer(o, nil)
	defer func() {
		tr.Done(r.Context(), err)
	}()
	in := tr.Account(r.Context(), file) // account the transfer (no buffering)
	w.WriteHeader(code)
	n, err := io.Copy(w, in)
	if err != nil {
		// headers already sent so we can only log the failure
		fs.Errorf(o, "Didn't finish writing GET request (wrote %d/%d bytes): %v", n, size, err)
		return
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/http/serve/serve_test.go | lib/http/serve/serve_test.go | package serve
import (
"context"
"io"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
)
// TestObjectBadMethod checks that methods other than HEAD/GET are
// rejected with 405 Method Not Allowed.
func TestObjectBadMethod(t *testing.T) {
	w := httptest.NewRecorder()
	r := httptest.NewRequest("BADMETHOD", "http://example.com/aFile", nil)
	o := mockobject.New("aFile")
	Object(w, r, o)
	resp := w.Result()
	assert.Equal(t, http.StatusMethodNotAllowed, resp.StatusCode)
	body, _ := io.ReadAll(resp.Body)
	assert.Equal(t, "Method Not Allowed\n", string(body))
}
// TestObjectHEAD checks HEAD returns headers (including a UTC
// Last-Modified converted from a UTC+4 mod time) but no body.
func TestObjectHEAD(t *testing.T) {
	w := httptest.NewRecorder()
	r := httptest.NewRequest("HEAD", "http://example.com/aFile", nil)
	o := mockobject.New("aFile").WithContent([]byte("hello"), mockobject.SeekModeNone)
	_ = o.SetModTime(context.Background(), time.Date(2023, 9, 20, 12, 11, 15, 0, time.FixedZone("", 4*60*60))) // UTC+4
	Object(w, r, o)
	resp := w.Result()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	assert.Equal(t, "5", resp.Header.Get("Content-Length"))
	assert.Equal(t, "bytes", resp.Header.Get("Accept-Ranges"))
	assert.Equal(t, "Wed, 20 Sep 2023 08:11:15 GMT", resp.Header.Get("Last-Modified"))
	body, _ := io.ReadAll(resp.Body)
	assert.Equal(t, "", string(body))
}
// TestObjectGET checks a plain GET returns the full body, headers and
// a UTC Last-Modified converted from a UTC+2 mod time.
func TestObjectGET(t *testing.T) {
	w := httptest.NewRecorder()
	r := httptest.NewRequest("GET", "http://example.com/aFile", nil)
	o := mockobject.New("aFile").WithContent([]byte("hello"), mockobject.SeekModeNone)
	_ = o.SetModTime(context.Background(), time.Date(2023, 9, 20, 12, 11, 15, 0, time.FixedZone("", 2*60*60))) // UTC+2
	Object(w, r, o)
	resp := w.Result()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	assert.Equal(t, "5", resp.Header.Get("Content-Length"))
	assert.Equal(t, "bytes", resp.Header.Get("Accept-Ranges"))
	assert.Equal(t, "Wed, 20 Sep 2023 10:11:15 GMT", resp.Header.Get("Last-Modified"))
	body, _ := io.ReadAll(resp.Body)
	assert.Equal(t, "hello", string(body))
}
// TestObjectRange checks a Range request gets 206 Partial Content with
// the correct Content-Range, Content-Length and body slice.
func TestObjectRange(t *testing.T) {
	w := httptest.NewRecorder()
	r := httptest.NewRequest("GET", "http://example.com/aFile", nil)
	r.Header.Add("Range", "bytes=3-5")
	o := mockobject.New("aFile").WithContent([]byte("0123456789"), mockobject.SeekModeNone)
	Object(w, r, o)
	resp := w.Result()
	assert.Equal(t, http.StatusPartialContent, resp.StatusCode)
	assert.Equal(t, "3", resp.Header.Get("Content-Length"))
	assert.Equal(t, "bytes", resp.Header.Get("Accept-Ranges"))
	assert.Equal(t, "bytes 3-5/10", resp.Header.Get("Content-Range"))
	body, _ := io.ReadAll(resp.Body)
	assert.Equal(t, "345", string(body))
}
// TestObjectBadRange checks a malformed Range header is rejected with
// 400 Bad Request.
func TestObjectBadRange(t *testing.T) {
	w := httptest.NewRecorder()
	r := httptest.NewRequest("GET", "http://example.com/aFile", nil)
	r.Header.Add("Range", "xxxbytes=3-5")
	o := mockobject.New("aFile").WithContent([]byte("0123456789"), mockobject.SeekModeNone)
	Object(w, r, o)
	resp := w.Result()
	assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
	// Content-Length may or may not survive http.Error clearing headers
	if contentLength := resp.Header.Get("Content-Length"); contentLength != "" {
		assert.Equal(t, "10", contentLength)
	}
	body, _ := io.ReadAll(resp.Body)
	assert.Equal(t, "Bad Request\n", string(body))
}
// TestObjectHEADMetadata checks metadata keys are copied into the
// corresponding HTTP response headers.
func TestObjectHEADMetadata(t *testing.T) {
	w := httptest.NewRecorder()
	r := httptest.NewRequest("HEAD", "http://example.com/aFile", nil)
	m := fs.Metadata{
		"content-disposition": "inline",
		"cache-control":       "no-cache",
		"content-language":    "en",
		"content-encoding":    "gzip",
	}
	o := object.NewMemoryObject("aFile", time.Now(), []byte("")).
		WithMetadata(m).WithMimeType("text/plain; charset=utf-8")
	Object(w, r, o)
	resp := w.Result()
	assert.Equal(t, "text/plain; charset=utf-8", resp.Header.Get("Content-Type"))
	assert.Equal(t, "inline", resp.Header.Get("Content-Disposition"))
	assert.Equal(t, "no-cache", resp.Header.Get("Cache-Control"))
	assert.Equal(t, "en", resp.Header.Get("Content-Language"))
	assert.Equal(t, "gzip", resp.Header.Get("Content-Encoding"))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/http/serve/dir.go | lib/http/serve/dir.go | package serve
import (
"bytes"
"context"
"fmt"
"html/template"
"net/http"
"net/url"
"path"
"sort"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/rest"
)
// DirEntry is a directory entry
type DirEntry struct {
	remote  string    // full remote path of the entry
	URL     string    // escaped link target including any query string
	ZipURL  string    // link to download a directory as zip - empty for files
	Leaf    string    // display name, with trailing "/" for directories
	IsDir   bool      // true if this entry is a directory
	Size    int64     // size in bytes
	ModTime time.Time // modification time
}
// Directory represents a directory to be served by an HTML template
type Directory struct {
	DirRemote    string             // remote path of this directory
	Title        string             // page title
	Name         string             // "/" + DirRemote for display
	ZipURL       string             // query suffix to download this directory as zip
	DisableZip   bool               // if set, templates should not offer zip download
	Entries      []DirEntry         // directory contents
	Query        string             // "?"-prefixed encoded query string or ""
	HTMLTemplate *template.Template // template used by Serve
	Breadcrumb   []Crumb            // breadcrumb trail to this directory
	Sort         string             // sort key requested - see ProcessQueryParams
	Order        string             // sort order requested - "desc" reverses
}
// Crumb is a breadcrumb entry
type Crumb struct {
	Link string // relative link up the tree, e.g. "../../"
	Text string // display text for this path component
}
// NewDirectory makes an empty Directory
//
// The breadcrumb trail is built from the path components of dirRemote:
// each crumb links back up the tree with the appropriate number of
// "../" segments.
func NewDirectory(dirRemote string, htmlTemplate *template.Template) *Directory {
	var breadcrumb []Crumb
	// skip trailing slash
	lpath := "/" + dirRemote
	if lpath[len(lpath)-1] == '/' {
		lpath = lpath[:len(lpath)-1]
	}
	parts := strings.Split(lpath, "/")
	for i := range parts {
		txt := parts[i]
		if i == 0 && parts[i] == "" {
			// the root component displays as "/"
			txt = "/"
		}
		// link up by as many levels as remain below this crumb
		lnk := strings.Repeat("../", len(parts)-i-1)
		breadcrumb = append(breadcrumb, Crumb{Link: lnk, Text: txt})
	}
	d := &Directory{
		DirRemote:    dirRemote,
		Title:        fmt.Sprintf("Directory listing of /%s", dirRemote),
		Name:         fmt.Sprintf("/%s", dirRemote),
		ZipURL:       "?download=zip",
		HTMLTemplate: htmlTemplate,
		Breadcrumb:   breadcrumb,
	}
	return d
}
// SetQuery sets the query parameters for each URL
//
// An empty set of parameters resets Query to the empty string,
// otherwise Query becomes "?" followed by the encoded parameters.
func (d *Directory) SetQuery(queryParams url.Values) *Directory {
	if len(queryParams) == 0 {
		d.Query = ""
	} else {
		d.Query = "?" + queryParams.Encode()
	}
	return d
}
// AddHTMLEntry adds an entry to that directory
//
// Directories get a trailing "/" on their leaf and URL and a ZipURL
// for downloading the directory as a zip; files get an empty ZipURL.
// The current Query string is appended to the entry's URL.
func (d *Directory) AddHTMLEntry(remote string, isDir bool, size int64, modTime time.Time) {
	name := path.Base(remote)
	if name == "." {
		name = ""
	}
	target := name
	zipURL := ""
	if isDir {
		name += "/"
		target += "/"
		zipURL = rest.URLPathEscape(target) + "?download=zip"
	}
	d.Entries = append(d.Entries, DirEntry{
		remote:  remote,
		URL:     rest.URLPathEscape(target) + d.Query,
		ZipURL:  zipURL,
		Leaf:    name,
		IsDir:   isDir,
		Size:    size,
		ModTime: modTime,
	})
}
// AddEntry adds an entry to that directory
//
// Directories get a trailing "/" on their leaf and URL. The current
// Query string is appended to the entry's URL.
func (d *Directory) AddEntry(remote string, isDir bool) {
	name := path.Base(remote)
	if name == "." {
		name = ""
	}
	target := name
	if isDir {
		name += "/"
		target += "/"
	}
	d.Entries = append(d.Entries, DirEntry{
		remote: remote,
		URL:    rest.URLPathEscape(target) + d.Query,
		Leaf:   name,
	})
}
// Error logs the error and if a ResponseWriter is given it writes an http.StatusInternalServerError
//
// what identifies the object in the log message; text is both the log
// prefix and (with "." appended) the HTTP response body. Pass w as nil
// when the response has already been started.
func Error(ctx context.Context, what any, w http.ResponseWriter, text string, err error) {
	// count the error in the stats before logging it
	err = fs.CountError(ctx, err)
	fs.Errorf(what, "%s: %v", text, err)
	if w != nil {
		http.Error(w, text+".", http.StatusInternalServerError)
	}
}
// ProcessQueryParams sorts the directory entries based on the request
// sort/order parameters.
//
// sortParm selects the key: "name", "namedirfirst", "size" or "time",
// defaulting to namedirfirst for anything else. orderParm "desc"
// reverses the order. Returns d for chaining.
func (d *Directory) ProcessQueryParams(sortParm string, orderParm string) *Directory {
	d.Sort = sortParm
	d.Order = orderParm
	var toSort sort.Interface
	switch d.Sort {
	case sortByName:
		toSort = byName(*d)
	case sortByNameDirFirst:
		toSort = byNameDirFirst(*d)
	case sortBySize:
		toSort = bySize(*d)
	case sortByTime:
		toSort = byTime(*d)
	default:
		// unknown or empty sort parameter - use the default ordering
		toSort = byNameDirFirst(*d)
	}
	// toSort is always non-nil here - every switch branch (including
	// default) assigns it, so the previous nil checks were dead code.
	if d.Order == "desc" {
		toSort = sort.Reverse(toSort)
	}
	sort.Sort(toSort)
	return d
}
// Sort adapters - each type wraps a Directory and implements
// sort.Interface over its Entries with a different comparison key.
type byName Directory
type byNameDirFirst Directory
type bySize Directory
type byTime Directory
func (d byName) Len() int      { return len(d.Entries) }
func (d byName) Swap(i, j int) { d.Entries[i], d.Entries[j] = d.Entries[j], d.Entries[i] }

// Less orders entries by case-insensitive leaf name
func (d byName) Less(i, j int) bool {
	return strings.ToLower(d.Entries[i].Leaf) < strings.ToLower(d.Entries[j].Leaf)
}
func (d byNameDirFirst) Len() int      { return len(d.Entries) }
func (d byNameDirFirst) Swap(i, j int) { d.Entries[i], d.Entries[j] = d.Entries[j], d.Entries[i] }

// Less orders directories before files, and case-insensitively by
// leaf name within each group
func (d byNameDirFirst) Less(i, j int) bool {
	// sort by name if both are dir or file
	if d.Entries[i].IsDir == d.Entries[j].IsDir {
		return strings.ToLower(d.Entries[i].Leaf) < strings.ToLower(d.Entries[j].Leaf)
	}
	// sort dir ahead of file
	return d.Entries[i].IsDir
}
func (d bySize) Len() int      { return len(d.Entries) }
func (d bySize) Swap(i, j int) { d.Entries[i], d.Entries[j] = d.Entries[j], d.Entries[i] }

// Less orders entries by size, with all directories first (sorted by
// name among themselves)
func (d bySize) Less(i, j int) bool {
	const directoryOffset = -1 << 31 // = math.MinInt32
	iSize, jSize := d.Entries[i].Size, d.Entries[j].Size
	// directory sizes depend on the file system; to
	// provide a consistent experience, put them up front
	// and sort them by name
	if d.Entries[i].IsDir {
		iSize = directoryOffset
	}
	if d.Entries[j].IsDir {
		jSize = directoryOffset
	}
	if d.Entries[i].IsDir && d.Entries[j].IsDir {
		return strings.ToLower(d.Entries[i].Leaf) < strings.ToLower(d.Entries[j].Leaf)
	}
	return iSize < jSize
}
func (d byTime) Len() int      { return len(d.Entries) }
func (d byTime) Swap(i, j int) { d.Entries[i], d.Entries[j] = d.Entries[j], d.Entries[i] }

// Less orders entries by modification time, oldest first
func (d byTime) Less(i, j int) bool { return d.Entries[i].ModTime.Before(d.Entries[j].ModTime) }
// Accepted values for the sort query parameter - see ProcessQueryParams
const (
	sortByName         = "name"
	sortByNameDirFirst = "namedirfirst"
	sortBySize         = "size"
	sortByTime         = "time"
)
// Serve serves a directory
//
// The listing is rendered into a buffer first so that Content-Length
// can be set and a render failure can still return a 500 rather than
// a half-written page.
func (d *Directory) Serve(w http.ResponseWriter, r *http.Request) {
	// use the request context consistently throughout
	ctx := r.Context()
	// Account the transfer
	tr := accounting.Stats(ctx).NewTransferRemoteSize(d.DirRemote, -1, nil, nil)
	defer tr.Done(ctx, nil)
	fs.Infof(d.DirRemote, "%s: Serving directory", r.RemoteAddr)
	buf := &bytes.Buffer{}
	err := d.HTMLTemplate.Execute(buf, d)
	if err != nil {
		Error(ctx, d.DirRemote, w, "Failed to render template", err)
		return
	}
	w.Header().Set("Content-Length", fmt.Sprintf("%d", buf.Len()))
	_, err = buf.WriteTo(w)
	if err != nil {
		// headers already sent - can only log, so pass a nil writer
		Error(ctx, d.DirRemote, nil, "Failed to drain template buffer", err)
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/errcount/errcount.go | lib/errcount/errcount.go | // Package errcount provides an easy to use error counter which
// returns error count and last error so as to not overwhelm the user
// with errors.
package errcount
import (
"fmt"
"sync"
)
// ErrCount stores the state of the error counter.
type ErrCount struct {
	mu      sync.Mutex // guards lastErr and count
	lastErr error      // most recent non-nil error added
	count   int        // number of non-nil errors added
}
// New makes a new error counter with zero errors recorded.
func New() *ErrCount {
	return &ErrCount{}
}
// Add an error to the error count.
//
// A nil err is ignored.
//
// Thread safe.
func (ec *ErrCount) Add(err error) {
	if err == nil {
		return
	}
	ec.mu.Lock()
	defer ec.mu.Unlock()
	ec.lastErr = err
	ec.count++
}
// Err returns the error summary so far - may be nil
//
// txt is put in front of the error summary
//
// txt: %d errors: last error: %w
//
// or this if only one error
//
// txt: %w
//
// Thread safe.
func (ec *ErrCount) Err(txt string) error {
	ec.mu.Lock()
	defer ec.mu.Unlock()
	switch ec.count {
	case 0:
		return nil
	case 1:
		return fmt.Errorf("%s: %w", txt, ec.lastErr)
	default:
		return fmt.Errorf("%s: %d errors: last error: %w", txt, ec.count, ec.lastErr)
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/errcount/errcount_test.go | lib/errcount/errcount_test.go | package errcount
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
)
// TestErrCount checks the zero-error, one-error and multi-error
// summary formats and that errors.Is unwraps to the last error.
func TestErrCount(t *testing.T) {
	ec := New()
	assert.Equal(t, nil, ec.Err("none"))
	e1 := errors.New("one")
	ec.Add(e1)
	err := ec.Err("stuff")
	assert.True(t, errors.Is(err, e1), err)
	assert.Equal(t, "stuff: one", err.Error())
	e2 := errors.New("two")
	ec.Add(e2)
	err = ec.Err("stuff")
	assert.True(t, errors.Is(err, e2), err)
	assert.Equal(t, "stuff: 2 errors: last error: two", err.Error())
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/newfs_internal_test.go | fs/newfs_internal_test.go | package fs
import (
"context"
"testing"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// When no override/global keys exist, ctx must be returned unchanged.
func TestAddConfigToContext_NoChanges(t *testing.T) {
	ctx := context.Background()
	newCtx, err := addConfigToContext(ctx, "unit-test", configmap.Simple{})
	require.NoError(t, err)
	assert.Equal(t, newCtx, ctx)
}
// A single override.key must create a new ctx, but leave the
// background ctx untouched.
func TestAddConfigToContext_OverrideOnly(t *testing.T) {
	override := configmap.Simple{
		"override.user_agent": "potato",
	}
	ctx := context.Background()
	globalCI := GetConfig(ctx)
	// remember the global value so we can check it is untouched
	original := globalCI.UserAgent
	newCtx, err := addConfigToContext(ctx, "unit-test", override)
	require.NoError(t, err)
	assert.NotEqual(t, newCtx, ctx)
	assert.Equal(t, original, globalCI.UserAgent)
	// the new context carries the overridden value
	ci := GetConfig(newCtx)
	assert.Equal(t, "potato", ci.UserAgent)
}
// A single global.key must create a new ctx and update the
// background/global config.
func TestAddConfigToContext_GlobalOnly(t *testing.T) {
	global := configmap.Simple{
		"global.user_agent": "potato2",
	}
	ctx := context.Background()
	globalCI := GetConfig(ctx)
	original := globalCI.UserAgent
	// restore the global config after the test as it is shared state
	defer func() {
		globalCI.UserAgent = original
	}()
	newCtx, err := addConfigToContext(ctx, "unit-test", global)
	require.NoError(t, err)
	assert.NotEqual(t, newCtx, ctx)
	// global.* keys update the global config in place...
	assert.Equal(t, "potato2", globalCI.UserAgent)
	// ...and are visible through the returned context too
	ci := GetConfig(newCtx)
	assert.Equal(t, "potato2", ci.UserAgent)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/terminalcolormode.go | fs/terminalcolormode.go | package fs
// TerminalColorMode describes how ANSI codes should be handled
type TerminalColorMode = Enum[terminalColorModeChoices]

// TerminalColorMode constants
const (
	TerminalColorModeAuto TerminalColorMode = iota // detect whether the terminal supports color
	TerminalColorModeNever
	TerminalColorModeAlways
)

// terminalColorModeChoices provides the Choices method for the Enum
type terminalColorModeChoices struct{}

// Choices returns the string for each mode, indexed by the constants above
func (terminalColorModeChoices) Choices() []string {
	return []string{
		TerminalColorModeAuto:   "AUTO",
		TerminalColorModeNever:  "NEVER",
		TerminalColorModeAlways: "ALWAYS",
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/registry.go | fs/registry.go | // Filesystem registry and backend options
package fs
import (
"context"
"encoding/json"
"fmt"
"reflect"
"regexp"
"slices"
"sort"
"strings"
"sync"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/lib/errcount"
)
// Registry of filesystems - populated by backends calling Register
var Registry []*RegInfo
// optDescription is a basic description option added to every backend
var optDescription = Option{
	Name:     "description",
	Help:     "Description of the remote.",
	Default:  "",
	Advanced: true,
}
// RegInfo provides information about a filesystem
type RegInfo struct {
	// Name of this fs
	Name string
	// Description of this fs - defaults to Name
	Description string
	// Prefix for command line flags for this fs - defaults to Name if not set
	Prefix string
	// Create a new file system. If root refers to an existing
	// object, then it should return an Fs which points to
	// the parent of that object and ErrorIsFile.
	NewFs func(ctx context.Context, name string, root string, config configmap.Mapper) (Fs, error) `json:"-"`
	// Function to call to help with config - see docs for ConfigIn for more info
	Config func(ctx context.Context, name string, m configmap.Mapper, configIn ConfigIn) (*ConfigOut, error) `json:"-"`
	// Options for the Fs configuration
	Options Options
	// The command help, if any
	CommandHelp []CommandHelp
	// Aliases - other names this backend is known by
	Aliases []string
	// Hide - if set don't show in the configurator
	Hide bool
	// MetadataInfo help about the metadata in use in this backend
	MetadataInfo *MetadataInfo
}
// FileName returns the on disk file name for this backend
//
// This is the backend Name with any spaces removed.
func (ri *RegInfo) FileName() string {
	return strings.ReplaceAll(ri.Name, " ", "")
}
// Options is a slice of configuration Option for a backend
type Options []Option
// Add more options returning a new options slice
//
// slices.Concat is used instead of append so the result never shares
// a backing array with the receiver - a plain append could otherwise
// write into the caller's slice when the receiver has spare capacity.
func (os Options) Add(newOptions Options) Options {
	return slices.Concat(os, newOptions)
}
// AddPrefix adds more options with a prefix returning a new options slice
//
// Each new option gets its Name prefixed with prefix + "_" and its
// Groups set to groups.
func (os Options) AddPrefix(newOptions Options, prefix string, groups string) Options {
	for _, opt := range newOptions {
		// opt is a copy so can modify
		opt.Name = prefix + "_" + opt.Name
		opt.Groups = groups
		os = append(os, opt)
	}
	return os
}
// Set the default values for the options
//
// A nil Default is normalized to the empty string. If a Default
// implements Choices and no Examples were supplied, one exclusive,
// required example is generated per choice.
func (os Options) setValues() {
	for i := range os {
		o := &os[i]
		if o.Default == nil {
			o.Default = ""
		}
		// Create options for Enums
		if do, ok := o.Default.(Choices); ok && len(o.Examples) == 0 {
			o.Exclusive = true
			o.Required = true
			o.Examples = make(OptionExamples, len(do.Choices()))
			for i, choice := range do.Choices() {
				o.Examples[i].Value = choice
			}
		}
	}
}
// Get the Option corresponding to name or return nil if not found
//
// The returned pointer refers to the element in the slice so the
// caller may modify the option in place.
func (os Options) Get(name string) *Option {
	for i := range os {
		if os[i].Name == name {
			return &os[i]
		}
	}
	return nil
}
// SetDefault sets the default for the Option corresponding to name
//
// Writes an ERROR level log if the option is not found
//
// Returns os for chaining.
func (os Options) SetDefault(name string, def any) Options {
	opt := os.Get(name)
	if opt == nil {
		Errorf(nil, "Couldn't find option %q to SetDefault on", name)
	} else {
		opt.Default = def
	}
	return os
}
// Overridden discovers which config items have been overridden in the
// configmap passed in, either by the config string, command line
// flags or environment variables
//
// Only values set at configmap.PriorityNormal or above count as
// overridden.
func (os Options) Overridden(m *configmap.Map) configmap.Simple {
	var overridden = configmap.Simple{}
	for i := range os {
		opt := &os[i]
		value, isSet := m.GetPriority(opt.Name, configmap.PriorityNormal)
		if isSet {
			overridden.Set(opt.Name, value)
		}
	}
	return overridden
}
// NonDefault discovers which config values aren't at their default
//
// Values are compared as strings against fmt.Sprint of the default.
func (os Options) NonDefault(m configmap.Getter) configmap.Simple {
	var nonDefault = configmap.Simple{}
	for i := range os {
		opt := &os[i]
		value, isSet := m.Get(opt.Name)
		if !isSet {
			// unset values are at their default by definition
			continue
		}
		defaultValue := fmt.Sprint(opt.Default)
		if value != defaultValue {
			nonDefault.Set(opt.Name, value)
		}
	}
	return nonDefault
}
// NonDefaultRC discovers which config values aren't at their default
//
// It expects a pointer to the current config struct in opts.
//
// It returns the overridden config in rc config format.
//
// Returns an error if an option name has no corresponding field in
// the config struct.
func (os Options) NonDefaultRC(opts any) (map[string]any, error) {
	items, err := configstruct.Items(opts)
	if err != nil {
		return nil, err
	}
	// index the struct items by option name for fast lookup
	itemsByName := map[string]*configstruct.Item{}
	for i := range items {
		item := &items[i]
		itemsByName[item.Name] = item
	}
	var nonDefault = map[string]any{}
	for i := range os {
		opt := &os[i]
		item, found := itemsByName[opt.Name]
		if !found {
			return nil, fmt.Errorf("key %q in OptionsInfo not found in Options struct", opt.Name)
		}
		// compare the string renderings of value and default
		value := fmt.Sprint(item.Value)
		defaultValue := fmt.Sprint(opt.Default)
		if value != defaultValue {
			nonDefault[item.Field] = item.Value
		}
	}
	return nonDefault, nil
}
// HasAdvanced discovers if any options have an Advanced setting
func (os Options) HasAdvanced() bool {
	for _, opt := range os {
		if opt.Advanced {
			return true
		}
	}
	return false
}
// OptionVisibility controls whether the options are visible in the
// configurator or the command line.
type OptionVisibility byte

// Constants Option.Hide - these are bit flags and may be combined
const (
	OptionHideCommandLine OptionVisibility = 1 << iota
	OptionHideConfigurator
	OptionHideBoth = OptionHideCommandLine | OptionHideConfigurator
)
// Option describes an option for the config wizard
//
// This also describes command line options and environment variables.
//
// It is also used to describe options for the API.
//
// To create a multiple-choice option, specify the possible values
// in the Examples property. Whether the option's value is required
// to be one of these depends on other properties:
//   - Default is to allow any value, either from specified examples,
//     or any other value. To restrict exclusively to the specified
//     examples, also set Exclusive=true.
//   - If empty string should not be allowed then set Required=true,
//     and do not set Default.
type Option struct {
	Name       string           // name of the option in snake_case
	FieldName  string           // name of the field used in the rc JSON - will be auto filled normally
	Help       string           // help, start with a single sentence on a single line that will be extracted for command line help
	Groups     string           `json:",omitempty"` // groups this option belongs to - comma separated string for options classification
	Provider   string           `json:",omitempty"` // set to filter on provider
	Default    any              // default value, nil => "", if set (and not to nil or "") then Required does nothing
	Value      any              // value to be set by flags
	Examples   OptionExamples   `json:",omitempty"` // predefined values that can be selected from list (multiple-choice option)
	ShortOpt   string           `json:",omitempty"` // the short option for this if required
	Hide       OptionVisibility // set this to hide the config from the configurator or the command line
	Required   bool             // this option is required, meaning value cannot be empty unless there is a default
	IsPassword bool             // set if the option is a password
	NoPrefix   bool             // set if the option for this should not use the backend prefix
	Advanced   bool             // set if this is an advanced config option
	Exclusive  bool             // set if the answer can only be one of the examples (empty string allowed unless Required or Default is set)
	Sensitive  bool             // set if this option should be redacted when using rclone config redacted
}
// BaseOption is an alias for Option used internally
//
// It exists so MarshalJSON can marshal the plain fields without
// recursing into Option's own MarshalJSON.
type BaseOption Option
// MarshalJSON turns an Option into JSON
//
// It adds some generated fields for ease of use
// - DefaultStr - a string rendering of Default
// - ValueStr - a string rendering of Value
// - Type - the type of the option
func (o *Option) MarshalJSON() ([]byte, error) {
	// embed as BaseOption to avoid infinite recursion into this method
	return json.Marshal(struct {
		BaseOption
		DefaultStr string
		ValueStr   string
		Type       string
	}{
		BaseOption: BaseOption(*o),
		DefaultStr: fmt.Sprint(o.Default),
		ValueStr:   o.String(),
		Type:       o.Type(),
	})
}
// GetValue gets the current value which is the default if not set
//
// A nil Value falls back to Default, and a nil Default falls back to
// the empty string.
func (o *Option) GetValue() any {
	if o.Value != nil {
		return o.Value
	}
	if o.Default != nil {
		return o.Default
	}
	return ""
}
// IsDefault returns true if the value is the default value
//
// A nil Value always counts as default; otherwise the Value is
// deep-compared against the Default (with nil normalized to "").
func (o *Option) IsDefault() bool {
	if o.Value == nil {
		return true
	}
	def := o.Default
	if def == nil {
		def = ""
	}
	return reflect.DeepEqual(o.Value, def)
}
// String turns Option into a string
func (o *Option) String() string {
	v := o.GetValue()
	switch x := v.(type) {
	case []string:
		// Treat empty string array as empty string
		// This is to make the default value of the option help nice
		if len(x) == 0 {
			return ""
		}
		// Encode string arrays as CSV
		// The default Go encoding can't be decoded uniquely
		return CommaSepList(x).String()
	case SizeSuffix:
		str := x.String()
		// Suffix bare numbers with "B" unless they are 0
		//
		// This makes sure that fs.SizeSuffix roundtrips through string
		if len(str) > 0 && str != "0" {
			if lastDigit := str[len(str)-1]; lastDigit >= '0' && lastDigit <= '9' {
				str += "B"
			}
		}
		return str
	}
	// everything else uses the default formatting
	return fmt.Sprint(v)
}
// Set an Option from a string
//
// For []string options repeated calls append values; for all other
// types the string is parsed and replaces the current value.
func (o *Option) Set(s string) (err error) {
	v := o.GetValue()
	if stringArray, isStringArray := v.([]string); isStringArray {
		if stringArray == nil {
			stringArray = []string{}
		}
		// If this is still the default value then overwrite the defaults
		//
		// NOTE(review): this compares slice data pointers, so it only
		// detects the case where Value still aliases Default exactly -
		// confirm Default is always a []string here (a nil any would panic).
		if reflect.ValueOf(o.Default).Pointer() == reflect.ValueOf(v).Pointer() {
			stringArray = []string{}
		}
		o.Value = append(stringArray, s)
		return nil
	}
	// Parse non-array values from their string representation
	newValue, err := configstruct.StringToInterface(v, s)
	if err != nil {
		return err
	}
	o.Value = newValue
	return nil
}
// typer is implemented by option values which can report their own
// type name, overriding the reflected name in Option.Type.
type typer interface {
	Type() string
}
// Type of the value
func (o *Option) Type() string {
	v := o.GetValue()
	// Try to call Type method on non-pointer
	if do, ok := v.(typer); ok {
		return do.Type()
	}
	// Special case []string
	if _, isStringArray := v.([]string); isStringArray {
		return "stringArray"
	}
	// Fall back to the reflected type name
	return reflect.TypeOf(v).Name()
}
// FlagName for the option
func (o *Option) FlagName(prefix string) string {
	// Command line flags use kebab-case rather than snake_case
	name := strings.ReplaceAll(o.Name, "_", "-")
	if o.NoPrefix {
		return name
	}
	return prefix + "-" + name
}
// EnvVarName for the option.
//
// The name is derived from the prefix and option name via OptionToEnv.
func (o *Option) EnvVarName(prefix string) string {
	return OptionToEnv(prefix + "-" + o.Name)
}
// Copy makes a shallow copy of the option.
func (o *Option) Copy() *Option {
	// Named "out" rather than "copy" to avoid shadowing the builtin copy
	out := new(Option)
	*out = *o
	return out
}
// OptionExamples is a slice of examples, sortable by Help text.
type OptionExamples []OptionExample
// Len is part of sort.Interface.
func (os OptionExamples) Len() int { return len(os) }
// Swap is part of sort.Interface.
func (os OptionExamples) Swap(i, j int) { os[i], os[j] = os[j], os[i] }
// Less is part of sort.Interface - examples order by their Help text.
func (os OptionExamples) Less(i, j int) bool { return os[i].Help < os[j].Help }
// Sort sorts an OptionExamples into ascending order of Help text.
func (os OptionExamples) Sort() { sort.Sort(os) }
// OptionExample describes an example for an Option
type OptionExample struct {
	Value string // the example value itself
	Help string // help text shown alongside the value
	Provider string `json:",omitempty"` // NOTE(review): presumably restricts the example to one provider - confirm with callers
}
// Register a filesystem
//
// Fs modules should use this in an init() function
func Register(info *RegInfo) {
	info.Options.setValues()
	if info.Prefix == "" {
		// Default the config prefix to the backend name
		info.Prefix = info.Name
	}
	// Append the common optDescription option to every backend
	info.Options = append(info.Options, optDescription)
	Registry = append(Registry, info)
	for _, alias := range info.Aliases {
		// Copy the info block and rename and hide the alias and options
		aliasInfo := *info
		aliasInfo.Name = alias
		aliasInfo.Prefix = alias
		aliasInfo.Hide = true
		// Clone so hiding the alias options doesn't mutate the original's
		aliasInfo.Options = slices.Clone(info.Options)
		for i := range aliasInfo.Options {
			aliasInfo.Options[i].Hide = OptionHideBoth
		}
		Registry = append(Registry, &aliasInfo)
	}
}
// Find looks for a RegInfo object for the name passed in. The name
// can be either the Name or the Prefix.
//
// Services are looked up in the config file
func Find(name string) (*RegInfo, error) {
	for _, ri := range Registry {
		// Match on backend name, config prefix or source file name
		switch name {
		case ri.Name, ri.Prefix, ri.FileName():
			return ri, nil
		}
	}
	return nil, fmt.Errorf("didn't find backend called %q", name)
}
// MustFind looks for an Info object for the type name passed in
//
// Services are looked up in the config file.
//
// Exits with a fatal error if not found
func MustFind(name string) *RegInfo {
	ri, err := Find(name)
	if err != nil {
		Fatalf(nil, "Failed to find remote: %v", err)
	}
	return ri
}
// OptionsInfo holds info about a block of options
type OptionsInfo struct {
	Name    string                      // name of this options block for the rc
	Opt     any                         // pointer to a struct to set the options in
	Options Options                     // description of the options
	Reload  func(context.Context) error // if not nil, call when options changed and on init
}
// OptionsRegistry is a registry of global options, keyed by options block name.
var OptionsRegistry = map[string]OptionsInfo{}
// RegisterGlobalOptions registers global options to be made into
// command line options, rc options and environment variable reading.
//
// Packages which need global options should use this in an init() function
func RegisterGlobalOptions(oi OptionsInfo) {
	oi.Options.setValues()
	OptionsRegistry[oi.Name] = oi
	// Cross-check the option descriptions against the options struct;
	// any mismatch is fatal as it is a programming error
	if oi.Opt != nil && oi.Options != nil {
		err := oi.Check()
		if err != nil {
			Fatalf(nil, "%v", err)
		}
	}
	// Load the default values into the options.
	//
	// These will be from the ultimate defaults or environment
	// variables.
	//
	// The flags haven't been processed yet so this will be run
	// again when the flags are ready.
	err := oi.load()
	if err != nil {
		Fatalf(nil, "Failed to load %q default values: %v", oi.Name, err)
	}
}
// optionName matches valid option names: lower case letters, digits and underscores only.
var optionName = regexp.MustCompile(`^[a-z0-9_]+$`)
// Check ensures that for every element of oi.Options there is a field
// in oi.Opt that matches it.
//
// It also sets Option.FieldName to be the name of the field for use
// in JSON.
func (oi *OptionsInfo) Check() error {
	errCount := errcount.New()
	items, err := configstruct.Items(oi.Opt)
	if err != nil {
		return err
	}
	// Index the struct fields by their config name, validating the
	// names as we go
	itemsByName := map[string]*configstruct.Item{}
	for i := range items {
		item := &items[i]
		itemsByName[item.Name] = item
		if !optionName.MatchString(item.Name) {
			err = fmt.Errorf("invalid name in `config:%q` in Options struct", item.Name)
			errCount.Add(err)
			Errorf(nil, "%s", err)
		}
	}
	for i := range oi.Options {
		option := &oi.Options[i]
		// Check name is correct
		if !optionName.MatchString(option.Name) {
			err = fmt.Errorf("invalid Name: %q", option.Name)
			errCount.Add(err)
			Errorf(nil, "%s", err)
			continue
		}
		// Check item exists
		item, found := itemsByName[option.Name]
		if !found {
			err = fmt.Errorf("key %q in OptionsInfo not found in Options struct", option.Name)
			errCount.Add(err)
			Errorf(nil, "%s", err)
			continue
		}
		// Check type
		optType := fmt.Sprintf("%T", option.Default)
		itemType := fmt.Sprintf("%T", item.Value)
		if optType != itemType {
			err = fmt.Errorf("key %q in has type %q in OptionsInfo.Default but type %q in Options struct", option.Name, optType, itemType)
			// NOTE(review): type mismatches are logged but deliberately not
			// counted as errors - confirm before re-enabling the line below.
			//errCount.Add(err)
			Errorf(nil, "%s", err)
		}
		// Set FieldName
		option.FieldName = item.Field
	}
	return errCount.Err(fmt.Sprintf("internal error: options block %q", oi.Name))
}
// load the defaults from the options
//
// Reload the options if required
func (oi *OptionsInfo) load() error {
	if oi.Options == nil {
		// Not fatal - just log and carry on with zero values
		Errorf(nil, "No options defined for config block %q", oi.Name)
		return nil
	}
	// Read values from the defaults and environment variables and set
	// them into the options struct
	m := ConfigMap("", oi.Options, "", nil)
	err := configstruct.Set(m, oi.Opt)
	if err != nil {
		return fmt.Errorf("failed to initialise %q options: %w", oi.Name, err)
	}
	// Give the owner a chance to react to the freshly loaded values
	if oi.Reload != nil {
		err = oi.Reload(context.Background())
		if err != nil {
			return fmt.Errorf("failed to reload %q options: %w", oi.Name, err)
		}
	}
	return nil
}
// GlobalOptionsInit initialises the defaults of global options to
// their values read from the options, environment variables and
// command line parameters.
func GlobalOptionsInit() error {
	keys := make([]string, 0, len(OptionsRegistry))
	for key := range OptionsRegistry {
		keys = append(keys, key)
	}
	// Sort alphabetically, but with "main" first
	sort.Slice(keys, func(i, j int) bool {
		switch {
		case keys[i] == "main":
			return true
		case keys[j] == "main":
			return false
		}
		return keys[i] < keys[j]
	})
	for _, key := range keys {
		// Take an addressable copy for the pointer-receiver load method
		opt := OptionsRegistry[key]
		if err := opt.load(); err != nil {
			return err
		}
	}
	return nil
}
// Type returns a textual string to identify the type of the remote
func Type(f Fs) string {
	// Turn "*pkg.Fs" into "pkg"
	name := strings.TrimPrefix(fmt.Sprintf("%T", f), "*")
	return strings.TrimSuffix(name, ".Fs")
}
var (
	typeToRegInfoMu sync.Mutex // protects typeToRegInfo
	typeToRegInfo   = map[string]*RegInfo{} // maps Type(f) to the RegInfo which created it
)
// addReverse records fsInfo under Type(f) in the reverse map so it can
// later be recovered with FindFromFs.
func addReverse(f Fs, fsInfo *RegInfo) {
	typeToRegInfoMu.Lock()
	defer typeToRegInfoMu.Unlock()
	typeToRegInfo[Type(f)] = fsInfo
}
// FindFromFs finds the *RegInfo used to create this Fs, provided
// it was created by fs.NewFs or cache.Get
//
// It returns nil if not found
func FindFromFs(f Fs) *RegInfo {
	typeToRegInfoMu.Lock()
	defer typeToRegInfoMu.Unlock()
	// A missing key yields the nil zero value
	return typeToRegInfo[Type(f)]
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/bits.go | fs/bits.go | package fs
import (
"encoding/json"
"fmt"
"strings"
)
// Bits is an option which can be any combination of the Choices.
//
// The zero value has no bits set.
//
// Suggested implementation is something like this:
//
//	type bits = Bits[bitsChoices]
//
//	const (
//		bitA bits = 1 << iota
//		bitB
//		bitC
//	)
//
//	type bitsChoices struct{}
//
//	func (bitsChoices) Choices() []BitsChoicesInfo {
//		return []BitsChoicesInfo{
//			{Bit: uint64(0), Name: "OFF"}, // Optional Off value - "" if not defined
//			{Bit: uint64(bitA), Name: "A"},
//			{Bit: uint64(bitB), Name: "B"},
//			{Bit: uint64(bitC), Name: "C"},
//		}
//	}
type Bits[C BitsChoices] uint64
// BitsChoicesInfo should be returned from the Choices method
type BitsChoicesInfo struct {
	Bit uint64 // the bit value, or 0 for the optional "off" name
	Name string // human readable name for this bit
}
// BitsChoices returns the valid choices for this type.
//
// It must work on the zero value.
//
// Note that when using this in an Option the ExampleBitsChoices will be
// filled in automatically.
type BitsChoices interface {
	// Choices returns the valid choices for each bit of this type
	Choices() []BitsChoicesInfo
}
// String turns a Bits into a comma separated list of set bit names.
func (b Bits[C]) String() string {
	var out []string
	choices := b.Choices()
	// Return an off value if set
	if b == 0 {
		for _, info := range choices {
			if info.Bit == 0 {
				return info.Name
			}
		}
	}
	for _, info := range choices {
		// Skip the "off" choice - a zero bit can't be tested with AND
		if info.Bit == 0 {
			continue
		}
		if b&Bits[C](info.Bit) != 0 {
			out = append(out, info.Name)
			// Clear the bits named so far so leftovers can be detected below
			b &^= Bits[C](info.Bit)
		}
	}
	// Any remaining bits have no name - render them in hex
	if b != 0 {
		out = append(out, fmt.Sprintf("Unknown-0x%X", int(b)))
	}
	return strings.Join(out, ",")
}
// Help returns a comma separated list of all possible bits.
func (b Bits[C]) Help() string {
	choices := b.Choices()
	names := make([]string, 0, len(choices))
	for _, info := range choices {
		names = append(names, info.Name)
	}
	return strings.Join(names, ", ")
}
// Choices returns the possible values of the Bits.
func (b Bits[C]) Choices() []BitsChoicesInfo {
	// Query the zero value of the choices type parameter
	var c C
	return c.Choices()
}
// Set a Bits as a comma separated list of flags
func (b *Bits[C]) Set(s string) error {
	var result Bits[C]
	choices := b.Choices()
	for _, name := range strings.Split(s, ",") {
		name = strings.TrimSpace(name)
		// Empty entries (e.g. trailing commas) are ignored
		if name == "" {
			continue
		}
		matched := false
		// Case-insensitive match against every choice; all matching
		// bits are ORed in
		for _, info := range choices {
			if strings.EqualFold(info.Name, name) {
				matched = true
				result |= Bits[C](info.Bit)
			}
		}
		if !matched {
			return fmt.Errorf("invalid choice %q from: %s", name, b.Help())
		}
	}
	// Replace the old value entirely
	*b = result
	return nil
}
// IsSet returns true if all the bits in mask are set in b.
func (b Bits[C]) IsSet(mask Bits[C]) bool {
	return (b & mask) == mask
}
// Type of the value.
//
// If C has a Type() string method then it will be used instead.
func (b Bits[C]) Type() string {
	var c C
	// Prefer a custom type name supplied by the choices type
	if do, ok := any(c).(typer); ok {
		return do.Type()
	}
	return "Bits"
}
// Scan implements the fmt.Scanner interface
func (b *Bits[C]) Scan(s fmt.ScanState, ch rune) error {
	// Read a whitespace-delimited token and parse it as a flag list
	token, err := s.Token(true, nil)
	if err != nil {
		return err
	}
	return b.Set(string(token))
}
// UnmarshalJSON makes sure the value can be parsed as a string or integer in JSON
func (b *Bits[C]) UnmarshalJSON(in []byte) error {
	// Integers are applied directly via the callback; strings go through Set
	return UnmarshalJSONFlag(in, b, func(i int64) error {
		*b = (Bits[C])(i)
		return nil
	})
}
// MarshalJSON encodes it as string - the comma separated name list
// produced by String.
func (b *Bits[C]) MarshalJSON() ([]byte, error) {
	return json.Marshal(b.String())
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/bwtimetable_test.go | fs/bwtimetable_test.go | package fs
import (
"encoding/json"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check it satisfies the interfaces at compile time
var (
	_ Flagger   = (*BwTimetable)(nil)
	_ FlaggerNP = BwTimetable{}
)
// TestBwTimetableSet checks parsing of bandwidth timetable strings and
// that the parsed value round-trips through String().
func TestBwTimetableSet(t *testing.T) {
	// Each case gives: the input string, the expected parsed timetable,
	// whether an error is expected, and the canonical String() rendering.
	// Entries without a day prefix expand to one slot per day of the week.
	for _, test := range []struct {
		in   string
		want BwTimetable
		err  bool
		out  string
	}{
		// Invalid inputs
		{"", BwTimetable{}, true, ""},
		{"bad,bad", BwTimetable{}, true, ""},
		{"bad bad", BwTimetable{}, true, ""},
		{"bad", BwTimetable{}, true, ""},
		{"1000X", BwTimetable{}, true, ""},
		{"2401,666", BwTimetable{}, true, ""},
		{"1061,666", BwTimetable{}, true, ""},
		{"bad-10:20,666", BwTimetable{}, true, ""},
		{"Mon-bad,666", BwTimetable{}, true, ""},
		{"Mon-10:20,bad", BwTimetable{}, true, ""},
		{
			"0",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 0, Rx: 0}},
			},
			false,
			"0",
		},
		{
			"666",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
			},
			false,
			"666Ki",
		},
		{
			"666:333",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
			},
			false,
			"666Ki:333Ki",
		},
		{
			"10:20,666",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
			},
			false,
			"Sun-10:20,666Ki Mon-10:20,666Ki Tue-10:20,666Ki Wed-10:20,666Ki Thu-10:20,666Ki Fri-10:20,666Ki Sat-10:20,666Ki",
		},
		{
			"10:20,666:333",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
			},
			false,
			"Sun-10:20,666Ki:333Ki Mon-10:20,666Ki:333Ki Tue-10:20,666Ki:333Ki Wed-10:20,666Ki:333Ki Thu-10:20,666Ki:333Ki Fri-10:20,666Ki:333Ki Sat-10:20,666Ki:333Ki",
		},
		{
			"11:00,333 13:40,666 23:50,10M 23:59,off",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
			},
			false,
			"Sun-11:00,333Ki Mon-11:00,333Ki Tue-11:00,333Ki Wed-11:00,333Ki Thu-11:00,333Ki Fri-11:00,333Ki Sat-11:00,333Ki Sun-13:40,666Ki Mon-13:40,666Ki Tue-13:40,666Ki Wed-13:40,666Ki Thu-13:40,666Ki Fri-13:40,666Ki Sat-13:40,666Ki Sun-23:50,10Mi Mon-23:50,10Mi Tue-23:50,10Mi Wed-23:50,10Mi Thu-23:50,10Mi Fri-23:50,10Mi Sat-23:50,10Mi Sun-23:59,off Mon-23:59,off Tue-23:59,off Wed-23:59,off Thu-23:59,off Fri-23:59,off Sat-23:59,off",
		},
		{
			"11:00,333:666 13:40,666:off 23:50,10M:1M 23:59,off:10M",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 1 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 1 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 1 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 1 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 1 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 1 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 1 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: 10 * 1024 * 1024}},
			},
			false,
			"Sun-11:00,333Ki:666Ki Mon-11:00,333Ki:666Ki Tue-11:00,333Ki:666Ki Wed-11:00,333Ki:666Ki Thu-11:00,333Ki:666Ki Fri-11:00,333Ki:666Ki Sat-11:00,333Ki:666Ki Sun-13:40,666Ki:off Mon-13:40,666Ki:off Tue-13:40,666Ki:off Wed-13:40,666Ki:off Thu-13:40,666Ki:off Fri-13:40,666Ki:off Sat-13:40,666Ki:off Sun-23:50,10Mi:1Mi Mon-23:50,10Mi:1Mi Tue-23:50,10Mi:1Mi Wed-23:50,10Mi:1Mi Thu-23:50,10Mi:1Mi Fri-23:50,10Mi:1Mi Sat-23:50,10Mi:1Mi Sun-23:59,off:10Mi Mon-23:59,off:10Mi Tue-23:59,off:10Mi Wed-23:59,off:10Mi Thu-23:59,off:10Mi Fri-23:59,off:10Mi Sat-23:59,off:10Mi",
		},
		{
			"Mon-11:00,333 Tue-13:40,666:333 Fri-00:00,10M Sat-10:00,off Sun-23:00,666",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1000, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
			},
			false,
			"Mon-11:00,333Ki Tue-13:40,666Ki:333Ki Fri-00:00,10Mi Sat-10:00,off Sun-23:00,666Ki",
		},
		{
			"Mon-11:00,333 Tue-13:40,666 Fri-00:00,10M 00:01,off Sun-23:00,666:off",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: -1}},
			},
			false,
			"Mon-11:00,333Ki Tue-13:40,666Ki Fri-00:00,10Mi Sun-00:01,off Mon-00:01,off Tue-00:01,off Wed-00:01,off Thu-00:01,off Fri-00:01,off Sat-00:01,off Sun-23:00,666Ki:off",
		},
		{
			// from the docs
			"08:00,512 12:00,10M 13:00,512 18:00,30M 23:00,off",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 800, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 800, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 800, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 800, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 800, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 800, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 800, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1200, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1200, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1200, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1200, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1200, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1200, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1200, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1300, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1300, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1300, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1300, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1300, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1300, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1800, Bandwidth: BwPair{Tx: 30 * 1024 * 1024, Rx: 30 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1800, Bandwidth: BwPair{Tx: 30 * 1024 * 1024, Rx: 30 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1800, Bandwidth: BwPair{Tx: 30 * 1024 * 1024, Rx: 30 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1800, Bandwidth: BwPair{Tx: 30 * 1024 * 1024, Rx: 30 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1800, Bandwidth: BwPair{Tx: 30 * 1024 * 1024, Rx: 30 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1800, Bandwidth: BwPair{Tx: 30 * 1024 * 1024, Rx: 30 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1800, Bandwidth: BwPair{Tx: 30 * 1024 * 1024, Rx: 30 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2300, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2300, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2300, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2300, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2300, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2300, Bandwidth: BwPair{Tx: -1, Rx: -1}},
			},
			false,
			"Sun-08:00,512Ki Mon-08:00,512Ki Tue-08:00,512Ki Wed-08:00,512Ki Thu-08:00,512Ki Fri-08:00,512Ki Sat-08:00,512Ki Sun-12:00,10Mi Mon-12:00,10Mi Tue-12:00,10Mi Wed-12:00,10Mi Thu-12:00,10Mi Fri-12:00,10Mi Sat-12:00,10Mi Sun-13:00,512Ki Mon-13:00,512Ki Tue-13:00,512Ki Wed-13:00,512Ki Thu-13:00,512Ki Fri-13:00,512Ki Sat-13:00,512Ki Sun-18:00,30Mi Mon-18:00,30Mi Tue-18:00,30Mi Wed-18:00,30Mi Thu-18:00,30Mi Fri-18:00,30Mi Sat-18:00,30Mi Sun-23:00,off Mon-23:00,off Tue-23:00,off Wed-23:00,off Thu-23:00,off Fri-23:00,off Sat-23:00,off",
		},
		{
			// from the docs
			"Mon-00:00,512 Fri-23:59,10M Sat-10:00,1M Sun-20:00,off",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 0, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2359, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1000, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2000, Bandwidth: BwPair{Tx: -1, Rx: -1}},
			},
			false,
			"Mon-00:00,512Ki Fri-23:59,10Mi Sat-10:00,1Mi Sun-20:00,off",
		},
		{
			// from the docs
			"Mon-00:00,512 12:00,1M Sun-20:00,off",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 0, Bandwidth: BwPair{Tx: 512 * 1024, Rx: 512 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1200, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1200, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1200, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1200, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1200, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1200, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1200, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2000, Bandwidth: BwPair{Tx: -1, Rx: -1}},
			},
			false,
			"Mon-00:00,512Ki Sun-12:00,1Mi Mon-12:00,1Mi Tue-12:00,1Mi Wed-12:00,1Mi Thu-12:00,1Mi Fri-12:00,1Mi Sat-12:00,1Mi Sun-20:00,off",
		},
		{
			// semicolons can separate entries as well as spaces
			"11:00,333;13:40,666;23:50,10M;23:59,off",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 3, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 4, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
			},
			false,
			"Sun-11:00,333Ki Mon-11:00,333Ki Tue-11:00,333Ki Wed-11:00,333Ki Thu-11:00,333Ki Fri-11:00,333Ki Sat-11:00,333Ki Sun-13:40,666Ki Mon-13:40,666Ki Tue-13:40,666Ki Wed-13:40,666Ki Thu-13:40,666Ki Fri-13:40,666Ki Sat-13:40,666Ki Sun-23:50,10Mi Mon-23:50,10Mi Tue-23:50,10Mi Wed-23:50,10Mi Thu-23:50,10Mi Fri-23:50,10Mi Sat-23:50,10Mi Sun-23:59,off Mon-23:59,off Tue-23:59,off Wed-23:59,off Thu-23:59,off Fri-23:59,off Sat-23:59,off",
		},
		{
			"Mon-11:00,333;Tue-13:40,666:333;Fri-00:00,10M;Sat-10:00,off;Sun-23:00,666",
			BwTimetable{
				BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
				BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: BwPair{Tx: 10 * 1024 * 1024, Rx: 10 * 1024 * 1024}},
				BwTimeSlot{DayOfTheWeek: 6, HHMM: 1000, Bandwidth: BwPair{Tx: -1, Rx: -1}},
				BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
			},
			false,
			"Mon-11:00,333Ki Tue-13:40,666Ki:333Ki Fri-00:00,10Mi Sat-10:00,off Sun-23:00,666Ki",
		},
	} {
		tt := BwTimetable{}
		err := tt.Set(test.in)
		if test.err {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
		}
		assert.Equal(t, test.want, tt)
		assert.Equal(t, test.out, tt.String())
	}
}
func TestBwTimetableLimitAt(t *testing.T) {
for _, test := range []struct {
tt BwTimetable
now time.Time
want BwTimeSlot
}{
{
BwTimetable{},
time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: -1, Rx: -1}},
},
{
BwTimetable{
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 666 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 333 * 1024}},
},
time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 666 * 1024}},
},
{
BwTimetable{
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
},
time.Date(2017, time.April, 20, 10, 15, 0, 0, time.UTC),
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
},
{
BwTimetable{
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
},
time.Date(2017, time.April, 20, 11, 0, 0, 0, time.UTC),
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
},
{
BwTimetable{
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: BwPair{Tx: -1, Rx: 1024 * 1024}},
},
time.Date(2017, time.April, 20, 13, 1, 0, 0, time.UTC),
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
},
{
BwTimetable{
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: BwPair{Tx: 333 * 1024, Rx: 33 * 1024}},
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 66 * 1024}},
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2301, Bandwidth: BwPair{Tx: 1024 * 1024, Rx: 102 * 1024}},
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | true |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/backend_config.go | fs/backend_config.go | // Structures and utilities for backend config
//
//
package fs
import (
"context"
"errors"
"fmt"
"slices"
"strconv"
"strings"
"github.com/rclone/rclone/fs/config/configmap"
)
const (
// ConfigToken is the key used to store the token under
ConfigToken = "token"
// ConfigKeyEphemeralPrefix marks config keys which shouldn't be stored in the config file
ConfigKeyEphemeralPrefix = "config_"
)
// ConfigOAuth should be called to do the OAuth
//
// set in lib/oauthutil to avoid a circular import
var ConfigOAuth func(ctx context.Context, name string, m configmap.Mapper, ri *RegInfo, in ConfigIn) (*ConfigOut, error)
// ConfigIn is passed to the Config function for an Fs
//
// The interactive config system for backends is state based. This is
// so that different frontends to the config can be attached, eg over
// the API or web page.
//
// Each call to the config system supplies ConfigIn which tells the
// system what to do. Each will return a ConfigOut which gives a
// question to ask the user and a state to return to. There is one
// special question which allows the backends to do OAuth.
//
// The ConfigIn contains a State which the backend should act upon and
// a Result from the previous question to the user.
//
// If ConfigOut is nil or ConfigOut.State == "" then the process is
// deemed to have finished. If there is no Option in ConfigOut then
// the next state will be called immediately. This is wrapped in
// ConfigGoto and ConfigResult.
//
// Backends should keep no state in memory - if they need to persist
// things between calls it should be persisted in the config file.
// Things can also be persisted in the state using the StatePush and
// StatePop utilities here.
//
// The utilities here are convenience methods for different kinds of
// questions and responses.
//
// Where the questions ask for a name then this should start with
// "config_" to show it is an ephemeral config input rather than the
// actual value stored in the config file. Names beginning with
// "config_fs_" are reserved for internal use.
//
// State names starting with "*" are reserved for internal use.
//
// Note that in the bin directory there is a python program called
// "config.py" which shows how this interface should be used.
type ConfigIn struct {
State string // State to run
Result string // Result from previous Option
}
// ConfigOut is returned from Config function for an Fs
//
// State is the state for the next call to Config
// OAuth is a special value set by oauthutil.ConfigOAuth
// Error is displayed to the user before asking a question
// Result is passed to the next call to Config if Option/OAuth isn't set
type ConfigOut struct {
State string // State to jump to after this
Option *Option // Option to query user about
OAuth any `json:"-"` // Do OAuth if set
Error string // error to be displayed to the user
Result string // if Option/OAuth not set then this is passed to the next state
}
// ConfigInputOptional asks the user for a string which may be empty
//
// state should be the next state required
// name is the config name for this item
// help should be the help shown to the user
func ConfigInputOptional(state string, name string, help string) (*ConfigOut, error) {
	opt := &Option{
		Name:    name,
		Help:    help,
		Default: "",
	}
	return &ConfigOut{State: state, Option: opt}, nil
}
// ConfigInput asks the user for a non-empty string
//
// state should be the next state required
// name is the config name for this item
// help should be the help shown to the user
func ConfigInput(state string, name string, help string) (*ConfigOut, error) {
	return &ConfigOut{
		State: state,
		Option: &Option{
			Name:     name,
			Help:     help,
			Default:  "",
			Required: true,
		},
	}, nil
}
// ConfigPassword asks the user for a password
//
// state should be the next state required
// name is the config name for this item
// help should be the help shown to the user
func ConfigPassword(state string, name string, help string) (*ConfigOut, error) {
	return &ConfigOut{
		State: state,
		Option: &Option{
			Name:       name,
			Help:       help,
			Default:    "",
			IsPassword: true,
		},
	}, nil
}
// ConfigGoto goes to the next state with empty Result
//
// state should be the next state required
func ConfigGoto(state string) (*ConfigOut, error) {
	out := new(ConfigOut)
	out.State = state
	return out, nil
}
// ConfigResult goes to the next state with result given
//
// state should be the next state required
// result should be the result for the next state
func ConfigResult(state, result string) (*ConfigOut, error) {
	out := new(ConfigOut)
	out.State = state
	out.Result = result
	return out, nil
}
// ConfigError shows the error to the user and goes to the state passed in
//
// state should be the next state required
// message should be the error text shown to the user
func ConfigError(state string, message string) (*ConfigOut, error) {
	// Note: parameter renamed from the non-idiomatic capitalized
	// "Error" - Go callers are positional so this is interface-compatible.
	return &ConfigOut{
		State: state,
		Error: message,
	}, nil
}
// ConfigConfirm returns a ConfigOut structure which asks a Yes/No question
//
// state should be the next state required
// Default should be the default answer
// name is the config name for this item
// help should be the help shown to the user
func ConfigConfirm(state string, Default bool, name string, help string) (*ConfigOut, error) {
	// The two permitted answers - Exclusive stops free text input.
	answers := OptionExamples{
		{Value: "true", Help: "Yes"},
		{Value: "false", Help: "No"},
	}
	return &ConfigOut{
		State: state,
		Option: &Option{
			Name:      name,
			Help:      help,
			Default:   Default,
			Examples:  answers,
			Exclusive: true,
		},
	}, nil
}
// ConfigChooseExclusiveFixed returns a ConfigOut structure which has a list of
// items to choose from.
//
// Possible items must be supplied as a fixed list.
//
// User is required to supply a value, and is restricted to the specified list,
// i.e. free text input is not allowed.
//
// state should be the next state required
// name is the config name for this item
// help should be the help shown to the user
// items should be the items in the list
//
// It chooses the first item to be the default.
// If there are no items then it will return an error.
// If there is only one item it will short cut to the next state.
func ConfigChooseExclusiveFixed(state string, name string, help string, items []OptionExample) (*ConfigOut, error) {
	if len(items) == 0 {
		return nil, fmt.Errorf("no items found in: %s", help)
	}
	out := &ConfigOut{
		State: state,
		Option: &Option{
			Name:      name,
			Help:      help,
			Default:   items[0].Value,
			Examples:  items,
			Exclusive: true,
		},
	}
	if len(items) == 1 {
		// Only one possible answer - don't ask, just pass it to the next state.
		out.Result = items[0].Value
		out.Option = nil
	}
	return out, nil
}
// ConfigChooseExclusive returns a ConfigOut structure which has a list of
// items to choose from.
//
// Possible items are retrieved from a supplied function.
//
// User is required to supply a value, and is restricted to the specified list,
// i.e. free text input is not allowed.
//
// state should be the next state required
// name is the config name for this item
// help should be the help shown to the user
// n should be the number of items in the list
// getItem should return the items (value, help)
//
// It chooses the first item to be the default.
// If there are no items then it will return an error.
// If there is only one item it will short cut to the next state.
func ConfigChooseExclusive(state string, name string, help string, n int, getItem func(i int) (itemValue string, itemHelp string)) (*ConfigOut, error) {
	items := make(OptionExamples, 0, n)
	for i := 0; i < n; i++ {
		value, itemHelp := getItem(i)
		items = append(items, OptionExample{Value: value, Help: itemHelp})
	}
	return ConfigChooseExclusiveFixed(state, name, help, items)
}
// ConfigChooseFixed returns a ConfigOut structure which has a list of
// suggested items.
//
// Suggested items must be supplied as a fixed list.
//
// User is required to supply a value, but is not restricted to the specified
// list, i.e. free text input is accepted.
//
// state should be the next state required
// name is the config name for this item
// help should be the help shown to the user
// items should be the items in the list
//
// It chooses the first item to be the default.
func ConfigChooseFixed(state string, name string, help string, items []OptionExample) (*ConfigOut, error) {
	opt := &Option{
		Name:     name,
		Help:     help,
		Examples: items,
		Required: true,
	}
	// Default to the first suggestion when there is one.
	if len(items) > 0 {
		opt.Default = items[0].Value
	}
	return &ConfigOut{State: state, Option: opt}, nil
}
// ConfigChoose returns a ConfigOut structure which has a list of suggested
// items.
//
// Suggested items are retrieved from a supplied function.
//
// User is required to supply a value, but is not restricted to the specified
// list, i.e. free text input is accepted.
//
// state should be the next state required
// name is the config name for this item
// help should be the help shown to the user
// n should be the number of items in the list
// getItem should return the items (value, help)
//
// It chooses the first item to be the default.
func ConfigChoose(state string, name string, help string, n int, getItem func(i int) (itemValue string, itemHelp string)) (*ConfigOut, error) {
	items := make(OptionExamples, 0, n)
	for i := 0; i < n; i++ {
		value, itemHelp := getItem(i)
		items = append(items, OptionExample{Value: value, Help: itemHelp})
	}
	return ConfigChooseFixed(state, name, help, items)
}
// StatePush pushes a new values onto the front of the config string
//
// Since "," is the separator between state components, any commas in
// the values are escaped by replacing them with the unicode fullwidth
// comma "," (U+FF0C). StatePop undoes the escaping.
func StatePush(state string, values ...string) string {
	for i := range values {
		values[i] = strings.ReplaceAll(values[i], ",", ",") // replace comma with unicode wide version
	}
	if state != "" {
		// Full slice expression so append can't scribble on the
		// caller's backing array.
		values = append(values[:len(values):len(values)], state)
	}
	return strings.Join(values, ",")
}
// configOAuthKeyType is an unexported type used as the context key so
// values stored by this package can't collide with keys set by other
// packages.
type configOAuthKeyType struct{}

// OAuth key for config
var configOAuthKey = configOAuthKeyType{}

// ConfigOAuthOnly marks the ctx so that the Config will stop after
// finding an OAuth
func ConfigOAuthOnly(ctx context.Context) context.Context {
	return context.WithValue(ctx, configOAuthKey, struct{}{})
}

// Return true if ctx is marked as ConfigOAuthOnly
func isConfigOAuthOnly(ctx context.Context) bool {
	return ctx.Value(configOAuthKey) != nil
}
// StatePop pops a state from the front of the config string
// It returns the new state and the value popped
func StatePop(state string) (newState string, value string) {
comma := strings.IndexRune(state, ',')
if comma < 0 {
return "", state
}
value, newState = state[:comma], state[comma+1:]
value = strings.ReplaceAll(value, ",", ",") // replace unicode wide comma with comma
return newState, value
}
// BackendConfig calls the config for the backend in ri
//
// It wraps any OAuth transactions as necessary so only straight
// forward config questions are emitted
func BackendConfig(ctx context.Context, name string, m configmap.Mapper, ri *RegInfo, choices configmap.Getter, in ConfigIn) (out *ConfigOut, err error) {
	// Keep stepping the state machine until it produces something the
	// caller must act on: an error, the end of the config, a question
	// for the user, or an error message to display.
	for {
		out, err = backendConfigStep(ctx, name, m, ri, choices, in)
		if err != nil {
			break
		}
		if out == nil || out.State == "" {
			// finished
			break
		}
		if out.Option != nil {
			// question to ask user
			break
		}
		if out.Error != "" {
			// error to show user
			break
		}
		// non terminal state, but no question to ask or error to show - loop here
		// feeding this step's output back in as the next step's input
		in = ConfigIn{
			State:  out.State,
			Result: out.Result,
		}
	}
	return out, err
}
// ConfigAll should be passed in as the initial state to run the
// entire config
const ConfigAll = "*all"
// Run the config state machine for the normal config
//
// This asks each of ri.Options in turn (normal options first, then
// optionally the advanced ones) and stores the answers in m, before
// handing over to the "*postconfig" state.
func configAll(ctx context.Context, name string, m configmap.Mapper, ri *RegInfo, in ConfigIn) (out *ConfigOut, err error) {
	if len(ri.Options) == 0 {
		return ConfigGoto("*postconfig")
	}

	// States are encoded
	//
	//     *all-ACTION,NUMBER,ADVANCED
	//
	// Where NUMBER is the current state, ADVANCED is a flag true or false
	// to say whether we are asking about advanced config and
	// ACTION is what the state should be doing next.
	//
	// StatePop returns (remainder, first-component) so successive pops
	// yield ACTION, then NUMBER, then ADVANCED.
	stateParams, state := StatePop(in.State)
	stateParams, stateNumber := StatePop(stateParams)
	_, stateAdvanced := StatePop(stateParams)

	optionNumber := 0
	advanced := stateAdvanced == "true"
	if stateNumber != "" {
		optionNumber, err = strconv.Atoi(stateNumber)
		if err != nil {
			return nil, fmt.Errorf("internal error: bad state number: %w", err)
		}
	}

	// Detect if reached the end of the questions
	if optionNumber == len(ri.Options) {
		if ri.Options.HasAdvanced() {
			return ConfigConfirm("*all-advanced", false, "config_fs_advanced", "Edit advanced config?")
		}
		return ConfigGoto("*postconfig")
	} else if optionNumber < 0 || optionNumber > len(ri.Options) {
		return nil, errors.New("internal error: option out of range")
	}

	// Make the next state
	newState := func(state string, i int, advanced bool) string {
		return StatePush("", state, fmt.Sprint(i), fmt.Sprint(advanced))
	}

	// Find the current option
	option := &ri.Options[optionNumber]

	switch state {
	case "*all":
		// If option is hidden or doesn't match advanced setting then skip it
		if option.Hide&OptionHideConfigurator != 0 || option.Advanced != advanced {
			return ConfigGoto(newState("*all", optionNumber+1, advanced))
		}
		// Skip this question if it isn't the correct provider
		provider, _ := m.Get(ConfigProvider)
		if !MatchProvider(option.Provider, provider) {
			return ConfigGoto(newState("*all", optionNumber+1, advanced))
		}
		// Ask this question - the answer comes back via "*all-set"
		out = &ConfigOut{
			State:  newState("*all-set", optionNumber, advanced),
			Option: option,
		}
		// Filter examples by provider if necessary - work on a copy so
		// the registry's option isn't modified
		if provider != "" && len(option.Examples) > 0 {
			optionCopy := option.Copy()
			optionCopy.Examples = OptionExamples{}
			for _, example := range option.Examples {
				if MatchProvider(example.Provider, provider) {
					optionCopy.Examples = append(optionCopy.Examples, example)
				}
			}
			out.Option = optionCopy
		}
		return out, nil
	case "*all-set":
		// Set the value if it differs from the current one
		// Note this won't set blank values in the config file
		// if the default is blank
		currentValue, _ := m.Get(option.Name)
		if currentValue != in.Result {
			m.Set(option.Name, in.Result)
		}
		// Find the next question
		return ConfigGoto(newState("*all", optionNumber+1, advanced))
	case "*all-advanced":
		// Reply to edit advanced question
		if in.Result == "true" {
			// Restart from option 0 asking the advanced questions
			return ConfigGoto(newState("*all", 0, true))
		}
		return ConfigGoto("*postconfig")
	}
	return nil, fmt.Errorf("internal error: bad state %q", state)
}
// backendConfigStep performs a single step of the backend config state
// machine.
//
// Internal states (prefixed "*") are dispatched here; any other state
// is passed through to the backend's own Config function. The returned
// ConfigOut is then post-processed to start OAuth flows, apply
// overridden or auto-confirmed answers, and seed defaults when editing
// an existing remote.
func backendConfigStep(ctx context.Context, name string, m configmap.Mapper, ri *RegInfo, choices configmap.Getter, in ConfigIn) (out *ConfigOut, err error) {
	ci := GetConfig(ctx)
	Debugf(name, "config in: state=%q, result=%q", in.State, in.Result)
	defer func() {
		Debugf(name, "config out: out=%+v, err=%v", out, err)
	}()

	// Dispatch on the state prefix
	switch {
	case strings.HasPrefix(in.State, ConfigAll):
		// Do all config
		out, err = configAll(ctx, name, m, ri, in)
	case strings.HasPrefix(in.State, "*oauth"):
		// Do internal oauth states
		out, err = ConfigOAuth(ctx, name, m, ri, in)
	case strings.HasPrefix(in.State, "*postconfig"):
		// Do the post config starting from state ""
		in.State = ""
		return backendConfigStep(ctx, name, m, ri, choices, in)
	case strings.HasPrefix(in.State, "*"):
		err = fmt.Errorf("unknown internal state %q", in.State)
	default:
		// Otherwise pass to backend
		if ri.Config == nil {
			// Backend has no interactive config - nothing to do
			return nil, nil
		}
		out, err = ri.Config(ctx, name, m, in)
	}
	if err != nil {
		return nil, err
	}
	// Post-process the output
	switch {
	case out == nil:
	case out.OAuth != nil:
		// If this is an OAuth state then deal with it here
		returnState := out.State
		// If rclone authorize, stop after doing oauth
		if isConfigOAuthOnly(ctx) {
			Debugf(nil, "OAuth only is set - overriding return state")
			returnState = ""
		}
		// Run internal state, saving the input so we can recall the state
		return ConfigGoto(StatePush("", "*oauth", returnState, in.State, in.Result))
	case out.Option != nil:
		if out.Option.Name == "" {
			return nil, errors.New("internal error: no name set in Option")
		}
		// If override value is set in the choices then use that
		if result, ok := choices.Get(out.Option.Name); ok {
			Debugf(nil, "Override value found, choosing value %q for state %q", result, out.State)
			return ConfigResult(out.State, result)
		}
		// If AutoConfirm is set, choose the default value
		if ci.AutoConfirm {
			result := fmt.Sprint(out.Option.Default)
			Debugf(nil, "Auto confirm is set, choosing default %q for state %q, override by setting config parameter %q", result, out.State, out.Option.Name)
			return ConfigResult(out.State, result)
		}
		// If fs.ConfigEdit is set then make the default value
		// in the config the current value.
		if result, ok := choices.Get(ConfigEdit); ok && result == "true" {
			if value, ok := m.Get(out.Option.Name); ok {
				// Copy the option so the registry isn't modified
				newOption := out.Option.Copy()
				oldValue := newOption.Value
				err = newOption.Set(value)
				if err != nil {
					Errorf(nil, "Failed to set %q from %q - using default: %v", out.Option.Name, value, err)
				} else {
					newOption.Default = newOption.Value
					newOption.Value = oldValue
					out.Option = newOption
				}
			}
		}
	}
	return out, nil
}
// MatchProvider returns true if provider matches the providerConfig string.
//
// The providerConfig string can either be a list of providers to
// match, or if it starts with "!" it will be a list of providers not
// to match.
//
// If either providerConfig or provider is blank then it will return true
func MatchProvider(providerConfig, provider string) bool {
	if providerConfig == "" || provider == "" {
		return true
	}
	list, negated := providerConfig, false
	if rest, ok := strings.CutPrefix(providerConfig, "!"); ok {
		list, negated = rest, true
	}
	found := slices.Contains(strings.Split(list, ","), provider)
	// XOR: a negated list inverts the membership test.
	return found != negated
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/pacer_test.go | fs/pacer_test.go | package fs
import (
"context"
"errors"
"sync"
"testing"
"time"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/pacer"
"github.com/stretchr/testify/require"
)
// errFoo is the fixed error returned by every call to dummyPaced.fn.
var errFoo = errors.New("foo")

// dummyPaced is a stub paced function used to test the Pacer. It
// counts how many times fn is invoked and controls whether the pacer
// should retry.
type dummyPaced struct {
	retry  bool       // the retry flag fn reports back to the pacer
	called int        // number of times fn has run
	wait   *sync.Cond // if non-nil, fn blocks on this before counting the call
}

// fn is the paced function under test. It returns dp.retry and errFoo
// every time. If dp.wait is set it waits to be signalled first so the
// test can control when calls proceed.
func (dp *dummyPaced) fn() (bool, error) {
	if dp.wait != nil {
		dp.wait.L.Lock()
		dp.wait.Wait()
		dp.wait.L.Unlock()
	}
	dp.called++
	return dp.retry, errFoo
}
// TestPacerCall checks that Pacer.Call retries a permanently failing
// function LowLevelRetries times and that the final error is
// retriable.
func TestPacerCall(t *testing.T) {
	ctx := context.Background()
	config := GetConfig(ctx)
	expectedCalled := config.LowLevelRetries
	if expectedCalled == 0 {
		// No retries configured in this context - install a private
		// config with a known retry count so the test is deterministic.
		ctx, config = AddConfig(ctx)
		expectedCalled = 20
		config.LowLevelRetries = expectedCalled
	}
	// Tiny sleeps keep the test fast
	p := NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))

	dp := &dummyPaced{retry: true}
	err := p.Call(dp.fn)
	require.Equal(t, expectedCalled, dp.called)
	require.Implements(t, (*fserrors.Retrier)(nil), err)
}
// TestPacerCallNoRetry checks that Pacer.CallNoRetry invokes the
// function exactly once even though it asks for a retry, and that the
// returned error is still retriable.
func TestPacerCallNoRetry(t *testing.T) {
	p := NewPacer(context.Background(), pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))

	dp := &dummyPaced{retry: true}
	err := p.CallNoRetry(dp.fn)
	require.Equal(t, 1, dp.called)
	require.Implements(t, (*fserrors.Retrier)(nil), err)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/log_test.go | fs/log_test.go | package fs
import (
"encoding/json"
"fmt"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check it satisfies the interfaces
var (
_ Flagger = (*LogLevel)(nil)
_ FlaggerNP = LogLevel(0)
_ fmt.Stringer = LogValueItem{}
)
// withString is a minimal fmt.Stringer implementation used to check
// that LogValue renders values via their String method.
type withString struct{}

// String implements fmt.Stringer.
func (withString) String() string {
	return "hello"
}
// TestLogValue checks that LogValue formats plain values and
// fmt.Stringer implementations, and that LogValueHide renders as an
// empty string.
func TestLogValue(t *testing.T) {
	x := LogValue("x", 1)
	assert.Equal(t, "1", x.String())
	x = LogValue("x", withString{})
	assert.Equal(t, "hello", x.String())
	// Hidden values must not appear in the rendered output
	x = LogValueHide("x", withString{})
	assert.Equal(t, "", x.String())
}
// TestLogLevelString checks the String rendering of log levels,
// including the "Unknown(n)" fallback for values with no name.
func TestLogLevelString(t *testing.T) {
	cases := []struct {
		in   LogLevel
		want string
	}{
		{LogLevelEmergency, "EMERGENCY"},
		{LogLevelDebug, "DEBUG"},
		{99, "Unknown(99)"},
	}
	for _, c := range cases {
		assert.Equal(t, c.want, c.in.String(), c.in)
	}
}
// TestLogLevelSet checks parsing of log level names via Set, and that
// an unknown name returns an error and leaves the value untouched.
func TestLogLevelSet(t *testing.T) {
	cases := []struct {
		in      string
		want    LogLevel
		wantErr bool
	}{
		{"EMERGENCY", LogLevelEmergency, false},
		{"DEBUG", LogLevelDebug, false},
		{"Potato", 100, true},
	}
	for _, c := range cases {
		level := LogLevel(100) // sentinel start value
		err := level.Set(c.in)
		if c.wantErr {
			require.Error(t, err, c.in)
		} else {
			require.NoError(t, err, c.in)
		}
		assert.Equal(t, c.want, level, c.in)
	}
}
// TestLogLevelUnmarshalJSON checks that a LogLevel unmarshals from
// either a quoted level name or a bare integer, and that invalid
// names or out-of-range numbers produce an error leaving the value at
// its initial sentinel (100).
func TestLogLevelUnmarshalJSON(t *testing.T) {
	for _, test := range []struct {
		in   string
		want LogLevel
		err  bool
	}{
		{`"EMERGENCY"`, LogLevelEmergency, false},
		{`"DEBUG"`, LogLevelDebug, false},
		{`"Potato"`, 100, true},
		{strconv.Itoa(int(LogLevelEmergency)), LogLevelEmergency, false},
		{strconv.Itoa(int(LogLevelDebug)), LogLevelDebug, false},
		{"Potato", 100, true}, // unquoted - not valid JSON
		{`99`, 100, true},     // numeric but not a defined level
		{`-99`, 100, true},
	} {
		logLevel := LogLevel(100)
		err := json.Unmarshal([]byte(test.in), &logLevel)
		if test.err {
			require.Error(t, err, test.in)
		} else {
			require.NoError(t, err, test.in)
		}
		assert.Equal(t, test.want, logLevel, test.in)
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/bwtimetable.go | fs/bwtimetable.go | package fs
import (
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"time"
)
// BwPair represents an upload and a download bandwidth
//
// A value <= 0 is treated as "no limit" (see IsSet, and LimitAt which
// uses {-1, -1} for unlimited).
type BwPair struct {
	Tx SizeSuffix // upload bandwidth
	Rx SizeSuffix // download bandwidth
}
// String returns a printable representation of a BwPair
//
// The download rate is only shown (as "tx:rx") when it differs from
// the upload rate.
func (bp *BwPair) String() string {
	s := bp.Tx.String()
	if bp.Rx != bp.Tx {
		s += ":" + bp.Rx.String()
	}
	return s
}
// Set the bandwidth from a string which is either
// SizeSuffix or SizeSuffix:SizeSuffix (for tx:rx bandwidth)
func (bp *BwPair) Set(s string) (err error) {
	txSpec, rxSpec, hasRx := strings.Cut(s, ":")
	if err = bp.Tx.Set(txSpec); err != nil {
		return err
	}
	if !hasRx {
		// Single value - the same limit applies in both directions
		bp.Rx = bp.Tx
		return nil
	}
	return bp.Rx.Set(rxSpec)
}
// IsSet returns true if either of the bandwidth limits are set
func (bp *BwPair) IsSet() bool {
	if bp.Tx > 0 {
		return true
	}
	return bp.Rx > 0
}
// BwTimeSlot represents a bandwidth configuration at a point in time.
type BwTimeSlot struct {
	DayOfTheWeek int    // 0 = Sunday .. 6 = Saturday (matches time.Weekday)
	HHMM         int    // start time encoded as hour*100 + minute
	Bandwidth    BwPair // limits in force from this time onwards
}
// BwTimetable contains all configured time slots.
//
// Slots are stored in the order they appeared in the configuration
// string; LimitAt scans the whole slice to pick the applicable one.
type BwTimetable []BwTimeSlot
// String returns a printable representation of BwTimetable.
//
// A timetable consisting of a single Sunday-midnight slot is printed
// as a bare bandwidth with no day/time prefix, mirroring how Set
// parses a plain bandwidth value.
func (x BwTimetable) String() string {
	singleBw := len(x) == 1 && x[0].DayOfTheWeek == 0 && x[0].HHMM == 0
	parts := make([]string, 0, len(x))
	for _, slot := range x {
		prefix := ""
		if !singleBw {
			prefix = fmt.Sprintf("%s-%02d:%02d,", time.Weekday(slot.DayOfTheWeek).String()[:3], slot.HHMM/100, slot.HHMM%100)
		}
		parts = append(parts, prefix+slot.Bandwidth.String())
	}
	return strings.Join(parts, " ")
}
// validateHour does basic syntax checking of an "hh:mm" time
// specification, returning an error describing the first problem found.
func validateHour(HHMM string) error {
	// Expect exactly "hh:mm" - two digit hour, ':', two digit minute.
	// Previously the separator was never checked so "12x34" validated.
	if len(HHMM) != 5 || HHMM[2] != ':' {
		return fmt.Errorf("invalid time specification (hh:mm): %q", HHMM)
	}
	hh, err := strconv.Atoi(HHMM[0:2])
	if err != nil {
		return fmt.Errorf("invalid hour in time specification %q: %v", HHMM, err)
	}
	if hh < 0 || hh > 23 {
		// %d not %q - %q on an int formats it as a rune literal
		return fmt.Errorf("invalid hour (must be between 00 and 23): %d", hh)
	}
	mm, err := strconv.Atoi(HHMM[3:])
	if err != nil {
		return fmt.Errorf("invalid minute in time specification: %q: %v", HHMM, err)
	}
	if mm < 0 || mm > 59 {
		// was printing hh here instead of the offending minute
		return fmt.Errorf("invalid minute (must be between 00 and 59): %d", mm)
	}
	return nil
}
// parseWeekday converts a day name ("Mon" or "Monday", any case) into
// a weekday number with Sunday == 0, matching time.Weekday.
func parseWeekday(dayOfWeek string) (int, error) {
	dayOfWeek = strings.ToLower(dayOfWeek)
	switch dayOfWeek {
	case "sun", "sunday":
		return 0, nil
	case "mon", "monday":
		return 1, nil
	case "tue", "tuesday":
		return 2, nil
	case "wed", "wednesday":
		return 3, nil
	case "thu", "thursday":
		return 4, nil
	case "fri", "friday":
		return 5, nil
	case "sat", "saturday":
		return 6, nil
	}
	return 0, fmt.Errorf("invalid weekday: %q", dayOfWeek)
}
// Set the bandwidth timetable.
func (x *BwTimetable) Set(s string) error {
	// The timetable is formatted as:
	// "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,bandwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off"
	// If only a single bandwidth identifier is provided, we assume constant bandwidth.
	if len(s) == 0 {
		return errors.New("empty string")
	}
	// Single value without time specification.
	if !strings.Contains(s, " ") && !strings.Contains(s, ",") {
		ts := BwTimeSlot{}
		if err := ts.Bandwidth.Set(s); err != nil {
			return err
		}
		// Constant bandwidth is stored as one slot at Sunday 00:00
		ts.DayOfTheWeek = 0
		ts.HHMM = 0
		*x = BwTimetable{ts}
		return nil
	}
	// Split the timetable string by both spaces and semicolons
	for tok := range strings.FieldsFuncSeq(s, func(r rune) bool {
		return r == ' ' || r == ';'
	}) {
		tv := strings.Split(tok, ",")
		// Format must be dayOfWeek-HH:MM,BW
		if len(tv) != 2 {
			return fmt.Errorf("invalid time/bandwidth specification: %q", tok)
		}
		weekday := 0
		HHMM := ""
		if !strings.Contains(tv[0], "-") {
			// No day given: "HH:MM,BW" applies at that time on every
			// day of the week, so expand it into 7 slots.
			HHMM = tv[0]
			if err := validateHour(HHMM); err != nil {
				return err
			}
			for i := range 7 {
				// Atoi errors ignored: validateHour has already
				// confirmed both fields are numeric.
				hh, _ := strconv.Atoi(HHMM[0:2])
				mm, _ := strconv.Atoi(HHMM[3:])
				ts := BwTimeSlot{
					DayOfTheWeek: i,
					HHMM: (hh * 100) + mm,
				}
				if err := ts.Bandwidth.Set(tv[1]); err != nil {
					return err
				}
				*x = append(*x, ts)
			}
		} else {
			// "day-HH:MM,BW" applies from that time on the given day.
			timespec := strings.Split(tv[0], "-")
			if len(timespec) != 2 {
				return fmt.Errorf("invalid time specification: %q", tv[0])
			}
			var err error
			weekday, err = parseWeekday(timespec[0])
			if err != nil {
				return err
			}
			HHMM = timespec[1]
			if err := validateHour(HHMM); err != nil {
				return err
			}
			hh, _ := strconv.Atoi(HHMM[0:2])
			mm, _ := strconv.Atoi(HHMM[3:])
			ts := BwTimeSlot{
				DayOfTheWeek: weekday,
				HHMM: (hh * 100) + mm,
			}
			// Bandwidth limit for this time slot.
			if err := ts.Bandwidth.Set(tv[1]); err != nil {
				return err
			}
			*x = append(*x, ts)
		}
	}
	return nil
}
// timeDiff returns the difference in minutes between
// lateDayOfWeekHHMM and earlyDayOfWeekHHMM, both encoded as
// day*10000 + hour*100 + minute.
func timeDiff(lateDayOfWeekHHMM int, earlyDayOfWeekHHMM int) int {
	toMinutes := func(dayOfWeekHHMM int) int {
		day := dayOfWeekHHMM / 10000
		hour := (dayOfWeekHHMM / 100) % 100
		minute := dayOfWeekHHMM % 100
		return day*24*60 + hour*60 + minute
	}
	return toMinutes(lateDayOfWeekHHMM) - toMinutes(earlyDayOfWeekHHMM)
}
// LimitAt returns a BwTimeSlot for the time requested.
func (x BwTimetable) LimitAt(tt time.Time) BwTimeSlot {
	// If the timetable is empty, we return an unlimited BwTimeSlot starting at Sunday midnight.
	if len(x) == 0 {
		return BwTimeSlot{Bandwidth: BwPair{-1, -1}}
	}
	// Encode the requested time as day*10000 + hour*100 + minute so it
	// can be compared directly against the slot encoding.
	dayOfWeekHHMM := int(tt.Weekday())*10000 + tt.Hour()*100 + tt.Minute()
	// By default, we return the last element in the timetable. This
	// satisfies two conditions: 1) If there's only one element it
	// will always be selected, and 2) The last element of the table
	// will "wrap around" until overridden by an earlier time slot.
	ret := x[len(x)-1]
	mindif := 0
	first := true
	// Look for most recent time slot.
	for _, ts := range x {
		// Skip slots which start after the requested time
		if dayOfWeekHHMM < (ts.DayOfTheWeek*10000)+ts.HHMM {
			continue
		}
		dif := timeDiff(dayOfWeekHHMM, (ts.DayOfTheWeek*10000)+ts.HHMM)
		if first {
			mindif = dif
			first = false
		}
		// <= so that of two equally-recent slots the later one in the
		// table wins
		if dif <= mindif {
			mindif = dif
			ret = ts
		}
	}
	return ret
}
// Type of the value - returns the option type name shown in the flag
// help for a BwTimetable.
func (x BwTimetable) Type() string {
	return "BwTimetable"
}
// UnmarshalJSON unmarshals a string value
//
// The JSON string is parsed with the same syntax Set accepts.
func (x *BwTimetable) UnmarshalJSON(in []byte) error {
	var spec string
	if err := json.Unmarshal(in, &spec); err != nil {
		return err
	}
	return x.Set(spec)
}
// MarshalJSON marshals as a string value using the same syntax that
// Set accepts.
func (x BwTimetable) MarshalJSON() ([]byte, error) {
	return json.Marshal(x.String())
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/mimetype.go | fs/mimetype.go | package fs
import (
"context"
"mime"
"path"
"strings"
)
// Add a minimal number of mime types to augment go's built in types
// for environments which don't have access to a mime.types file (e.g.
// Termux on android)
func init() {
	for _, t := range []struct {
		mimeType   string
		extensions string // comma separated list of extensions including the leading dot
	}{
		{"audio/flac", ".flac"},
		{"audio/mpeg", ".mpga,.mpega,.mp2,.mp3,.m4a"},
		{"audio/ogg", ".oga,.ogg,.opus,.spx"},
		{"audio/x-wav", ".wav"},
		{"image/tiff", ".tiff,.tif"},
		{"video/dv", ".dif,.dv"},
		{"video/fli", ".fli"},
		{"video/mpeg", ".mpeg,.mpg,.mpe"},
		{"video/MP2T", ".ts"},
		{"video/mp4", ".mp4"},
		{"video/quicktime", ".qt,.mov"},
		{"video/ogg", ".ogv"},
		{"video/webm", ".webm"},
		{"video/x-msvideo", ".avi"},
		{"video/x-matroska", ".mpv,.mkv"},
		{"application/x-subrip", ".srt"},
	} {
		for ext := range strings.SplitSeq(t.extensions, ",") {
			// Only register extensions the system doesn't already know
			// about - system/user mime.types take precedence.
			if mime.TypeByExtension(ext) == "" {
				err := mime.AddExtensionType(ext, t.mimeType)
				if err != nil {
					// Can only happen if the table above is malformed,
					// so failing at startup is appropriate.
					panic(err)
				}
			}
		}
	}
}
// MimeTypeFromName returns a guess at the mime type from the name
func MimeTypeFromName(remote string) (mimeType string) {
mimeType = mime.TypeByExtension(path.Ext(remote))
if !strings.ContainsRune(mimeType, '/') {
mimeType = "application/octet-stream"
}
return mimeType
}
// MimeType returns the MimeType from the object, either by calling
// the MimeTyper interface or using MimeTypeFromName
func MimeType(ctx context.Context, o DirEntry) (mimeType string) {
	// Prefer the backend-reported mime type when it is available and
	// non-empty
	if typer, ok := o.(MimeTyper); ok {
		if mt := typer.MimeType(ctx); mt != "" {
			return mt
		}
	}
	// Otherwise guess from the file extension
	return MimeTypeFromName(o.Remote())
}
// MimeTypeDirEntry returns the MimeType of a DirEntry
//
// It returns "inode/directory" for directories, or uses
// MimeType(Object)
func MimeTypeDirEntry(ctx context.Context, item DirEntry) string {
	// Check Object first to keep the original precedence for entries
	// implementing both interfaces.
	if o, ok := item.(Object); ok {
		return MimeType(ctx, o)
	}
	if _, ok := item.(Directory); ok {
		return "inode/directory"
	}
	return ""
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config_list_test.go | fs/config_list_test.go | package fs
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
// must panics if passed a non-nil error - used to keep the Example
// functions short.
func must(err error) {
	if err == nil {
		return
	}
	panic(err)
}
// ExampleSpaceSepList demonstrates parsing space separated remote
// lists, including csv-style quoting where an embedded quote is
// written as a doubled quote.
func ExampleSpaceSepList() {
	for _, s := range []string{
		`remotea:test/dir remoteb:`,
		`"remotea:test/space dir" remoteb:`,
		`"remotea:test/quote""dir" remoteb:`,
	} {
		var l SpaceSepList
		must(l.Set(s))
		fmt.Printf("%#v\n", l)
	}
	// Output:
	// fs.SpaceSepList{"remotea:test/dir", "remoteb:"}
	// fs.SpaceSepList{"remotea:test/space dir", "remoteb:"}
	// fs.SpaceSepList{"remotea:test/quote\"dir", "remoteb:"}
}
// ExampleCommaSepList demonstrates parsing comma separated remote
// lists with the same csv-style quoting rules as SpaceSepList.
func ExampleCommaSepList() {
	for _, s := range []string{
		`remotea:test/dir,remoteb:`,
		`"remotea:test/space dir",remoteb:`,
		`"remotea:test/quote""dir",remoteb:`,
	} {
		var l CommaSepList
		must(l.Set(s))
		fmt.Printf("%#v\n", l)
	}
	// Output:
	// fs.CommaSepList{"remotea:test/dir", "remoteb:"}
	// fs.CommaSepList{"remotea:test/space dir", "remoteb:"}
	// fs.CommaSepList{"remotea:test/quote\"dir", "remoteb:"}
}
// TestSpaceSepListSet exercises SpaceSepList.Set over valid inputs,
// quoting edge cases and malformed quoting which must error.
func TestSpaceSepListSet(t *testing.T) {
	type testCase struct {
		in   string
		want SpaceSepList
		err  string // substring expected in the error, "" for success
	}
	for _, c := range []testCase{
		{``, nil, ""},
		{`\`, SpaceSepList{`\`}, ""},
		{`\\`, SpaceSepList{`\\`}, ""},
		{`potato`, SpaceSepList{`potato`}, ""},
		{`po\tato`, SpaceSepList{`po\tato`}, ""},
		{`potato\`, SpaceSepList{`potato\`}, ""},
		{`'potato`, SpaceSepList{`'potato`}, ""},
		{`pot'ato`, SpaceSepList{`pot'ato`}, ""},
		{`potato'`, SpaceSepList{`potato'`}, ""},
		{`"potato"`, SpaceSepList{`potato`}, ""},
		{`'potato'`, SpaceSepList{`'potato'`}, ""},
		{`potato apple`, SpaceSepList{`potato`, `apple`}, ""},
		{`potato\ apple`, SpaceSepList{`potato\`, `apple`}, ""},
		{`"potato apple"`, SpaceSepList{`potato apple`}, ""},
		{`"potato'apple"`, SpaceSepList{`potato'apple`}, ""},
		{`"potato''apple"`, SpaceSepList{`potato''apple`}, ""},
		{`"potato' 'apple"`, SpaceSepList{`potato' 'apple`}, ""},
		{`potato="apple"`, nil, `bare " in non-quoted-field`},
		{`apple "potato`, nil, "extraneous"},
		{`apple pot"ato`, nil, "bare \" in non-quoted-field"},
		{`potato"`, nil, "bare \" in non-quoted-field"},
	} {
		var got SpaceSepList
		err := got.Set(c.in)
		if c.err != "" {
			require.Containsf(t, err.Error(), c.err, "input: %q", c.in)
		} else {
			require.NoErrorf(t, err, "input: %q", c.in)
		}
		require.Equalf(t, c.want, got, "input: %q", c.in)
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/features.go | fs/features.go | // Filesystem features and optional interfaces
package fs
import (
"context"
"io"
"reflect"
"strings"
"time"
)
// Features describe the optional features of the Fs
type Features struct {
// Feature flags, whether Fs
CaseInsensitive bool // has case insensitive files
DuplicateFiles bool // allows duplicate files
ReadMimeType bool // can read the mime type of objects
WriteMimeType bool // can set the mime type of objects
CanHaveEmptyDirectories bool // can have empty directories
BucketBased bool // is bucket based (like s3, swift, etc.)
BucketBasedRootOK bool // is bucket based and can use from root
SetTier bool // allows set tier functionality on objects
GetTier bool // allows to retrieve storage tier of objects
ServerSideAcrossConfigs bool // can server-side copy between different remotes of the same type
IsLocal bool // is the local backend
SlowModTime bool // if calling ModTime() generally takes an extra transaction
SlowHash bool // if calling Hash() generally takes an extra transaction
ReadMetadata bool // can read metadata from objects
WriteMetadata bool // can write metadata to objects
UserMetadata bool // can read/write general purpose metadata
ReadDirMetadata bool // can read metadata from directories (implements Directory.Metadata)
WriteDirMetadata bool // can write metadata to directories (implements Directory.SetMetadata)
WriteDirSetModTime bool // can write metadata to directories (implements Directory.SetModTime)
UserDirMetadata bool // can read/write general purpose metadata to/from directories
DirModTimeUpdatesOnWrite bool // indicate writing files to a directory updates its modtime
FilterAware bool // can make use of filters if provided for listing
PartialUploads bool // uploaded file can appear incomplete on the fs while it's being uploaded
	NoMultiThreading bool // set if can't have multiple threads on one download open
Overlay bool // this wraps one or more backends to add functionality
ChunkWriterDoesntSeek bool // set if the chunk writer doesn't need to read the data more than once
DoubleSlash bool // set if backend supports double slashes in paths
// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
Purge func(ctx context.Context, dir string) error
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
Copy func(ctx context.Context, src Object, remote string) (Object, error)
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
Move func(ctx context.Context, src Object, remote string) (Object, error)
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
DirMove func(ctx context.Context, src Fs, srcRemote, dstRemote string) error
// MkdirMetadata makes the directory passed in as dir.
//
// It shouldn't return an error if it already exists.
//
// If the metadata is not nil it is set.
//
// It returns the directory that was created.
MkdirMetadata func(ctx context.Context, dir string, metadata Metadata) (Directory, error)
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
ChangeNotify func(context.Context, func(string, EntryType), <-chan time.Duration)
// UnWrap returns the Fs that this Fs is wrapping
UnWrap func() Fs
// WrapFs returns the Fs that is wrapping this Fs
WrapFs func() Fs
// SetWrapper sets the Fs that is wrapping this Fs
SetWrapper func(f Fs)
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
DirCacheFlush func()
// PublicLink generates a public link to the remote path (usually readable by anyone)
PublicLink func(ctx context.Context, remote string, expire Duration, unlink bool) (string, error)
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
PutUnchecked func(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
PutStream func(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
MergeDirs func(ctx context.Context, dirs []Directory) error
// DirSetModTime sets the metadata on the directory to set the modification date
DirSetModTime func(ctx context.Context, dir string, modTime time.Time) error
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
CleanUp func(ctx context.Context) error
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
	// of listing recursively than doing a directory traversal.
ListR ListRFn
// ListP lists the objects and directories of the Fs starting
// from dir non recursively to out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
ListP func(ctx context.Context, dir string, callback ListRCallback) error
// About gets quota information from the Fs
About func(ctx context.Context) (*Usage, error)
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
OpenWriterAt func(ctx context.Context, remote string, size int64) (WriterAtCloser, error)
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
//
OpenChunkWriter func(ctx context.Context, remote string, src ObjectInfo, options ...OpenOption) (info ChunkWriterInfo, writer ChunkWriter, err error)
// UserInfo returns info about the connected user
UserInfo func(ctx context.Context) (map[string]string, error)
// Disconnect the current user
Disconnect func(ctx context.Context) error
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
Command func(ctx context.Context, name string, arg []string, opt map[string]string) (any, error)
// Shutdown the backend, closing any background tasks and any
// cached connections.
Shutdown func(ctx context.Context) error
}
// Disable nil's out the named feature. If it isn't found then it
// will log a message.
func (ft *Features) Disable(name string) *Features {
	// Prefix boolean values with ! to set the feature
	invert := false
	if strings.HasPrefix(name, "!") {
		name = name[1:]
		invert = true
	}
	v := reflect.ValueOf(ft).Elem()
	vType := v.Type()
	found := false
	for i := range v.NumField() {
		vName := vType.Field(i).Name
		field := v.Field(i)
		if strings.EqualFold(name, vName) {
			found = true
			if !field.CanSet() {
				Errorf(nil, "Can't set Feature %q", name)
			} else {
				if invert {
					if field.Type().Kind() == reflect.Bool {
						field.Set(reflect.ValueOf(true))
						Debugf(nil, "Set feature %q", name)
					} else {
						Errorf(nil, "Can't set Feature %q to true", name)
					}
				} else {
					zero := reflect.Zero(field.Type())
					field.Set(zero)
					Debugf(nil, "Reset feature %q", name)
				}
			}
		}
	}
	// The doc comment promises a log message for unknown names, but
	// previously misspelt --disable values were silently ignored.
	if !found {
		Errorf(nil, "Unknown feature %q - ignoring", name)
	}
	return ft
}
// List returns a slice of all the possible feature names
func (ft *Features) List() (out []string) {
	vType := reflect.ValueOf(ft).Elem().Type()
	n := vType.NumField()
	out = make([]string, 0, n)
	for i := 0; i < n; i++ {
		out = append(out, vType.Field(i).Name)
	}
	return out
}
// Enabled returns a map of features with keys showing whether they
// are enabled or not
func (ft *Features) Enabled() (features map[string]bool) {
	v := reflect.ValueOf(ft).Elem()
	vType := v.Type()
	n := v.NumField()
	features = make(map[string]bool, n)
	for i := 0; i < n; i++ {
		name := vType.Field(i).Name
		field := v.Field(i)
		if field.Kind() == reflect.Func {
			// Function values can't be compared - non-nil means enabled
			features[name] = !field.IsNil()
			continue
		}
		// Everything else is enabled when it differs from its zero value
		zero := reflect.Zero(field.Type())
		features[name] = field.Interface() != zero.Interface()
	}
	return features
}
// DisableList nil's out the comma separated list of named features.
// If it isn't found then it will log a message.
func (ft *Features) DisableList(list []string) *Features {
	for _, name := range list {
		ft.Disable(strings.TrimSpace(name))
	}
	return ft
}
// Fill fills in the function pointers in the Features struct from the
// optional interfaces. It returns the original updated Features
// struct passed in.
func (ft *Features) Fill(ctx context.Context, f Fs) *Features {
	if do, ok := f.(Purger); ok {
		ft.Purge = do.Purge
	}
	if do, ok := f.(Copier); ok {
		ft.Copy = do.Copy
	}
	if do, ok := f.(Mover); ok {
		ft.Move = do.Move
	}
	if do, ok := f.(DirMover); ok {
		ft.DirMove = do.DirMove
	}
	if do, ok := f.(MkdirMetadataer); ok {
		ft.MkdirMetadata = do.MkdirMetadata
	}
	if do, ok := f.(ChangeNotifier); ok {
		ft.ChangeNotify = do.ChangeNotify
	}
	if do, ok := f.(UnWrapper); ok {
		ft.UnWrap = do.UnWrap
	}
	if do, ok := f.(Wrapper); ok {
		ft.WrapFs = do.WrapFs
		ft.SetWrapper = do.SetWrapper
		ft.Overlay = true // if it implements Wrapper then it must be an Overlay
	}
	if do, ok := f.(DirCacheFlusher); ok {
		ft.DirCacheFlush = do.DirCacheFlush
	}
	if do, ok := f.(PublicLinker); ok {
		ft.PublicLink = do.PublicLink
	}
	if do, ok := f.(PutUncheckeder); ok {
		ft.PutUnchecked = do.PutUnchecked
	}
	if do, ok := f.(PutStreamer); ok {
		ft.PutStream = do.PutStream
	}
	if do, ok := f.(MergeDirser); ok {
		ft.MergeDirs = do.MergeDirs
	}
	if do, ok := f.(DirSetModTimer); ok {
		ft.DirSetModTime = do.DirSetModTime
	}
	if do, ok := f.(CleanUpper); ok {
		ft.CleanUp = do.CleanUp
	}
	if do, ok := f.(ListRer); ok {
		ft.ListR = do.ListR
	}
	if do, ok := f.(ListPer); ok {
		ft.ListP = do.ListP
	}
	if do, ok := f.(Abouter); ok {
		ft.About = do.About
	}
	if do, ok := f.(OpenWriterAter); ok {
		ft.OpenWriterAt = do.OpenWriterAt
	}
	if do, ok := f.(OpenChunkWriter); ok {
		ft.OpenChunkWriter = do.OpenChunkWriter
	}
	if do, ok := f.(UserInfoer); ok {
		ft.UserInfo = do.UserInfo
	}
	if do, ok := f.(Disconnecter); ok {
		ft.Disconnect = do.Disconnect
	}
	if do, ok := f.(Commander); ok {
		ft.Command = do.Command
	}
	if do, ok := f.(Shutdowner); ok {
		ft.Shutdown = do.Shutdown
	}
	// Honour any --disable flags from the config
	return ft.DisableList(GetConfig(ctx).DisableFeatures)
}
// Mask the Features with the Fs passed in
//
// Only optional features which are implemented in both the original
// Fs AND the one passed in will be advertised. Any features which
// aren't in both will be set to false/nil, except for UnWrap/Wrap which
// will be left untouched.
func (ft *Features) Mask(ctx context.Context, f Fs) *Features {
	mask := f.Features()
	// AND together the boolean feature flags
	ft.CaseInsensitive = ft.CaseInsensitive && mask.CaseInsensitive
	ft.DuplicateFiles = ft.DuplicateFiles && mask.DuplicateFiles
	ft.ReadMimeType = ft.ReadMimeType && mask.ReadMimeType
	ft.WriteMimeType = ft.WriteMimeType && mask.WriteMimeType
	ft.ReadMetadata = ft.ReadMetadata && mask.ReadMetadata
	ft.WriteMetadata = ft.WriteMetadata && mask.WriteMetadata
	ft.UserMetadata = ft.UserMetadata && mask.UserMetadata
	ft.ReadDirMetadata = ft.ReadDirMetadata && mask.ReadDirMetadata
	ft.WriteDirMetadata = ft.WriteDirMetadata && mask.WriteDirMetadata
	ft.WriteDirSetModTime = ft.WriteDirSetModTime && mask.WriteDirSetModTime
	ft.UserDirMetadata = ft.UserDirMetadata && mask.UserDirMetadata
	ft.DirModTimeUpdatesOnWrite = ft.DirModTimeUpdatesOnWrite && mask.DirModTimeUpdatesOnWrite
	ft.CanHaveEmptyDirectories = ft.CanHaveEmptyDirectories && mask.CanHaveEmptyDirectories
	ft.BucketBased = ft.BucketBased && mask.BucketBased
	ft.BucketBasedRootOK = ft.BucketBasedRootOK && mask.BucketBasedRootOK
	ft.SetTier = ft.SetTier && mask.SetTier
	ft.GetTier = ft.GetTier && mask.GetTier
	ft.ServerSideAcrossConfigs = ft.ServerSideAcrossConfigs && mask.ServerSideAcrossConfigs
	// ft.IsLocal = ft.IsLocal && mask.IsLocal Don't propagate IsLocal
	ft.SlowModTime = ft.SlowModTime && mask.SlowModTime
	ft.SlowHash = ft.SlowHash && mask.SlowHash
	ft.FilterAware = ft.FilterAware && mask.FilterAware
	ft.PartialUploads = ft.PartialUploads && mask.PartialUploads
	ft.NoMultiThreading = ft.NoMultiThreading && mask.NoMultiThreading
	// ft.Overlay = ft.Overlay && mask.Overlay don't propagate Overlay
	ft.ChunkWriterDoesntSeek = ft.ChunkWriterDoesntSeek && mask.ChunkWriterDoesntSeek
	ft.DoubleSlash = ft.DoubleSlash && mask.DoubleSlash
	// Nil out any function pointer the masking Fs doesn't implement
	if mask.Purge == nil {
		ft.Purge = nil
	}
	if mask.Copy == nil {
		ft.Copy = nil
	}
	if mask.Move == nil {
		ft.Move = nil
	}
	if mask.DirMove == nil {
		ft.DirMove = nil
	}
	if mask.MkdirMetadata == nil {
		ft.MkdirMetadata = nil
	}
	if mask.ChangeNotify == nil {
		ft.ChangeNotify = nil
	}
	// if mask.UnWrap == nil {
	// 	ft.UnWrap = nil
	// }
	// if mask.Wrapper == nil {
	// 	ft.Wrapper = nil
	// }
	if mask.DirCacheFlush == nil {
		ft.DirCacheFlush = nil
	}
	if mask.PublicLink == nil {
		ft.PublicLink = nil
	}
	if mask.PutUnchecked == nil {
		ft.PutUnchecked = nil
	}
	if mask.PutStream == nil {
		ft.PutStream = nil
	}
	if mask.MergeDirs == nil {
		ft.MergeDirs = nil
	}
	if mask.DirSetModTime == nil {
		ft.DirSetModTime = nil
	}
	if mask.CleanUp == nil {
		ft.CleanUp = nil
	}
	if mask.ListR == nil {
		ft.ListR = nil
	}
	if mask.ListP == nil {
		ft.ListP = nil
	}
	if mask.About == nil {
		ft.About = nil
	}
	if mask.OpenWriterAt == nil {
		ft.OpenWriterAt = nil
	}
	if mask.OpenChunkWriter == nil {
		ft.OpenChunkWriter = nil
	}
	if mask.UserInfo == nil {
		ft.UserInfo = nil
	}
	if mask.Disconnect == nil {
		ft.Disconnect = nil
	}
	// Command is always local so we don't mask it
	if mask.Shutdown == nil {
		ft.Shutdown = nil
	}
	// Honour any --disable flags from the config
	return ft.DisableList(GetConfig(ctx).DisableFeatures)
}
// Wrap makes a Copy of the features passed in, overriding the UnWrap/Wrap
// method only if available in f.
func (ft *Features) Wrap(f Fs) *Features {
	copied := *ft
	if unwrapper, ok := f.(UnWrapper); ok {
		copied.UnWrap = unwrapper.UnWrap
	}
	if wrapper, ok := f.(Wrapper); ok {
		copied.WrapFs = wrapper.WrapFs
		copied.SetWrapper = wrapper.SetWrapper
	}
	return &copied
}
// WrapsFs adds extra information between `f` which wraps `w`
func (ft *Features) WrapsFs(f Fs, w Fs) *Features {
	wrapped := w.Features()
	// Tell the inner Fs who is wrapping it, if it supports that
	if wrapped.WrapFs == nil || wrapped.SetWrapper == nil {
		return ft
	}
	wrapped.SetWrapper(f)
	return ft
}
// Purger is an optional interfaces for Fs
type Purger interface {
// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
Purge(ctx context.Context, dir string) error
}
// Copier is an optional interface for Fs
type Copier interface {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
Copy(ctx context.Context, src Object, remote string) (Object, error)
}
// Mover is an optional interface for Fs
type Mover interface {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
Move(ctx context.Context, src Object, remote string) (Object, error)
}
// DirMover is an optional interface for Fs
type DirMover interface {
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
DirMove(ctx context.Context, src Fs, srcRemote, dstRemote string) error
}
// MkdirMetadataer is an optional interface for Fs
type MkdirMetadataer interface {
// MkdirMetadata makes the directory passed in as dir.
//
// It shouldn't return an error if it already exists.
//
// If the metadata is not nil it is set.
//
// It returns the directory that was created.
MkdirMetadata(ctx context.Context, dir string, metadata Metadata) (Directory, error)
}
// ChangeNotifier is an optional interface for Fs
type ChangeNotifier interface {
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
ChangeNotify(context.Context, func(string, EntryType), <-chan time.Duration)
}
// EntryType can be associated with remote paths to identify their type
type EntryType int

// Constants
const (
	// EntryDirectory should be used to classify remote paths in directories
	EntryDirectory EntryType = iota // 0
	// EntryObject should be used to classify remote paths in objects
	EntryObject // 1
)

// UnWrapper is an optional interface for Fs
type UnWrapper interface {
	// UnWrap returns the Fs that this Fs is wrapping
	UnWrap() Fs
}

// Wrapper is an optional interface for Fs
type Wrapper interface {
	// WrapFs returns the Fs that is wrapping this Fs
	WrapFs() Fs
	// SetWrapper sets the Fs that is wrapping this Fs
	SetWrapper(f Fs)
}

// DirCacheFlusher is an optional interface for Fs
type DirCacheFlusher interface {
	// DirCacheFlush resets the directory cache - used in testing
	// as an optional interface
	DirCacheFlush()
}

// PutUncheckeder is an optional interface for Fs
type PutUncheckeder interface {
	// PutUnchecked puts in to the remote path with the modTime given of the given size
	//
	// May create the object even if it returns an error - if so
	// will return the object and the error, otherwise will return
	// nil and the error
	//
	// May create duplicates or return errors if src already
	// exists.
	PutUnchecked(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
}

// PutStreamer is an optional interface for Fs
type PutStreamer interface {
	// PutStream uploads to the remote path with the modTime given of indeterminate size
	//
	// May create the object even if it returns an error - if so
	// will return the object and the error, otherwise will return
	// nil and the error
	PutStream(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
}

// PublicLinker is an optional interface for Fs
type PublicLinker interface {
	// PublicLink generates a public link to the remote path (usually readable by anyone)
	PublicLink(ctx context.Context, remote string, expire Duration, unlink bool) (string, error)
}

// MergeDirser is an optional interface for Fs
type MergeDirser interface {
	// MergeDirs merges the contents of all the directories passed
	// in into the first one and rmdirs the other directories.
	MergeDirs(ctx context.Context, dirs []Directory) error
}

// DirSetModTimer is an optional interface for Fs
type DirSetModTimer interface {
	// DirSetModTime sets the metadata on the directory to set the modification date
	DirSetModTime(ctx context.Context, dir string, modTime time.Time) error
}

// CleanUpper is an optional interface for Fs
type CleanUpper interface {
	// CleanUp the trash in the Fs
	//
	// Implement this if you have a way of emptying the trash or
	// otherwise cleaning up old versions of files.
	CleanUp(ctx context.Context) error
}
// ListRer is an optional interface for Fs
type ListRer interface {
	// ListR lists the objects and directories of the Fs starting
	// from dir recursively into out.
	//
	// dir should be "" to start from the root, and should not
	// have trailing slashes.
	//
	// This should return ErrDirNotFound if the directory isn't
	// found.
	//
	// It should call callback for each tranche of entries read.
	// These need not be returned in any particular order. If
	// callback returns an error then the listing will stop
	// immediately.
	//
	// Don't implement this unless you have a more efficient way
	// of listing recursively than doing a directory traversal.
	ListR(ctx context.Context, dir string, callback ListRCallback) error
}

// ListPer is an optional interface for Fs
type ListPer interface {
	// ListP lists the objects and directories of the Fs starting
	// from dir non recursively into out.
	//
	// dir should be "" to start from the root, and should not
	// have trailing slashes.
	//
	// This should return ErrDirNotFound if the directory isn't
	// found.
	//
	// It should call callback for each tranche of entries read.
	// These need not be returned in any particular order. If
	// callback returns an error then the listing will stop
	// immediately.
	ListP(ctx context.Context, dir string, callback ListRCallback) error
}

// RangeSeeker is the interface that wraps the RangeSeek method.
//
// Some of the returns from Object.Open() may optionally implement
// this method for efficiency purposes.
type RangeSeeker interface {
	// RangeSeek behaves like a call to Seek(offset int64, whence
	// int) with the output wrapped in an io.LimitedReader
	// limiting the total amount read to length.
	//
	// RangeSeek with a length of < 0 is equivalent to a regular Seek.
	RangeSeek(ctx context.Context, offset int64, whence int, length int64) (int64, error)
}

// Abouter is an optional interface for Fs
type Abouter interface {
	// About gets quota information from the Fs
	About(ctx context.Context) (*Usage, error)
}

// OpenWriterAter is an optional interface for Fs
type OpenWriterAter interface {
	// OpenWriterAt opens with a handle for random access writes
	//
	// Pass in the remote desired and the size if known.
	//
	// It truncates any existing object
	OpenWriterAt(ctx context.Context, remote string, size int64) (WriterAtCloser, error)
}

// OpenWriterAtFn describes the OpenWriterAt function pointer
type OpenWriterAtFn func(ctx context.Context, remote string, size int64) (WriterAtCloser, error)
// ChunkWriterInfo describes how a backend would like ChunkWriter called
type ChunkWriterInfo struct {
	ChunkSize         int64 // preferred chunk size
	Concurrency       int   // how many chunks to write at once
	LeavePartsOnError bool  // if set don't delete parts uploaded so far on error
}

// OpenChunkWriter is an optional interface for Fs to implement chunked writing
type OpenChunkWriter interface {
	// OpenChunkWriter returns the chunk size and a ChunkWriter
	//
	// Pass in the remote and the src object
	// You can also use options to hint at the desired chunk size
	OpenChunkWriter(ctx context.Context, remote string, src ObjectInfo, options ...OpenOption) (info ChunkWriterInfo, writer ChunkWriter, err error)
}

// OpenChunkWriterFn describes the OpenChunkWriter function pointer
type OpenChunkWriterFn func(ctx context.Context, remote string, src ObjectInfo, options ...OpenOption) (info ChunkWriterInfo, writer ChunkWriter, err error)

// ChunkWriter is returned by OpenChunkWriter to implement chunked writing
type ChunkWriter interface {
	// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
	WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error)
	// Close completes the chunked writer, finalising the file.
	Close(ctx context.Context) error
	// Abort the chunk write
	//
	// You can and should call Abort without calling Close.
	Abort(ctx context.Context) error
}

// UserInfoer is an optional interface for Fs
type UserInfoer interface {
	// UserInfo returns info about the connected user
	UserInfo(ctx context.Context) (map[string]string, error)
}

// Disconnecter is an optional interface for Fs
type Disconnecter interface {
	// Disconnect the current user
	Disconnect(ctx context.Context) error
}

// CommandHelp describes a single backend Command
//
// These are automatically inserted in the docs
type CommandHelp struct {
	Name  string            // Name of the command, e.g. "link"
	Short string            // Single line description
	Long  string            // Long multi-line description
	Opts  map[string]string // maps option name to a single line help
}

// Commander is an interface to wrap the Command function
type Commander interface {
	// Command the backend to run a named command
	//
	// The command run is name
	// args may be used to read arguments from
	// opts may be used to read optional arguments from
	//
	// The result should be capable of being JSON encoded
	// If it is a string or a []string it will be shown to the user
	// otherwise it will be JSON encoded and shown to the user like that
	Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error)
}

// Shutdowner is an interface to wrap the Shutdown function
type Shutdowner interface {
	// Shutdown the backend, closing any background tasks and any
	// cached connections.
	Shutdown(ctx context.Context) error
}

// ObjectsChan is a channel of Objects
type ObjectsChan chan Object

// Objects is a slice of Object~s
type Objects []Object

// ObjectPair is a pair of Objects used to describe a potential copy
// operation.
type ObjectPair struct {
	Src, Dst Object
}
// UnWrapFs unwraps f as much as possible and returns the base Fs
func UnWrapFs(f Fs) Fs {
	current := f
	for {
		unwrapFn := current.Features().UnWrap
		if unwrapFn == nil {
			return current // not a wrapped Fs, use current
		}
		inner := unwrapFn()
		if inner == nil {
			return current // no base Fs found, use current
		}
		current = inner
	}
}
// UnWrapObject unwraps o as much as possible and returns the base object
func UnWrapObject(o Object) Object {
	current := o
	for {
		wrapper, isWrapper := current.(ObjectUnWrapper)
		if !isWrapper {
			return current // not a wrapped object, use current
		}
		inner := wrapper.UnWrap()
		if inner == nil {
			return current // no base object found, use current
		}
		current = inner
	}
}
// UnWrapObjectInfo returns the underlying Object unwrapped as much as
// possible or nil.
func UnWrapObjectInfo(oi ObjectInfo) Object {
	if o, ok := oi.(Object); ok {
		return UnWrapObject(o)
	}
	return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/cutoffmode_test.go | fs/cutoffmode_test.go | package fs
import (
"encoding/json"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check it satisfies the interfaces
//
// CutoffMode must work as a pflag.Value both via pointer (Flagger)
// and as a plain value (FlaggerNP).
var (
	_ Flagger   = (*CutoffMode)(nil)
	_ FlaggerNP = CutoffMode(0)
)
// TestCutoffModeString checks the String method for known modes and
// the fallback formatting for an out-of-range value.
func TestCutoffModeString(t *testing.T) {
	for _, test := range []struct {
		in   CutoffMode
		want string
	}{
		{CutoffModeHard, "HARD"},
		{CutoffModeSoft, "SOFT"},
		{99, "Unknown(99)"}, // unknown values are reported, not panicked on
	} {
		cm := test.in
		got := cm.String()
		assert.Equal(t, test.want, got, test.in)
	}
}
// TestCutoffModeSet checks that Set parses mode names
// case-insensitively and rejects unknown names with an error.
func TestCutoffModeSet(t *testing.T) {
	for _, test := range []struct {
		in   string
		want CutoffMode
		err  bool
	}{
		{"hard", CutoffModeHard, false},
		{"SOFT", CutoffModeSoft, false},
		{"Cautious", CutoffModeCautious, false},
		{"Potato", 0, true}, // invalid name should error and leave the zero value
	} {
		cm := CutoffMode(0)
		err := cm.Set(test.in)
		if test.err {
			require.Error(t, err, test.in)
		} else {
			require.NoError(t, err, test.in)
		}
		assert.Equal(t, test.want, cm, test.in)
	}
}
// TestCutoffModeUnmarshalJSON checks JSON decoding of CutoffMode from
// both string names and integer values, including invalid inputs.
func TestCutoffModeUnmarshalJSON(t *testing.T) {
	for _, test := range []struct {
		in   string
		want CutoffMode
		err  bool
	}{
		{`"hard"`, CutoffModeHard, false},
		{`"SOFT"`, CutoffModeSoft, false},
		{`"Cautious"`, CutoffModeCautious, false},
		{`"Potato"`, 0, true},
		{strconv.Itoa(int(CutoffModeHard)), CutoffModeHard, false},
		{strconv.Itoa(int(CutoffModeSoft)), CutoffModeSoft, false},
		{`99`, 0, true},  // out-of-range numeric value
		{`-99`, 0, true}, // negative numeric value
	} {
		var cm CutoffMode
		err := json.Unmarshal([]byte(test.in), &cm)
		if test.err {
			require.Error(t, err, test.in)
		} else {
			require.NoError(t, err, test.in)
		}
		assert.Equal(t, test.want, cm, test.in)
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/types.go | fs/types.go | // Filesystem related types and interfaces
// Note that optional interfaces are found in features.go
package fs
import (
"context"
"encoding/json"
"io"
"math"
"time"
"github.com/rclone/rclone/fs/hash"
)
// Fs is the interface a cloud storage system must provide
type Fs interface {
	Info

	// List the objects and directories in dir into entries. The
	// entries can be returned in any order but should be for a
	// complete directory.
	//
	// dir should be "" to list the root, and should not have
	// trailing slashes.
	//
	// This should return ErrDirNotFound if the directory isn't
	// found.
	List(ctx context.Context, dir string) (entries DirEntries, err error)

	// NewObject finds the Object at remote. If it can't be found
	// it returns the error ErrorObjectNotFound.
	//
	// If remote points to a directory then it should return
	// ErrorIsDir if possible without doing any extra work,
	// otherwise ErrorObjectNotFound.
	NewObject(ctx context.Context, remote string) (Object, error)

	// Put in to the remote path with the modTime given of the given size
	//
	// When called from outside an Fs by rclone, src.Size() will always be >= 0.
	// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
	// return an error or upload it properly (rather than e.g. calling panic).
	//
	// May create the object even if it returns an error - if so
	// will return the object and the error, otherwise will return
	// nil and the error
	Put(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)

	// Mkdir makes the directory (container, bucket)
	//
	// Shouldn't return an error if it already exists
	Mkdir(ctx context.Context, dir string) error

	// Rmdir removes the directory (container, bucket) if empty
	//
	// Return an error if it doesn't exist or isn't empty
	Rmdir(ctx context.Context, dir string) error
}

// Info provides a read only interface to information about a filesystem.
type Info interface {
	// Name of the remote (as passed into NewFs)
	Name() string

	// Root of the remote (as passed into NewFs)
	Root() string

	// String returns a description of the FS
	String() string

	// Precision of the ModTimes in this Fs
	Precision() time.Duration

	// Hashes returns the supported hash types of the filesystem
	Hashes() hash.Set

	// Features returns the optional features of this Fs
	Features() *Features
}
// Object is a filesystem like object provided by an Fs
type Object interface {
	ObjectInfo

	// SetModTime sets the metadata on the object to set the modification date
	SetModTime(ctx context.Context, t time.Time) error

	// Open opens the file for read. Call Close() on the returned io.ReadCloser
	Open(ctx context.Context, options ...OpenOption) (io.ReadCloser, error)

	// Update in to the object with the modTime given of the given size
	//
	// When called from outside an Fs by rclone, src.Size() will always be >= 0.
	// But for unknown-sized objects (indicated by src.Size() == -1), Update should either
	// return an error or update the object properly (rather than e.g. calling panic).
	Update(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) error

	// Remove removes this object
	Remove(ctx context.Context) error
}

// ObjectInfo provides read only information about an object.
type ObjectInfo interface {
	DirEntry

	// Hash returns the selected checksum of the file
	// If no checksum is available it returns ""
	Hash(ctx context.Context, ty hash.Type) (string, error)

	// Storable says whether this object can be stored
	Storable() bool
}

// DirEntry provides read only information about the common subset of
// a Dir or Object. These are returned from directory listings - type
// assert them into the correct type.
type DirEntry interface {
	// Fs returns read only access to the Fs that this object is part of
	Fs() Info

	// String returns a description of the Object
	String() string

	// Remote returns the remote path
	Remote() string

	// ModTime returns the modification date of the file
	// It should return a best guess if one isn't available
	ModTime(context.Context) time.Time

	// Size returns the size of the file
	Size() int64
}
// Directory is a filesystem like directory provided by an Fs
type Directory interface {
	DirEntry

	// Items returns the count of items in this directory or this
	// directory and subdirectories if known, -1 for unknown
	Items() int64

	// ID returns the internal ID of this directory if known, or
	// "" otherwise
	ID() string
}

// FullDirectory contains all the optional interfaces for Directory
//
// Use for checking making wrapping Directories implement everything
type FullDirectory interface {
	Directory
	Metadataer
	SetMetadataer
	SetModTimer
}

// MimeTyper is an optional interface for Object
type MimeTyper interface {
	// MimeType returns the content type of the Object if
	// known, or "" if not
	MimeType(ctx context.Context) string
}

// IDer is an optional interface for Object
type IDer interface {
	// ID returns the ID of the Object if known, or "" if not
	ID() string
}

// ParentIDer is an optional interface for Object
type ParentIDer interface {
	// ParentID returns the ID of the parent directory if known, or "" if not
	ParentID() string
}

// ObjectUnWrapper is an optional interface for Object
type ObjectUnWrapper interface {
	// UnWrap returns the Object that this Object is wrapping or
	// nil if it isn't wrapping anything
	UnWrap() Object
}

// SetTierer is an optional interface for Object
type SetTierer interface {
	// SetTier performs changing storage tier of the Object if
	// multiple storage classes supported
	SetTier(tier string) error
}

// GetTierer is an optional interface for Object
type GetTierer interface {
	// GetTier returns storage tier or class of the Object
	GetTier() string
}

// Metadataer is an optional interface for DirEntry
type Metadataer interface {
	// Metadata returns metadata for a DirEntry
	//
	// It should return nil if there is no Metadata
	Metadata(ctx context.Context) (Metadata, error)
}

// SetMetadataer is an optional interface for DirEntry
type SetMetadataer interface {
	// SetMetadata sets metadata for a DirEntry
	//
	// It should return fs.ErrorNotImplemented if it can't set metadata
	SetMetadata(ctx context.Context, metadata Metadata) error
}

// SetModTimer is an optional interface for Directory.
//
// Object implements this as part of its required set of interfaces.
type SetModTimer interface {
	// SetModTime sets the metadata on the DirEntry to set the modification date
	//
	// If there is any other metadata it does not overwrite it.
	SetModTime(ctx context.Context, t time.Time) error
}
// FullObjectInfo contains all the read-only optional interfaces
//
// Use for checking making wrapping ObjectInfos implement everything
type FullObjectInfo interface {
	ObjectInfo
	MimeTyper
	IDer
	ObjectUnWrapper
	GetTierer
	Metadataer
}

// FullObject contains all the optional interfaces for Object
//
// Use for checking making wrapping Objects implement everything
type FullObject interface {
	Object
	MimeTyper
	IDer
	ObjectUnWrapper
	GetTierer
	SetTierer
	Metadataer
	SetMetadataer
}
// ObjectOptionalInterfaces returns the names of supported and
// unsupported optional interfaces for an Object
func ObjectOptionalInterfaces(o Object) (supported, unsupported []string) {
	// record files the interface name into the right result slice
	record := func(name string, implemented bool) {
		if implemented {
			supported = append(supported, name)
		} else {
			unsupported = append(unsupported, name)
		}
	}
	_, hasMimeType := o.(MimeTyper)
	record("MimeType", hasMimeType)
	_, hasID := o.(IDer)
	record("ID", hasID)
	_, hasUnWrap := o.(ObjectUnWrapper)
	record("UnWrap", hasUnWrap)
	_, hasSetTier := o.(SetTierer)
	record("SetTier", hasSetTier)
	_, hasGetTier := o.(GetTierer)
	record("GetTier", hasGetTier)
	_, hasMetadata := o.(Metadataer)
	record("Metadata", hasMetadata)
	_, hasSetMetadata := o.(SetMetadataer)
	record("SetMetadata", hasSetMetadata)
	return supported, unsupported
}
// DirectoryOptionalInterfaces returns the names of supported and
// unsupported optional interfaces for a Directory
func DirectoryOptionalInterfaces(d Directory) (supported, unsupported []string) {
	// record files the interface name into the right result slice
	record := func(name string, implemented bool) {
		if implemented {
			supported = append(supported, name)
		} else {
			unsupported = append(unsupported, name)
		}
	}
	_, hasMetadata := d.(Metadataer)
	record("Metadata", hasMetadata)
	_, hasSetMetadata := d.(SetMetadataer)
	record("SetMetadata", hasSetMetadata)
	_, hasSetModTime := d.(SetModTimer)
	record("SetModTime", hasSetModTime)
	return supported, unsupported
}
// ListRCallback defines a callback function for ListR to use
//
// It is called for each tranche of entries read from the listing and
// if it returns an error, the listing stops.
type ListRCallback func(entries DirEntries) error

// ListRFn defines the call used to recursively list a directory
// with ListR or page through a directory with ListP
type ListRFn func(ctx context.Context, dir string, callback ListRCallback) error

// Flagger describes the interface rclone config types flags must satisfy
type Flagger interface {
	// These are from pflag.Value which we don't want to pull in here
	String() string
	Set(string) error
	Type() string
	json.Unmarshaler
}

// FlaggerNP describes the interface rclone config types flags must
// satisfy as non-pointers
//
// These are from pflag.Value and need to be tested against
// non-pointer value due to the way the backend flags are inserted
// into the flags.
type FlaggerNP interface {
	String() string
	Type() string
}
// NewUsageValue makes a valid value
func NewUsageValue[T interface {
int64 | uint64 | float64
}](value T) *int64 {
p := new(int64)
if value > T(int64(math.MaxInt64)) {
*p = math.MaxInt64
} else {
*p = int64(value)
}
return p
}
// Usage is returned by the About call
//
// If a value is nil then it isn't supported by that backend
type Usage struct {
	Total   *int64 `json:"total,omitempty"`   // quota of bytes that can be used
	Used    *int64 `json:"used,omitempty"`    // bytes in use
	Trashed *int64 `json:"trashed,omitempty"` // bytes in trash
	Other   *int64 `json:"other,omitempty"`   // other usage e.g. gmail in drive
	Free    *int64 `json:"free,omitempty"`    // bytes which can be uploaded before reaching the quota
	Objects *int64 `json:"objects,omitempty"` // objects in the storage system
}

// WriterAtCloser wraps io.WriterAt and io.Closer
type WriterAtCloser interface {
	io.WriterAt
	io.Closer
}
// unknownFs is a stub Info implementation used when no real Fs is available.
type unknownFs struct{}

// Name of the remote (as passed into NewFs)
func (unknownFs) Name() string { return "unknown" }

// Root of the remote (as passed into NewFs)
func (unknownFs) Root() string { return "" }

// String returns a description of the FS
func (unknownFs) String() string { return "unknown" }

// Precision of the ModTimes in this Fs
func (unknownFs) Precision() time.Duration { return ModTimeNotSupported }

// Hashes returns the supported hash types of the filesystem (none)
func (unknownFs) Hashes() hash.Set { return hash.Set(hash.None) }

// Features returns the optional features of this Fs (none)
func (unknownFs) Features() *Features { return &Features{} }

// Unknown holds an Info for an unknown Fs
//
// This is used when we need an Fs but don't have one.
var Unknown Info = unknownFs{}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/mount_helper.go | fs/mount_helper.go | package fs
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
)
// init rewrites os.Args when rclone is invoked as a mount helper so
// that the rest of the program sees normal rclone flags.
func init() {
	// This block is run super-early, before configuration harness kick in
	if IsMountHelper() {
		if args, err := convertMountHelperArgs(os.Args); err == nil {
			os.Args = args
		} else {
			Fatalf(nil, "Failed to parse command line: %v", err)
		}
	}
}
// PassDaemonArgsAsEnviron tells how CLI arguments are passed to the daemon
// When false, arguments are passed as is, visible in the `ps` output.
// When true, arguments are converted into environment variables (more secure).
var PassDaemonArgsAsEnviron bool

// Comma-separated list of mount options to ignore.
// Leading and trailing commas are required so that every entry can be
// matched with a ","+opt+"," substring search.
const helperIgnoredOpts = ",rw,_netdev,nofail,user,dev,nodev,suid,nosuid,exec,noexec,auto,noauto,"

// Valid option name characters
const helperValidOptChars = "-_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

// Parser errors
var (
	errHelperBadOption    = errors.New("option names may only contain `0-9`, `A-Z`, `a-z`, `-` and `_`")
	errHelperOptionName   = errors.New("option name can't start with `-` or `_`")
	errHelperEmptyOption  = errors.New("option name can't be empty")
	errHelperQuotedValue  = errors.New("unterminated quoted value")
	errHelperAfterQuote   = errors.New("expecting `,` or another quote after a quote")
	errHelperSyntax       = errors.New("syntax error in option string")
	errHelperEmptyCommand = errors.New("command name can't be empty")
	errHelperEnvSyntax    = errors.New("environment variable must have syntax env.NAME=[VALUE]")
)
// IsMountHelper returns true if rclone was invoked as mount helper:
// as /sbin/mount.rclone (by /bin/mount)
// or /usr/bin/rclonefs (by fusermount or directly)
func IsMountHelper() bool {
	if runtime.GOOS == "windows" {
		return false
	}
	switch filepath.Base(os.Args[0]) {
	case "mount.rclone", "rclonefs":
		return true
	}
	return false
}
// convertMountHelperArgs converts "-o" styled mount helper arguments
// into usual rclone flags
//
// Returns a new argument vector of the form
// [origArgs[0], command, flags...] or an error if the options could
// not be parsed.
func convertMountHelperArgs(origArgs []string) ([]string, error) {
	if IsDaemon() {
		// The arguments have already been converted by the parent
		return origArgs, nil
	}
	args := []string{}
	command := "mount" // default command, may be overridden by "-o command=X"
	parseOpts := false // true when the previous argument was -o/--opt
	gotDaemon := false
	gotVerbose := false
	vCount := 0 // accumulated verbosity from -v style flags and options
	for _, arg := range origArgs[1:] {
		if !parseOpts {
			switch arg {
			case "-o", "--opt":
				parseOpts = true
			case "-v", "-vv", "-vvv", "-vvvv":
				vCount += len(arg) - 1
			case "-h", "--help":
				args = append(args, "--help")
			default:
				// Positional arguments (remote, mountpoint) pass through;
				// any other dash-flag is rejected.
				if strings.HasPrefix(arg, "-") {
					return nil, fmt.Errorf("flag %q is not supported in mount mode", arg)
				}
				args = append(args, arg)
			}
			continue
		}
		opts, err := parseHelperOptionString(arg)
		if err != nil {
			return nil, err
		}
		parseOpts = false
		for _, opt := range opts {
			// Drop standard mount options rclone doesn't care about
			// and systemd-specific options.
			if strings.Contains(helperIgnoredOpts, ","+opt+",") || strings.HasPrefix(opt, "x-systemd") {
				continue
			}
			param, value, _ := strings.Cut(opt, "=")
			// Set environment variables
			if strings.HasPrefix(param, "env.") {
				if param = param[4:]; param == "" {
					return nil, errHelperEnvSyntax
				}
				_ = os.Setenv(param, value)
				continue
			}
			switch param {
			// Change command to run
			case "command":
				if value == "" {
					return nil, errHelperEmptyCommand
				}
				command = value
				continue
			// Flag StartDaemon to pass arguments as environment
			case "args2env":
				PassDaemonArgsAsEnviron = true
				continue
			// Handle verbosity options
			case "v", "vv", "vvv", "vvvv":
				vCount += len(param)
				continue
			case "verbose":
				gotVerbose = true
			// Don't add --daemon if it was explicitly included
			case "daemon":
				gotDaemon = true
			// Alias for the standard mount option "ro"
			case "ro":
				param = "read-only"
			}
			// Translate option_name into --option-name[=value]
			arg = "--" + strings.ToLower(strings.ReplaceAll(param, "_", "-"))
			if value != "" {
				arg += "=" + value
			}
			args = append(args, arg)
		}
	}
	if parseOpts {
		return nil, fmt.Errorf("dangling -o without argument")
	}
	if vCount > 0 && !gotVerbose {
		args = append(args, fmt.Sprintf("--verbose=%d", vCount))
	}
	if strings.Contains(command, "mount") && !gotDaemon {
		// Default to daemonized mount
		args = append(args, "--daemon")
	}
	if len(args) > 0 && args[0] == command {
		// Remove artefact of repeated conversion
		args = args[1:]
	}
	prepend := []string{origArgs[0], command}
	return append(prepend, args...), nil
}
// parseHelperOptionString deconstructs the -o value into slice of options
// in a way similar to connection strings.
// Example:
//
//	param1=value,param2="qvalue",param3='item1,item2',param4="a ""b"" 'c'"
//
// An error may be returned if the remote name has invalid characters
// or the parameters are invalid or the path is empty.
//
// The algorithm was adapted from fspath.Parse with some modifications:
//   - allow `-` in option names
//   - handle special options `x-systemd.X` and `env.X`
//   - drop support for :backend: and /path
func parseHelperOptionString(optString string) (opts []string, err error) {
	if optString = strings.TrimSpace(optString); optString == "" {
		return nil, nil
	}
	// States for parser
	const (
		stateParam = uint8(iota)
		stateValue
		stateQuotedValue
		stateAfterQuote
		stateDone
	)
	var (
		state   = stateParam // current state of parser
		i       int          // position in path
		prev    int          // previous position in path
		c       rune         // current rune under consideration
		quote   rune         // kind of quote to end this quoted string
		param   string       // current parameter value
		doubled bool         // set if had doubled quotes
	)
	// A trailing comma is appended so the final option is flushed by
	// the same code path as the others.
	for i, c = range optString + "," {
		switch state {
		// Parses param= and param2=
		case stateParam:
			switch c {
			case ',', '=':
				param = optString[prev:i]
				if len(param) == 0 {
					return nil, errHelperEmptyOption
				}
				if param[0] == '-' {
					return nil, errHelperOptionName
				}
				prev = i + 1
				if c == '=' {
					state = stateValue
					break
				}
				// Bare option without a value, e.g. "ro"
				opts = append(opts, param)
			case '.':
				// Dots are only allowed in env.X and x-systemd.X options
				if pref := optString[prev:i]; pref != "env" && pref != "x-systemd" {
					return nil, errHelperBadOption
				}
			default:
				if !strings.ContainsRune(helperValidOptChars, c) {
					return nil, errHelperBadOption
				}
			}
		case stateValue:
			switch c {
			case '\'', '"':
				// A quote directly after '=' starts a quoted value
				if i == prev {
					quote = c
					prev = i + 1
					doubled = false
					state = stateQuotedValue
				}
			case ',':
				value := optString[prev:i]
				prev = i + 1
				opts = append(opts, param+"="+value)
				state = stateParam
			}
		case stateQuotedValue:
			if c == quote {
				state = stateAfterQuote
			}
		case stateAfterQuote:
			switch c {
			case ',':
				value := optString[prev : i-1]
				// replace any doubled quotes if there were any
				if doubled {
					value = strings.ReplaceAll(value, string(quote)+string(quote), string(quote))
				}
				prev = i + 1
				opts = append(opts, param+"="+value)
				state = stateParam
			case quote:
				// Here is a doubled quote to indicate a literal quote
				state = stateQuotedValue
				doubled = true
			default:
				return nil, errHelperAfterQuote
			}
		}
	}
	// Depending on which state we were in when we fell off the
	// end of the state machine we can return a sensible error.
	if state == stateParam && prev > len(optString) {
		state = stateDone
	}
	switch state {
	case stateQuotedValue:
		return nil, errHelperQuotedValue
	case stateAfterQuote:
		return nil, errHelperAfterQuote
	case stateDone:
		break
	default:
		return nil, errHelperSyntax
	}
	return opts, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/open_options_test.go | fs/open_options_test.go | package fs
import (
"fmt"
"net/http"
"testing"
"github.com/rclone/rclone/fs/hash"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestParseRangeOption checks parsing of HTTP Range header values into
// RangeOption, including malformed inputs and open-ended ranges.
func TestParseRangeOption(t *testing.T) {
	for _, test := range []struct {
		in   string
		want RangeOption
		err  string // expected error substring, "" for success
	}{
		{in: "", err: "doesn't start with bytes="},
		{in: "bytes=1-2,3-4", err: "contains multiple ranges"},
		{in: "bytes=100", err: "contains no '-'"},
		{in: "bytes=x-8", err: "bad start"},
		{in: "bytes=8-x", err: "bad end"},
		{in: "bytes=1-2", want: RangeOption{Start: 1, End: 2}},
		{in: "bytes=-123456789123456789", want: RangeOption{Start: -1, End: 123456789123456789}},
		{in: "bytes=123456789123456789-", want: RangeOption{Start: 123456789123456789, End: -1}},
		{in: "bytes= 1 - 2 ", want: RangeOption{Start: 1, End: 2}},
		{in: "bytes=-", want: RangeOption{Start: -1, End: -1}},
		{in: "bytes= - ", want: RangeOption{Start: -1, End: -1}},
	} {
		got, err := ParseRangeOption(test.in)
		what := fmt.Sprintf("parsing %q", test.in)
		if test.err != "" {
			require.Contains(t, err.Error(), test.err)
			require.Nil(t, got, what)
		} else {
			require.NoError(t, err, what)
			assert.Equal(t, test.want, *got, what)
		}
	}
}
// TestRangeOptionDecode checks conversion of a RangeOption into an
// (offset, limit) pair for a given object size, where -1 means "open".
func TestRangeOptionDecode(t *testing.T) {
	for _, test := range []struct {
		in         RangeOption
		size       int64
		wantOffset int64
		wantLimit  int64
	}{
		{in: RangeOption{Start: 1, End: 10}, size: 100, wantOffset: 1, wantLimit: 10},
		{in: RangeOption{Start: 10, End: 10}, size: 100, wantOffset: 10, wantLimit: 1},
		{in: RangeOption{Start: 10, End: 9}, size: 100, wantOffset: 10, wantLimit: 0},
		{in: RangeOption{Start: 1, End: -1}, size: 100, wantOffset: 1, wantLimit: -1},
		{in: RangeOption{Start: -1, End: 90}, size: 100, wantOffset: 10, wantLimit: -1},
		{in: RangeOption{Start: -1, End: -1}, size: 100, wantOffset: 0, wantLimit: -1},
	} {
		gotOffset, gotLimit := test.in.Decode(test.size)
		what := fmt.Sprintf("%+v size=%d", test.in, test.size)
		assert.Equal(t, test.wantOffset, gotOffset, "offset "+what)
		assert.Equal(t, test.wantLimit, gotLimit, "limit "+what)
	}
}
// TestRangeOption checks the String, Header and Mandatory methods of
// RangeOption for all combinations of open/closed start and end.
func TestRangeOption(t *testing.T) {
	var _ OpenOption = &RangeOption{Start: 1, End: 10} // check interface
	for _, test := range []struct {
		opt        RangeOption
		wantString string
		wantValue  string
	}{
		{RangeOption{Start: 1, End: 10}, "RangeOption(1,10)", "bytes=1-10"},
		{RangeOption{Start: -1, End: 10}, "RangeOption(-1,10)", "bytes=-10"},
		{RangeOption{Start: 1, End: -1}, "RangeOption(1,-1)", "bytes=1-"},
		{RangeOption{Start: -1, End: -1}, "RangeOption(-1,-1)", "bytes=-"},
	} {
		opt := test.opt
		assert.Equal(t, test.wantString, opt.String())
		key, value := opt.Header()
		assert.Equal(t, "Range", key)
		assert.Equal(t, test.wantValue, value)
		assert.Equal(t, true, opt.Mandatory())
	}
}
// TestSeekOption checks that SeekOption renders as an open-ended Range header.
func TestSeekOption(t *testing.T) {
	opt := &SeekOption{Offset: 1}
	var _ OpenOption = opt // check interface
	assert.Equal(t, "SeekOption(1)", opt.String())
	key, value := opt.Header()
	assert.Equal(t, "Range", key)
	assert.Equal(t, "bytes=1-", value)
	assert.Equal(t, true, opt.Mandatory())
}

// TestHTTPOption checks that HTTPOption passes its key/value through
// as a non-mandatory header.
func TestHTTPOption(t *testing.T) {
	opt := &HTTPOption{Key: "k", Value: "v"}
	var _ OpenOption = opt // check interface
	assert.Equal(t, `HTTPOption("k","v")`, opt.String())
	key, value := opt.Header()
	assert.Equal(t, "k", key)
	assert.Equal(t, "v", value)
	assert.Equal(t, false, opt.Mandatory())
}

// TestHashesOption checks that HashesOption produces no header and is
// not mandatory.
func TestHashesOption(t *testing.T) {
	opt := &HashesOption{hash.Set(hash.MD5 | hash.SHA1)}
	var _ OpenOption = opt // check interface
	assert.Equal(t, `HashesOption([md5, sha1])`, opt.String())
	key, value := opt.Header()
	assert.Equal(t, "", key)
	assert.Equal(t, "", value)
	assert.Equal(t, false, opt.Mandatory())
}

// TestNullOption checks that NullOption produces no header and is not
// mandatory.
func TestNullOption(t *testing.T) {
	opt := NullOption{}
	var _ OpenOption = opt // check interface
	assert.Equal(t, "NullOption()", opt.String())
	key, value := opt.Header()
	assert.Equal(t, "", key)
	assert.Equal(t, "", value)
	assert.Equal(t, false, opt.Mandatory())
}

// TestMetadataOption checks that MetadataOption produces no header and
// is not mandatory.
func TestMetadataOption(t *testing.T) {
	opt := MetadataOption{"onion": "ice cream"}
	var _ OpenOption = opt // check interface
	assert.Equal(t, "MetadataOption(map[onion:ice cream])", opt.String())
	key, value := opt.Header()
	assert.Equal(t, "", key)
	assert.Equal(t, "", value)
	assert.Equal(t, false, opt.Mandatory())
}
// TestFixRangeOptions checks that FixRangeOption rewrites RangeOption
// and SeekOption entries in an option slice once the file size is
// known, in-place.
//
// Each case now runs as a t.Run subtest so failures are reported per
// case and cases can be run individually with -run.
func TestFixRangeOptions(t *testing.T) {
	for _, test := range []struct {
		name string
		in   []OpenOption
		size int64
		want []OpenOption
	}{
		{
			name: "Nil options",
			in:   nil,
			want: nil,
		},
		{
			name: "Empty options",
			in:   []OpenOption{},
			want: []OpenOption{},
		},
		{
			name: "Unknown size -1",
			in: []OpenOption{
				&RangeOption{Start: 1, End: -1},
			},
			want: []OpenOption{
				&RangeOption{Start: 1, End: -1},
			},
			size: -1,
		},
		{
			name: "Fetch a range with size=0",
			in: []OpenOption{
				&HTTPOption{Key: "a", Value: "1"},
				&RangeOption{Start: 1, End: 10},
				&HTTPOption{Key: "b", Value: "2"},
			},
			want: []OpenOption{
				&HTTPOption{Key: "a", Value: "1"},
				NullOption{},
				&HTTPOption{Key: "b", Value: "2"},
			},
			size: 0,
		},
		{
			name: "Fetch a range",
			in: []OpenOption{
				&HTTPOption{Key: "a", Value: "1"},
				&RangeOption{Start: 1, End: 10},
				&HTTPOption{Key: "b", Value: "2"},
			},
			want: []OpenOption{
				&HTTPOption{Key: "a", Value: "1"},
				&RangeOption{Start: 1, End: 10},
				&HTTPOption{Key: "b", Value: "2"},
			},
			size: 100,
		},
		{
			name: "Fetch to end",
			in: []OpenOption{
				&RangeOption{Start: 1, End: -1},
			},
			want: []OpenOption{
				&RangeOption{Start: 1, End: 99},
			},
			size: 100,
		},
		{
			name: "Fetch the last 10 bytes",
			in: []OpenOption{
				&RangeOption{Start: -1, End: 10},
			},
			want: []OpenOption{
				&RangeOption{Start: 90, End: 99},
			},
			size: 100,
		},
		{
			name: "Fetch with end bigger than size",
			in: []OpenOption{
				&RangeOption{Start: 10, End: 200},
			},
			want: []OpenOption{
				&RangeOption{Start: 10, End: 99},
			},
			size: 100,
		},
		{
			name: "SeekOption",
			in: []OpenOption{
				&HTTPOption{Key: "a", Value: "1"},
				&SeekOption{Offset: 10},
				&HTTPOption{Key: "b", Value: "2"},
			},
			want: []OpenOption{
				&HTTPOption{Key: "a", Value: "1"},
				&RangeOption{Start: 10, End: 99},
				&HTTPOption{Key: "b", Value: "2"},
			},
			size: 100,
		},
	} {
		t.Run(test.name, func(t *testing.T) {
			FixRangeOption(test.in, test.size)
			assert.Equal(t, test.want, test.in)
		})
	}
}
// testOpenOptions is a shared fixture for the OpenOption header tests
// below: it mixes options that set HTTP headers (HTTPOption,
// RangeOption) with ones that do not (NullOption, HashesOption).
var testOpenOptions = []OpenOption{
	&HTTPOption{Key: "a", Value: "1"},
	&RangeOption{Start: 1, End: 10},
	&HTTPOption{Key: "b", Value: "2"},
	NullOption{},
	&HashesOption{hash.Set(hash.MD5 | hash.SHA1)},
}
// TestOpenOptionAddHeaders checks that OpenOptionAddHeaders fills the
// supplied map with the headers from the header-bearing options only.
func TestOpenOptionAddHeaders(t *testing.T) {
	got := map[string]string{}
	OpenOptionAddHeaders(testOpenOptions, got)
	assert.Equal(t, map[string]string{
		"a":     "1",
		"Range": "bytes=1-10",
		"b":     "2",
	}, got)
}
func TestOpenOptionHeaders(t *testing.T) {
want := map[string]string{
"a": "1",
"Range": "bytes=1-10",
"b": "2",
}
m := OpenOptionHeaders(testOpenOptions)
assert.Equal(t, want, m)
assert.Nil(t, OpenOptionHeaders([]OpenOption{}))
}
// TestOpenOptionAddHTTPHeaders checks that OpenOptionAddHTTPHeaders
// sets the option headers on an http.Header (note the canonical
// capitalisation applied by http.Header.Set).
func TestOpenOptionAddHTTPHeaders(t *testing.T) {
	got := http.Header{}
	OpenOptionAddHTTPHeaders(got, testOpenOptions)
	assert.Equal(t, http.Header{
		"A":     {"1"},
		"Range": {"bytes=1-10"},
		"B":     {"2"},
	}, got)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/override_dir_test.go | fs/override_dir_test.go | package fs
// Check interfaces satisfied
//
// Compile-time assertion that *OverrideDirectory implements Directory.
var _ Directory = (*OverrideDirectory)(nil)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/dir_wrapper.go | fs/dir_wrapper.go | package fs
import (
"context"
"time"
)
// DirWrapper wraps a Directory object so the Remote can be overridden
//
// Construct with NewDirWrapper or NewLimitedDirWrapper.
type DirWrapper struct {
	Directory           // Directory we are wrapping
	remote       string // name of the directory
	failSilently bool   // if set, ErrorNotImplemented should not be considered an error for this directory
}
// NewDirWrapper creates a wrapper for a directory object
//
// This passes through optional methods and should be used for
// wrapping backends to wrap native directories.
func NewDirWrapper(remote string, d Directory) *DirWrapper {
	wrapped := DirWrapper{Directory: d, remote: remote}
	return &wrapped
}
// NewLimitedDirWrapper creates a DirWrapper that should fail silently instead of erroring for ErrorNotImplemented.
//
// Intended for exceptional dirs lacking abilities that the Fs otherwise usually supports
// (ex. a Combine root which can't set metadata/modtime, regardless of support by wrapped backend)
func NewLimitedDirWrapper(remote string, d Directory) *DirWrapper {
	return &DirWrapper{
		Directory:    d,
		remote:       remote,
		failSilently: true,
	}
}
// String returns the name
//
// It implements fmt.Stringer so wrapped directories print as their
// overridden remote path.
func (d *DirWrapper) String() string {
	return d.remote
}
// Remote returns the remote path
//
// This is the overriding path stored on the wrapper, not the wrapped
// Directory's own Remote.
func (d *DirWrapper) Remote() string {
	return d.remote
}
// SetRemote sets the remote
//
// It returns the wrapper to allow call chaining.
func (d *DirWrapper) SetRemote(remote string) *DirWrapper {
	d.remote = remote
	return d
}
// Metadata returns metadata for an DirEntry
//
// It should return nil if there is no Metadata
func (d *DirWrapper) Metadata(ctx context.Context) (Metadata, error) {
	// Pass through to the wrapped Directory if it supports metadata.
	if do, ok := d.Directory.(Metadataer); ok {
		return do.Metadata(ctx)
	}
	return nil, nil
}
// SetMetadata sets metadata for an DirEntry
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (d *DirWrapper) SetMetadata(ctx context.Context, metadata Metadata) error {
	// Pass through to the wrapped Directory if it supports setting metadata.
	if do, ok := d.Directory.(SetMetadataer); ok {
		return do.SetMetadata(ctx, metadata)
	}
	// Limited wrappers swallow the lack of support silently.
	if !d.failSilently {
		return ErrorNotImplemented
	}
	Debugf(d, "Can't SetMetadata for this directory (%T from %v) -- skipping", d.Directory, d.Fs())
	return nil
}
// SetModTime sets the metadata on the DirEntry to set the modification date
//
// If there is any other metadata it does not overwrite it.
func (d *DirWrapper) SetModTime(ctx context.Context, t time.Time) error {
	// Pass through to the wrapped Directory if it supports setting modtime.
	if do, ok := d.Directory.(SetModTimer); ok {
		return do.SetModTime(ctx, t)
	}
	// Limited wrappers swallow the lack of support silently.
	if !d.failSilently {
		return ErrorNotImplemented
	}
	Debugf(d, "Can't SetModTime for this directory (%T from %v) -- skipping", d.Directory, d.Fs())
	return nil
}
// Check interfaces
//
// Compile-time assertions that *DirWrapper implements the directory
// interfaces callers rely on.
var (
	_ DirEntry      = (*DirWrapper)(nil)
	_ Directory     = (*DirWrapper)(nil)
	_ FullDirectory = (*DirWrapper)(nil)
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config.go | fs/config.go | package fs
import (
"context"
"errors"
"fmt"
"net"
"os"
"strconv"
"strings"
"time"
)
// Global state and injectable hooks for the fs package.
//
// The function-valued variables below are overwritten by the config
// implementation at startup; the defaults here are inert no-ops so the
// fs package can be used without a config backend.
var (
	// globalConfig for rclone
	globalConfig = new(ConfigInfo)
	// Read a value from the config file
	//
	// This is a function pointer to decouple the config
	// implementation from the fs
	ConfigFileGet = func(section, key string) (string, bool) { return "", false }
	// Set a value into the config file and persist it
	//
	// This is a function pointer to decouple the config
	// implementation from the fs
	ConfigFileSet = func(section, key, value string) (err error) {
		return errors.New("no config file set handler")
	}
	// Check if the config file has the named section
	//
	// This is a function pointer to decouple the config
	// implementation from the fs
	ConfigFileHasSection = func(section string) bool { return false }
	// CountError counts an error. If any errors have been
	// counted then rclone will exit with a non zero error code.
	//
	// This is a function pointer to decouple the config
	// implementation from the fs
	CountError = func(ctx context.Context, err error) error { return err }
	// ConfigProvider is the config key used for provider options
	ConfigProvider = "provider"
	// ConfigEdit is the config key used to show we wish to edit existing entries
	ConfigEdit = "config_fs_edit"
)
// ConfigOptionsInfo describes the Options in use
var ConfigOptionsInfo = Options{{
Name: "modify_window",
Default: time.Nanosecond,
Help: "Max time diff to be considered the same",
Groups: "Copy",
}, {
Name: "checkers",
Default: 8,
Help: "Number of checkers to run in parallel",
Groups: "Performance",
}, {
Name: "transfers",
Default: 4,
Help: "Number of file transfers to run in parallel",
Groups: "Performance",
}, {
Name: "checksum",
ShortOpt: "c",
Default: false,
Help: "Check for changes with size & checksum (if available, or fallback to size only)",
Groups: "Copy",
}, {
Name: "size_only",
Default: false,
Help: "Skip based on size only, not modtime or checksum",
Groups: "Copy",
}, {
Name: "ignore_times",
ShortOpt: "I",
Default: false,
Help: "Don't skip items that match size and time - transfer all unconditionally",
Groups: "Copy",
}, {
Name: "ignore_existing",
Default: false,
Help: "Skip all files that exist on destination",
Groups: "Copy",
}, {
Name: "ignore_errors",
Default: false,
Help: "Delete even if there are I/O errors",
Groups: "Sync",
}, {
Name: "dry_run",
ShortOpt: "n",
Default: false,
Help: "Do a trial run with no permanent changes",
Groups: "Config,Important",
}, {
Name: "interactive",
ShortOpt: "i",
Default: false,
Help: "Enable interactive mode",
Groups: "Config,Important",
}, {
Name: "links",
Help: "Translate symlinks to/from regular files with a '" + LinkSuffix + "' extension.",
Default: false,
ShortOpt: "l",
Groups: "Copy",
}, {
Name: "contimeout",
Default: 60 * time.Second,
Help: "Connect timeout",
Groups: "Networking",
}, {
Name: "timeout",
Default: 5 * 60 * time.Second,
Help: "IO idle timeout",
Groups: "Networking",
}, {
Name: "expect_continue_timeout",
Default: 1 * time.Second,
Help: "Timeout when using expect / 100-continue in HTTP",
Groups: "Networking",
}, {
Name: "no_check_certificate",
Default: false,
Help: "Do not verify the server SSL certificate (insecure)",
Groups: "Networking",
}, {
Name: "ask_password",
Default: true,
Help: "Allow prompt for password for encrypted configuration",
Groups: "Config",
}, {
Name: "password_command",
Default: SpaceSepList{},
Help: "Command for supplying password for encrypted configuration",
Groups: "Config",
}, {
Name: "max_delete",
Default: int64(-1),
Help: "When synchronizing, limit the number of deletes",
Groups: "Sync",
}, {
Name: "max_delete_size",
Default: SizeSuffix(-1),
Help: "When synchronizing, limit the total size of deletes",
Groups: "Sync",
}, {
Name: "track_renames",
Default: false,
Help: "When synchronizing, track file renames and do a server-side move if possible",
Groups: "Sync",
}, {
Name: "track_renames_strategy",
Default: "hash",
Help: "Strategies to use when synchronizing using track-renames hash|modtime|leaf",
Groups: "Sync",
}, {
Name: "retries",
Default: 3,
Help: "Retry operations this many times if they fail",
Groups: "Config",
}, {
Name: "retries_sleep",
Default: time.Duration(0),
Help: "Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable)",
Groups: "Config",
}, {
Name: "low_level_retries",
Default: 10,
Help: "Number of low level retries to do",
Groups: "Config",
}, {
Name: "update",
ShortOpt: "u",
Default: false,
Help: "Skip files that are newer on the destination",
Groups: "Copy",
}, {
Name: "use_server_modtime",
Default: false,
Help: "Use server modified time instead of object metadata",
Groups: "Config",
}, {
Name: "no_gzip_encoding",
Default: false,
Help: "Don't set Accept-Encoding: gzip",
Groups: "Networking",
}, {
Name: "max_depth",
Default: -1,
Help: "If set limits the recursion depth to this",
Groups: "Filter",
}, {
Name: "ignore_size",
Default: false,
Help: "Ignore size when skipping use modtime or checksum",
Groups: "Copy",
}, {
Name: "ignore_checksum",
Default: false,
Help: "Skip post copy check of checksums",
Groups: "Copy",
}, {
Name: "ignore_case_sync",
Default: false,
Help: "Ignore case when synchronizing",
Groups: "Copy",
}, {
Name: "fix_case",
Default: false,
Help: "Force rename of case insensitive dest to match source",
Groups: "Sync",
}, {
Name: "no_traverse",
Default: false,
Help: "Don't traverse destination file system on copy",
Groups: "Copy",
}, {
Name: "check_first",
Default: false,
Help: "Do all the checks before starting transfers",
Groups: "Copy",
}, {
Name: "no_check_dest",
Default: false,
Help: "Don't check the destination, copy regardless",
Groups: "Copy",
}, {
Name: "no_unicode_normalization",
Default: false,
Help: "Don't normalize unicode characters in filenames",
Groups: "Config",
}, {
Name: "no_update_modtime",
Default: false,
Help: "Don't update destination modtime if files identical",
Groups: "Copy",
}, {
Name: "no_update_dir_modtime",
Default: false,
Help: "Don't update directory modification times",
Groups: "Copy",
}, {
Name: "compare_dest",
Default: []string{},
Help: "Include additional server-side paths during comparison",
Groups: "Copy",
}, {
Name: "copy_dest",
Default: []string{},
Help: "Implies --compare-dest but also copies files from paths into destination",
Groups: "Copy",
}, {
Name: "backup_dir",
Default: "",
Help: "Make backups into hierarchy based in DIR",
Groups: "Sync",
}, {
Name: "suffix",
Default: "",
Help: "Suffix to add to changed files",
Groups: "Sync",
}, {
Name: "suffix_keep_extension",
Default: false,
Help: "Preserve the extension when using --suffix",
Groups: "Sync",
}, {
Name: "fast_list",
Default: false,
Help: "Use recursive list if available; uses more memory but fewer transactions",
Groups: "Listing",
}, {
Name: "list_cutoff",
Default: 1_000_000,
Help: "To save memory, sort directory listings on disk above this threshold",
Groups: "Sync",
}, {
Name: "tpslimit",
Default: 0.0,
Help: "Limit HTTP transactions per second to this",
Groups: "Networking",
}, {
Name: "tpslimit_burst",
Default: 1,
Help: "Max burst of transactions for --tpslimit",
Groups: "Networking",
}, {
Name: "user_agent",
Default: "rclone/" + Version,
Help: "Set the user-agent to a specified string",
Groups: "Networking",
}, {
Name: "immutable",
Default: false,
Help: "Do not modify files, fail if existing files have been modified",
Groups: "Copy",
}, {
Name: "auto_confirm",
Default: false,
Help: "If enabled, do not request console confirmation",
Groups: "Config",
}, {
Name: "stats_unit",
Default: "bytes",
Help: "Show data rate in stats as either 'bits' or 'bytes' per second",
Groups: "Logging",
}, {
Name: "stats_file_name_length",
Default: 45,
Help: "Max file name length in stats (0 for no limit)",
Groups: "Logging",
}, {
Name: "log_level",
Default: LogLevelNotice,
Help: "Log level DEBUG|INFO|NOTICE|ERROR",
Groups: "Logging",
}, {
Name: "stats_log_level",
Default: LogLevelInfo,
Help: "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR",
Groups: "Logging",
}, {
Name: "bwlimit",
Default: BwTimetable{},
Help: "Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable",
Groups: "Networking",
}, {
Name: "bwlimit_file",
Default: BwTimetable{},
Help: "Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable",
Groups: "Networking",
}, {
Name: "buffer_size",
Default: SizeSuffix(16 << 20),
Help: "In memory buffer size when reading files for each --transfer",
Groups: "Performance",
}, {
Name: "streaming_upload_cutoff",
Default: SizeSuffix(100 * 1024),
Help: "Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends",
Groups: "Copy",
}, {
Name: "dump",
Default: DumpFlags(0),
Help: "List of items to dump from: " + DumpFlagsList,
Groups: "Debugging",
}, {
Name: "max_transfer",
Default: SizeSuffix(-1),
Help: "Maximum size of data to transfer",
Groups: "Copy",
}, {
Name: "max_duration",
Default: time.Duration(0),
Help: "Maximum duration rclone will transfer data for",
Groups: "Copy",
}, {
Name: "cutoff_mode",
Default: CutoffMode(0),
Help: "Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS",
Groups: "Copy",
}, {
Name: "max_backlog",
Default: 10000,
Help: "Maximum number of objects in sync or check backlog",
Groups: "Copy,Check",
}, {
Name: "max_stats_groups",
Default: 1000,
Help: "Maximum number of stats groups to keep in memory, on max oldest is discarded",
Groups: "Logging",
}, {
Name: "stats_one_line",
Default: false,
Help: "Make the stats fit on one line",
Groups: "Logging",
}, {
Name: "stats_one_line_date",
Default: false,
Help: "Enable --stats-one-line and add current date/time prefix",
Groups: "Logging",
}, {
Name: "stats_one_line_date_format",
Default: "",
Help: "Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes (\"), see https://golang.org/pkg/time/#Time.Format",
Groups: "Logging",
}, {
Name: "error_on_no_transfer",
Default: false,
Help: "Sets exit code 9 if no files are transferred, useful in scripts",
Groups: "Config",
}, {
Name: "progress",
ShortOpt: "P",
Default: false,
Help: "Show progress during transfer",
Groups: "Logging",
}, {
Name: "progress_terminal_title",
Default: false,
Help: "Show progress on the terminal title (requires -P/--progress)",
Groups: "Logging",
}, {
Name: "use_cookies",
Default: false,
Help: "Enable session cookiejar",
Groups: "Networking",
}, {
Name: "use_mmap",
Default: false,
Help: "Use mmap allocator (see docs)",
Groups: "Config",
}, {
Name: "max_buffer_memory",
Default: SizeSuffix(-1),
Help: "If set, don't allocate more than this amount of memory as buffers",
Groups: "Config",
}, {
Name: "ca_cert",
Default: []string{},
Help: "CA certificate used to verify servers",
Groups: "Networking",
}, {
Name: "client_cert",
Default: "",
Help: "Client SSL certificate (PEM) for mutual TLS auth",
Groups: "Networking",
}, {
Name: "client_key",
Default: "",
Help: "Client SSL private key (PEM) for mutual TLS auth",
Groups: "Networking",
}, {
Name: "client_pass",
Default: "",
Help: "Password for client SSL private key (PEM) for mutual TLS auth (obscured)",
Groups: "Networking",
IsPassword: true,
}, {
Name: "multi_thread_cutoff",
Default: SizeSuffix(256 * 1024 * 1024),
Help: "Use multi-thread downloads for files above this size",
Groups: "Copy",
}, {
Name: "multi_thread_streams",
Default: 4,
Help: "Number of streams to use for multi-thread downloads",
Groups: "Copy",
}, {
Name: "multi_thread_write_buffer_size",
Default: SizeSuffix(128 * 1024),
Help: "In memory buffer size for writing when in multi-thread mode",
Groups: "Copy",
}, {
Name: "multi_thread_chunk_size",
Default: SizeSuffix(64 * 1024 * 1024),
Help: "Chunk size for multi-thread downloads / uploads, if not set by filesystem",
Groups: "Copy",
}, {
Name: "use_json_log",
Default: false,
Help: "Use json log format",
Groups: "Logging",
}, {
Name: "order_by",
Default: "",
Help: "Instructions on how to order the transfers, e.g. 'size,descending'",
Groups: "Copy",
}, {
Name: "refresh_times",
Default: false,
Help: "Refresh the modtime of remote files",
Groups: "Copy",
}, {
Name: "no_console",
Default: false,
Help: "Hide console window (supported on Windows only)",
Groups: "Config",
}, {
Name: "fs_cache_expire_duration",
Default: 300 * time.Second,
Help: "Cache remotes for this long (0 to disable caching)",
Groups: "Config",
}, {
Name: "fs_cache_expire_interval",
Default: 60 * time.Second,
Help: "Interval to check for expired remotes",
Groups: "Config",
}, {
Name: "disable_http2",
Default: false,
Help: "Disable HTTP/2 in the global transport",
Groups: "Networking",
}, {
Name: "human_readable",
Default: false,
Help: "Print numbers in a human-readable format, sizes with suffix Ki|Mi|Gi|Ti|Pi",
Groups: "Config",
}, {
Name: "kv_lock_time",
Default: 1 * time.Second,
Help: "Maximum time to keep key-value database locked by process",
Groups: "Config",
}, {
Name: "disable_http_keep_alives",
Default: false,
Help: "Disable HTTP keep-alives and use each connection once.",
Groups: "Networking",
}, {
Name: "metadata",
ShortOpt: "M",
Default: false,
Help: "If set, preserve metadata when copying objects",
Groups: "Metadata,Copy",
}, {
Name: "server_side_across_configs",
Default: false,
Help: "Allow server-side operations (e.g. copy) to work across different configs",
Groups: "Copy",
}, {
Name: "color",
Default: TerminalColorMode(0),
Help: "When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS",
Groups: "Config",
}, {
Name: "default_time",
Default: Time(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)),
Help: "Time to show if modtime is unknown for files and directories",
Groups: "Config,Listing",
}, {
Name: "inplace",
Default: false,
Help: "Download directly to destination file instead of atomic download to temp/rename",
Groups: "Copy",
}, {
Name: "metadata_mapper",
Default: SpaceSepList{},
Help: "Program to run to transforming metadata before upload",
Groups: "Metadata",
}, {
Name: "partial_suffix",
Default: ".partial",
Help: "Add partial-suffix to temporary file name when --inplace is not used",
Groups: "Copy",
}, {
Name: "max_connections",
Help: "Maximum number of simultaneous backend API connections, 0 for unlimited.",
Default: 0,
Advanced: true,
Groups: "Networking",
}, {
Name: "name_transform",
Default: []string{},
Help: "Transform paths during the copy process.",
Groups: "Copy",
}, {
Name: "http_proxy",
Default: "",
Help: "HTTP proxy URL.",
Groups: "Networking",
}}
// ConfigInfo is filesystem config options
type ConfigInfo struct {
LogLevel LogLevel `config:"log_level"`
StatsLogLevel LogLevel `config:"stats_log_level"`
UseJSONLog bool `config:"use_json_log"`
DryRun bool `config:"dry_run"`
Interactive bool `config:"interactive"`
Links bool `config:"links"`
CheckSum bool `config:"checksum"`
SizeOnly bool `config:"size_only"`
IgnoreTimes bool `config:"ignore_times"`
IgnoreExisting bool `config:"ignore_existing"`
IgnoreErrors bool `config:"ignore_errors"`
ModifyWindow Duration `config:"modify_window"`
Checkers int `config:"checkers"`
Transfers int `config:"transfers"`
ConnectTimeout Duration `config:"contimeout"` // Connect timeout
Timeout Duration `config:"timeout"` // Data channel timeout
ExpectContinueTimeout Duration `config:"expect_continue_timeout"`
Dump DumpFlags `config:"dump"`
InsecureSkipVerify bool `config:"no_check_certificate"` // Skip server certificate verification
DeleteMode DeleteMode `config:"delete_mode"`
MaxDelete int64 `config:"max_delete"`
MaxDeleteSize SizeSuffix `config:"max_delete_size"`
TrackRenames bool `config:"track_renames"` // Track file renames.
TrackRenamesStrategy string `config:"track_renames_strategy"` // Comma separated list of strategies used to track renames
Retries int `config:"retries"` // High-level retries
RetriesInterval Duration `config:"retries_sleep"`
LowLevelRetries int `config:"low_level_retries"`
UpdateOlder bool `config:"update"` // Skip files that are newer on the destination
NoGzip bool `config:"no_gzip_encoding"` // Disable compression
MaxDepth int `config:"max_depth"`
IgnoreSize bool `config:"ignore_size"`
IgnoreChecksum bool `config:"ignore_checksum"`
IgnoreCaseSync bool `config:"ignore_case_sync"`
FixCase bool `config:"fix_case"`
NoTraverse bool `config:"no_traverse"`
CheckFirst bool `config:"check_first"`
NoCheckDest bool `config:"no_check_dest"`
NoUnicodeNormalization bool `config:"no_unicode_normalization"`
NoUpdateModTime bool `config:"no_update_modtime"`
NoUpdateDirModTime bool `config:"no_update_dir_modtime"`
DataRateUnit string `config:"stats_unit"`
CompareDest []string `config:"compare_dest"`
CopyDest []string `config:"copy_dest"`
BackupDir string `config:"backup_dir"`
Suffix string `config:"suffix"`
SuffixKeepExtension bool `config:"suffix_keep_extension"`
UseListR bool `config:"fast_list"`
ListCutoff int `config:"list_cutoff"`
BufferSize SizeSuffix `config:"buffer_size"`
BwLimit BwTimetable `config:"bwlimit"`
BwLimitFile BwTimetable `config:"bwlimit_file"`
TPSLimit float64 `config:"tpslimit"`
TPSLimitBurst int `config:"tpslimit_burst"`
BindAddr net.IP `config:"bind_addr"`
DisableFeatures []string `config:"disable"`
UserAgent string `config:"user_agent"`
Immutable bool `config:"immutable"`
AutoConfirm bool `config:"auto_confirm"`
StreamingUploadCutoff SizeSuffix `config:"streaming_upload_cutoff"`
StatsFileNameLength int `config:"stats_file_name_length"`
AskPassword bool `config:"ask_password"`
PasswordCommand SpaceSepList `config:"password_command"`
UseServerModTime bool `config:"use_server_modtime"`
MaxTransfer SizeSuffix `config:"max_transfer"`
MaxDuration Duration `config:"max_duration"`
CutoffMode CutoffMode `config:"cutoff_mode"`
MaxBacklog int `config:"max_backlog"`
MaxStatsGroups int `config:"max_stats_groups"`
StatsOneLine bool `config:"stats_one_line"`
StatsOneLineDate bool `config:"stats_one_line_date"` // If we want a date prefix at all
StatsOneLineDateFormat string `config:"stats_one_line_date_format"` // If we want to customize the prefix
ErrorOnNoTransfer bool `config:"error_on_no_transfer"` // Set appropriate exit code if no files transferred
Progress bool `config:"progress"`
ProgressTerminalTitle bool `config:"progress_terminal_title"`
Cookie bool `config:"use_cookies"`
UseMmap bool `config:"use_mmap"`
MaxBufferMemory SizeSuffix `config:"max_buffer_memory"`
CaCert []string `config:"ca_cert"` // Client Side CA
ClientCert string `config:"client_cert"` // Client Side Cert
ClientKey string `config:"client_key"` // Client Side Key
ClientPass string `config:"client_pass"` // Client Side Key Password (obscured)
MultiThreadCutoff SizeSuffix `config:"multi_thread_cutoff"`
MultiThreadStreams int `config:"multi_thread_streams"`
MultiThreadSet bool `config:"multi_thread_set"` // whether MultiThreadStreams was set (set in fs/config/configflags)
MultiThreadChunkSize SizeSuffix `config:"multi_thread_chunk_size"` // Chunk size for multi-thread downloads / uploads, if not set by filesystem
MultiThreadWriteBufferSize SizeSuffix `config:"multi_thread_write_buffer_size"`
OrderBy string `config:"order_by"` // instructions on how to order the transfer
UploadHeaders []*HTTPOption `config:"upload_headers"`
DownloadHeaders []*HTTPOption `config:"download_headers"`
Headers []*HTTPOption `config:"headers"`
MetadataSet Metadata `config:"metadata_set"` // extra metadata to write when uploading
RefreshTimes bool `config:"refresh_times"`
NoConsole bool `config:"no_console"`
TrafficClass uint8 `config:"traffic_class"`
FsCacheExpireDuration Duration `config:"fs_cache_expire_duration"`
FsCacheExpireInterval Duration `config:"fs_cache_expire_interval"`
DisableHTTP2 bool `config:"disable_http2"`
HumanReadable bool `config:"human_readable"`
KvLockTime Duration `config:"kv_lock_time"` // maximum time to keep key-value database locked by process
DisableHTTPKeepAlives bool `config:"disable_http_keep_alives"`
Metadata bool `config:"metadata"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
TerminalColorMode TerminalColorMode `config:"color"`
DefaultTime Time `config:"default_time"` // time that directories with no time should display
Inplace bool `config:"inplace"` // Download directly to destination file instead of atomic download to temp/rename
PartialSuffix string `config:"partial_suffix"`
MetadataMapper SpaceSepList `config:"metadata_mapper"`
MaxConnections int `config:"max_connections"`
NameTransform []string `config:"name_transform"`
HTTPProxy string `config:"http_proxy"`
}
// init registers the global config options and takes an initial guess
// at the log level before flag parsing has happened.
func init() {
	// Set any values which aren't the zero for the type
	globalConfig.DeleteMode = DeleteModeDefault
	// Register the config and fill globalConfig with the defaults
	RegisterGlobalOptions(OptionsInfo{Name: "main", Opt: globalConfig, Options: ConfigOptionsInfo, Reload: globalConfig.Reload})
	// initial guess at log level from the flags
	globalConfig.LogLevel = InitialLogLevel()
}
// LogReload is written by fs/log to set variables which should really
// be there but we can't move due to them being visible here in the rc.
// The default is a no-op; Reload calls it last so log state follows
// config changes.
var LogReload = func(*ConfigInfo) error { return nil }
// Reload assumes the config has been edited and does what is necessary
// to make it live: it normalises interdependent settings, validates
// flag combinations and finally notifies the logging subsystem via
// LogReload.
func (ci *ConfigInfo) Reload(ctx context.Context) error {
	// Set -vv if --dump is in use
	if ci.Dump != 0 && ci.LogLevel != LogLevelDebug {
		Logf(nil, "Automatically setting -vv as --dump is enabled")
		ci.LogLevel = LogLevelDebug
	}
	// If --dry-run or -i then use NOTICE as minimum log level
	if (ci.DryRun || ci.Interactive) && ci.StatsLogLevel > LogLevelNotice {
		ci.StatsLogLevel = LogLevelNotice
	}
	// Check --compare-dest and --copy-dest
	if len(ci.CompareDest) > 0 && len(ci.CopyDest) > 0 {
		// errors.New since there are no formatting directives (staticcheck S1039)
		return errors.New("can't use --compare-dest with --copy-dest")
	}
	// Check --stats-one-line and dependent flags
	switch {
	case len(ci.StatsOneLineDateFormat) > 0:
		ci.StatsOneLineDate = true
		ci.StatsOneLine = true
	case ci.StatsOneLineDate:
		ci.StatsOneLineDateFormat = "2006/01/02 15:04:05 - "
		ci.StatsOneLine = true
	}
	// Check --partial-suffix
	if len(ci.PartialSuffix) > 16 {
		return fmt.Errorf("--partial-suffix: Expecting suffix length not greater than %d but got %d", 16, len(ci.PartialSuffix))
	}
	// Make sure some values are > 0
	nonZero := func(pi *int) {
		if *pi <= 0 {
			*pi = 1
		}
	}
	// Check --stats-unit
	if ci.DataRateUnit != "bits" && ci.DataRateUnit != "bytes" {
		Errorf(nil, "Unknown unit %q passed to --stats-unit. Defaulting to bytes.", ci.DataRateUnit)
		ci.DataRateUnit = "bytes"
	}
	// Check these are all > 0
	nonZero(&ci.Retries)
	nonZero(&ci.LowLevelRetries)
	nonZero(&ci.Transfers)
	nonZero(&ci.Checkers)
	return LogReload(ci)
}
// InitialLogLevel performs a simple check for debug flags to enable
// debug logging during the flag initialization.
//
// It scans os.Args for -vv/-vvv..., --log-level DEBUG (either form)
// and --verbose=N with N >= 2, then lets RCLONE_LOG_LEVEL=DEBUG
// override the result.
func InitialLogLevel() LogLevel {
	logLevel := LogLevelNotice
	for argIndex, arg := range os.Args {
		// Matches -vv, -vvv, ... (a dash followed only by v's, at least two)
		if strings.HasPrefix(arg, "-vv") && strings.TrimRight(arg, "v") == "-" {
			logLevel = LogLevelDebug
		}
		if arg == "--log-level=DEBUG" || (arg == "--log-level" && len(os.Args) > argIndex+1 && os.Args[argIndex+1] == "DEBUG") {
			logLevel = LogLevelDebug
		}
		if strings.HasPrefix(arg, "--verbose=") {
			// TrimPrefix instead of a magic slice index keeps this in sync with the prefix
			if level, err := strconv.Atoi(strings.TrimPrefix(arg, "--verbose=")); err == nil && level >= 2 {
				logLevel = LogLevelDebug
			}
		}
	}
	envValue, found := os.LookupEnv("RCLONE_LOG_LEVEL")
	if found && envValue == "DEBUG" {
		logLevel = LogLevelDebug
	}
	return logLevel
}
// TimeoutOrInfinite returns ci.Timeout if > 0 or infinite otherwise
func (ci *ConfigInfo) TimeoutOrInfinite() time.Duration {
	// An unset/disabled timeout means wait effectively forever.
	if ci.Timeout <= 0 {
		return ModTimeNotSupported
	}
	return time.Duration(ci.Timeout)
}
// configContextKeyType is an unexported key type so no other package
// can collide with our context value.
type configContextKeyType struct{}
// Context key for config
var configContextKey = configContextKeyType{}
// GetConfig returns the global or context sensitive context
func GetConfig(ctx context.Context) *ConfigInfo {
	// Prefer a config stored in the context; fall back to the global one.
	if ctx != nil {
		if c := ctx.Value(configContextKey); c != nil {
			return c.(*ConfigInfo)
		}
	}
	return globalConfig
}
// CopyConfig copies the global config (if any) from srcCtx into
// dstCtx returning the new context.
func CopyConfig(dstCtx, srcCtx context.Context) context.Context {
	// Only propagate when srcCtx exists and actually carries a config.
	if srcCtx != nil {
		if c := srcCtx.Value(configContextKey); c != nil {
			return context.WithValue(dstCtx, configContextKey, c)
		}
	}
	return dstCtx
}
// AddConfig returns a mutable config structure based on a shallow
// copy of that found in ctx and returns a new context with that added
// to it.
func AddConfig(ctx context.Context) (context.Context, *ConfigInfo) {
	// Shallow-copy the current config so the caller can mutate it freely.
	copied := *GetConfig(ctx)
	return context.WithValue(ctx, configContextKey, &copied), &copied
}
// ConfigToEnv converts a config section and name, e.g. ("my-remote",
// "ignore-size") into an environment name
// "RCLONE_CONFIG_MY-REMOTE_IGNORE_SIZE"
func ConfigToEnv(section, name string) string {
	// Dashes in the option name become underscores, then the whole
	// section_name key is upper-cased.
	key := section + "_" + strings.ReplaceAll(name, "-", "_")
	return "RCLONE_CONFIG_" + strings.ToUpper(key)
}
// OptionToEnv converts an option name, e.g. "ignore-size" into an
// environment name "RCLONE_IGNORE_SIZE"
func OptionToEnv(name string) string {
	// Dashes become underscores before upper-casing.
	key := strings.ReplaceAll(name, "-", "_")
	return "RCLONE_" + strings.ToUpper(key)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/metadata.go | fs/metadata.go | package fs
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os/exec"
"strings"
"time"
)
// Metadata represents Object metadata in a standardised form
//
// Both keys and values are plain strings.
//
// See docs/content/metadata.md for the interpretation of the keys
type Metadata map[string]string
// MetadataHelp represents help for a bit of system metadata
type MetadataHelp struct {
	Help     string // description of the metadata item
	Type     string // type of the value, presumably e.g. "string" -- confirm against backends
	Example  string // example value
	ReadOnly bool   // set when the item cannot be written
}
// MetadataInfo is help for the whole metadata for this backend.
type MetadataInfo struct {
	System map[string]MetadataHelp // help for each system metadata key
	Help   string                  // overall help text for the backend's metadata
}
// Set k to v on m
//
// If m is nil, then it will get made
func (m *Metadata) Set(k, v string) {
	meta := *m
	if meta == nil {
		// Lazily allocate the map on first use.
		meta = make(Metadata, 1)
		*m = meta
	}
	meta[k] = v
}
// Merge other into m
//
// If m is nil, then it will get made
func (m *Metadata) Merge(other Metadata) {
	// Don't allocate anything if there is nothing to merge - this
	// keeps a nil m nil when other is empty.
	if len(other) == 0 {
		return
	}
	if *m == nil {
		*m = make(Metadata, len(other))
	}
	for k, v := range other {
		(*m)[k] = v
	}
}
// MergeOptions gets any Metadata from the options passed in and
// stores it in m (which may be nil).
//
// If there is no m then metadata will be nil
func (m *Metadata) MergeOptions(options []OpenOption) {
	for _, option := range options {
		// Only MetadataOption values carry metadata
		if mo, ok := option.(MetadataOption); ok {
			m.Merge(Metadata(mo))
		}
	}
}
// GetMetadata from a DirEntry
//
// If the object has no metadata then metadata will be nil
func GetMetadata(ctx context.Context, o DirEntry) (metadata Metadata, err error) {
	// Only entries implementing Metadataer can supply metadata
	if do, ok := o.(Metadataer); ok {
		return do.Metadata(ctx)
	}
	return nil, nil
}
// mapItem describes the item to be mapped
//
// It is JSON encoded and sent to the external metadata mapper
// program on stdin (see metadataMapper).
type mapItem struct {
	SrcFs     string
	SrcFsType string
	DstFs     string
	DstFsType string
	Remote    string
	Size      int64
	MimeType  string `json:",omitempty"`
	ModTime   time.Time
	IsDir     bool
	ID        string   `json:",omitempty"`
	Metadata  Metadata `json:",omitempty"`
}
// metadataMapper runs an external program on the metadata which can
// be used to map it from one form to another.
//
// The program is sent a JSON encoded mapItem on stdin and should
// write a JSON document on stdout whose Metadata field becomes the
// new metadata.
func metadataMapper(ctx context.Context, cmdLine SpaceSepList, dstFs Fs, o DirEntry, metadata Metadata) (newMetadata Metadata, err error) {
	ci := GetConfig(ctx)
	cmd := exec.Command(cmdLine[0], cmdLine[1:]...)
	// Build the description of the object to send to the mapper
	in := mapItem{
		DstFs:     ConfigString(dstFs),
		DstFsType: Type(dstFs),
		Remote:    o.Remote(),
		Size:      o.Size(),
		MimeType:  MimeType(ctx, o),
		ModTime:   o.ModTime(ctx),
		IsDir:     false,
		Metadata:  metadata,
	}
	fInfo := o.Fs()
	if f, ok := fInfo.(Fs); ok {
		in.SrcFs = ConfigString(f)
		in.SrcFsType = Type(f)
	} else {
		// o.Fs() returned an Info rather than a full Fs
		in.SrcFs = fInfo.Name() + ":" + fInfo.Root()
		in.SrcFsType = "unknown"
	}
	if do, ok := o.(IDer); ok {
		in.ID = do.ID()
	}
	inBytes, err := json.MarshalIndent(in, "", "\t")
	if err != nil {
		return nil, fmt.Errorf("metadata mapper: failed to marshal input: %w", err)
	}
	if ci.Dump.IsSet(DumpMapper) {
		Debugf(nil, "Metadata mapper sent: \n%s\n", string(inBytes))
	}
	var stdout, stderr bytes.Buffer
	cmd.Stdin = bytes.NewBuffer(inBytes)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	// Log before running the command - previously this was logged
	// after the command had already completed.
	Debugf(o, "Calling metadata mapper %v", cmdLine)
	start := time.Now()
	err = cmd.Run()
	duration := time.Since(start)
	if err != nil {
		return nil, fmt.Errorf("metadata mapper: failed on %v: %q: %w", cmdLine, strings.TrimSpace(stderr.String()), err)
	}
	if ci.Dump.IsSet(DumpMapper) {
		Debugf(nil, "Metadata mapper received: \n%s\n", stdout.String())
	}
	var out mapItem
	err = json.Unmarshal(stdout.Bytes(), &out)
	if err != nil {
		return nil, fmt.Errorf("metadata mapper: failed to read output: %q: %w", stdout.String(), err)
	}
	Debugf(o, "Metadata mapper returned in %v", duration)
	return out.Metadata, nil
}
// GetMetadataOptions from a DirEntry and merge it with any in options
//
// If --metadata isn't in use it will return nil.
//
// If the object has no metadata then metadata will be nil.
//
// This should be passed the destination Fs for the metadata mapper
func GetMetadataOptions(ctx context.Context, dstFs Fs, o DirEntry, options []OpenOption) (metadata Metadata, err error) {
	ci := GetConfig(ctx)
	// Metadata handling is entirely disabled without --metadata
	if !ci.Metadata {
		return nil, nil
	}
	metadata, err = GetMetadata(ctx, o)
	if err != nil {
		return nil, err
	}
	metadata.MergeOptions(options)
	if len(ci.MetadataMapper) == 0 {
		return metadata, nil
	}
	// Run the external metadata mapper program if configured
	return metadataMapper(ctx, ci.MetadataMapper, dstFs, o, metadata)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/tristate_test.go | fs/tristate_test.go | package fs
import (
"encoding/json"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check it satisfies the interfaces
var (
	_ Flagger   = (*Tristate)(nil)
	_ FlaggerNP = Tristate{}
)

// TestTristateString checks that only Valid values render as
// "true"/"false" and everything else renders as "unset".
func TestTristateString(t *testing.T) {
	for _, test := range []struct {
		in   Tristate
		want string
	}{
		{Tristate{}, "unset"},
		{Tristate{Valid: false, Value: false}, "unset"},
		// Value is ignored when Valid is false
		{Tristate{Valid: false, Value: true}, "unset"},
		{Tristate{Valid: true, Value: false}, "false"},
		{Tristate{Valid: true, Value: true}, "true"},
	} {
		got := test.in.String()
		assert.Equal(t, test.want, got)
	}
}

// TestTristateSet checks parsing of the accepted spellings of
// unset/true/false and that bad input returns an error.
func TestTristateSet(t *testing.T) {
	for _, test := range []struct {
		in   string
		want Tristate
		err  bool
	}{
		{"", Tristate{Valid: false, Value: false}, false},
		{"nil", Tristate{Valid: false, Value: false}, false},
		{"null", Tristate{Valid: false, Value: false}, false},
		{"UNSET", Tristate{Valid: false, Value: false}, false},
		{"true", Tristate{Valid: true, Value: true}, false},
		{"1", Tristate{Valid: true, Value: true}, false},
		{"false", Tristate{Valid: true, Value: false}, false},
		{"0", Tristate{Valid: true, Value: false}, false},
		{"potato", Tristate{Valid: false, Value: false}, true},
	} {
		var got Tristate
		err := got.Set(test.in)
		if test.err {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			assert.Equal(t, test.want, got)
		}
	}
}

// TestTristateScan checks Tristate works with fmt.Sscan.
func TestTristateScan(t *testing.T) {
	var v Tristate
	n, err := fmt.Sscan(" true ", &v)
	require.NoError(t, err)
	assert.Equal(t, 1, n)
	assert.Equal(t, Tristate{Valid: true, Value: true}, v)
}

// TestTristateUnmarshalJSON checks JSON decoding, with null decoding
// to the unset state.
func TestTristateUnmarshalJSON(t *testing.T) {
	for _, test := range []struct {
		in   string
		want Tristate
		err  bool
	}{
		{`null`, Tristate{}, false},
		{`true`, Tristate{Valid: true, Value: true}, false},
		{`false`, Tristate{Valid: true, Value: false}, false},
		{`potato`, Tristate{}, true},
		{``, Tristate{}, true},
	} {
		var got Tristate
		err := json.Unmarshal([]byte(test.in), &got)
		if test.err {
			require.Error(t, err, test.in)
		} else {
			require.NoError(t, err, test.in)
		}
		assert.Equal(t, test.want, got, test.in)
	}
}

// TestTristateMarshalJSON checks JSON encoding, with the unset state
// encoding to null.
func TestTristateMarshalJSON(t *testing.T) {
	for _, test := range []struct {
		in   Tristate
		want string
	}{
		{Tristate{}, `null`},
		{Tristate{Valid: true, Value: true}, `true`},
		{Tristate{Valid: true, Value: false}, `false`},
	} {
		got, err := json.Marshal(&test.in)
		require.NoError(t, err)
		assert.Equal(t, test.want, string(got), fmt.Sprintf("%#v", test.in))
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/daemon_unix.go | fs/daemon_unix.go | // Daemonization interface for Unix platforms (common definitions)
//go:build !windows && !plan9 && !js
package fs
import (
"os"
)
// We use a special environment variable to let the child process know its role.
const (
	DaemonMarkVar   = "_RCLONE_DAEMON_" // name of the marker environment variable
	DaemonMarkChild = "_rclone_daemon_" // value set in the daemonized child
)

// IsDaemon returns true if this process runs in background
func IsDaemon() bool {
	mark, found := os.LookupEnv(DaemonMarkVar)
	return found && mark == DaemonMarkChild
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/versioncheck.go | fs/versioncheck.go | //go:build !go1.24
package fs
// Upgrade to Go version 1.24 to compile rclone - latest stable go
// compiler recommended.
//
// NOTE: calling an undefined function here is deliberate. This file
// is only compiled under the !go1.24 build constraint, so on an old
// compiler the build fails with this function name as the message.
func init() { Go_version_1_24_required_for_compilation() }
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/pacer.go | fs/pacer.go | // Pacer with logging and calculator
package fs
import (
"context"
"time"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/pacer"
)
// Pacer is a simple wrapper around a pacer.Pacer with logging.
type Pacer struct {
	*pacer.Pacer
}

// logCalculator wraps a pacer.Calculator and logs whenever the
// calculated sleep time changes (see Calculate).
type logCalculator struct {
	pacer.Calculator
}
// NewPacer creates a Pacer for the given Fs and Calculator.
func NewPacer(ctx context.Context, c pacer.Calculator) *Pacer {
	ci := GetConfig(ctx)
	p := &Pacer{
		Pacer: pacer.New(
			pacer.InvokerOption(pacerInvoker),
			// Clamp the config values to sensible minimums
			pacer.MaxConnectionsOption(max(ci.MaxConnections, 0)),
			pacer.RetriesOption(max(ci.LowLevelRetries, 1)),
			pacer.CalculatorOption(c),
		),
	}
	// Wrap the calculator for logging (and apply the default if c is nil)
	p.SetCalculator(c)
	return p
}
// Calculate runs the wrapped Calculator and logs any change in the
// sleep time.
func (d *logCalculator) Calculate(state pacer.State) time.Duration {
	previous := state.SleepTime
	next := d.Calculator.Calculate(state)
	if next != previous {
		if state.ConsecutiveRetries > 0 {
			Debugf("pacer", "Rate limited, increasing sleep to %v", next)
		} else {
			Debugf("pacer", "Reducing sleep to %v", next)
		}
	}
	return next
}
// SetCalculator sets the pacing algorithm. Don't modify the Calculator object
// afterwards, use the ModifyCalculator method when needed.
//
// It will choose the default algorithm if nil is passed in.
func (p *Pacer) SetCalculator(c pacer.Calculator) {
	switch c.(type) {
	case *logCalculator:
		// Already wrapped - this shouldn't happen so log it
		Logf("pacer", "Invalid Calculator in fs.Pacer.SetCalculator")
	case nil:
		// Use the default calculator, wrapped so changes are logged
		c = &logCalculator{pacer.NewDefault()}
	default:
		// Wrap the calculator so changes are logged
		c = &logCalculator{c}
	}
	p.Pacer.SetCalculator(c)
}
// ModifyCalculator calls the given function with the currently configured
// Calculator and the Pacer lock held.
//
// The function is passed the unwrapped Calculator so it can be
// modified in place.
func (p *Pacer) ModifyCalculator(f func(pacer.Calculator)) {
	p.Pacer.ModifyCalculator(func(c pacer.Calculator) {
		switch _c := c.(type) {
		case *logCalculator:
			f(_c.Calculator)
		default:
			// Use %T to show the type of the unexpected Calculator.
			// (%t is the boolean verb and rendered this as %!t(...).)
			Logf("pacer", "Invalid Calculator in fs.Pacer: %T", c)
			f(c)
		}
	})
}
// pacerInvoker runs the paced function, logging low level retries and
// marking their errors as retryable.
func pacerInvoker(try, retries int, f pacer.Paced) (retry bool, err error) {
	retry, err = f()
	if !retry {
		return retry, err
	}
	Debugf("pacer", "low level retry %d/%d (error %v)", try, retries, err)
	return true, fserrors.RetryError(err)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/metadata_mapper_code.go | fs/metadata_mapper_code.go | //go:build ignore
// A simple metadata mapper for testing purposes
package main
import (
"encoding/json"
"fmt"
"log"
"os"
)
// check verifies that in[key] exists, has type T and equals want,
// printing a message to stderr and exiting non-zero if not.
func check[T comparable](in map[string]any, key string, want T) {
	value, ok := in[key]
	if !ok {
		fmt.Fprintf(os.Stderr, "%s key not found\n", key)
		os.Exit(1)
	}
	got, ok := value.(T)
	if !ok {
		// Report a type mismatch cleanly rather than panicking on the
		// type assertion
		fmt.Fprintf(os.Stderr, "%s wrong type - expecting %T but got %T\n", key, want, value)
		os.Exit(1)
	}
	if got != want {
		// Use %v not %s as T may not be a string (e.g. float64, bool)
		fmt.Fprintf(os.Stderr, "%s wrong - expecting %v but got %v\n", key, want, got)
		os.Exit(1)
	}
}
// main implements a simple metadata mapper for testing purposes.
//
// It reads a JSON encoded mapItem on stdin, checks it has the
// expected values, transforms the metadata and writes a JSON document
// with the new metadata to stdout.
func main() {
	// Read the input
	var in map[string]any
	err := json.NewDecoder(os.Stdin).Decode(&in)
	if err != nil {
		log.Fatal(err)
	}
	// Check the input
	metadata, ok := in["Metadata"]
	if !ok {
		fmt.Fprintf(os.Stderr, "Metadata key not found\n")
		os.Exit(1)
	}
	check(in, "Size", 5.0)
	check(in, "SrcFs", "memory:")
	check(in, "SrcFsType", "object.memoryFs")
	check(in, "DstFs", "dstFs:dstFsRoot")
	check(in, "DstFsType", "mockfs")
	check(in, "Remote", "file.txt")
	check(in, "MimeType", "text/plain; charset=utf-8")
	check(in, "ModTime", "2001-02-03T04:05:06.000000007Z")
	check(in, "IsDir", false)
	//check(in, "ID", "Potato")
	// Map the metadata
	metadataOut := map[string]string{}
	var out = map[string]any{
		"Metadata": metadataOut,
	}
	for k, v := range metadata.(map[string]any) {
		switch k {
		case "error":
			// The "error" key triggers a deliberate failure for testing
			fmt.Fprintf(os.Stderr, "Error: %s\n", v)
			os.Exit(1)
		case "key1":
			v = "two " + v.(string)
		case "key3":
			// key3 is deliberately dropped
			continue
		}
		metadataOut[k] = v.(string)
	}
	metadataOut["key0"] = "cabbage"
	// Write the output, checking the Encode error - previously err
	// was not assigned here so encoding failures went unnoticed.
	err = json.NewEncoder(os.Stdout).Encode(&out)
	if err != nil {
		log.Fatal(err)
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/versiontag.go | fs/versiontag.go | package fs
// VersionTag of rclone
//
// This is the semantic version string for this build of rclone.
var VersionTag = "v1.73.0"
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/parsetime_test.go | fs/parsetime_test.go | package fs
import (
"encoding/json"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check it satisfies the interfaces
var (
	_ Flagger   = (*Time)(nil)
	_ FlaggerNP = Time{}
)

// TestParseTime checks parsing of absolute times and of durations
// relative to a stubbed-out "now".
func TestParseTime(t *testing.T) {
	now := time.Date(2020, 9, 5, 8, 15, 5, 250, time.UTC)
	// Stub out the clock so relative durations are deterministic
	oldTimeNowFunc := timeNowFunc
	timeNowFunc = func() time.Time { return now }
	defer func() { timeNowFunc = oldTimeNowFunc }()
	for _, test := range []struct {
		in   string
		want time.Time
		err  bool
	}{
		{"", time.Time{}, true},
		// Positive durations are interpreted as that long ago
		{"1ms", now.Add(-time.Millisecond), false},
		{"1s", now.Add(-time.Second), false},
		{"1", now.Add(-time.Second), false},
		{"1m", now.Add(-time.Minute), false},
		{"1.5m", now.Add(-(3 * time.Minute) / 2), false},
		{"1h", now.Add(-time.Hour), false},
		{"1d", now.Add(-time.Hour * 24), false},
		{"1w", now.Add(-time.Hour * 24 * 7), false},
		{"1M", now.Add(-time.Hour * 24 * 30), false},
		{"1y", now.Add(-time.Hour * 24 * 365), false},
		{"1.5y", now.Add(-time.Hour * 24 * 365 * 3 / 2), false},
		// Negative durations are in the future
		{"-1.5y", now.Add(time.Hour * 24 * 365 * 3 / 2), false},
		{"-1s", now.Add(time.Second), false},
		{"-1", now.Add(time.Second), false},
		{"0", now, false},
		{"100", now.Add(-100 * time.Second), false},
		{"-100", now.Add(100 * time.Second), false},
		{"1.s", now.Add(-time.Second), false},
		{"1x", time.Time{}, true},
		{"-1x", time.Time{}, true},
		{"off", time.Time{}, false},
		{"1h2m3s", now.Add(-(time.Hour + 2*time.Minute + 3*time.Second)), false},
		// Absolute dates and times
		{"2001-02-03", time.Date(2001, 2, 3, 0, 0, 0, 0, time.Local), false},
		{"2001-02-03 10:11:12", time.Date(2001, 2, 3, 10, 11, 12, 0, time.Local), false},
		{"2001-08-03 10:11:12", time.Date(2001, 8, 3, 10, 11, 12, 0, time.Local), false},
		{"2001-02-03T10:11:12", time.Date(2001, 2, 3, 10, 11, 12, 0, time.Local), false},
		{"2001-02-03T10:11:12.123Z", time.Date(2001, 2, 3, 10, 11, 12, 123000000, time.UTC), false},
		{"2001-02-03T10:11:12.123+00:00", time.Date(2001, 2, 3, 10, 11, 12, 123000000, time.UTC), false},
	} {
		parsedTime, err := ParseTime(test.in)
		if test.err {
			require.Error(t, err, test.in)
		} else {
			require.NoError(t, err, test.in)
		}
		assert.True(t, test.want.Equal(parsedTime), "%v should be parsed as %v instead of %v", test.in, test.want, parsedTime)
	}
}

// TestTimeString checks formatting of Time values and that the
// formatted value round trips through ParseTime.
func TestTimeString(t *testing.T) {
	now := time.Date(2020, 9, 5, 8, 15, 5, 250, time.UTC)
	oldTimeNowFunc := timeNowFunc
	timeNowFunc = func() time.Time { return now }
	defer func() { timeNowFunc = oldTimeNowFunc }()
	for _, test := range []struct {
		in   time.Time
		want string
	}{
		{now, "2020-09-05T08:15:05.00000025Z"},
		{time.Date(2021, 8, 5, 8, 15, 5, 0, time.UTC), "2021-08-05T08:15:05Z"},
		// The zero time formats as "off"
		{time.Time{}, "off"},
	} {
		got := Time(test.in).String()
		assert.Equal(t, test.want, got)
		// Test the reverse
		reverse, err := ParseTime(test.want)
		assert.NoError(t, err)
		assert.Equal(t, test.in, reverse)
	}
}

// TestTimeScan checks Time works with fmt.Sscan.
func TestTimeScan(t *testing.T) {
	now := time.Date(2020, 9, 5, 8, 15, 5, 250, time.UTC)
	oldTimeNowFunc := timeNowFunc
	timeNowFunc = func() time.Time { return now }
	defer func() { timeNowFunc = oldTimeNowFunc }()
	for _, test := range []struct {
		in   string
		want Time
	}{
		{"17m", Time(now.Add(-17 * time.Minute))},
		{"-12h", Time(now.Add(12 * time.Hour))},
		{"0", Time(now)},
		{"off", Time(time.Time{})},
		{"2022-03-26T17:48:19Z", Time(time.Date(2022, 03, 26, 17, 48, 19, 0, time.UTC))},
		{"2022-03-26 17:48:19", Time(time.Date(2022, 03, 26, 17, 48, 19, 0, time.Local))},
	} {
		var got Time
		n, err := fmt.Sscan(test.in, &got)
		require.NoError(t, err)
		assert.Equal(t, 1, n)
		assert.Equal(t, test.want, got)
	}
}

// TestParseTimeUnmarshalJSON checks decoding Time from JSON strings
// containing either durations or absolute times.
func TestParseTimeUnmarshalJSON(t *testing.T) {
	now := time.Date(2020, 9, 5, 8, 15, 5, 250, time.UTC)
	oldTimeNowFunc := timeNowFunc
	timeNowFunc = func() time.Time { return now }
	defer func() { timeNowFunc = oldTimeNowFunc }()
	for _, test := range []struct {
		in   string
		want time.Time
		err  bool
	}{
		// Empty strings and unquoted values are errors
		{`""`, time.Time{}, true},
		{"0", time.Time{}, true},
		{"1", time.Time{}, true},
		{"1", time.Time{}, true},
		{`"2022-03-26T17:48:19Z"`, time.Date(2022, 03, 26, 17, 48, 19, 0, time.UTC), false},
		{`"0"`, now, false},
		{`"1ms"`, now.Add(-time.Millisecond), false},
		{`"1s"`, now.Add(-time.Second), false},
		{`"1"`, now.Add(-time.Second), false},
		{`"1m"`, now.Add(-time.Minute), false},
		{`"1h"`, now.Add(-time.Hour), false},
		{`"-1h"`, now.Add(time.Hour), false},
		{`"1d"`, now.Add(-time.Hour * 24), false},
		{`"1w"`, now.Add(-time.Hour * 24 * 7), false},
		{`"1M"`, now.Add(-time.Hour * 24 * 30), false},
		{`"1y"`, now.Add(-time.Hour * 24 * 365), false},
		{`"off"`, time.Time{}, false},
		{`"error"`, time.Time{}, true},
		{"error", time.Time{}, true},
	} {
		var parsedTime Time
		err := json.Unmarshal([]byte(test.in), &parsedTime)
		if test.err {
			require.Error(t, err, test.in)
		} else {
			require.NoError(t, err, test.in)
		}
		assert.Equal(t, Time(test.want), parsedTime, test.in)
	}
}

// TestParseTimeMarshalJSON checks encoding Time values to JSON.
func TestParseTimeMarshalJSON(t *testing.T) {
	for _, test := range []struct {
		in   time.Time
		want string
		err  bool
	}{
		{time.Time{}, `"0001-01-01T00:00:00Z"`, false},
		{time.Date(2022, 03, 26, 17, 48, 19, 0, time.UTC), `"2022-03-26T17:48:19Z"`, false},
	} {
		gotBytes, err := json.Marshal(test.in)
		got := string(gotBytes)
		if test.err {
			require.Error(t, err, test.in)
		} else {
			require.NoError(t, err, test.in)
		}
		assert.Equal(t, test.want, got, test.in)
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/bits_test.go | fs/bits_test.go | package fs
import (
"encoding/json"
"fmt"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// bits is a Bits flag type used for testing
type bits = Bits[bitsChoices]

// Bit flags used for testing
const (
	bitA bits = 1 << iota
	bitB
	bitC
)

// bitsChoices supplies the name for each bit
type bitsChoices struct{}

// Choices returns the choices for the bits
func (bitsChoices) Choices() []BitsChoicesInfo {
	return []BitsChoicesInfo{
		{uint64(0), "OFF"},
		{uint64(bitA), "A"},
		{uint64(bitB), "B"},
		{uint64(bitC), "C"},
	}
}

// Check it satisfies the interfaces
var (
	_ Flagger   = (*bits)(nil)
	_ FlaggerNP = bits(0)
)

// TestBitsString checks stringification including unknown bits.
func TestBitsString(t *testing.T) {
	assert.Equal(t, "OFF", bits(0).String())
	assert.Equal(t, "A", (bitA).String())
	assert.Equal(t, "A,B", (bitA | bitB).String())
	assert.Equal(t, "A,B,C", (bitA | bitB | bitC).String())
	assert.Equal(t, "A,Unknown-0x8000", (bitA | bits(0x8000)).String())
}

// TestBitsHelp checks the help string lists all the choices.
func TestBitsHelp(t *testing.T) {
	assert.Equal(t, "OFF, A, B, C", bits(0).Help())
}

// TestBitsSet checks parsing comma separated flag names case
// insensitively, and that bad names leave the value untouched.
func TestBitsSet(t *testing.T) {
	for _, test := range []struct {
		in      string
		want    bits
		wantErr string
	}{
		{"", bits(0), ""},
		{"B", bitB, ""},
		{"B,A", bitB | bitA, ""},
		{"a,b,C", bitA | bitB | bitC, ""},
		{"A,B,unknown,E", 0, `invalid choice "unknown" from: OFF, A, B, C`},
	} {
		f := bits(0xffffffffffffffff)
		initial := f
		err := f.Set(test.in)
		if err != nil {
			if test.wantErr == "" {
				t.Errorf("Got an error when not expecting one on %q: %v", test.in, err)
			} else {
				assert.Contains(t, err.Error(), test.wantErr)
			}
			// The value should be unchanged on error
			assert.Equal(t, initial, f, test.want)
		} else {
			if test.wantErr != "" {
				t.Errorf("Got no error when expecting one on %q", test.in)
			} else {
				assert.Equal(t, test.want, f)
			}
		}
	}
}

// TestBitsIsSet checks IsSet requires all the given bits to be set.
func TestBitsIsSet(t *testing.T) {
	b := bitA | bitB
	assert.True(t, b.IsSet(bitA))
	assert.True(t, b.IsSet(bitB))
	assert.True(t, b.IsSet(bitA|bitB))
	assert.False(t, b.IsSet(bitC))
	assert.False(t, b.IsSet(bitA|bitC))
}

// TestBitsType checks the flag type name.
func TestBitsType(t *testing.T) {
	f := bits(0)
	assert.Equal(t, "Bits", f.Type())
}

// TestBitsScan checks bits works with fmt.Sscan.
func TestBitsScan(t *testing.T) {
	var v bits
	n, err := fmt.Sscan(" C,B ", &v)
	require.NoError(t, err)
	assert.Equal(t, 1, n)
	assert.Equal(t, bitC|bitB, v)
}

// TestBitsUnmarshallJSON checks decoding from both JSON strings of
// flag names and JSON numbers.
func TestBitsUnmarshallJSON(t *testing.T) {
	for _, test := range []struct {
		in      string
		want    bits
		wantErr string
	}{
		{`""`, bits(0), ""},
		{`"B"`, bitB, ""},
		{`"B,A"`, bitB | bitA, ""},
		{`"A,B,C"`, bitA | bitB | bitC, ""},
		{`"A,B,unknown,E"`, 0, `invalid choice "unknown" from: OFF, A, B, C`},
		{`0`, bits(0), ""},
		{strconv.Itoa(int(bitB)), bitB, ""},
		{strconv.Itoa(int(bitB | bitA)), bitB | bitA, ""},
	} {
		f := bits(0xffffffffffffffff)
		initial := f
		err := json.Unmarshal([]byte(test.in), &f)
		if err != nil {
			if test.wantErr == "" {
				t.Errorf("Got an error when not expecting one on %q: %v", test.in, err)
			} else {
				assert.Contains(t, err.Error(), test.wantErr)
			}
			// The value should be unchanged on error
			assert.Equal(t, initial, f, test.want)
		} else {
			if test.wantErr != "" {
				t.Errorf("Got no error when expecting one on %q", test.in)
			} else {
				assert.Equal(t, test.want, f)
			}
		}
	}
}

// TestBitsMarshalJSON checks encoding to a JSON string of flag names.
func TestBitsMarshalJSON(t *testing.T) {
	for _, test := range []struct {
		in   bits
		want string
	}{
		{bitA | bitC, `"A,C"`},
		{0, `"OFF"`},
	} {
		got, err := json.Marshal(&test.in)
		require.NoError(t, err)
		assert.Equal(t, test.want, string(got), fmt.Sprintf("%#v", test.in))
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fingerprint.go | fs/fingerprint.go | package fs
import (
"context"
"fmt"
"strings"
"github.com/rclone/rclone/fs/hash"
)
// Fingerprint produces a unique-ish string for an object.
//
// This is for detecting whether an object has changed since we last
// saw it, not for checking object identity between two different
// remotes - operations.Equal should be used for that.
//
// If fast is set then Fingerprint will only include attributes where
// usually another operation is not required to fetch them. For
// example if fast is set then this won't include hashes on the local
// backend.
func Fingerprint(ctx context.Context, o ObjectInfo, fast bool) string {
	var (
		out      strings.Builder
		f        = o.Fs()
		features = f.Features()
	)
	fmt.Fprintf(&out, "%d", o.Size())
	// Whether we want to do a slow operation or not
	//
	//  fast     true  false true  false
	//  opIsSlow true  true  false false
	//  do Op    false true  true  true
	//
	// If !fast (slow) do the operation or if !OpIsSlow ==
	// OpIsFast do the operation.
	//
	// Eg don't do this for S3 where modtimes are expensive
	if !fast || !features.SlowModTime {
		if f.Precision() != ModTimeNotSupported {
			fmt.Fprintf(&out, ",%v", o.ModTime(ctx).UTC())
		}
	}
	// Eg don't do this for SFTP/local where hashes are expensive?
	if !fast || !features.SlowHash {
		hashType := f.Hashes().GetOne()
		if hashType != hash.None {
			// Renamed local from "hash" to "sum" so it no longer
			// shadows the imported hash package.
			sum, err := o.Hash(ctx, hashType)
			if err == nil {
				fmt.Fprintf(&out, ",%v", sum)
			}
		}
	}
	return out.String()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/open_options.go | fs/open_options.go | // Options for Open
package fs
import (
"context"
"errors"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/rclone/rclone/fs/hash"
)
// OpenOption is an interface describing options for Open
type OpenOption interface {
	fmt.Stringer

	// Header returns the option as an HTTP header
	Header() (key string, value string)

	// Mandatory returns whether this option can be ignored or not
	Mandatory() bool
}
// RangeOption defines an HTTP Range option with start and end. If
// either start or end are < 0 then they will be omitted.
//
// End may be bigger than the Size of the object in which case it will
// be capped to the size of the object.
//
// Note that the End is inclusive, so to fetch 100 bytes you would use
// RangeOption{Start: 0, End: 99}
//
// If Start is specified but End is not then it will fetch from Start
// to the end of the file.
//
// If End is specified, but Start is not then it will fetch the last
// End bytes.
//
// Examples:
//
//	RangeOption{Start: 0, End: 99} - fetch the first 100 bytes
//	RangeOption{Start: 100, End: 199} - fetch the second 100 bytes
//	RangeOption{Start: 100, End: -1} - fetch bytes from offset 100 to the end
//	RangeOption{Start: -1, End: 100} - fetch the last 100 bytes
//
// A RangeOption implements a single byte-range-spec from
// https://tools.ietf.org/html/rfc7233#section-2.1
type RangeOption struct {
	Start int64 // inclusive start offset, omitted from the header if < 0
	End   int64 // inclusive end offset, omitted from the header if < 0
}

// Header formats the option as an http header
func (o *RangeOption) Header() (key string, value string) {
	var b strings.Builder
	b.WriteString("bytes=")
	if o.Start >= 0 {
		b.WriteString(strconv.FormatInt(o.Start, 10))
	}
	b.WriteByte('-')
	if o.End >= 0 {
		b.WriteString(strconv.FormatInt(o.End, 10))
	}
	return "Range", b.String()
}

// ParseRangeOption parses a RangeOption from a Range: header.
// It only accepts single ranges.
func ParseRangeOption(s string) (po *RangeOption, err error) {
	const preamble = "bytes="
	spec, ok := strings.CutPrefix(s, preamble)
	if !ok {
		return nil, errors.New("range: header invalid: doesn't start with " + preamble)
	}
	if strings.ContainsRune(spec, ',') {
		return nil, errors.New("range: header invalid: contains multiple ranges which isn't supported")
	}
	startStr, endStr, ok := strings.Cut(spec, "-")
	if !ok {
		return nil, errors.New("range: header invalid: contains no '-'")
	}
	o := RangeOption{Start: -1, End: -1}
	if startStr = strings.TrimSpace(startStr); startStr != "" {
		if o.Start, err = strconv.ParseInt(startStr, 10, 64); err != nil || o.Start < 0 {
			return nil, errors.New("range: header invalid: bad start")
		}
	}
	if endStr = strings.TrimSpace(endStr); endStr != "" {
		if o.End, err = strconv.ParseInt(endStr, 10, 64); err != nil || o.End < 0 {
			return nil, errors.New("range: header invalid: bad end")
		}
	}
	return &o, nil
}

// String formats the option into human-readable form
func (o *RangeOption) String() string {
	return fmt.Sprintf("RangeOption(%d,%d)", o.Start, o.End)
}

// Mandatory returns whether the option must be parsed or can be ignored
func (o *RangeOption) Mandatory() bool {
	return true
}

// Decode interprets the RangeOption into an offset and a limit
//
// The offset is the start of the stream and the limit is how many
// bytes should be read from it. If the limit is -1 then the stream
// should be read to the end.
func (o *RangeOption) Decode(size int64) (offset, limit int64) {
	switch {
	case o.Start >= 0 && o.End >= 0:
		return o.Start, o.End - o.Start + 1
	case o.Start >= 0:
		return o.Start, -1
	case o.End >= 0:
		// Fetch the last End bytes
		return size - o.End, -1
	default:
		return 0, -1
	}
}
// FixRangeOption looks through the slice of options and adjusts any
// RangeOption~s found that request a fetch from the end into an
// absolute fetch using the size passed in and makes sure the range does
// not exceed filesize. Some remotes (e.g. Onedrive, Box) don't support
// range requests which index from the end.
//
// It also adjusts any SeekOption~s, turning them into absolute
// RangeOption~s instead.
func FixRangeOption(options []OpenOption, size int64) {
	if size < 0 {
		// Can't do anything for unknown length objects
		return
	}
	if size == 0 {
		// If size is 0 then replace RangeOption~s with NullOption~s
		// which won't be rendered
		for i := range options {
			if _, ok := options[i].(*RangeOption); ok {
				options[i] = NullOption{}
			}
		}
		return
	}
	for i, option := range options {
		switch x := option.(type) {
		case *RangeOption:
			start, end := x.Start, x.End
			// A negative start means fetch the last End bytes
			if start < 0 {
				start, end = size-end, -1
			}
			// Cap an oversized or undefined end to the last byte
			if end > size || end < 0 {
				end = size - 1
			}
			if start != x.Start || end != x.End {
				options[i] = &RangeOption{Start: start, End: end}
			}
		case *SeekOption:
			options[i] = &RangeOption{Start: x.Offset, End: size - 1}
		}
	}
}
// SeekOption defines an HTTP Range option with start only.
type SeekOption struct {
	Offset int64
}

// Header formats the option as an http header
func (o *SeekOption) Header() (key string, value string) {
	return "Range", "bytes=" + strconv.FormatInt(o.Offset, 10) + "-"
}

// String formats the option into human-readable form
func (o *SeekOption) String() string {
	return fmt.Sprintf("SeekOption(%d)", o.Offset)
}

// Mandatory returns whether the option must be parsed or can be ignored
func (o *SeekOption) Mandatory() bool {
	return true
}
// HTTPOption defines a general purpose HTTP option
type HTTPOption struct {
	Key   string
	Value string
}

// Header formats the option as an http header
func (o *HTTPOption) Header() (key string, value string) {
	key, value = o.Key, o.Value
	return key, value
}

// String formats the option into human-readable form
func (o *HTTPOption) String() string {
	return fmt.Sprintf("HTTPOption(%q,%q)", o.Key, o.Value)
}

// Mandatory returns whether the option must be parsed or can be ignored
func (o *HTTPOption) Mandatory() bool {
	return false
}
// HashesOption defines an option used to tell the local fs to limit
// the number of hashes it calculates.
type HashesOption struct {
	Hashes hash.Set // the hashes to calculate
}

// Header formats the option as an http header
//
// HashesOption has no HTTP header representation.
func (o *HashesOption) Header() (key string, value string) {
	return "", ""
}

// String formats the option into human-readable form
func (o *HashesOption) String() string {
	return fmt.Sprintf("HashesOption(%v)", o.Hashes)
}

// Mandatory returns whether the option must be parsed or can be ignored
func (o *HashesOption) Mandatory() bool {
	return false
}
// NullOption defines an Option which does nothing
type NullOption struct{}

// Header formats the option as an http header - a NullOption has none
func (o NullOption) Header() (key string, value string) {
	return "", ""
}

// String formats the option into human-readable form
func (o NullOption) String() string {
	return "NullOption()"
}

// Mandatory returns whether the option must be parsed or can be ignored
func (o NullOption) Mandatory() bool {
	return false
}
// MetadataOption carries Metadata as an OpenOption.
//
// It is read back with a type assertion - see Metadata.MergeOptions.
type MetadataOption Metadata

// Header formats the option as an http header
//
// MetadataOption has no HTTP header representation.
func (o MetadataOption) Header() (key string, value string) {
	return "", ""
}

// String formats the option into human-readable form
func (o MetadataOption) String() string {
	return fmt.Sprintf("MetadataOption(%v)", Metadata(o))
}

// Mandatory returns whether the option must be parsed or can be ignored
func (o MetadataOption) Mandatory() bool {
	return false
}

// MetadataAsOpenOptions fetch any metadata to set as open options
func MetadataAsOpenOptions(ctx context.Context) (options []OpenOption) {
	ci := GetConfig(ctx)
	if ci.MetadataSet != nil {
		// Pass the configured metadata through as an option
		options = append(options, MetadataOption(ci.MetadataSet))
	}
	return options
}
// ChunkOption defines an Option which returns a preferred chunk size
type ChunkOption struct {
	ChunkSize int64
}

// Header formats the option as an http header - a ChunkOption has none
func (o *ChunkOption) Header() (key string, value string) {
	return "", ""
}

// Mandatory returns whether the option must be parsed or can be ignored
func (o *ChunkOption) Mandatory() bool {
	return false
}

// String formats the option into human-readable form
func (o *ChunkOption) String() string {
	return fmt.Sprintf("ChunkOption(%v)", o.ChunkSize)
}
// OpenOptionAddHeaders adds each header found in options to the
// headers map provided the key was non empty.
func OpenOptionAddHeaders(options []OpenOption, headers map[string]string) {
	for _, option := range options {
		if key, value := option.Header(); key != "" && value != "" {
			headers[key] = value
		}
	}
}

// OpenOptionHeaders adds each header found in options to the
// headers map provided the key was non empty.
//
// It returns a nil map if options was empty
func OpenOptionHeaders(options []OpenOption) (headers map[string]string) {
	if len(options) == 0 {
		return nil
	}
	headers = make(map[string]string, len(options))
	OpenOptionAddHeaders(options, headers)
	return headers
}

// OpenOptionAddHTTPHeaders Sets each header found in options to the
// http.Header map provided the key was non empty.
func OpenOptionAddHTTPHeaders(headers http.Header, options []OpenOption) {
	for _, option := range options {
		if key, value := option.Header(); key != "" && value != "" {
			headers.Set(key, value)
		}
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/log.go | fs/log.go | package fs
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"os"
"slices"
"strings"
"github.com/rclone/rclone/lib/caller"
)
// LogLevel describes rclone's logs. These are a subset of the syslog log levels.
type LogLevel = Enum[logLevelChoices]

// Log levels. These are the syslog levels of which we only use a
// subset.
//
// LOG_EMERG system is unusable
// LOG_ALERT action must be taken immediately
// LOG_CRIT critical conditions
// LOG_ERR error conditions
// LOG_WARNING warning conditions
// LOG_NOTICE normal, but significant, condition
// LOG_INFO informational message
// LOG_DEBUG debug-level message
//
// NB the order must be kept in step with logLevelChoices.Choices and
// levelToSlog.
const (
	LogLevelEmergency LogLevel = iota
	LogLevelAlert
	LogLevelCritical
	LogLevelError // Error - can't be suppressed
	LogLevelWarning
	LogLevelNotice // Normal logging, -q suppresses
	LogLevelInfo // Transfers, needs -v
	LogLevelDebug // Debug level, needs -vv
	LogLevelOff
)
// logLevelChoices provides the string names for the LogLevel enum.
type logLevelChoices struct{}

// Choices returns the valid string values for a LogLevel, indexed by level.
func (logLevelChoices) Choices() []string {
	return []string{
		LogLevelEmergency: "EMERGENCY",
		LogLevelAlert: "ALERT",
		LogLevelCritical: "CRITICAL",
		LogLevelError: "ERROR",
		LogLevelWarning: "WARNING",
		LogLevelNotice: "NOTICE",
		LogLevelInfo: "INFO",
		LogLevelDebug: "DEBUG",
		LogLevelOff: "OFF",
	}
}

// Type returns the name of the type for use in flag help output.
func (logLevelChoices) Type() string {
	return "LogLevel"
}
// slogLevel definitions defined as slog.Level constants.
// The integer values determine severity for filtering.
// Lower values are less severe (e.g., Debug), higher values are more severe (e.g., Emergency).
// We fit our extra values into slog's scale.
const (
	// slog.LevelDebug slog.Level = -4
	// slog.LevelInfo slog.Level = 0
	SlogLevelNotice = slog.Level(2) // Between Info (0) and Warn (4)
	// slog.LevelWarn slog.Level = 4
	// slog.LevelError slog.Level = 8
	SlogLevelCritical = slog.Level(12) // More severe than Error
	SlogLevelAlert = slog.Level(16) // More severe than Critical
	SlogLevelEmergency = slog.Level(20) // Most severe
	SlogLevelOff = slog.Level(24) // A very high value
)

// Map our level numbers to slog level numbers
//
// NB the indexes must be kept in step with the LogLevel constants.
var levelToSlog = []slog.Level{
	LogLevelEmergency: SlogLevelEmergency,
	LogLevelAlert: SlogLevelAlert,
	LogLevelCritical: SlogLevelCritical,
	LogLevelError: slog.LevelError,
	LogLevelWarning: slog.LevelWarn,
	LogLevelNotice: SlogLevelNotice,
	LogLevelInfo: slog.LevelInfo,
	LogLevelDebug: slog.LevelDebug,
	LogLevelOff: SlogLevelOff,
}
// LogValueItem describes keyed item for a JSON log entry
type LogValueItem struct {
	key    string // dictionary key for the JSON output
	value  any    // value stored under key
	render bool   // if false, String() returns "" so the item is hidden from text logs
}

// LogValue should be used as an argument to any logging calls to
// augment the JSON output with more structured information.
//
// key is the dictionary parameter used to store value.
func LogValue(key string, value any) LogValueItem {
	return LogValueItem{key: key, value: value, render: true}
}

// LogValueHide should be used as an argument to any logging calls to
// augment the JSON output with more structured information.
//
// key is the dictionary parameter used to store value.
//
// String() will return a blank string - this is useful to put items
// in which don't print into the log.
func LogValueHide(key string, value any) LogValueItem {
	return LogValueItem{key: key, value: value, render: false}
}
// String returns the representation of value. If render is false this
// is an empty string so LogValueItem entries won't show in the
// textual representation of logs.
func (j LogValueItem) String() string {
	if !j.render {
		// Hidden items render as nothing in text logs
		return ""
	}
	switch v := j.value.(type) {
	case fmt.Stringer:
		return v.String()
	default:
		return fmt.Sprint(v)
	}
}
// LogLevelToSlog converts an rclone log level to log/slog log level.
func LogLevelToSlog(level LogLevel) slog.Level {
	// NB level is unsigned so there is no need to check for < 0
	if int(level) >= len(levelToSlog) {
		// Map unknown levels to Error so they are never suppressed
		return slog.LevelError
	}
	return levelToSlog[level]
}
// logSlog sends text to the default slog logger at the slog level
// equivalent to the given rclone level, with attrs as key/value pairs.
func logSlog(level LogLevel, text string, attrs []any) {
	slog.Log(context.Background(), LogLevelToSlog(level), text, attrs...)
}

// logSlogWithObject is like logSlog but, when o is non-nil, also
// attaches o (formatted with %+v) and its Go type as attributes.
func logSlogWithObject(level LogLevel, o any, text string, attrs []any) {
	if o != nil {
		attrs = slices.Concat(attrs, []any{
			"object", fmt.Sprintf("%+v", o),
			"objectType", fmt.Sprintf("%T", o),
		})
	}
	logSlog(level, text, attrs)
}
// LogPrint produces a log string from the arguments passed in
//
// It logs text at the given level with no extra attributes.
func LogPrint(level LogLevel, o any, text string) {
	logSlogWithObject(level, o, text, nil)
}
// LogPrintf produces a log string from the arguments passed in
//
// Any args which are LogValueItem are additionally attached as
// structured key/value attributes.
func LogPrintf(level LogLevel, o any, text string, args ...any) {
	// Pull out structured LogValueItem arguments as attributes
	var attrs []any
	for _, a := range args {
		item, ok := a.(LogValueItem)
		if !ok {
			continue
		}
		attrs = append(attrs, item.key, item.value)
	}
	logSlogWithObject(level, o, fmt.Sprintf(text, args...), attrs)
}
// LogLevelPrint writes logs at the given level
//
// The log is suppressed unless the configured LogLevel is at least level.
func LogLevelPrint(level LogLevel, o any, text string) {
	if GetConfig(context.TODO()).LogLevel >= level {
		LogPrint(level, o, text)
	}
}

// LogLevelPrintf writes logs at the given level
//
// The log is suppressed unless the configured LogLevel is at least level.
func LogLevelPrintf(level LogLevel, o any, text string, args ...any) {
	if GetConfig(context.TODO()).LogLevel >= level {
		LogPrintf(level, o, text, args...)
	}
}
// Panic writes alert log output for this Object or Fs and calls panic().
// It should always be seen by the user.
//
// NB it panics even when the log itself was suppressed by the level check.
func Panic(o any, text string) {
	if GetConfig(context.TODO()).LogLevel >= LogLevelAlert {
		LogPrint(LogLevelAlert, o, text)
	}
	panic(text)
}

// Panicf writes alert log output for this Object or Fs and calls panic().
// It should always be seen by the user.
//
// NB it panics even when the log itself was suppressed by the level check.
func Panicf(o any, text string, args ...any) {
	if GetConfig(context.TODO()).LogLevel >= LogLevelAlert {
		LogPrintf(LogLevelAlert, o, text, args...)
	}
	panic(fmt.Sprintf(text, args...))
}
// Panic if this is called from an rc job.
//
// This means fatal errors get turned into panics which get caught by
// the rc job handler so they don't crash rclone.
//
// This detects if we are being called from an rc Job by looking for
// Job.run in the call stack.
//
// Ideally we would do this by passing a context about but we don't
// have one with the logging calls yet.
//
// This is tested in fs/rc/internal_job_test.go in TestInternalFatal.
func panicIfRcJob(o any, text string, args []any) {
	if !caller.Present("(*Job).run") {
		return
	}
	// Build the panic message: "fatal error: [object: ]text"
	var errTxt strings.Builder
	_, _ = errTxt.WriteString("fatal error: ")
	if o != nil {
		_, _ = fmt.Fprintf(&errTxt, "%v: ", o)
	}
	if args != nil {
		// Printf style caller (Fatalf) - format the args into text
		_, _ = fmt.Fprintf(&errTxt, text, args...)
	} else {
		// Print style caller (Fatal) - use the text verbatim
		_, _ = errTxt.WriteString(text)
	}
	panic(errTxt.String())
}
// Fatal writes critical log output for this Object or Fs and calls os.Exit(1).
// It should always be seen by the user.
//
// When called from inside an rc job panicIfRcJob panics instead of
// returning, so the process is not exited in that case.
func Fatal(o any, text string) {
	if GetConfig(context.TODO()).LogLevel >= LogLevelCritical {
		LogPrint(LogLevelCritical, o, text)
	}
	panicIfRcJob(o, text, nil)
	os.Exit(1)
}

// Fatalf writes critical log output for this Object or Fs and calls os.Exit(1).
// It should always be seen by the user.
//
// When called from inside an rc job panicIfRcJob panics instead of
// returning, so the process is not exited in that case.
func Fatalf(o any, text string, args ...any) {
	if GetConfig(context.TODO()).LogLevel >= LogLevelCritical {
		LogPrintf(LogLevelCritical, o, text, args...)
	}
	panicIfRcJob(o, text, args)
	os.Exit(1)
}
// Error writes error log output for this Object or Fs. It
// should always be seen by the user.
func Error(o any, text string) {
	LogLevelPrint(LogLevelError, o, text)
}

// Errorf writes error log output for this Object or Fs. It
// should always be seen by the user.
func Errorf(o any, text string, args ...any) {
	LogLevelPrintf(LogLevelError, o, text, args...)
}

// Print writes log output for this Object or Fs, same as Log.
func Print(o any, text string) {
	LogLevelPrint(LogLevelNotice, o, text)
}

// Printf writes log output for this Object or Fs, same as Logf.
func Printf(o any, text string, args ...any) {
	LogLevelPrintf(LogLevelNotice, o, text, args...)
}

// Log writes log output for this Object or Fs. This should be
// considered to be Notice level logging. It is the default level.
// By default rclone should not log very much so only use this for
// important things the user should see. The user can filter these
// out with the -q flag.
func Log(o any, text string) {
	LogLevelPrint(LogLevelNotice, o, text)
}

// Logf writes log output for this Object or Fs. This should be
// considered to be Notice level logging. It is the default level.
// By default rclone should not log very much so only use this for
// important things the user should see. The user can filter these
// out with the -q flag.
func Logf(o any, text string, args ...any) {
	LogLevelPrintf(LogLevelNotice, o, text, args...)
}

// Infoc writes info on transfers for this Object or Fs. Use this
// level for logging transfers, deletions and things which should
// appear with the -v flag.
// There is a name clash on "Info", hence the name "Infoc", "c" for constant.
func Infoc(o any, text string) {
	LogLevelPrint(LogLevelInfo, o, text)
}

// Infof writes info on transfers for this Object or Fs. Use this
// level for logging transfers, deletions and things which should
// appear with the -v flag.
func Infof(o any, text string, args ...any) {
	LogLevelPrintf(LogLevelInfo, o, text, args...)
}

// Debug writes debugging output for this Object or Fs. Use this for
// debug only. The user must specify -vv to see this.
func Debug(o any, text string) {
	LogLevelPrint(LogLevelDebug, o, text)
}

// Debugf writes debugging output for this Object or Fs. Use this for
// debug only. The user must specify -vv to see this.
func Debugf(o any, text string, args ...any) {
	LogLevelPrintf(LogLevelDebug, o, text, args...)
}
// LogDirName returns an object for the logger, logging a root
// directory which would normally be "" as the Fs
func LogDirName(f Fs, dir string) any {
	if dir == "" {
		// The root is logged as the Fs itself rather than an empty string
		return f
	}
	return dir
}
// PrettyPrint formats JSON for improved readability in debug logs.
// If it can't Marshal JSON, it falls back to fmt.
func PrettyPrint(in any, label string, level LogLevel) {
	// Skip the (possibly expensive) marshal when the log would be suppressed
	if GetConfig(context.TODO()).LogLevel < level {
		return
	}
	inBytes, err := json.MarshalIndent(in, "", "\t")
	// Fall back to fmt formatting on error or when the JSON carries no detail
	if err != nil || string(inBytes) == "{}" || string(inBytes) == "[]" {
		LogPrintf(level, label, "\n%+v\n", in)
		return
	}
	LogPrintf(level, label, "\n%s\n", string(inBytes))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fs_test.go | fs/fs_test.go | package fs
import (
"context"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// TestFeaturesDisable checks that Disable clears both function
// features (Copy) and boolean features (CaseInsensitive) by name.
func TestFeaturesDisable(t *testing.T) {
	ft := new(Features)
	ft.Copy = func(ctx context.Context, src Object, remote string) (Object, error) {
		return nil, nil
	}
	ft.CaseInsensitive = true
	assert.NotNil(t, ft.Copy)
	assert.Nil(t, ft.Purge)
	ft.Disable("copy")
	assert.Nil(t, ft.Copy)
	assert.Nil(t, ft.Purge)
	assert.True(t, ft.CaseInsensitive)
	assert.False(t, ft.DuplicateFiles)
	ft.Disable("caseinsensitive")
	assert.False(t, ft.CaseInsensitive)
	assert.False(t, ft.DuplicateFiles)
}

// TestFeaturesList checks that List contains the known feature names.
func TestFeaturesList(t *testing.T) {
	ft := new(Features)
	names := strings.Join(ft.List(), ",")
	assert.True(t, strings.Contains(names, ",Copy,"))
}

// TestFeaturesEnabled checks that Enabled reports a flag for every
// feature - true for the set ones and false for the unset ones.
func TestFeaturesEnabled(t *testing.T) {
	ft := new(Features)
	ft.CaseInsensitive = true
	ft.Purge = func(ctx context.Context, dir string) error { return nil }
	enabled := ft.Enabled()
	flag, ok := enabled["CaseInsensitive"]
	assert.Equal(t, true, ok)
	assert.Equal(t, true, flag, enabled)
	flag, ok = enabled["Purge"]
	assert.Equal(t, true, ok)
	assert.Equal(t, true, flag, enabled)
	flag, ok = enabled["DuplicateFiles"]
	assert.Equal(t, true, ok)
	assert.Equal(t, false, flag, enabled)
	flag, ok = enabled["Copy"]
	assert.Equal(t, true, ok)
	assert.Equal(t, false, flag, enabled)
	assert.Equal(t, len(ft.List()), len(enabled))
}

// TestFeaturesDisableList checks that DisableList disables several
// features in one call.
func TestFeaturesDisableList(t *testing.T) {
	ft := new(Features)
	ft.Copy = func(ctx context.Context, src Object, remote string) (Object, error) {
		return nil, nil
	}
	ft.CaseInsensitive = true
	assert.NotNil(t, ft.Copy)
	assert.Nil(t, ft.Purge)
	assert.True(t, ft.CaseInsensitive)
	assert.False(t, ft.DuplicateFiles)
	ft.DisableList([]string{"copy", "caseinsensitive"})
	assert.Nil(t, ft.Copy)
	assert.Nil(t, ft.Purge)
	assert.False(t, ft.CaseInsensitive)
	assert.False(t, ft.DuplicateFiles)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config_test.go | fs/config_test.go | package fs
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
)
// TestGetConfig checks config retrieval from a nil context, a plain
// background context and a context made with AddConfig.
func TestGetConfig(t *testing.T) {
	ctx := context.Background()
	// Check nil
	//lint:ignore SA1012 false positive when running staticcheck, we want to test passing a nil Context and therefore ignore lint suggestion to use context.TODO
	//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1012
	config := GetConfig(nil)
	assert.Equal(t, globalConfig, config)
	// Check empty config
	config = GetConfig(ctx)
	assert.Equal(t, globalConfig, config)
	// Check adding a config
	ctx2, config2 := AddConfig(ctx)
	config2.Transfers++
	assert.NotEqual(t, config2, config)
	// Check can get config back
	config2ctx := GetConfig(ctx2)
	assert.Equal(t, config2, config2ctx)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/dump_test.go | fs/dump_test.go | package fs
import (
"encoding/json"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
)
// Check it satisfies the interfaces
var (
	_ Flagger = (*DumpFlags)(nil)
	_ FlaggerNP = DumpFlags(0)
)

// TestDumpFlagsString checks the text form of DumpFlags, including
// how unknown bits are rendered.
func TestDumpFlagsString(t *testing.T) {
	assert.Equal(t, "", DumpFlags(0).String())
	assert.Equal(t, "headers", (DumpHeaders).String())
	assert.Equal(t, "headers,bodies", (DumpHeaders | DumpBodies).String())
	assert.Equal(t, "headers,bodies,requests,responses,auth,filters", (DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters).String())
	assert.Equal(t, "headers,Unknown-0x8000", (DumpHeaders | DumpFlags(0x8000)).String())
}

// TestDumpFlagsSet checks parsing of flag strings and that a failed
// Set leaves the previous value unchanged.
func TestDumpFlagsSet(t *testing.T) {
	for _, test := range []struct {
		in string
		want DumpFlags
		wantErr string
	}{
		{"", DumpFlags(0), ""},
		{"bodies", DumpBodies, ""},
		{"bodies,headers,auth", DumpBodies | DumpHeaders | DumpAuth, ""},
		{"bodies,headers,auth", DumpBodies | DumpHeaders | DumpAuth, ""},
		{"headers,bodies,requests,responses,auth,filters", DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters, ""},
		{"headers,bodies,unknown,auth", 0, "invalid choice \"unknown\""},
	} {
		f := DumpFlags(0xffffffffffffffff)
		initial := f
		err := f.Set(test.in)
		if err != nil {
			if test.wantErr == "" {
				t.Errorf("Got an error when not expecting one on %q: %v", test.in, err)
			} else {
				assert.Contains(t, err.Error(), test.wantErr)
			}
			// The value must not change on error
			assert.Equal(t, initial, f, test.want)
		} else {
			if test.wantErr != "" {
				t.Errorf("Got no error when expecting one on %q", test.in)
			} else {
				assert.Equal(t, test.want, f)
			}
		}
	}
}

// TestDumpFlagsType checks the pflag type name.
func TestDumpFlagsType(t *testing.T) {
	f := DumpFlags(0)
	assert.Equal(t, "DumpFlags", f.Type())
}

// TestDumpFlagsUnmarshallJSON checks unmarshalling from both JSON
// strings and JSON integers.
func TestDumpFlagsUnmarshallJSON(t *testing.T) {
	for _, test := range []struct {
		in string
		want DumpFlags
		wantErr string
	}{
		{`""`, DumpFlags(0), ""},
		{`"bodies"`, DumpBodies, ""},
		{`"bodies,headers,auth"`, DumpBodies | DumpHeaders | DumpAuth, ""},
		{`"bodies,headers,auth"`, DumpBodies | DumpHeaders | DumpAuth, ""},
		{`"headers,bodies,requests,responses,auth,filters"`, DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters, ""},
		{`"headers,bodies,unknown,auth"`, 0, "invalid choice \"unknown\""},
		{`0`, DumpFlags(0), ""},
		{strconv.Itoa(int(DumpBodies)), DumpBodies, ""},
		{strconv.Itoa(int(DumpBodies | DumpHeaders | DumpAuth)), DumpBodies | DumpHeaders | DumpAuth, ""},
	} {
		f := DumpFlags(0xffffffffffffffff)
		initial := f
		err := json.Unmarshal([]byte(test.in), &f)
		if err != nil {
			if test.wantErr == "" {
				t.Errorf("Got an error when not expecting one on %q: %v", test.in, err)
			} else {
				assert.Contains(t, err.Error(), test.wantErr)
			}
			// The value must not change on error
			assert.Equal(t, initial, f, test.want)
		} else {
			if test.wantErr != "" {
				t.Errorf("Got no error when expecting one on %q", test.in)
			} else {
				assert.Equal(t, test.want, f)
			}
		}
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/daemon_other.go | fs/daemon_other.go | // Daemonization stub for non-Unix platforms (common definitions)
//go:build windows || plan9 || js
package fs
// IsDaemon returns true if this process runs in background
func IsDaemon() bool {
	// Daemonization is not supported on this platform (see the build
	// tags above) so this is always false here.
	return false
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/newfs.go | fs/newfs.go | // NewFs and its helpers
package fs
import (
"context"
"crypto/md5"
"encoding/base64"
"fmt"
"maps"
"os"
"path/filepath"
"slices"
"strings"
"sync"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
)
// Store the hashes of the overridden config
//
// Maps the "{hash}" suffix which NewFs adds to a config name to the
// canonical string of the overridden config values it represents.
var (
	overriddenConfigMu sync.Mutex
	overriddenConfig = make(map[string]string)
)
// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(ctx context.Context, path string) (Fs, error) {
	Debugf(nil, "Creating backend with remote %q", path)
	// Warn if the path could be confused with a config section of the same name
	if ConfigFileHasSection(path) {
		Logf(nil, "%q refers to a local folder, use %q to refer to your remote or %q to hide this warning", path, path+":", "./"+path)
	}
	fsInfo, configName, fsPath, config, err := ConfigFs(path)
	if err != nil {
		return nil, err
	}
	overridden := fsInfo.Options.Overridden(config)
	if len(overridden) > 0 {
		extraConfig := overridden.String()
		//Debugf(nil, "detected overridden config %q", extraConfig)
		md5sumBinary := md5.Sum([]byte(extraConfig))
		configHash := base64.RawURLEncoding.EncodeToString(md5sumBinary[:])
		// 5 characters length is 5*6 = 30 bits of base64
		overriddenConfigMu.Lock()
		var suffix string
		// Take the shortest hash prefix (minimum 5 characters) which
		// is not already in use for a different config.
		// NOTE(review): assumes collisions resolve before maxLength
		// exceeds the hash length, otherwise the slice would panic -
		// confirm this bound is acceptable.
		for maxLength := 5; ; maxLength++ {
			suffix = "{" + configHash[:maxLength] + "}"
			existingExtraConfig, ok := overriddenConfig[suffix]
			if !ok || existingExtraConfig == extraConfig {
				break
			}
		}
		Debugf(configName, "detected overridden config - adding %q suffix to name", suffix)
		// Add the suffix to the config name
		//
		// These need to work as filesystem names as the VFS cache will use them
		configName += suffix
		// Store the config suffixes for reversing in ConfigString
		overriddenConfig[suffix] = extraConfig
		overriddenConfigMu.Unlock()
	}
	ctx, err = addConfigToContext(ctx, configName, config)
	if err != nil {
		return nil, err
	}
	f, err := fsInfo.NewFs(ctx, configName, fsPath, config)
	// Register the Fs even on ErrorIsFile as callers still use it
	if f != nil && (err == nil || err == ErrorIsFile) {
		addReverse(f, fsInfo)
	}
	return f, err
}
// Add "global" config or "override" to ctx and the global config if required.
//
// This looks through keys prefixed with "global." or "override." in
// config and sets ctx and optionally the global context if "global.".
//
// It returns the original ctx unchanged (plus an error) if setting
// the config fails, or a new ctx carrying the overridden config.
func addConfigToContext(ctx context.Context, configName string, config configmap.Getter) (newCtx context.Context, err error) {
	overrideConfig := make(configmap.Simple)
	globalConfig := make(configmap.Simple)
	for i := range ConfigOptionsInfo {
		opt := &ConfigOptionsInfo[i]
		globalName := "global." + opt.Name
		value, isSet := config.Get(globalName)
		if isSet {
			// Set both override and global if global
			overrideConfig[opt.Name] = value
			globalConfig[opt.Name] = value
		}
		overrideName := "override." + opt.Name
		value, isSet = config.Get(overrideName)
		if isSet {
			overrideConfig[opt.Name] = value
		}
	}
	if len(overrideConfig) == 0 && len(globalConfig) == 0 {
		return ctx, nil
	}
	newCtx, ci := AddConfig(ctx)
	// Sorted key lists are used only for stable log and error messages
	overrideKeys := slices.Collect(maps.Keys(overrideConfig))
	slices.Sort(overrideKeys)
	globalKeys := slices.Collect(maps.Keys(globalConfig))
	slices.Sort(globalKeys)
	// Set the config in the newCtx
	err = configstruct.Set(overrideConfig, ci)
	if err != nil {
		return ctx, fmt.Errorf("failed to set override config variables %q: %w", overrideKeys, err)
	}
	Debugf(configName, "Set overridden config %q for backend startup", overrideKeys)
	// Set the global context only
	if len(globalConfig) != 0 {
		globalCI := GetConfig(context.Background())
		err = configstruct.Set(globalConfig, globalCI)
		if err != nil {
			return ctx, fmt.Errorf("failed to set global config variables %q: %w", globalKeys, err)
		}
		// FIX: log the global keys here - previously this logged
		// overrideKeys which misreported which keys were set globally
		Debugf(configName, "Set global config %q at backend startup", globalKeys)
	}
	return newCtx, nil
}
// ConfigFs makes the config for calling NewFs with.
//
// It parses the path which is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
func ConfigFs(path string) (fsInfo *RegInfo, configName, fsPath string, config *configmap.Map, err error) {
	// Parse the remote path first
	fsInfo, configName, fsPath, connectionStringConfig, err := ParseRemote(path)
	if err != nil {
		return fsInfo, configName, fsPath, nil, err
	}
	// Build the config map for the backend
	config = ConfigMap(fsInfo.Prefix, fsInfo.Options, configName, connectionStringConfig)
	return fsInfo, configName, fsPath, config, nil
}
// ParseRemote deconstructs a path into configName, fsPath, looking up
// the fsName in the config file (returning NotFoundInConfigFile if not found)
func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, connectionStringConfig configmap.Simple, err error) {
	parsed, err := fspath.Parse(path)
	if err != nil {
		return nil, "", "", nil, err
	}
	configName, fsPath = parsed.Name, parsed.Path
	var fsName string
	var ok bool
	if configName != "" {
		if strings.HasPrefix(configName, ":") {
			// ":backend:path" syntax - the backend name is given directly
			fsName = configName[1:]
		} else {
			// Normal "remote:path" - look the backend type up in the config file
			m := ConfigMap("", nil, configName, parsed.Config)
			fsName, ok = m.Get("type")
			if !ok {
				return nil, "", "", nil, fmt.Errorf("%w (%q)", ErrorNotFoundInConfigFile, configName)
			}
		}
	} else {
		// No remote name means a plain local path
		fsName = "local"
		configName = "local"
	}
	fsInfo, err = Find(fsName)
	return fsInfo, configName, fsPath, parsed.Config, err
}
// configString returns a canonical version of the config string used
// to configure the Fs as passed to fs.NewFs
//
// If full is true any "{suffix}" on the name (added by NewFs for
// overridden config) is expanded back into the config parameters it
// stands for.
func configString(f Info, full bool) string {
	name := f.Name()
	if open := strings.IndexRune(name, '{'); full && open >= 0 && strings.HasSuffix(name, "}") {
		// Look up the config values which the suffix represents
		suffix := name[open:]
		overriddenConfigMu.Lock()
		config, ok := overriddenConfig[suffix]
		overriddenConfigMu.Unlock()
		if ok {
			name = name[:open] + "," + config
		} else {
			Errorf(f, "Failed to find config for suffix %q", suffix)
		}
	}
	root := f.Root()
	// Local paths are returned bare without a "local:" prefix
	if name == "local" && f.Features().IsLocal {
		return root
	}
	return name + ":" + root
}
// ConfigString returns a canonical version of the config string used
// to configure the Fs as passed to fs.NewFs. For Fs with extra
// parameters this will include a canonical {hexstring} suffix.
func ConfigString(f Info) string {
	return configString(f, false)
}

// FullPath returns the full path with remote:path/to/object
// for an object.
func FullPath(o Object) string {
	return fspath.JoinRootPath(ConfigString(o.Fs()), o.Remote())
}

// ConfigStringFull returns a canonical version of the config string
// used to configure the Fs as passed to fs.NewFs. This string can be
// used to re-instantiate the Fs exactly so includes all the extra
// parameters passed in.
func ConfigStringFull(f Fs) string {
	// full=true expands any {suffix} back into config parameters
	return configString(f, true)
}
// TemporaryLocalFs creates a local FS in the OS's temporary directory.
//
// No cleanup is performed, the caller must call Purge on the Fs themselves.
func TemporaryLocalFs(ctx context.Context) (Fs, error) {
	dir, err := os.MkdirTemp("", "rclone-spool")
	if err != nil {
		return nil, err
	}
	// Remove the directory again - this reserves a unique name while
	// leaving the backend to (re)create it on demand.
	if err := os.Remove(dir); err != nil {
		return nil, err
	}
	return NewFs(ctx, filepath.ToSlash(dir))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/countsuffix.go | fs/countsuffix.go | package fs
// CountSuffix is parsed by flag with k/M/G decimal suffixes
import (
"errors"
"fmt"
"math"
"sort"
"strconv"
"strings"
)
// CountSuffix is an int64 with a friendly way of printing setting
type CountSuffix int64

// Common multipliers for SizeSuffix
//
// These are decimal (SI) multipliers - powers of 1000, not 1024.
const (
	CountSuffixBase CountSuffix = 1
	Kilo = 1000 * CountSuffixBase
	Mega = 1000 * Kilo
	Giga = 1000 * Mega
	Tera = 1000 * Giga
	Peta = 1000 * Tera
	Exa = 1000 * Peta
)
const (
	// CountSuffixMax is the largest CountSuffix multiplier
	CountSuffixMax = Exa
	// CountSuffixMaxValue is the largest value that can be used to create CountSuffix
	CountSuffixMaxValue = math.MaxInt64
	// CountSuffixMinValue is the smallest value that can be used to create CountSuffix
	CountSuffixMinValue = math.MinInt64
)
// Turn CountSuffix into a string and a suffix
func (x CountSuffix) string() (string, string) {
	// Negative values mean the setting is disabled
	if x < 0 {
		return "off", ""
	}
	if x == 0 {
		return "0", ""
	}
	// Pick the largest unit which keeps the scaled value >= 1
	steps := []struct {
		below CountSuffix // use this entry when x < below
		div   CountSuffix // divisor for this entry
		sym   string      // suffix symbol
	}{
		{Kilo, CountSuffixBase, ""},
		{Mega, Kilo, "k"},
		{Giga, Mega, "M"},
		{Tera, Giga, "G"},
		{Peta, Tera, "T"},
		{Exa, Peta, "P"},
	}
	scaled, suffix := float64(x)/float64(Exa), "E"
	for _, step := range steps {
		if x < step.below {
			scaled, suffix = float64(x)/float64(step.div), step.sym
			break
		}
	}
	// Whole numbers print without decimals, others with 3 decimal places
	if math.Floor(scaled) == scaled {
		return fmt.Sprintf("%.0f", scaled), suffix
	}
	return fmt.Sprintf("%.3f", scaled), suffix
}
// String turns CountSuffix into a string
func (x CountSuffix) String() string {
	val, suffix := x.string()
	return val + suffix
}

// Unit turns CountSuffix into a string with a unit appended, with a
// space between the number and the suffixed unit. Returns just "off"
// for disabled (negative) values.
func (x CountSuffix) Unit(unit string) string {
	val, suffix := x.string()
	if val == "off" {
		return val
	}
	return val + " " + suffix + unit
}
// multiplierFromSymbol looks up the multiplier for a single suffix
// byte, case insensitively. found is false (and the multiplier is the
// base, 1) for an unrecognised symbol.
func (x *CountSuffix) multiplierFromSymbol(s byte) (found bool, multiplier float64) {
	// Fold ASCII upper case letters to lower case so 'K' == 'k' etc.
	switch s | 0x20 {
	case 'k':
		return true, float64(Kilo)
	case 'm':
		return true, float64(Mega)
	case 'g':
		return true, float64(Giga)
	case 't':
		return true, float64(Tera)
	case 'p':
		return true, float64(Peta)
	case 'e':
		return true, float64(Exa)
	}
	return false, float64(CountSuffixBase)
}
// Set a CountSuffix
//
// Accepts "off", a bare number (treated as kilo - see below), or a
// number with a k/M/G/T/P/E suffix (case insensitive), optionally
// followed by "b"/"B" (e.g. "10kb"). A plain "b"/"B" suffix gives an
// exact count with no multiplier.
func (x *CountSuffix) Set(s string) error {
	if len(s) == 0 {
		return errors.New("empty string")
	}
	if strings.ToLower(s) == "off" {
		// "off" is stored as -1 meaning disabled/unlimited
		*x = -1
		return nil
	}
	suffix := s[len(s)-1]
	suffixLen := 1
	multiplierFound := false
	var multiplier float64
	switch suffix {
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
		// No suffix - bare numbers get the kilo multiplier
		suffixLen = 0
		multiplier = float64(Kilo)
	case 'b', 'B':
		if len(s) > 1 {
			// Look for a multiplier symbol before the "b", e.g. "10kb"
			suffix = s[len(s)-2]
			if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); multiplierFound {
				suffixLen = 2
			}
		} else {
			multiplier = float64(CountSuffixBase)
		}
	default:
		if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); !multiplierFound {
			return fmt.Errorf("bad suffix %q", suffix)
		}
	}
	// Strip the suffix and parse the remaining number
	s = s[:len(s)-suffixLen]
	value, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return err
	}
	if value < 0 {
		return fmt.Errorf("size can't be negative %q", s)
	}
	value *= multiplier
	*x = CountSuffix(value)
	return nil
}
// Type of the value
func (x CountSuffix) Type() string {
	return "CountSuffix"
}

// Scan implements the fmt.Scanner interface
//
// It reads a whitespace-delimited token and parses it with Set.
func (x *CountSuffix) Scan(s fmt.ScanState, ch rune) error {
	token, err := s.Token(true, nil)
	if err != nil {
		return err
	}
	return x.Set(string(token))
}
// CountSuffixList is a slice CountSuffix values
type CountSuffixList []CountSuffix

// Len, Swap and Less implement sort.Interface for CountSuffixList
func (l CountSuffixList) Len() int { return len(l) }
func (l CountSuffixList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l CountSuffixList) Less(i, j int) bool { return l[i] < l[j] }

// Sort sorts the list
func (l CountSuffixList) Sort() {
	sort.Sort(l)
}
// UnmarshalJSON makes sure the value can be parsed as a string or integer in JSON
func (x *CountSuffix) UnmarshalJSON(in []byte) error {
	return UnmarshalJSONFlag(in, x, func(i int64) error {
		// Plain JSON integers are used as-is with no multiplier
		*x = CountSuffix(i)
		return nil
	})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/override_test.go | fs/override_test.go | package fs
// Check all optional interfaces satisfied
//
// Compile-time assertion that OverrideRemote implements FullObjectInfo.
var _ FullObjectInfo = (*OverrideRemote)(nil)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/parseduration_test.go | fs/parseduration_test.go | package fs
import (
"encoding/json"
"fmt"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check it satisfies the interfaces
//
// Compile-time assertions that Duration works as a flag both by
// pointer and by value.
var (
	_ Flagger = (*Duration)(nil)
	_ FlaggerNP = Duration(0)
)
// TestParseDuration checks parsing of durations including rclone's
// d/w/M/y suffixes, "off" and absolute date/time forms.
func TestParseDuration(t *testing.T) {
	now := time.Date(2020, 9, 5, 8, 15, 5, 250, time.UTC)
	getNow := func() time.Time {
		return now
	}
	for _, test := range []struct {
		in string
		want time.Duration
		err bool
	}{
		{"0", 0, false},
		{"", 0, true},
		{"1ms", time.Millisecond, false},
		{"1s", time.Second, false},
		{"1m", time.Minute, false},
		{"1.5m", (3 * time.Minute) / 2, false},
		{"1h", time.Hour, false},
		{"1d", time.Hour * 24, false},
		{"1w", time.Hour * 24 * 7, false},
		{"1M", time.Hour * 24 * 30, false},
		{"1y", time.Hour * 24 * 365, false},
		{"1.5y", time.Hour * 24 * 365 * 3 / 2, false},
		{"-1s", -time.Second, false},
		{"1.s", time.Second, false},
		{"1x", 0, true},
		{"off", time.Duration(DurationOff), false},
		{"1h2m3s", time.Hour + 2*time.Minute + 3*time.Second, false},
		{"2001-02-03", now.Sub(time.Date(2001, 2, 3, 0, 0, 0, 0, time.Local)), false},
		{"2001-02-03 10:11:12", now.Sub(time.Date(2001, 2, 3, 10, 11, 12, 0, time.Local)), false},
		{"2001-08-03 10:11:12", now.Sub(time.Date(2001, 8, 3, 10, 11, 12, 0, time.Local)), false},
		{"2001-02-03T10:11:12", now.Sub(time.Date(2001, 2, 3, 10, 11, 12, 0, time.Local)), false},
		{"2001-02-03T10:11:12.123Z", now.Sub(time.Date(2001, 2, 3, 10, 11, 12, 123, time.UTC)), false},
		{"2001-02-03T10:11:12.123+00:00", now.Sub(time.Date(2001, 2, 3, 10, 11, 12, 123, time.UTC)), false},
	} {
		duration, err := parseDurationFromNow(test.in, getNow)
		if test.err {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
		}
		if strings.HasPrefix(test.in, "2001-") {
			// Date-based cases depend on the local timezone so allow 1s slop
			ok := duration > test.want-time.Second && duration < test.want+time.Second
			assert.True(t, ok, test.in)
		} else {
			assert.Equal(t, test.want, duration)
		}
	}
}
// TestDurationString checks formatting of Duration and that each
// formatted value round-trips through the parser.
func TestDurationString(t *testing.T) {
	now := time.Date(2020, 9, 5, 8, 15, 5, 250, time.UTC)
	getNow := func() time.Time {
		return now
	}
	for _, test := range []struct {
		in time.Duration
		want string
	}{
		{time.Duration(0), "0s"},
		{time.Second, "1s"},
		{time.Minute, "1m0s"},
		{time.Millisecond, "1ms"},
		{time.Second, "1s"},
		{(3 * time.Minute) / 2, "1m30s"},
		{time.Hour, "1h0m0s"},
		{time.Hour * 24, "1d"},
		{time.Hour * 24 * 7, "1w"},
		{time.Hour * 24 * 30, "1M"},
		{time.Hour * 24 * 365, "1y"},
		{time.Hour * 24 * 365 * 3 / 2, "1.5y"},
		{-time.Second, "-1s"},
		{time.Second, "1s"},
		{time.Duration(DurationOff), "off"},
		{time.Hour + 2*time.Minute + 3*time.Second, "1h2m3s"},
		{time.Hour * 24, "1d"},
		{time.Hour * 24 * 7, "1w"},
		{time.Hour * 24 * 30, "1M"},
		{time.Hour * 24 * 365, "1y"},
		{time.Hour * 24 * 365 * 3 / 2, "1.5y"},
		{-time.Hour * 24 * 365 * 3 / 2, "-1.5y"},
	} {
		got := Duration(test.in).String()
		assert.Equal(t, test.want, got)
		// Test the reverse
		reverse, err := parseDurationFromNow(test.want, getNow)
		assert.NoError(t, err)
		assert.Equal(t, test.in, reverse)
	}
}
// TestDurationReadableString checks the long and truncated (top three
// components) human readable forms, including negatives.
func TestDurationReadableString(t *testing.T) {
	for _, test := range []struct {
		negative bool
		in time.Duration
		wantLong string
		wantShort string
	}{
		// Edge Cases
		{false, time.Duration(DurationOff), "off", "off"},
		// Base Cases
		{false, time.Duration(0), "0s", "0s"},
		{true, time.Millisecond, "1ms", "1ms"},
		{true, time.Second, "1s", "1s"},
		{true, time.Minute, "1m", "1m"},
		{true, (3 * time.Minute) / 2, "1m30s", "1m30s"},
		{true, time.Hour, "1h", "1h"},
		{true, time.Hour * 24, "1d", "1d"},
		{true, time.Hour * 24 * 7, "1w", "1w"},
		{true, time.Hour * 24 * 365, "1y", "1y"},
		// Composite Cases
		{true, time.Hour + 2*time.Minute + 3*time.Second, "1h2m3s", "1h2m3s"},
		{true, time.Hour * 24 * (365 + 14), "1y2w", "1y2w"},
		{true, time.Hour*24*4 + time.Hour*3 + time.Minute*2 + time.Second, "4d3h2m1s", "4d3h2m"},
		{true, time.Hour * 24 * (365*3 + 7*2 + 1), "3y2w1d", "3y2w1d"},
		{true, time.Hour*24*(365*3+7*2+1) + time.Hour*2 + time.Second, "3y2w1d2h1s", "3y2w1d"},
		{true, time.Hour*24*(365*3+7*2+1) + time.Second, "3y2w1d1s", "3y2w1d"},
		{true, time.Hour*24*(365+7*2+3) + time.Hour*4 + time.Minute*5 + time.Second*6 + time.Millisecond*7, "1y2w3d4h5m6s7ms", "1y2w3d"},
		{true, time.Duration(DurationOff) / time.Millisecond * time.Millisecond, "292y24w3d23h47m16s853ms", "292y24w3d"}, // Should have been 854ms but some precision are lost with floating point calculations
	} {
		got := Duration(test.in).ReadableString()
		assert.Equal(t, test.wantLong, got)
		got = Duration(test.in).ShortReadableString()
		assert.Equal(t, test.wantShort, got)
		// Test Negative Case
		if test.negative {
			got = Duration(-test.in).ReadableString()
			assert.Equal(t, "-"+test.wantLong, got)
			got = Duration(-test.in).ShortReadableString()
			assert.Equal(t, "-"+test.wantShort, got)
		}
	}
}
// TestDurationScan checks Duration works with fmt.Sscan. Absolute time
// strings are converted to a duration relative to a fixed "now", which
// is injected by overriding timeNowFunc for the length of the test.
func TestDurationScan(t *testing.T) {
	now := time.Date(2020, 9, 5, 8, 15, 5, 250, time.UTC)
	oldTimeNowFunc := timeNowFunc
	timeNowFunc = func() time.Time { return now }
	defer func() { timeNowFunc = oldTimeNowFunc }()
	for _, test := range []struct {
		in string
		want Duration
	}{
		{"17m", Duration(17 * time.Minute)},
		{"-12h", Duration(-12 * time.Hour)},
		{"0", Duration(0)},
		{"off", DurationOff},
		{"2022-03-26T17:48:19Z", Duration(now.Sub(time.Date(2022, 03, 26, 17, 48, 19, 0, time.UTC)))},
		{"2022-03-26 17:48:19", Duration(now.Sub(time.Date(2022, 03, 26, 17, 48, 19, 0, time.Local)))},
	} {
		var got Duration
		n, err := fmt.Sscan(test.in, &got)
		require.NoError(t, err)
		assert.Equal(t, 1, n)
		assert.Equal(t, test.want, got)
	}
}
// TestParseUnmarshalJSON checks Duration.UnmarshalJSON with both quoted
// duration strings (suffix forms and "off") and raw integers, which are
// interpreted as nanoseconds.
func TestParseUnmarshalJSON(t *testing.T) {
	for _, test := range []struct {
		in string
		want time.Duration
		err bool
	}{
		{`""`, 0, true},
		{`"0"`, 0, false},
		{`"1ms"`, time.Millisecond, false},
		{`"1s"`, time.Second, false},
		{`"1m"`, time.Minute, false},
		{`"1h"`, time.Hour, false},
		{`"1d"`, time.Hour * 24, false},
		{`"1w"`, time.Hour * 24 * 7, false},
		{`"1M"`, time.Hour * 24 * 30, false},
		{`"1y"`, time.Hour * 24 * 365, false},
		{`"off"`, time.Duration(DurationOff), false},
		{`"error"`, 0, true},
		{"0", 0, false},
		{"1000000", time.Millisecond, false},
		{"1000000000", time.Second, false},
		{"60000000000", time.Minute, false},
		{"3600000000000", time.Hour, false},
		{"9223372036854775807", time.Duration(DurationOff), false}, // math.MaxInt64 means off
		{"error", 0, true},
	} {
		var duration Duration
		err := json.Unmarshal([]byte(test.in), &duration)
		if test.err {
			require.Error(t, err, test.in)
		} else {
			require.NoError(t, err, test.in)
		}
		assert.Equal(t, Duration(test.want), duration, test.in)
	}
}
// TestUnmarshalJSON checks Duration.UnmarshalJSON edge cases.
//
// Rewritten to use the same testify require/assert helpers as the rest
// of this file instead of raw t.Errorf calls, for consistency.
func TestUnmarshalJSON(t *testing.T) {
	tests := []struct {
		name    string
		input   string
		want    Duration
		wantErr bool
	}{
		{"off string", `"off"`, DurationOff, false},
		{"max int64", `9223372036854775807`, DurationOff, false},
		{"duration string", `"1h"`, Duration(time.Hour), false},
		{"invalid string", `"invalid"`, 0, true},
		{"negative int", `-1`, Duration(-1), false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var d Duration
			err := json.Unmarshal([]byte(tt.input), &d)
			if tt.wantErr {
				require.Error(t, err, tt.input)
			} else {
				require.NoError(t, err, tt.input)
			}
			assert.Equal(t, tt.want, d, tt.input)
		})
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/tristate.go | fs/tristate.go | package fs
import (
"encoding/json"
"fmt"
"strconv"
"strings"
)
// Tristate is a boolean which can have three states: true, false and
// unset/invalid/nil
type Tristate struct {
	Value bool // the boolean value - only meaningful when Valid is true
	Valid bool // true if Value has been set
}
// String renders the tristate as true/false/unset
func (t Tristate) String() string {
	switch {
	case !t.Valid:
		return "unset"
	case t.Value:
		return "true"
	default:
		return "false"
	}
}
// Set parses s into the Tristate
//
// An empty string, "nil", "null" or "unset" (any case) marks it unset;
// anything else must be parseable by strconv.ParseBool.
func (t *Tristate) Set(s string) error {
	s = strings.ToLower(s)
	if s == "" || s == "nil" || s == "null" || s == "unset" {
		t.Valid = false
		return nil
	}
	value, err := strconv.ParseBool(s)
	if err != nil {
		return fmt.Errorf("failed to parse Tristate %q: %w", s, err)
	}
	t.Value = value
	t.Valid = true
	return nil
}
// Type of the value for use by the flag subsystem
func (Tristate) Type() string {
	return "Tristate"
}
// Scan implements the fmt.Scanner interface
//
// It reads a single whitespace-delimited token and parses it with Set.
func (t *Tristate) Scan(s fmt.ScanState, ch rune) error {
	token, err := s.Token(true, nil)
	if err != nil {
		return err
	}
	return t.Set(string(token))
}
// UnmarshalJSON parses it as a bool or nil for unset
func (t *Tristate) UnmarshalJSON(in []byte) error {
	// Decode into a *bool so JSON null maps to nil and a real
	// true/false maps to a non-nil pointer.
	var b *bool
	if err := json.Unmarshal(in, &b); err != nil {
		return err
	}
	t.Valid = b != nil
	if b != nil {
		t.Value = *b
	}
	return nil
}
// MarshalJSON encodes it as a bool or nil for unset
func (t *Tristate) MarshalJSON() ([]byte, error) {
	if t.Valid {
		return json.Marshal(t.Value)
	}
	return json.Marshal(nil)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/metadata_test.go | fs/metadata_test.go | package fs_test
import (
"context"
"fmt"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest/mockfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestMetadataSet checks that Metadata.Set lazily initialises the nil
// map on first use and overwrites existing keys.
func TestMetadataSet(t *testing.T) {
	var m fs.Metadata
	assert.Nil(t, m)
	m.Set("key", "value")
	assert.NotNil(t, m)
	assert.Equal(t, "value", m["key"])
	m.Set("key", "value2")
	assert.Equal(t, "value2", m["key"])
}
// TestMetadataMerge checks Metadata.Merge, including nil receivers, nil
// arguments and overlapping keys (the merged map's values win).
func TestMetadataMerge(t *testing.T) {
	for _, test := range []struct {
		in fs.Metadata
		merge fs.Metadata
		want fs.Metadata
	}{
		{
			in: fs.Metadata{},
			merge: fs.Metadata{},
			want: fs.Metadata{},
		}, {
			in: nil,
			merge: nil,
			want: nil,
		}, {
			in: nil,
			merge: fs.Metadata{},
			want: nil,
		}, {
			in: nil,
			merge: fs.Metadata{"a": "1", "b": "2"},
			want: fs.Metadata{"a": "1", "b": "2"},
		}, {
			in: fs.Metadata{"a": "1", "b": "2"},
			merge: nil,
			want: fs.Metadata{"a": "1", "b": "2"},
		}, {
			in: fs.Metadata{"a": "1", "b": "2"},
			merge: fs.Metadata{"b": "B", "c": "3"},
			want: fs.Metadata{"a": "1", "b": "B", "c": "3"},
		},
	} {
		what := fmt.Sprintf("in=%v, merge=%v", test.in, test.merge)
		test.in.Merge(test.merge)
		assert.Equal(t, test.want, test.in, what)
	}
}
// TestMetadataMergeOptions checks Metadata.MergeOptions: only
// MetadataOption entries in the options slice are merged (in order,
// later entries winning) and other OpenOption types are ignored.
func TestMetadataMergeOptions(t *testing.T) {
	for _, test := range []struct {
		in fs.Metadata
		opts []fs.OpenOption
		want fs.Metadata
	}{
		{
			opts: []fs.OpenOption{},
			want: nil,
		}, {
			opts: []fs.OpenOption{&fs.HTTPOption{}},
			want: nil,
		}, {
			opts: []fs.OpenOption{fs.MetadataOption{"a": "1", "b": "2"}},
			want: fs.Metadata{"a": "1", "b": "2"},
		}, {
			opts: []fs.OpenOption{
				&fs.HTTPOption{},
				fs.MetadataOption{"a": "1", "b": "2"},
				fs.MetadataOption{"b": "B", "c": "3"},
				&fs.HTTPOption{},
			},
			want: fs.Metadata{"a": "1", "b": "B", "c": "3"},
		}, {
			in: fs.Metadata{"a": "first", "z": "OK"},
			opts: []fs.OpenOption{
				&fs.HTTPOption{},
				fs.MetadataOption{"a": "1", "b": "2"},
				fs.MetadataOption{"b": "B", "c": "3"},
				&fs.HTTPOption{},
			},
			want: fs.Metadata{"a": "1", "b": "B", "c": "3", "z": "OK"},
		},
	} {
		what := fmt.Sprintf("in=%v, opts=%v", test.in, test.opts)
		test.in.MergeOptions(test.opts)
		assert.Equal(t, test.want, test.in, what)
	}
}
// TestMetadataMapper checks that the external metadata mapper program
// configured via MetadataMapper is invoked and its output merged.
//
// NOTE(review): this runs "go run metadata_mapper_code.go", so it
// depends on a helper program alongside this test and on the Go tool
// being available - the expected key0/key1/key2 values below come from
// that helper's transformation.
func TestMetadataMapper(t *testing.T) {
	ctx := context.Background()
	ctx, ci := fs.AddConfig(ctx)
	ci.Metadata = true
	require.NoError(t, ci.MetadataMapper.Set("go run metadata_mapper_code.go"))
	now := time.Date(2001, 2, 3, 4, 5, 6, 7, time.UTC)
	f, err := mockfs.NewFs(ctx, "dstFs", "dstFsRoot", nil)
	require.NoError(t, err)
	t.Run("Normal", func(t *testing.T) {
		o := object.NewMemoryObject("file.txt", now, []byte("hello")).WithMetadata(fs.Metadata{
			"key1": "potato",
			"key2": "sausage",
			"key3": "gravy",
		})
		metadata, err := fs.GetMetadataOptions(ctx, f, o, nil)
		require.NoError(t, err)
		assert.Equal(t, fs.Metadata{
			"key0": "cabbage",
			"key1": "two potato",
			"key2": "sausage",
		}, metadata)
	})
	t.Run("Error", func(t *testing.T) {
		o := object.NewMemoryObject("file.txt", now, []byte("hello")).WithMetadata(fs.Metadata{
			"error": "Red Alert",
		})
		metadata, err := fs.GetMetadataOptions(ctx, f, o, nil)
		require.Error(t, err)
		assert.ErrorContains(t, err, "Red Alert")
		require.Nil(t, metadata)
	})
	t.Run("Merge", func(t *testing.T) {
		o := object.NewMemoryObject("file.txt", now, []byte("hello")).WithMetadata(fs.Metadata{
			"key1": "potato",
			"key2": "sausage",
			"key3": "gravy",
		})
		metadata, err := fs.GetMetadataOptions(ctx, f, o, []fs.OpenOption{fs.MetadataOption(fs.Metadata{
			"option": "optionValue",
			"key1": "new potato",
			"key2": "salami",
		})})
		require.NoError(t, err)
		assert.Equal(t, fs.Metadata{
			"key0": "cabbage",
			"key1": "two new potato",
			"key2": "salami",
			"option": "optionValue",
		}, metadata)
	})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/newfs_test.go | fs/newfs_test.go | package fs_test
import (
"context"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/mockfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNewFs checks fs.NewFs on-the-fly remote syntax: plain
// ":backend:path", inline config options (which produce a hashed name
// like {S_NHG}), and "global." options which override the global config.
func TestNewFs(t *testing.T) {
	ctx := context.Background()
	// Register mockfs temporarily, restoring the registry afterwards
	oldRegistry := fs.Registry
	mockfs.Register()
	defer func() {
		fs.Registry = oldRegistry
	}()
	f1, err := fs.NewFs(ctx, ":mockfs:/tmp")
	require.NoError(t, err)
	assert.Equal(t, ":mockfs", f1.Name())
	assert.Equal(t, "/tmp", f1.Root())
	assert.Equal(t, ":mockfs:/tmp", fs.ConfigString(f1))
	// An inline option gives the remote a config-hash suffixed name
	f2, err := fs.NewFs(ctx, ":mockfs,potato:/tmp")
	require.NoError(t, err)
	assert.Equal(t, ":mockfs{S_NHG}", f2.Name())
	assert.Equal(t, "/tmp", f2.Root())
	assert.Equal(t, ":mockfs{S_NHG}:/tmp", fs.ConfigString(f2))
	assert.Equal(t, ":mockfs,potato='true':/tmp", fs.ConfigStringFull(f2))
	// The explicit form should behave identically
	f3, err := fs.NewFs(ctx, ":mockfs,potato='true':/tmp")
	require.NoError(t, err)
	assert.Equal(t, ":mockfs{S_NHG}", f3.Name())
	assert.Equal(t, "/tmp", f3.Root())
	assert.Equal(t, ":mockfs{S_NHG}:/tmp", fs.ConfigString(f3))
	assert.Equal(t, ":mockfs,potato='true':/tmp", fs.ConfigStringFull(f3))
	// Check that the overrides work
	globalCI := fs.GetConfig(ctx)
	original := globalCI.UserAgent
	defer func() {
		globalCI.UserAgent = original
	}()
	f4, err := fs.NewFs(ctx, ":mockfs,global.user_agent='julian':/tmp")
	require.NoError(t, err)
	assert.Equal(t, ":mockfs", f4.Name())
	assert.Equal(t, "/tmp", f4.Root())
	assert.Equal(t, ":mockfs:/tmp", fs.ConfigString(f4))
	assert.Equal(t, ":mockfs:/tmp", fs.ConfigStringFull(f4))
	assert.Equal(t, "julian", globalCI.UserAgent)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config_list.go | fs/config_list.go | package fs
import (
	"bytes"
	"encoding/csv"
	"errors"
	"fmt"
)
// CommaSepList is a comma separated config value
// It uses the encoding/csv rules for quoting and escaping
type CommaSepList []string

// SpaceSepList is a space separated config value
// It uses the encoding/csv rules for quoting and escaping
type SpaceSepList []string

// genericList is the shared implementation behind the separator
// specific list types above.
type genericList []string
// String renders the list as a single comma separated CSV record
func (l CommaSepList) String() string {
	return genericList(l).string(',')
}

// Set the list entries from a comma separated string
func (l *CommaSepList) Set(s string) error {
	return (*genericList)(l).set(',', []byte(s))
}

// Type of the value for use by the flag subsystem
func (CommaSepList) Type() string {
	return "CommaSepList"
}

// Scan implements the fmt.Scanner interface
func (l *CommaSepList) Scan(s fmt.ScanState, ch rune) error {
	return (*genericList)(l).scan(',', s, ch)
}
// String renders the list as a single space separated CSV record
func (l SpaceSepList) String() string {
	return genericList(l).string(' ')
}

// Set the list entries from a space separated string
func (l *SpaceSepList) Set(s string) error {
	return (*genericList)(l).set(' ', []byte(s))
}

// Type of the value for use by the flag subsystem
func (SpaceSepList) Type() string {
	return "SpaceSepList"
}

// Scan implements the fmt.Scanner interface
func (l *SpaceSepList) Scan(s fmt.ScanState, ch rune) error {
	return (*genericList)(l).scan(' ', s, ch)
}
// string renders gl as a single CSV record using sep as the field
// separator, with the trailing newline trimmed off.
func (gl genericList) string(sep rune) string {
	var out bytes.Buffer
	writer := csv.NewWriter(&out)
	writer.Comma = sep
	if err := writer.Write(gl); err != nil {
		// csv.Writer.Write can only fail if Comma is invalid
		panic(err)
	}
	writer.Flush()
	return string(bytes.TrimSpace(out.Bytes()))
}
// set parses b as a single sep-separated CSV record into gl.
//
// An empty input resets the list to nil. CSV parse errors are unwrapped
// to the underlying error so messages don't carry line numbers, which
// are meaningless for a single-line config value.
func (gl *genericList) set(sep rune, b []byte) error {
	if len(b) == 0 {
		*gl = nil
		return nil
	}
	r := csv.NewReader(bytes.NewReader(b))
	r.Comma = sep
	record, err := r.Read()
	if err == nil {
		*gl = record
		return nil
	}
	// Use errors.As rather than a type switch so wrapped
	// *csv.ParseError values are also handled.
	var parseErr *csv.ParseError
	if errors.As(err, &parseErr) {
		return parseErr.Err
	}
	return err
}
// scan implements fmt.Scanner style scanning for the list: it reads one
// token (accepting any rune), trims surrounding whitespace and parses
// it with set.
func (gl *genericList) scan(sep rune, s fmt.ScanState, ch rune) error {
	token, err := s.Token(true, func(rune) bool { return true })
	if err != nil {
		return err
	}
	return gl.set(sep, bytes.TrimSpace(token))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/sizesuffix_test.go | fs/sizesuffix_test.go | package fs
import (
"encoding/json"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check it satisfies the interfaces
var (
_ Flagger = (*SizeSuffix)(nil)
_ FlaggerNP = SizeSuffix(0)
)
// TestSizeSuffixString checks the plain String rendering: binary
// suffixes without a unit, and "off" for any negative value.
func TestSizeSuffixString(t *testing.T) {
	for _, test := range []struct {
		in float64
		want string
	}{
		{0, "0"},
		{102, "102"},
		{1024, "1Ki"},
		{1024 * 1024, "1Mi"},
		{1024 * 1024 * 1024, "1Gi"},
		{10 * 1024 * 1024 * 1024, "10Gi"},
		{10.1 * 1024 * 1024 * 1024, "10.100Gi"},
		{-1, "off"},
		{-100, "off"},
	} {
		ss := SizeSuffix(test.in)
		got := ss.String()
		assert.Equal(t, test.want, got)
	}
}
// TestSizeSuffixByteUnit checks the ByteUnit rendering ("1 KiB" style),
// with "off" for any negative value.
func TestSizeSuffixByteUnit(t *testing.T) {
	for _, test := range []struct {
		in float64
		want string
	}{
		{0, "0 B"},
		{102, "102 B"},
		{1024, "1 KiB"},
		{1024 * 1024, "1 MiB"},
		{1024 * 1024 * 1024, "1 GiB"},
		{10 * 1024 * 1024 * 1024, "10 GiB"},
		{10.1 * 1024 * 1024 * 1024, "10.100 GiB"},
		{10 * 1024 * 1024 * 1024 * 1024, "10 TiB"},
		{10 * 1024 * 1024 * 1024 * 1024 * 1024, "10 PiB"},
		{1 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024, "1 EiB"},
		{-1, "off"},
		{-100, "off"},
	} {
		ss := SizeSuffix(test.in)
		got := ss.ByteUnit()
		assert.Equal(t, test.want, got)
	}
}
// TestSizeSuffixBitRateUnit checks the BitRateUnit rendering
// ("1 Kibit/s" style), with "off" for any negative value.
func TestSizeSuffixBitRateUnit(t *testing.T) {
	for _, test := range []struct {
		in float64
		want string
	}{
		{0, "0 bit/s"},
		{1024, "1 Kibit/s"},
		{1024 * 1024, "1 Mibit/s"},
		{1024 * 1024 * 1024, "1 Gibit/s"},
		{10 * 1024 * 1024 * 1024, "10 Gibit/s"},
		{10.1 * 1024 * 1024 * 1024, "10.100 Gibit/s"},
		{10 * 1024 * 1024 * 1024 * 1024, "10 Tibit/s"},
		{10 * 1024 * 1024 * 1024 * 1024 * 1024, "10 Pibit/s"},
		{1 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024, "1 Eibit/s"},
		{-1, "off"},
		{-100, "off"},
	} {
		ss := SizeSuffix(test.in)
		got := ss.BitRateUnit()
		assert.Equal(t, test.want, got)
	}
}
// TestSizeSuffixSet checks SizeSuffix.Set parsing: bare numbers default
// to KiB, one/two/three-letter binary suffixes are accepted in any
// case, "off" maps to -1, and malformed or SI-style inputs error.
// Exact duplicate table rows from the original have been removed.
func TestSizeSuffixSet(t *testing.T) {
	for _, test := range []struct {
		in   string
		want int64
		err  bool
	}{
		{"0", 0, false},
		{"1b", 1, false},
		{"102B", 102, false},
		{"0.1k", 102, false},
		{"0.1", 102, false},
		{"1K", 1024, false},
		{"1k", 1024, false},
		//{"1KB", 1024, false},
		//{"1kB", 1024, false},
		//{"1kb", 1024, false},
		{"1KI", 1024, false},
		{"1Ki", 1024, false},
		{"1kI", 1024, false},
		{"1ki", 1024, false},
		{"1KiB", 1024, false},
		{"1kib", 1024, false},
		{"1", 1024, false},
		{"2.5", 1024 * 2.5, false},
		{"1M", 1024 * 1024, false},
		//{"1MB", 1024 * 1024, false},
		{"1Mi", 1024 * 1024, false},
		{"1MiB", 1024 * 1024, false},
		{"1.g", 1024 * 1024 * 1024, false},
		{"10G", 10 * 1024 * 1024 * 1024, false},
		{"10T", 10 * 1024 * 1024 * 1024 * 1024, false},
		{"10P", 10 * 1024 * 1024 * 1024 * 1024 * 1024, false},
		{"off", -1, false},
		{"OFF", -1, false},
		{"", 0, true},
		{"1q", 0, true},
		{"1.q", 0, true},
		{"-1K", 0, true},
		{"1i", 0, true},
		{"1iB", 0, true},
		{"1MB", 0, true},
	} {
		ss := SizeSuffix(0)
		err := ss.Set(test.in)
		if test.err {
			require.Error(t, err, test.in)
		} else {
			require.NoError(t, err, test.in)
		}
		assert.Equal(t, test.want, int64(ss))
	}
}
// TestSizeSuffixScan checks SizeSuffix works with fmt.Sscan, ignoring
// surrounding whitespace.
func TestSizeSuffixScan(t *testing.T) {
	var got SizeSuffix
	count, err := fmt.Sscan(" 17M ", &got)
	require.NoError(t, err)
	assert.Equal(t, 1, count)
	assert.Equal(t, SizeSuffix(17<<20), got)
}
// TestSizeSuffixUnmarshalJSON checks UnmarshalJSON with both quoted
// suffix strings (parsed via Set) and raw integers (taken as bytes).
func TestSizeSuffixUnmarshalJSON(t *testing.T) {
	for _, test := range []struct {
		in string
		want int64
		err bool
	}{
		{`"0"`, 0, false},
		{`"102B"`, 102, false},
		{`"1K"`, 1024, false},
		{`"2.5"`, 1024 * 2.5, false},
		{`"1M"`, 1024 * 1024, false},
		{`"1.g"`, 1024 * 1024 * 1024, false},
		{`"10G"`, 10 * 1024 * 1024 * 1024, false},
		{`"off"`, -1, false},
		{`""`, 0, true},
		{`"1q"`, 0, true},
		{`"-1K"`, 0, true},
		{`0`, 0, false},
		{`102`, 102, false},
		{`1024`, 1024, false},
		{`1000000000`, 1000000000, false},
		{`1.1.1`, 0, true},
	} {
		var ss SizeSuffix
		err := json.Unmarshal([]byte(test.in), &ss)
		if test.err {
			require.Error(t, err, test.in)
		} else {
			require.NoError(t, err, test.in)
		}
		assert.Equal(t, test.want, int64(ss))
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/override.go | fs/override.go | package fs
import "context"
// OverrideRemote is a wrapper to override the Remote for an
// ObjectInfo
type OverrideRemote struct {
	ObjectInfo        // the wrapped ObjectInfo - all other methods delegate here
	remote     string // the remote name to report instead
}
// NewOverrideRemote returns an OverrideRemoteObject which will
// return the remote specified
func NewOverrideRemote(oi ObjectInfo, remote string) *OverrideRemote {
	// Avoid nesting wrappers - if oi is already an OverrideRemote,
	// wrap the underlying ObjectInfo instead.
	if inner, ok := oi.(*OverrideRemote); ok {
		oi = inner.ObjectInfo
	}
	return &OverrideRemote{
		ObjectInfo: oi,
		remote:     remote,
	}
}
// Remote returns the overridden remote name
func (o *OverrideRemote) Remote() string {
	return o.remote
}

// String returns the overridden remote name
func (o *OverrideRemote) String() string {
	return o.remote
}

// MimeType returns the mime type of the underlying object or "" if it
// can't be worked out (i.e. the wrapped ObjectInfo is not a MimeTyper)
func (o *OverrideRemote) MimeType(ctx context.Context) string {
	if do, ok := o.ObjectInfo.(MimeTyper); ok {
		return do.MimeType(ctx)
	}
	return ""
}

// ID returns the ID of the Object if known, or "" if not
func (o *OverrideRemote) ID() string {
	if do, ok := o.ObjectInfo.(IDer); ok {
		return do.ID()
	}
	return ""
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o *OverrideRemote) UnWrap() Object {
	if obj, ok := o.ObjectInfo.(Object); ok {
		return obj
	}
	return nil
}

// GetTier returns storage tier or class of the Object, or "" if the
// wrapped ObjectInfo doesn't implement GetTierer
func (o *OverrideRemote) GetTier() string {
	do, ok := o.ObjectInfo.(GetTierer)
	if !ok {
		return ""
	}
	return do.GetTier()
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *OverrideRemote) Metadata(ctx context.Context) (Metadata, error) {
	do, ok := o.ObjectInfo.(Metadataer)
	if !ok {
		return nil, nil
	}
	return do.Metadata(ctx)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/version.go | fs/version.go | package fs
// Version of rclone containing the complete version string
var Version string
func init() {
if Version == "" {
if VersionSuffix == "" {
Version = VersionTag
} else {
Version = VersionTag + "-" + VersionSuffix
}
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/direntries.go | fs/direntries.go | package fs
import "fmt"
// DirEntries is a slice of Object or *Dir
type DirEntries []DirEntry

// Len is part of sort.Interface.
func (ds DirEntries) Len() int {
	return len(ds)
}

// Swap is part of sort.Interface.
func (ds DirEntries) Swap(i, j int) {
	ds[i], ds[j] = ds[j], ds[i]
}

// Less is part of sort.Interface.
//
// Entries are ordered by CompareDirEntries (name, then type).
func (ds DirEntries) Less(i, j int) bool {
	return CompareDirEntries(ds[i], ds[j]) < 0
}
// ForObject runs the function supplied on every object in the entries,
// skipping entries which aren't Objects.
func (ds DirEntries) ForObject(fn func(o Object)) {
	for _, entry := range ds {
		if o, ok := entry.(Object); ok {
			fn(o)
		}
	}
}
// ForObjectError runs the function supplied on every object in the
// entries, stopping and returning the first error encountered.
func (ds DirEntries) ForObjectError(fn func(o Object) error) error {
	for _, entry := range ds {
		if o, ok := entry.(Object); ok {
			if err := fn(o); err != nil {
				return err
			}
		}
	}
	return nil
}
// ForDir runs the function supplied on every Directory in the entries,
// skipping entries which aren't Directories.
func (ds DirEntries) ForDir(fn func(dir Directory)) {
	for _, entry := range ds {
		if dir, ok := entry.(Directory); ok {
			fn(dir)
		}
	}
}
// ForDirError runs the function supplied on every Directory in the
// entries, stopping and returning the first error encountered.
func (ds DirEntries) ForDirError(fn func(dir Directory) error) error {
	for _, entry := range ds {
		if dir, ok := entry.(Directory); ok {
			if err := fn(dir); err != nil {
				return err
			}
		}
	}
	return nil
}
// DirEntryType returns a string description of the DirEntry, either
// "object", "directory" or "unknown type XXX"
func DirEntryType(d DirEntry) string {
	switch d.(type) {
	case Object:
		return "object"
	case Directory:
		return "directory"
	default:
		return fmt.Sprintf("unknown type %T", d)
	}
}
// CompareDirEntries returns 1 if a > b, 0 if a == b and -1 if a < b
// If two dir entries have the same name, compare their types
// (directories sort before objects because "directory" < "object").
func CompareDirEntries(a, b DirEntry) int {
	switch aName, bName := a.Remote(), b.Remote(); {
	case aName > bName:
		return 1
	case aName < bName:
		return -1
	}
	// Names are equal - fall back to comparing the type strings
	switch typeA, typeB := DirEntryType(a), DirEntryType(b); {
	case typeA > typeB:
		return 1
	case typeA < typeB:
		return -1
	}
	return 0
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/sizesuffix.go | fs/sizesuffix.go | package fs
// SizeSuffix is parsed by flag with K/M/G binary suffixes
import (
"encoding/json"
"errors"
"fmt"
"math"
"sort"
"strconv"
"strings"
)
// SizeSuffix is an int64 with a friendly way of printing setting
type SizeSuffix int64

// Common multipliers for SizeSuffix
const (
	SizeSuffixBase SizeSuffix = 1 << (iota * 10) // 1
	Kibi // 2**10
	Mebi // 2**20
	Gibi // 2**30
	Tebi // 2**40
	Pebi // 2**50
	Exbi // 2**60
)
const (
	// SizeSuffixMax is the largest SizeSuffix multiplier
	SizeSuffixMax = Exbi
	// SizeSuffixMaxValue is the largest value that can be used to create SizeSuffix
	SizeSuffixMaxValue = math.MaxInt64
	// SizeSuffixMinValue is the smallest value that can be used to create SizeSuffix
	SizeSuffixMinValue = math.MinInt64
)
// string turns SizeSuffix into a value string and a binary suffix
// ("Ki", "Mi", ...). Negative values render as "off", zero as "0".
// Whole numbers are printed without decimals, otherwise 3 decimal
// places are used.
func (x SizeSuffix) string() (string, string) {
	switch {
	case x < 0:
		return "off", ""
	case x == 0:
		return "0", ""
	}
	// Pick the largest unit smaller than x
	divisor, suffix := SizeSuffixBase, ""
	switch {
	case x < Kibi:
		// bytes - no suffix
	case x < Mebi:
		divisor, suffix = Kibi, "Ki"
	case x < Gibi:
		divisor, suffix = Mebi, "Mi"
	case x < Tebi:
		divisor, suffix = Gibi, "Gi"
	case x < Pebi:
		divisor, suffix = Tebi, "Ti"
	case x < Exbi:
		divisor, suffix = Pebi, "Pi"
	default:
		divisor, suffix = Exbi, "Ei"
	}
	scaled := float64(x) / float64(divisor)
	if math.Floor(scaled) == scaled {
		return fmt.Sprintf("%.0f", scaled), suffix
	}
	return fmt.Sprintf("%.3f", scaled), suffix
}
// String turns SizeSuffix into a string
func (x SizeSuffix) String() string {
	val, suffix := x.string()
	return val + suffix
}

// unit turns SizeSuffix into a string with the given unit appended,
// e.g. "1 KiB" for unit "B", or "off" for negative values
func (x SizeSuffix) unit(unit string) string {
	val, suffix := x.string()
	if val == "off" {
		return val
	}
	return val + " " + suffix + unit
}

// BitUnit turns SizeSuffix into a string with bit unit
func (x SizeSuffix) BitUnit() string {
	return x.unit("bit")
}

// BitRateUnit turns SizeSuffix into a string with bit rate unit
func (x SizeSuffix) BitRateUnit() string {
	return x.unit("bit/s")
}

// ByteUnit turns SizeSuffix into a string with byte unit
func (x SizeSuffix) ByteUnit() string {
	return x.unit("B")
}

// ByteRateUnit turns SizeSuffix into a string with byte rate unit
func (x SizeSuffix) ByteRateUnit() string {
	return x.unit("B/s")
}
// multiplierFromSymbol returns the binary multiplier for the given
// suffix symbol (case insensitive) and whether the symbol was
// recognised. Unrecognised symbols return found == false with the base
// (1) multiplier.
func (x *SizeSuffix) multiplierFromSymbol(s byte) (found bool, multiplier float64) {
	switch s {
	case 'k', 'K':
		return true, float64(Kibi)
	case 'm', 'M':
		return true, float64(Mebi)
	case 'g', 'G':
		return true, float64(Gibi)
	case 't', 'T':
		return true, float64(Tebi)
	case 'p', 'P':
		return true, float64(Pebi)
	case 'e', 'E':
		return true, float64(Exbi)
	default:
		return false, float64(SizeSuffixBase)
	}
}
// Set parses a size from s into x
//
// Accepted forms: "off" (-1, any case); a bare number (interpreted as
// KiB for backwards compatibility); a one-letter binary suffix
// (k/M/G/T/P/E, any case); the two-letter "Ki" style; the three-letter
// "KiB" style; or a trailing "b"/"B" alone for bytes. SI-style "KB"
// forms and negative sizes are rejected.
func (x *SizeSuffix) Set(s string) error {
	if len(s) == 0 {
		return errors.New("empty string")
	}
	if strings.ToLower(s) == "off" {
		*x = -1
		return nil
	}
	suffix := s[len(s)-1]
	suffixLen := 1
	multiplierFound := false
	var multiplier float64
	switch suffix {
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
		// bare number - defaults to KiB for backwards compatibility
		suffixLen = 0
		multiplier = float64(Kibi)
	case 'b', 'B':
		// "KiB" style - three character suffix ending in "iB"
		if len(s) > 2 && s[len(s)-2] == 'i' {
			suffix = s[len(s)-3]
			suffixLen = 3
			if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); !multiplierFound {
				return fmt.Errorf("bad suffix %q", suffix)
			}
			// Could also support SI form MB, and treat it equivalent to MiB, but perhaps better to reserve it for CountSuffix?
			//} else if len(s) > 1 {
			//	suffix = s[len(s)-2]
			//	if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); multiplierFound {
			//		suffixLen = 2
			//	}
			//}
		} else {
			// a bare trailing "b"/"B" means bytes
			multiplier = float64(SizeSuffixBase)
		}
	case 'i', 'I':
		// "Ki" style - two character suffix
		if len(s) > 1 {
			suffix = s[len(s)-2]
			suffixLen = 2
			multiplierFound, multiplier = x.multiplierFromSymbol(suffix)
		}
		if !multiplierFound {
			return fmt.Errorf("bad suffix %q", suffix)
		}
	default:
		// single letter suffix, e.g. "K"
		if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); !multiplierFound {
			return fmt.Errorf("bad suffix %q", suffix)
		}
	}
	s = s[:len(s)-suffixLen]
	value, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return err
	}
	if value < 0 {
		return fmt.Errorf("size can't be negative %q", s)
	}
	value *= multiplier
	*x = SizeSuffix(value)
	return nil
}
// Type of the value for use by the flag subsystem
func (x SizeSuffix) Type() string {
	return "SizeSuffix"
}

// Scan implements the fmt.Scanner interface
//
// It reads a single whitespace-delimited token and parses it with Set.
func (x *SizeSuffix) Scan(s fmt.ScanState, ch rune) error {
	token, err := s.Token(true, nil)
	if err != nil {
		return err
	}
	return x.Set(string(token))
}
// SizeSuffixList is a slice SizeSuffix values
type SizeSuffixList []SizeSuffix

// sort.Interface implementation - ordered numerically ascending
func (l SizeSuffixList) Len() int { return len(l) }
func (l SizeSuffixList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l SizeSuffixList) Less(i, j int) bool { return l[i] < l[j] }

// Sort sorts the list in place in ascending order
func (l SizeSuffixList) Sort() {
	sort.Sort(l)
}
// UnmarshalJSONFlag unmarshals a JSON input for a flag. If the input
// is a string then it calls the Set method on the flag otherwise it
// calls the setInt function with a parsed int64.
func UnmarshalJSONFlag(in []byte, x interface{ Set(string) error }, setInt func(int64) error) error {
	// A string input goes through the flag's own parser
	var s string
	if err := json.Unmarshal(in, &s); err == nil {
		return x.Set(s)
	}
	// Otherwise it must be an integer
	var i int64
	if err := json.Unmarshal(in, &i); err != nil {
		return err
	}
	return setInt(i)
}
// UnmarshalJSON makes sure the value can be parsed as a string or
// integer in JSON - integers are taken as a raw byte count.
func (x *SizeSuffix) UnmarshalJSON(in []byte) error {
	return UnmarshalJSONFlag(in, x, func(i int64) error {
		*x = SizeSuffix(i)
		return nil
	})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/cutoffmode.go | fs/cutoffmode.go | package fs
// cutoffModeChoices provides the Choices for the CutoffMode Enum
type cutoffModeChoices struct{}

// Choices returns the valid string forms, indexed by the constants below
func (cutoffModeChoices) Choices() []string {
	return []string{
		CutoffModeHard: "HARD",
		CutoffModeSoft: "SOFT",
		CutoffModeCautious: "CAUTIOUS",
	}
}

// CutoffMode describes the possible delete modes in the config
type CutoffMode = Enum[cutoffModeChoices]

// CutoffMode constants
const (
	CutoffModeHard CutoffMode = iota
	CutoffModeSoft
	CutoffModeCautious
	CutoffModeDefault = CutoffModeHard
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/terminalcolormode_test.go | fs/terminalcolormode_test.go | package fs
import (
"encoding/json"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestTerminalColorModeString checks String, including the
// "Unknown(N)" rendering for out-of-range values.
func TestTerminalColorModeString(t *testing.T) {
	for _, test := range []struct {
		in TerminalColorMode
		want string
	}{
		{TerminalColorModeAuto, "AUTO"},
		{TerminalColorModeAlways, "ALWAYS"},
		{TerminalColorModeNever, "NEVER"},
		{36, "Unknown(36)"},
	} {
		tcm := test.in
		assert.Equal(t, test.want, tcm.String(), test.in)
	}
}
// TestTerminalColorModeSet checks Set matches choices case
// insensitively and rejects unknown values.
func TestTerminalColorModeSet(t *testing.T) {
	for _, test := range []struct {
		in string
		want TerminalColorMode
		expectError bool
	}{
		{"auto", TerminalColorModeAuto, false},
		{"ALWAYS", TerminalColorModeAlways, false},
		{"Never", TerminalColorModeNever, false},
		{"INVALID", 0, true},
	} {
		tcm := TerminalColorMode(0)
		err := tcm.Set(test.in)
		if test.expectError {
			require.Error(t, err, test.in)
		} else {
			require.NoError(t, err, test.in)
		}
		assert.Equal(t, test.want, tcm, test.in)
	}
}
// TestTerminalColorModeUnmarshalJSON checks UnmarshalJSON with both
// quoted choice names and raw integer values, rejecting out-of-range
// integers.
func TestTerminalColorModeUnmarshalJSON(t *testing.T) {
	for _, test := range []struct {
		in string
		want TerminalColorMode
		expectError bool
	}{
		{`"auto"`, TerminalColorModeAuto, false},
		{`"ALWAYS"`, TerminalColorModeAlways, false},
		{`"Never"`, TerminalColorModeNever, false},
		{`"Invalid"`, 0, true},
		{strconv.Itoa(int(TerminalColorModeAuto)), TerminalColorModeAuto, false},
		{strconv.Itoa(int(TerminalColorModeAlways)), TerminalColorModeAlways, false},
		{strconv.Itoa(int(TerminalColorModeNever)), TerminalColorModeNever, false},
		{`99`, 0, true},
		{`-99`, 0, true},
	} {
		var tcm TerminalColorMode
		err := json.Unmarshal([]byte(test.in), &tcm)
		if test.expectError {
			require.Error(t, err, test.in)
		} else {
			require.NoError(t, err, test.in)
		}
		assert.Equal(t, test.want, tcm, test.in)
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/parsetime.go | fs/parsetime.go | package fs
import (
"encoding/json"
"fmt"
"time"
)
// Time is a time.Time with some more parsing options
type Time time.Time

// For overriding in unittests.
var (
	timeNowFunc = time.Now // called instead of time.Now so tests can pin "now"
)
// String renders the Time in RFC3339Nano format, or "off" if unset
func (t Time) String() string {
	if t.IsSet() {
		return time.Time(t).Format(time.RFC3339Nano)
	}
	return "off"
}

// IsSet returns if the time is not zero
func (t Time) IsSet() bool {
	return !time.Time(t).IsZero()
}
// ParseTime parses a time or duration string as a Time.
//
// "off" gives the zero time. Otherwise it tries, in order: an absolute
// date/time (parseTimeDates), a standard time.Duration, then a
// suffixed duration (parseDurationSuffixes) - durations are taken as
// offsets back from now.
func ParseTime(date string) (t time.Time, err error) {
	if date == "off" {
		return time.Time{}, nil
	}
	now := timeNowFunc()
	// Attempt to parse as a text time
	t, err = parseTimeDates(date)
	if err == nil {
		return t, nil
	}
	// Attempt to parse as a time.Duration offset from now
	d, err := time.ParseDuration(date)
	if err == nil {
		return now.Add(-d), nil
	}
	// Attempt to parse as a suffixed duration (d/w/M/y) offset from now
	d, err = parseDurationSuffixes(date)
	if err == nil {
		return now.Add(-d), nil
	}
	return t, err
}
// Set a Time from a string using ParseTime
func (t *Time) Set(s string) error {
	parsed, err := ParseTime(s)
	if err == nil {
		*t = Time(parsed)
	}
	return err
}

// Type of the value for use by the flag subsystem
func (t Time) Type() string {
	return "Time"
}
// UnmarshalJSON makes sure the value can be parsed as a string in JSON
// using the same formats as Set
func (t *Time) UnmarshalJSON(in []byte) error {
	var s string
	err := json.Unmarshal(in, &s)
	if err != nil {
		return err
	}
	return t.Set(s)
}

// MarshalJSON marshals as a time.Time value
func (t Time) MarshalJSON() ([]byte, error) {
	return json.Marshal(time.Time(t))
}
// Scan implements the fmt.Scanner interface
//
// It reads one token (accepting any rune) and parses it with Set.
func (t *Time) Scan(s fmt.ScanState, ch rune) error {
	token, err := s.Token(true, func(rune) bool { return true })
	if err != nil {
		return err
	}
	return t.Set(string(token))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/enum.go | fs/enum.go | package fs
import (
"encoding/json"
"fmt"
"strings"
)
// Enum is an option which can only be one of the Choices.
//
// Suggested implementation is something like this:
//
// type choice = Enum[choices]
//
// const (
// choiceA choice = iota
// choiceB
// choiceC
// )
//
// type choices struct{}
//
// func (choices) Choices() []string {
// return []string{
// choiceA: "A",
// choiceB: "B",
// choiceC: "C",
// }
// }
type Enum[C Choices] byte
// Choices returns the valid choices for this type.
//
// It must work on the zero value.
//
// Note that when using this in an Option the ExampleChoices will be
// filled in automatically.
type Choices interface {
// Choices returns the valid choices for this type
Choices() []string
}
// String returns the name of the current choice, or "Unknown(n)" when
// the value is outside the declared choices.
func (e Enum[C]) String() string {
	names := e.Choices()
	if i := int(e); i < len(names) {
		return names[i]
	}
	return fmt.Sprintf("Unknown(%d)", e)
}
// Choices returns the possible values of the Enum, delegating to the
// zero value of the choices type C.
func (e Enum[C]) Choices() []string {
	var choicesValue C
	return choicesValue.Choices()
}
// Help returns a comma separated list of all possible states.
func (e Enum[C]) Help() string {
	const separator = ", "
	return strings.Join(e.Choices(), separator)
}
// Set stores the choice whose name matches s (case-insensitively), or
// returns an error listing the valid choices.
func (e *Enum[C]) Set(s string) error {
	for i, name := range e.Choices() {
		if !strings.EqualFold(s, name) {
			continue
		}
		*e = Enum[C](i)
		return nil
	}
	return fmt.Errorf("invalid choice %q from: %s", s, e.Help())
}
// Type of the value.
//
// If the choices type C provides its own Type() string method that is
// used; otherwise the choices are joined with "|".
func (e Enum[C]) Type() string {
	var zero C
	do, ok := any(zero).(typer)
	if ok {
		return do.Type()
	}
	return strings.Join(e.Choices(), "|")
}
// Scan implements the fmt.Scanner interface, reading one whitespace
// delimited token and passing it to Set.
func (e *Enum[C]) Scan(s fmt.ScanState, ch rune) error {
	token, err := s.Token(true, nil)
	if err == nil {
		err = e.Set(string(token))
	}
	return err
}
// UnmarshalJSON parses it as a string or an integer
//
// A JSON string is matched against the choices via Set (through
// UnmarshalJSONFlag); a JSON integer is range-checked against the
// number of choices and stored directly.
func (e *Enum[C]) UnmarshalJSON(in []byte) error {
	choices := e.Choices()
	return UnmarshalJSONFlag(in, e, func(i int64) error {
		// NOTE(review): the message quotes len(choices) although the
		// valid maximum index is len(choices)-1; the wording is pinned
		// by tests, so it is left as-is.
		if i < 0 || i >= int64(len(choices)) {
			return fmt.Errorf("%d is out of range: must be 0..%d", i, len(choices))
		}
		*e = Enum[C](i)
		return nil
	})
}
// MarshalJSON encodes it as string.
//
// The receiver is a value (not a pointer) so the method is in the
// value method set: encoding/json then uses it even for Enum values
// that are not addressable (e.g. map values, or values marshalled
// directly), which with a pointer receiver would silently fall back to
// numeric encoding. Pointer callers still resolve to this method, so
// the change is backward compatible.
func (e Enum[C]) MarshalJSON() ([]byte, error) {
	return json.Marshal(e.String())
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/versionsuffix.go | fs/versionsuffix.go | package fs
// VersionSuffix of rclone containing the pre-release label if any
var VersionSuffix = "DEV"
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/enum_test.go | fs/enum_test.go | package fs
import (
"encoding/json"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type choices struct{}
func (choices) Choices() []string {
return []string{
choiceA: "A",
choiceB: "B",
choiceC: "C",
}
}
type choice = Enum[choices]
const (
choiceA choice = iota
choiceB
choiceC
)
// Check it satisfies the interfaces
var (
_ Flagger = (*choice)(nil)
_ FlaggerNP = choice(0)
)
func TestEnumString(t *testing.T) {
for _, test := range []struct {
in choice
want string
}{
{choiceA, "A"},
{choiceB, "B"},
{choiceC, "C"},
{choice(100), "Unknown(100)"},
} {
got := test.in.String()
assert.Equal(t, test.want, got)
}
}
func TestEnumType(t *testing.T) {
assert.Equal(t, "A|B|C", choiceA.Type())
}
// Enum with Type() on the choices
type choicestype struct{}
func (choicestype) Choices() []string {
return []string{}
}
func (choicestype) Type() string {
return "potato"
}
type choicetype = Enum[choicestype]
func TestEnumTypeWithFunction(t *testing.T) {
assert.Equal(t, "potato", choicetype(0).Type())
}
func TestEnumHelp(t *testing.T) {
assert.Equal(t, "A, B, C", choice(0).Help())
}
func TestEnumSet(t *testing.T) {
for _, test := range []struct {
in string
want choice
err bool
}{
{"A", choiceA, false},
{"B", choiceB, false},
{"C", choiceC, false},
{"D", choice(100), true},
} {
var got choice
err := got.Set(test.in)
if test.err {
require.Error(t, err)
} else {
require.NoError(t, err)
assert.Equal(t, test.want, got)
}
}
}
func TestEnumScan(t *testing.T) {
var v choice
n, err := fmt.Sscan(" A ", &v)
require.NoError(t, err)
assert.Equal(t, 1, n)
assert.Equal(t, choiceA, v)
}
func TestEnumUnmarshalJSON(t *testing.T) {
for _, test := range []struct {
in string
want choice
err string
}{
{`"A"`, choiceA, ""},
{`"B"`, choiceB, ""},
{`0`, choiceA, ""},
{`1`, choiceB, ""},
{`"D"`, choice(0), `invalid choice "D" from: A, B, C`},
{`100`, choice(0), `100 is out of range: must be 0..3`},
} {
var got choice
err := json.Unmarshal([]byte(test.in), &got)
if test.err != "" {
require.Error(t, err, test.in)
assert.ErrorContains(t, err, test.err)
} else {
require.NoError(t, err, test.in)
}
assert.Equal(t, test.want, got, test.in)
}
}
func TestEnumMarshalJSON(t *testing.T) {
for _, test := range []struct {
in choice
want string
}{
{choiceA, `"A"`},
{choiceB, `"B"`},
} {
got, err := json.Marshal(&test.in)
require.NoError(t, err)
assert.Equal(t, test.want, string(got), fmt.Sprintf("%#v", test.in))
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fingerprint_test.go | fs/fingerprint_test.go | package fs_test
import (
"context"
"fmt"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest/mockfs"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFingerprint(t *testing.T) {
ctx := context.Background()
fMock, err := mockfs.NewFs(ctx, "test", "root", nil)
require.NoError(t, err)
f := fMock.(*mockfs.Fs)
f.SetHashes(hash.NewHashSet(hash.MD5))
for i, test := range []struct {
fast bool
slowModTime bool
slowHash bool
want string
}{
{fast: false, slowModTime: false, slowHash: false, want: "4,0001-01-01 00:00:00 +0000 UTC,8d777f385d3dfec8815d20f7496026dc"},
{fast: false, slowModTime: false, slowHash: true, want: "4,0001-01-01 00:00:00 +0000 UTC,8d777f385d3dfec8815d20f7496026dc"},
{fast: false, slowModTime: true, slowHash: false, want: "4,0001-01-01 00:00:00 +0000 UTC,8d777f385d3dfec8815d20f7496026dc"},
{fast: false, slowModTime: true, slowHash: true, want: "4,0001-01-01 00:00:00 +0000 UTC,8d777f385d3dfec8815d20f7496026dc"},
{fast: true, slowModTime: false, slowHash: false, want: "4,0001-01-01 00:00:00 +0000 UTC,8d777f385d3dfec8815d20f7496026dc"},
{fast: true, slowModTime: false, slowHash: true, want: "4,0001-01-01 00:00:00 +0000 UTC"},
{fast: true, slowModTime: true, slowHash: false, want: "4,8d777f385d3dfec8815d20f7496026dc"},
{fast: true, slowModTime: true, slowHash: true, want: "4"},
} {
what := fmt.Sprintf("#%d fast=%v,slowModTime=%v,slowHash=%v", i, test.fast, test.slowModTime, test.slowHash)
o := mockobject.New("potato").WithContent([]byte("data"), mockobject.SeekModeRegular)
o.SetFs(f)
f.Features().SlowModTime = test.slowModTime
f.Features().SlowHash = test.slowHash
got := fs.Fingerprint(ctx, o, test.fast)
assert.Equal(t, test.want, got, what)
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/backend_config_test.go | fs/backend_config_test.go | package fs
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// TestStatePush checks that StatePush prepends the new comma separated
// values in front of the existing state, with the old state last.
func TestStatePush(t *testing.T) {
	assert.Equal(t, "", StatePush(""))
	assert.Equal(t, "", StatePush("", ""))
	assert.Equal(t, "a", StatePush("", "a"))
	assert.Equal(t, "a,1,2,3", StatePush("", "a", "1,2,3"))
	assert.Equal(t, "potato", StatePush("potato"))
	assert.Equal(t, ",potato", StatePush("potato", ""))
	assert.Equal(t, "a,potato", StatePush("potato", "a"))
	assert.Equal(t, "a,1,2,3,potato", StatePush("potato", "a", "1,2,3"))
}
// TestStatePop checks StatePop against hand-worked examples: empty
// state, a single item, and cases where the popped value itself
// contains commas.
func TestStatePop(t *testing.T) {
	state, value := StatePop("")
	assert.Equal(t, "", value)
	assert.Equal(t, "", state)
	state, value = StatePop("a")
	assert.Equal(t, "a", value)
	assert.Equal(t, "", state)
	state, value = StatePop("a,1,2,3")
	assert.Equal(t, "a", value)
	assert.Equal(t, "1,2,3", state)
	state, value = StatePop("1,2,3,a")
	assert.Equal(t, "1,2,3", value)
	assert.Equal(t, "a", state)
}
// TestMatchProvider checks provider matching: exact (not prefix)
// membership of the comma separated config list, with a leading "!"
// inverting the result.
func TestMatchProvider(t *testing.T) {
	for _, test := range []struct {
		config   string
		provider string
		want     bool
	}{
		{"", "", true},
		{"one", "one", true},
		{"one,two", "two", true},
		{"one,two,three", "two", true},
		{"one", "on", false},
		{"one,two,three", "tw", false},
		{"!one,two,three", "two", false},
		{"!one,two,three", "four", true},
	} {
		what := fmt.Sprintf("%q,%q", test.config, test.provider)
		got := MatchProvider(test.config, test.provider)
		assert.Equal(t, test.want, got, what)
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fs.go | fs/fs.go | // Package fs is a generic file system interface for rclone object storage systems
package fs
import (
"context"
"errors"
"io"
"math"
"time"
)
// Constants
const (
// ModTimeNotSupported is a very large precision value to show
// mod time isn't supported on this Fs
ModTimeNotSupported = 100 * 365 * 24 * time.Hour
// MaxLevel is a sentinel representing an infinite depth for listings
MaxLevel = math.MaxInt32
// The suffix added to a translated symbolic link
LinkSuffix = ".rclonelink"
)
// Globals
var (
// ErrorNotFoundInConfigFile is returned by NewFs if not found in config file
ErrorNotFoundInConfigFile = errors.New("didn't find section in config file")
ErrorCantPurge = errors.New("can't purge directory")
ErrorCantCopy = errors.New("can't copy object - incompatible remotes")
ErrorCantMove = errors.New("can't move object - incompatible remotes")
ErrorCantDirMove = errors.New("can't move directory - incompatible remotes")
ErrorCantUploadEmptyFiles = errors.New("can't upload empty files to this remote")
ErrorDirExists = errors.New("can't copy directory - destination already exists")
ErrorCantSetModTime = errors.New("can't set modified time")
ErrorCantSetModTimeWithoutDelete = errors.New("can't set modified time without deleting existing object")
ErrorDirNotFound = errors.New("directory not found")
ErrorObjectNotFound = errors.New("object not found")
ErrorLevelNotSupported = errors.New("level value not supported")
ErrorListAborted = errors.New("list aborted")
ErrorListBucketRequired = errors.New("bucket or container name is needed in remote")
ErrorIsFile = errors.New("is a file not a directory")
ErrorIsDir = errors.New("is a directory not a file")
ErrorNotAFile = errors.New("is not a regular file")
ErrorNotDeleting = errors.New("not deleting files as there were IO errors")
ErrorNotDeletingDirs = errors.New("not deleting directories as there were IO errors")
ErrorOverlapping = errors.New("can't sync or move files on overlapping remotes (try excluding the destination with a filter rule)")
ErrorDirectoryNotEmpty = errors.New("directory not empty")
ErrorImmutableModified = errors.New("immutable file modified")
ErrorPermissionDenied = errors.New("permission denied")
ErrorCantShareDirectories = errors.New("this backend can't share directories with link")
ErrorNotImplemented = errors.New("optional feature not implemented")
ErrorCommandNotFound = errors.New("command not found")
ErrorFileNameTooLong = errors.New("file name too long")
)
// CheckClose is a utility function used to check the return from
// Close in a defer statement. It always closes c; the Close error is
// stored in *err only if *err is still nil, so an earlier error is
// never overwritten.
func CheckClose(c io.Closer, err *error) {
	if cerr := c.Close(); *err == nil {
		*err = cerr
	}
}
// FileExists returns true if a file remote exists.
// If remote is a directory, FileExists returns false.
//
// The "not a file" conditions (object not found, not a regular file,
// permission denied) report false with a nil error; any other error
// from NewObject is returned to the caller.
func FileExists(ctx context.Context, fs Fs, remote string) (bool, error) {
	_, err := fs.NewObject(ctx, remote)
	if err != nil {
		// Use errors.Is so sentinel errors wrapped by backends are
		// still recognised - plain == only matched the exact values.
		if errors.Is(err, ErrorObjectNotFound) || errors.Is(err, ErrorNotAFile) || errors.Is(err, ErrorPermissionDenied) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
// GetModifyWindow calculates the maximum modify window between the given Fses
// and the Config.ModifyWindow parameter.
//
// If any Fs reports ModTimeNotSupported the result is
// ModTimeNotSupported immediately; nil entries are skipped.
func GetModifyWindow(ctx context.Context, fss ...Info) time.Duration {
	window := time.Duration(GetConfig(ctx).ModifyWindow)
	for _, f := range fss {
		if f == nil {
			continue
		}
		precision := f.Precision()
		if precision == ModTimeNotSupported {
			return ModTimeNotSupported
		}
		if precision > window {
			window = precision
		}
	}
	return window
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/direntries_test.go | fs/direntries_test.go | package fs_test
import (
"sort"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/mockdir"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
)
// TestDirEntriesSort checks sorting of DirEntries: entries are ordered
// by name (in this data the directory lands ahead of the same-named
// object), and sort.Stable preserves input order for fully equal
// entries - c stays before anotherc.
func TestDirEntriesSort(t *testing.T) {
	a := mockobject.New("a")
	aDir := mockdir.New("a")
	b := mockobject.New("b")
	bDir := mockdir.New("b")
	c := mockobject.New("c")
	cDir := mockdir.New("c")
	anotherc := mockobject.New("c")
	dirEntries := fs.DirEntries{bDir, b, aDir, a, c, cDir, anotherc}
	sort.Stable(dirEntries)
	assert.Equal(t, fs.DirEntries{aDir, a, bDir, b, cDir, c, anotherc}, dirEntries)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/deletemode.go | fs/deletemode.go | package fs
// DeleteMode describes the possible delete modes in the config
type DeleteMode byte
// DeleteMode constants
const (
	// Values are assigned sequentially by iota starting at 0.
	DeleteModeOff DeleteMode = iota
	DeleteModeBefore
	DeleteModeDuring
	DeleteModeAfter
	DeleteModeOnly
	// DeleteModeDefault is an alias for DeleteModeAfter.
	DeleteModeDefault = DeleteModeAfter
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/countsuffix_test.go | fs/countsuffix_test.go | package fs
import (
"encoding/json"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check it satisfies the interfaces
var (
_ Flagger = (*CountSuffix)(nil)
_ FlaggerNP = CountSuffix(0)
)
func TestCountSuffixString(t *testing.T) {
for _, test := range []struct {
in float64
want string
}{
{0, "0"},
{102, "102"},
{1000, "1k"},
{1000 * 1000, "1M"},
{1000 * 1000 * 1000, "1G"},
{10 * 1000 * 1000 * 1000, "10G"},
{10.1 * 1000 * 1000 * 1000, "10.100G"},
{-1, "off"},
{-100, "off"},
} {
ss := CountSuffix(test.in)
got := ss.String()
assert.Equal(t, test.want, got)
}
}
func TestCountSuffixUnit(t *testing.T) {
for _, test := range []struct {
in float64
want string
}{
{0, "0 Byte"},
{102, "102 Byte"},
{1000, "1 kByte"},
{1000 * 1000, "1 MByte"},
{1000 * 1000 * 1000, "1 GByte"},
{10 * 1000 * 1000 * 1000, "10 GByte"},
{10.1 * 1000 * 1000 * 1000, "10.100 GByte"},
{10 * 1000 * 1000 * 1000 * 1000, "10 TByte"},
{10 * 1000 * 1000 * 1000 * 1000 * 1000, "10 PByte"},
{1 * 1000 * 1000 * 1000 * 1000 * 1000 * 1000, "1 EByte"},
{-1, "off"},
{-100, "off"},
} {
ss := CountSuffix(test.in)
got := ss.Unit("Byte")
assert.Equal(t, test.want, got)
}
}
func TestCountSuffixSet(t *testing.T) {
for _, test := range []struct {
in string
want int64
err bool
}{
{"0", 0, false},
{"1b", 1, false},
{"100B", 100, false},
{"0.1k", 100, false},
{"0.1", 100, false},
{"1K", 1000, false},
{"1k", 1000, false},
{"1KB", 1000, false},
{"1kB", 1000, false},
{"1kb", 1000, false},
{"1", 1000, false},
{"2.5", 1000 * 2.5, false},
{"1M", 1000 * 1000, false},
{"1MB", 1000 * 1000, false},
{"1.g", 1000 * 1000 * 1000, false},
{"10G", 10 * 1000 * 1000 * 1000, false},
{"10T", 10 * 1000 * 1000 * 1000 * 1000, false},
{"10P", 10 * 1000 * 1000 * 1000 * 1000 * 1000, false},
{"off", -1, false},
{"OFF", -1, false},
{"", 0, true},
{"1q", 0, true},
{"1.q", 0, true},
{"1q", 0, true},
{"-1K", 0, true},
{"1i", 0, true},
{"1iB", 0, true},
{"1Ki", 0, true},
{"1KiB", 0, true},
} {
ss := CountSuffix(0)
err := ss.Set(test.in)
if test.err {
require.Error(t, err, test.in)
} else {
require.NoError(t, err, test.in)
}
assert.Equal(t, test.want, int64(ss))
}
}
func TestCountSuffixScan(t *testing.T) {
var v CountSuffix
n, err := fmt.Sscan(" 17M ", &v)
require.NoError(t, err)
assert.Equal(t, 1, n)
assert.Equal(t, CountSuffix(17000000), v)
}
func TestCountSuffixUnmarshalJSON(t *testing.T) {
for _, test := range []struct {
in string
want int64
err bool
}{
{`"0"`, 0, false},
{`"102B"`, 102, false},
{`"1K"`, 1000, false},
{`"2.5"`, 1000 * 2.5, false},
{`"1M"`, 1000 * 1000, false},
{`"1.g"`, 1000 * 1000 * 1000, false},
{`"10G"`, 10 * 1000 * 1000 * 1000, false},
{`"off"`, -1, false},
{`""`, 0, true},
{`"1q"`, 0, true},
{`"-1K"`, 0, true},
{`0`, 0, false},
{`102`, 102, false},
{`1000`, 1000, false},
{`1000000000`, 1000000000, false},
{`1.1.1`, 0, true},
} {
var ss CountSuffix
err := json.Unmarshal([]byte(test.in), &ss)
if test.err {
require.Error(t, err, test.in)
} else {
require.NoError(t, err, test.in)
}
assert.Equal(t, test.want, int64(ss))
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/configmap.go | fs/configmap.go | // Getters and Setters for ConfigMap
package fs
import (
"os"
"github.com/rclone/rclone/fs/config/configmap"
)
// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
type configEnvVars string
// Get a config item for remote configName from the environment
// variable named by ConfigToEnv, logging a debug line when found.
func (configName configEnvVars) Get(key string) (value string, ok bool) {
	envKey := ConfigToEnv(string(configName), key)
	if value, ok = os.LookupEnv(envKey); ok {
		Debugf(nil, "Setting %s=%q for %q from environment variable %s", key, value, configName, envKey)
	}
	return value, ok
}
// A configmap.Getter to read from the environment RCLONE_option_name
type optionEnvVars struct {
prefix string
options Options
}
// Get a config item from the option environment variables if possible
//
// The key must name a registered option, otherwise !ok. The env var
// name is built by OptionToEnv from "<prefix>-<key>" (or just key when
// prefix is empty). If that variable is unset and the option is marked
// NoPrefix, the unprefixed name is tried as a fallback - the order of
// these two lookups matters.
func (oev optionEnvVars) Get(key string) (value string, ok bool) {
	opt := oev.options.Get(key)
	if opt == nil {
		// Unknown option - nothing to look up.
		return "", false
	}
	var envKey string
	if oev.prefix == "" {
		envKey = OptionToEnv(key)
	} else {
		envKey = OptionToEnv(oev.prefix + "-" + key)
	}
	value, ok = os.LookupEnv(envKey)
	if ok {
		Debugf(nil, "Setting %s %s=%q from environment variable %s", oev.prefix, key, value, envKey)
	} else if opt.NoPrefix {
		// For options with NoPrefix set, check without prefix too
		envKey := OptionToEnv(key)
		value, ok = os.LookupEnv(envKey)
		if ok {
			Debugf(nil, "Setting %s=%q for %s from environment variable %s", key, value, oev.prefix, envKey)
		}
	}
	return value, ok
}
// A configmap.Getter to read either the default value or the set
// value from the RegInfo.Options
type regInfoValues struct {
options Options
useDefault bool
}
// Get returns the option's current value for key. When useDefault is
// false only options changed from their default are reported, so this
// getter can represent either "flag values" or "default values"
// depending on how it was constructed.
func (r *regInfoValues) Get(key string) (value string, ok bool) {
	opt := r.options.Get(key)
	if opt == nil {
		return "", false
	}
	if !r.useDefault && opt.IsDefault() {
		return "", false
	}
	return opt.String(), true
}
// A configmap.Setter to read from the config file
type setConfigFile string
// Set writes a config item through to the config file. A write
// failure is logged rather than returned, matching the
// configmap.Setter interface which has no error return.
func (section setConfigFile) Set(key, value string) {
	Debugf(nil, "Saving config %q in section %q of the config file", key, section)
	if err := ConfigFileSet(string(section), key, value); err != nil {
		Errorf(nil, "Failed saving config %q in section %q of the config file: %v", key, section, err)
	}
}
// A configmap.Getter to read from the config file
type getConfigFile string
// Get a config item from the config file, treating an empty stored
// value the same as an absent one.
func (section getConfigFile) Get(key string) (value string, ok bool) {
	value, ok = ConfigFileGet(string(section), key)
	// Ignore empty lines in the config file
	ok = ok && value != ""
	return value, ok
}
// ConfigMap creates a configmap.Map from the Options, prefix and the
// configName passed in. If connectionStringConfig has any entries (it may be nil),
// then it will be added to the lookup with the highest priority.
//
// If options is nil then the returned configmap.Map should only be
// used for reading non backend specific parameters, such as "type".
//
// This can be used for global settings if prefix is "" and configName is ""
func ConfigMap(prefix string, options Options, configName string, connectionStringConfig configmap.Simple) (config *configmap.Map) {
	// Create the config
	config = configmap.New()
	// Read the config, more specific to least specific.
	// NOTE(review): several getters share PriorityNormal - precedence
	// among them presumably follows this registration order; confirm
	// against configmap.Map before reordering anything below.
	// Config from connection string
	if len(connectionStringConfig) > 0 {
		config.AddGetter(connectionStringConfig, configmap.PriorityNormal)
	}
	// flag values
	if options != nil {
		config.AddGetter(&regInfoValues{options, false}, configmap.PriorityNormal)
	}
	// remote specific environment vars
	if configName != "" {
		config.AddGetter(configEnvVars(configName), configmap.PriorityNormal)
	}
	// backend specific environment vars
	if options != nil {
		config.AddGetter(optionEnvVars{prefix: prefix, options: options}, configmap.PriorityNormal)
	}
	// config file
	if configName != "" {
		config.AddGetter(getConfigFile(configName), configmap.PriorityConfig)
	}
	// default values
	if options != nil {
		config.AddGetter(&regInfoValues{options, true}, configmap.PriorityDefault)
	}
	// Set Config - note the setter is registered even when configName
	// is empty (it will then target the "" section).
	config.AddSetter(setConfigFile(configName))
	return config
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/registry_test.go | fs/registry_test.go | package fs
import (
"encoding/json"
"fmt"
"os"
"testing"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check it satisfies the interface
var _ pflag.Value = (*Option)(nil)
func TestOption(t *testing.T) {
d := &Option{
Name: "potato",
Value: SizeSuffix(17 << 20),
}
assert.Equal(t, "17Mi", d.String())
assert.Equal(t, "SizeSuffix", d.Type())
err := d.Set("18M")
assert.NoError(t, err)
assert.Equal(t, SizeSuffix(18<<20), d.Value)
err = d.Set("sdfsdf")
assert.Error(t, err)
}
// Test options
var (
nouncOption = Option{
Name: "nounc",
}
copyLinksOption = Option{
Name: "copy_links",
Default: false,
NoPrefix: true,
ShortOpt: "L",
Advanced: true,
}
caseInsensitiveOption = Option{
Name: "case_insensitive",
Default: false,
Value: true,
Advanced: true,
}
testOptions = Options{nouncOption, copyLinksOption, caseInsensitiveOption}
)
func TestOptionsSetValues(t *testing.T) {
assert.Nil(t, testOptions[0].Default)
assert.Equal(t, false, testOptions[1].Default)
assert.Equal(t, false, testOptions[2].Default)
testOptions.setValues()
assert.Equal(t, "", testOptions[0].Default)
assert.Equal(t, false, testOptions[1].Default)
assert.Equal(t, false, testOptions[2].Default)
}
func TestOptionsGet(t *testing.T) {
opt := testOptions.Get("copy_links")
assert.Equal(t, ©LinksOption, opt)
opt = testOptions.Get("not_found")
assert.Nil(t, opt)
}
func TestOptionsOveridden(t *testing.T) {
m := configmap.New()
m1 := configmap.Simple{
"nounc": "m1",
"copy_links": "m1",
}
m.AddGetter(m1, configmap.PriorityNormal)
m2 := configmap.Simple{
"nounc": "m2",
"case_insensitive": "m2",
}
m.AddGetter(m2, configmap.PriorityConfig)
m3 := configmap.Simple{
"nounc": "m3",
}
m.AddGetter(m3, configmap.PriorityDefault)
got := testOptions.Overridden(m)
assert.Equal(t, configmap.Simple{
"copy_links": "m1",
"nounc": "m1",
}, got)
}
func TestOptionsNonDefault(t *testing.T) {
m := configmap.Simple{}
got := testOptions.NonDefault(m)
assert.Equal(t, configmap.Simple{}, got)
m["case_insensitive"] = "false"
got = testOptions.NonDefault(m)
assert.Equal(t, configmap.Simple{}, got)
m["case_insensitive"] = "true"
got = testOptions.NonDefault(m)
assert.Equal(t, configmap.Simple{"case_insensitive": "true"}, got)
}
func TestOptionMarshalJSON(t *testing.T) {
out, err := json.MarshalIndent(&caseInsensitiveOption, "", "")
assert.NoError(t, err)
require.Equal(t, `{
"Name": "case_insensitive",
"FieldName": "",
"Help": "",
"Default": false,
"Value": true,
"Hide": 0,
"Required": false,
"IsPassword": false,
"NoPrefix": false,
"Advanced": true,
"Exclusive": false,
"Sensitive": false,
"DefaultStr": "false",
"ValueStr": "true",
"Type": "bool"
}`, string(out))
}
func TestOptionGetValue(t *testing.T) {
assert.Equal(t, "", nouncOption.GetValue())
assert.Equal(t, false, copyLinksOption.GetValue())
assert.Equal(t, true, caseInsensitiveOption.GetValue())
}
func TestOptionString(t *testing.T) {
assert.Equal(t, "", nouncOption.String())
assert.Equal(t, "false", copyLinksOption.String())
assert.Equal(t, "true", caseInsensitiveOption.String())
}
func TestOptionStringStringArray(t *testing.T) {
opt := Option{
Name: "string_array",
Default: []string(nil),
}
assert.Equal(t, "", opt.String())
opt.Default = []string{}
assert.Equal(t, "", opt.String())
opt.Default = []string{"a", "b"}
assert.Equal(t, "a,b", opt.String())
opt.Default = []string{"hello, world!", "goodbye, world!"}
assert.Equal(t, `"hello, world!","goodbye, world!"`, opt.String())
}
func TestOptionStringSizeSuffix(t *testing.T) {
opt := Option{
Name: "size_suffix",
Default: SizeSuffix(0),
}
assert.Equal(t, "0", opt.String())
opt.Default = SizeSuffix(-1)
assert.Equal(t, "off", opt.String())
opt.Default = SizeSuffix(100)
assert.Equal(t, "100B", opt.String())
opt.Default = SizeSuffix(1024)
assert.Equal(t, "1Ki", opt.String())
}
func TestOptionSet(t *testing.T) {
o := caseInsensitiveOption
assert.Equal(t, true, o.Value)
err := o.Set("FALSE")
assert.NoError(t, err)
assert.Equal(t, false, o.Value)
o = copyLinksOption
assert.Equal(t, nil, o.Value)
err = o.Set("True")
assert.NoError(t, err)
assert.Equal(t, true, o.Value)
err = o.Set("INVALID")
assert.Error(t, err)
assert.Equal(t, true, o.Value)
}
func TestOptionType(t *testing.T) {
assert.Equal(t, "string", nouncOption.Type())
assert.Equal(t, "bool", copyLinksOption.Type())
assert.Equal(t, "bool", caseInsensitiveOption.Type())
}
func TestOptionFlagName(t *testing.T) {
assert.Equal(t, "local-nounc", nouncOption.FlagName("local"))
assert.Equal(t, "copy-links", copyLinksOption.FlagName("local"))
assert.Equal(t, "local-case-insensitive", caseInsensitiveOption.FlagName("local"))
}
func TestOptionEnvVarName(t *testing.T) {
assert.Equal(t, "RCLONE_LOCAL_NOUNC", nouncOption.EnvVarName("local"))
assert.Equal(t, "RCLONE_LOCAL_COPY_LINKS", copyLinksOption.EnvVarName("local"))
assert.Equal(t, "RCLONE_LOCAL_CASE_INSENSITIVE", caseInsensitiveOption.EnvVarName("local"))
}
func TestOptionGetters(t *testing.T) {
// Set up env vars
envVars := [][2]string{
{"RCLONE_CONFIG_LOCAL_POTATO_PIE", "yes"},
{"RCLONE_COPY_LINKS", "TRUE"},
{"RCLONE_LOCAL_NOUNC", "NOUNC"},
}
for _, ev := range envVars {
assert.NoError(t, os.Setenv(ev[0], ev[1]))
}
defer func() {
for _, ev := range envVars {
assert.NoError(t, os.Unsetenv(ev[0]))
}
}()
oldConfigFileGet := ConfigFileGet
ConfigFileGet = func(section, key string) (string, bool) {
if section == "sausage" && key == "key1" {
return "value1", true
}
return "", false
}
defer func() {
ConfigFileGet = oldConfigFileGet
}()
// set up getters
// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
configEnvVarsGetter := configEnvVars("local")
// A configmap.Getter to read from the environment RCLONE_option_name
optionEnvVarsGetter := optionEnvVars{"local", testOptions}
// A configmap.Getter to read either the default value or the set
// value from the RegInfo.Options
regInfoValuesGetterFalse := ®InfoValues{
options: testOptions,
useDefault: false,
}
regInfoValuesGetterTrue := ®InfoValues{
options: testOptions,
useDefault: true,
}
// A configmap.Setter to read from the config file
configFileGetter := getConfigFile("sausage")
for i, test := range []struct {
get configmap.Getter
key string
wantValue string
wantOk bool
}{
{configEnvVarsGetter, "not_found", "", false},
{configEnvVarsGetter, "potato_pie", "yes", true},
{optionEnvVarsGetter, "not_found", "", false},
{optionEnvVarsGetter, "copy_links", "TRUE", true},
{optionEnvVarsGetter, "nounc", "NOUNC", true},
{optionEnvVarsGetter, "case_insensitive", "", false},
{regInfoValuesGetterFalse, "not_found", "", false},
{regInfoValuesGetterFalse, "case_insensitive", "true", true},
{regInfoValuesGetterFalse, "copy_links", "", false},
{regInfoValuesGetterTrue, "not_found", "", false},
{regInfoValuesGetterTrue, "case_insensitive", "true", true},
{regInfoValuesGetterTrue, "copy_links", "false", true},
{configFileGetter, "not_found", "", false},
{configFileGetter, "key1", "value1", true},
} {
what := fmt.Sprintf("%d: %+v: %q", i, test.get, test.key)
gotValue, gotOk := test.get.Get(test.key)
assert.Equal(t, test.wantValue, gotValue, what)
assert.Equal(t, test.wantOk, gotOk, what)
}
}
func TestOptionsNonDefaultRC(t *testing.T) {
type cfg struct {
X string `config:"x"`
Y int `config:"y"`
}
c := &cfg{X: "a", Y: 6}
opts := Options{
{Name: "x", Default: "a"}, // at default, should be omitted
{Name: "y", Default: 5}, // non-default, should be included
}
got, err := opts.NonDefaultRC(c)
require.NoError(t, err)
require.Equal(t, map[string]any{"Y": 6}, got)
}
func TestOptionsNonDefaultRCMissingKey(t *testing.T) {
type cfg struct {
X string `config:"x"`
}
c := &cfg{X: "a"}
// Options refers to a key not present in the struct -> expect error
opts := Options{{Name: "missing", Default: ""}}
_, err := opts.NonDefaultRC(c)
assert.ErrorContains(t, err, "not found")
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/dump.go | fs/dump.go | package fs
// DumpFlags describes the Dump options in force
type DumpFlags = Bits[dumpChoices]
// DumpFlags definitions
const (
DumpHeaders DumpFlags = 1 << iota
DumpBodies
DumpRequests
DumpResponses
DumpAuth
DumpFilters
DumpGoRoutines
DumpOpenFiles
DumpMapper
)
// dumpChoices supplies the bit/name pairs for the DumpFlags Bits type.
type dumpChoices struct{}
// Choices returns the bit to name mapping for DumpFlags.
//
// Each entry must mirror the corresponding DumpFlags bit constant
// declared above.
func (dumpChoices) Choices() []BitsChoicesInfo {
	return []BitsChoicesInfo{
		{uint64(DumpHeaders), "headers"},
		{uint64(DumpBodies), "bodies"},
		{uint64(DumpRequests), "requests"},
		{uint64(DumpResponses), "responses"},
		{uint64(DumpAuth), "auth"},
		{uint64(DumpFilters), "filters"},
		{uint64(DumpGoRoutines), "goroutines"},
		{uint64(DumpOpenFiles), "openfiles"},
		{uint64(DumpMapper), "mapper"},
	}
}
// Type is the name reported for this flags type.
func (dumpChoices) Type() string {
	return "DumpFlags"
}
// DumpFlagsList is a list of dump flags used in the help
var DumpFlagsList = DumpHeaders.Help()
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/parseduration.go | fs/parseduration.go | package fs
import (
"encoding/json"
"fmt"
"math"
"strconv"
"strings"
"time"
)
// Duration is a time.Duration with some more parsing options
type Duration time.Duration
// DurationOff is the default value for flags which can be turned off
const DurationOff = Duration((1 << 63) - 1)
// Turn Duration into a string
//
// DurationOff renders as "off". Otherwise the age suffixes are tried
// largest multiplier first: the loop starts at len-2 to skip the final
// catch-all "" (seconds) entry of ageSuffixes. Durations smaller than
// a day fall through to time.Duration's own formatting.
func (d Duration) String() string {
	if d == DurationOff {
		return "off"
	}
	for i := len(ageSuffixes) - 2; i >= 0; i-- {
		ageSuffix := &ageSuffixes[i]
		// Compare on magnitude so negative durations pick the same unit.
		if math.Abs(float64(d)) >= float64(ageSuffix.Multiplier) {
			timeUnits := float64(d) / float64(ageSuffix.Multiplier)
			return strconv.FormatFloat(timeUnits, 'f', -1, 64) + ageSuffix.Suffix
		}
	}
	return time.Duration(d).String()
}
// IsSet returns if the duration is != DurationOff
func (d Duration) IsSet() bool {
return d != DurationOff
}
// We use time conventions
var ageSuffixes = []struct {
Suffix string
Multiplier time.Duration
}{
{Suffix: "d", Multiplier: time.Hour * 24},
{Suffix: "w", Multiplier: time.Hour * 24 * 7},
{Suffix: "M", Multiplier: time.Hour * 24 * 30},
{Suffix: "y", Multiplier: time.Hour * 24 * 365},
// Default to second
{Suffix: "", Multiplier: time.Second},
}
// parse the age as suffixed ages
func parseDurationSuffixes(age string) (time.Duration, error) {
var period float64
for _, ageSuffix := range ageSuffixes {
if strings.HasSuffix(age, ageSuffix.Suffix) {
numberString := age[:len(age)-len(ageSuffix.Suffix)]
var err error
period, err = strconv.ParseFloat(numberString, 64)
if err != nil {
return time.Duration(0), err
}
period *= float64(ageSuffix.Multiplier)
break
}
}
return time.Duration(period), nil
}
// time formats to try parsing ages as - in order
var timeFormats = []string{
time.RFC3339,
"2006-01-02T15:04:05",
"2006-01-02 15:04:05",
"2006-01-02",
}
// parse the date as time in various date formats
func parseTimeDates(date string) (t time.Time, err error) {
var instant time.Time
for _, timeFormat := range timeFormats {
instant, err = time.ParseInLocation(timeFormat, date, time.Local)
if err == nil {
return instant, nil
}
}
return t, err
}
// parse the age as time before the epoch in various date formats
func parseDurationDates(age string, epoch time.Time) (d time.Duration, err error) {
instant, err := parseTimeDates(age)
if err != nil {
return d, err
}
return epoch.Sub(instant), nil
}
// parseDurationFromNow parses a duration string. Allows ParseDuration to match the time
// package and easier testing within the fs package.
func parseDurationFromNow(age string, getNow func() time.Time) (d time.Duration, err error) {
if age == "off" {
return time.Duration(DurationOff), nil
}
// Attempt to parse as a time.Duration first
d, err = time.ParseDuration(age)
if err == nil {
return d, nil
}
d, err = parseDurationSuffixes(age)
if err == nil {
return d, nil
}
d, err = parseDurationDates(age, getNow())
if err == nil {
return d, nil
}
return d, err
}
// ParseDuration parses a duration string. Accept ms|s|m|h|d|w|M|y suffixes. Defaults to second if not provided
func ParseDuration(age string) (time.Duration, error) {
return parseDurationFromNow(age, timeNowFunc)
}
// ReadableString parses d into a human-readable duration with units.
// Examples: "3s", "1d2h23m20s", "292y24w3d23h47m16s".
func (d Duration) ReadableString() string {
return d.readableString(0)
}
// ShortReadableString parses d into a human-readable duration with units.
// This method returns it in short format, including the 3 most significant
// units only, sacrificing precision if necessary. E.g. returns "292y24w3d"
// instead of "292y24w3d23h47m16s", and "3d23h47m" instead of "3d23h47m16s".
func (d Duration) ShortReadableString() string {
return d.readableString(3)
}
// readableString parses d into a human-readable duration with units.
// Parameter maxNumberOfUnits limits number of significant units to include,
// sacrificing precision. E.g. with argument 3 it returns "292y24w3d" instead
// of "292y24w3d23h47m16s", and "3d23h47m" instead of "3d23h47m16s". Zero or
// negative argument means include all.
// Based on https://github.com/hako/durafmt
func (d Duration) readableString(maxNumberOfUnits int) string {
switch d {
case DurationOff:
return "off"
case 0:
return "0s"
}
var readableString strings.Builder
// Check for minus durations.
if d < 0 {
readableString.WriteString("-")
}
duration := time.Duration(math.Abs(float64(d)))
// Convert duration.
seconds := int64(duration.Seconds()) % 60
minutes := int64(duration.Minutes()) % 60
hours := int64(duration.Hours()) % 24
days := int64(duration/(24*time.Hour)) % 365 % 7
// Edge case between 364 and 365 days.
// We need to calculate weeks from what is left from years
leftYearDays := int64(duration/(24*time.Hour)) % 365
weeks := leftYearDays / 7
if leftYearDays >= 364 && leftYearDays < 365 {
weeks = 52
}
years := int64(duration/(24*time.Hour)) / 365
milliseconds := int64(duration/time.Millisecond) -
(seconds * 1000) - (minutes * 60000) - (hours * 3600000) -
(days * 86400000) - (weeks * 604800000) - (years * 31536000000)
// Create a map of the converted duration time.
durationMap := map[string]int64{
"ms": milliseconds,
"s": seconds,
"m": minutes,
"h": hours,
"d": days,
"w": weeks,
"y": years,
}
// Construct duration string.
numberOfUnits := 0
for _, u := range [...]string{"y", "w", "d", "h", "m", "s", "ms"} {
v := durationMap[u]
strval := strconv.FormatInt(v, 10)
if v == 0 {
continue
}
readableString.WriteString(strval + u)
numberOfUnits++
if maxNumberOfUnits > 0 && numberOfUnits >= maxNumberOfUnits {
break
}
}
return readableString.String()
}
// Set a Duration
func (d *Duration) Set(s string) error {
duration, err := ParseDuration(s)
if err != nil {
return err
}
*d = Duration(duration)
return nil
}
// Type of the value
func (d Duration) Type() string {
return "Duration"
}
// UnmarshalJSON makes sure the value can be parsed as a string or integer in JSON
func (d *Duration) UnmarshalJSON(in []byte) error {
// Check if the input is a string value.
if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
strVal := string(in[1 : len(in)-1]) // Remove the quotes
// Attempt to parse the string as a duration.
parsedDuration, err := ParseDuration(strVal)
if err != nil {
return err
}
*d = Duration(parsedDuration)
return nil
}
// Handle numeric values.
var i int64
err := json.Unmarshal(in, &i)
if err != nil {
return err
}
*d = Duration(i)
return nil
}
// Scan implements the fmt.Scanner interface
func (d *Duration) Scan(s fmt.ScanState, ch rune) error {
token, err := s.Token(true, func(rune) bool { return true })
if err != nil {
return err
}
return d.Set(string(token))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/dir.go | fs/dir.go | package fs
import (
"context"
"time"
)
// Dir describes an unspecialized directory for directory/container/bucket lists
type Dir struct {
f Info // Fs this directory is part of
remote string // name of the directory
modTime time.Time // modification or creation time - IsZero for unknown
size int64 // size of directory and contents or -1 if unknown
items int64 // number of objects or -1 for unknown
id string // optional ID
parent string // optional parent directory ID
}
// NewDir creates an unspecialized Directory object
//
// If the modTime is unknown pass in time.Time{}
func NewDir(remote string, modTime time.Time) *Dir {
return &Dir{
f: Unknown,
remote: remote,
modTime: modTime,
size: -1,
items: -1,
}
}
// NewDirCopy creates an unspecialized copy of the Directory object passed in
func NewDirCopy(ctx context.Context, d Directory) *Dir {
return &Dir{
f: d.Fs(),
remote: d.Remote(),
modTime: d.ModTime(ctx),
size: d.Size(),
items: d.Items(),
id: d.ID(),
}
}
// Fs returns the Fs that this directory is part of
func (d *Dir) Fs() Info {
return d.f
}
// String returns the name
func (d *Dir) String() string {
return d.remote
}
// Remote returns the remote path
func (d *Dir) Remote() string {
return d.remote
}
// SetRemote sets the remote
func (d *Dir) SetRemote(remote string) *Dir {
d.remote = remote
return d
}
// ID gets the optional ID
func (d *Dir) ID() string {
return d.id
}
// SetID sets the optional ID
func (d *Dir) SetID(id string) *Dir {
d.id = id
return d
}
// ParentID returns the IDs of the Dir parent if known
func (d *Dir) ParentID() string {
return d.parent
}
// SetParentID sets the optional parent ID of the Dir
func (d *Dir) SetParentID(parent string) *Dir {
d.parent = parent
return d
}
// ModTime returns the modification date of the file
//
// If one isn't available it returns the configured --default-dir-time
func (d *Dir) ModTime(ctx context.Context) time.Time {
if !d.modTime.IsZero() {
return d.modTime
}
ci := GetConfig(ctx)
return time.Time(ci.DefaultTime)
}
// Size returns the size of the file
func (d *Dir) Size() int64 {
return d.size
}
// SetSize sets the size of the directory
func (d *Dir) SetSize(size int64) *Dir {
d.size = size
return d
}
// Items returns the count of items in this directory or this
// directory and subdirectories if known, -1 for unknown
func (d *Dir) Items() int64 {
return d.items
}
// SetItems sets the number of items in the directory
func (d *Dir) SetItems(items int64) *Dir {
d.items = items
return d
}
// Check interfaces
var (
_ DirEntry = (*Dir)(nil)
_ Directory = (*Dir)(nil)
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/mount_helper_test.go | fs/mount_helper_test.go | package fs
import (
"os"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMountHelperArgs(t *testing.T) {
type testCase struct {
src []string
dst []string
env string
err string
}
normalCases := []testCase{{
src: []string{},
dst: []string{"mount", "--daemon"},
}, {
src: []string{"-o", `x-systemd.automount,vvv,env.HTTPS_PROXY="a b;c,d?EF",ro,rw,args2env,_netdev`},
dst: []string{"mount", "--read-only", "--verbose=3", "--daemon"},
env: "HTTPS_PROXY=a b;c,d?EF",
}}
for _, tc := range normalCases {
exe := []string{"rclone"}
src := append(exe, tc.src...)
res, err := convertMountHelperArgs(src)
if tc.err != "" {
require.Error(t, err)
assert.Contains(t, err.Error(), tc.err)
continue
}
require.NoError(t, err)
require.Greater(t, len(res), 1)
assert.Equal(t, exe[0], res[0])
dst := res[1:]
//log.Printf("%q -> %q", tc.src, dst)
assert.Equal(t, tc.dst, dst)
if tc.env != "" {
idx := strings.Index(tc.env, "=")
name, value := tc.env[:idx], tc.env[idx+1:]
assert.Equal(t, value, os.Getenv(name))
}
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/override_dir.go | fs/override_dir.go | package fs
// OverrideDirectory is a wrapper to override the Remote for an
// Directory
type OverrideDirectory struct {
Directory
remote string
}
// NewOverrideDirectory returns an OverrideDirectoryObject which will
// return the remote specified
func NewOverrideDirectory(oi Directory, remote string) *OverrideDirectory {
// re-wrap an OverrideDirectory
if or, ok := oi.(*OverrideDirectory); ok {
return &OverrideDirectory{
Directory: or.Directory,
remote: remote,
}
}
return &OverrideDirectory{
Directory: oi,
remote: remote,
}
}
// Remote returns the overridden remote name
func (o *OverrideDirectory) Remote() string {
return o.remote
}
// String returns the overridden remote name
func (o *OverrideDirectory) String() string {
return o.remote
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/asyncreader/asyncreader.go | fs/asyncreader/asyncreader.go | // Package asyncreader provides an asynchronous reader which reads
// independently of write
package asyncreader
import (
"context"
"errors"
"io"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/readers"
)
const (
// BufferSize is the default size of the async buffer
BufferSize = pool.BufferSize
softStartInitial = 4 * 1024
)
// ErrorStreamAbandoned is returned when the input is closed before the end of the stream
var ErrorStreamAbandoned = errors.New("stream abandoned")
// AsyncReader will do async read-ahead from the input reader
// and make the data available as an io.Reader.
// This should be fully transparent, except that once an error
// has been returned from the Reader, it will not recover.
type AsyncReader struct {
in io.ReadCloser // Input reader
ready chan *buffer // Buffers ready to be handed to the reader
token chan struct{} // Tokens which allow a buffer to be taken
exit chan struct{} // Closes when finished
buffers int // Number of buffers
err error // If an error has occurred it is here
cur *buffer // Current buffer being served
exited chan struct{} // Channel is closed been the async reader shuts down
size int // size of buffer to use
closed bool // whether we have closed the underlying stream
mu sync.Mutex // lock for Read/WriteTo/Abandon/Close
ci *fs.ConfigInfo // for reading config
pool *pool.Pool // pool to get memory from
}
// New returns a reader that will asynchronously read from
// the supplied Reader into a number of buffers each of size BufferSize
// It will start reading from the input at once, maybe even before this
// function has returned.
// The input can be read from the returned reader.
// When done use Close to release the buffers and close the supplied input.
func New(ctx context.Context, rd io.ReadCloser, buffers int) (*AsyncReader, error) {
if buffers <= 0 {
return nil, errors.New("number of buffers too small")
}
if rd == nil {
return nil, errors.New("nil reader supplied")
}
a := &AsyncReader{
ci: fs.GetConfig(ctx),
pool: pool.Global(),
}
a.init(rd, buffers)
return a, nil
}
func (a *AsyncReader) init(rd io.ReadCloser, buffers int) {
a.in = rd
a.ready = make(chan *buffer, buffers)
a.token = make(chan struct{}, buffers)
a.exit = make(chan struct{})
a.exited = make(chan struct{})
a.buffers = buffers
a.cur = nil
a.size = softStartInitial
// Create tokens
for range buffers {
a.token <- struct{}{}
}
// Start async reader
go func() {
// Ensure that when we exit this is signalled.
defer close(a.exited)
defer close(a.ready)
for {
select {
case <-a.token:
b := a.getBuffer()
if a.size < BufferSize {
b.buf = b.buf[:a.size]
a.size <<= 1
}
err := b.read(a.in)
a.ready <- b
if err != nil {
return
}
case <-a.exit:
return
}
}
}()
}
// return the buffer to the pool (clearing it)
func (a *AsyncReader) putBuffer(b *buffer) {
a.pool.Put(b.buf)
b.buf = nil
}
// get a buffer from the pool
func (a *AsyncReader) getBuffer() *buffer {
return &buffer{
buf: a.pool.Get(),
}
}
// Read will return the next available data.
func (a *AsyncReader) fill() (err error) {
if a.cur.isEmpty() {
if a.cur != nil {
a.putBuffer(a.cur)
a.token <- struct{}{}
a.cur = nil
}
b, ok := <-a.ready
if !ok {
// Return an error to show fill failed
if a.err == nil {
return ErrorStreamAbandoned
}
return a.err
}
a.cur = b
}
return nil
}
// Read will return the next available data.
func (a *AsyncReader) Read(p []byte) (n int, err error) {
a.mu.Lock()
defer a.mu.Unlock()
// Swap buffer and maybe return error
err = a.fill()
if err != nil {
return 0, err
}
// Copy what we can
n = copy(p, a.cur.buffer())
a.cur.increment(n)
// If at end of buffer, return any error, if present
if a.cur.isEmpty() {
a.err = a.cur.err
return n, a.err
}
return n, nil
}
// WriteTo writes data to w until there's no more data to write or when an error occurs.
// The return value n is the number of bytes written.
// Any error encountered during the write is also returned.
func (a *AsyncReader) WriteTo(w io.Writer) (n int64, err error) {
a.mu.Lock()
defer a.mu.Unlock()
n = 0
for {
err = a.fill()
if err == io.EOF {
return n, nil
}
if err != nil {
return n, err
}
n2, err := w.Write(a.cur.buffer())
a.cur.increment(n2)
n += int64(n2)
if err != nil {
return n, err
}
if a.cur.err == io.EOF {
a.err = a.cur.err
return n, err
}
if a.cur.err != nil {
a.err = a.cur.err
return n, a.cur.err
}
}
}
// SkipBytes will try to seek 'skip' bytes relative to the current position.
// On success it returns true. If 'skip' is outside the current buffer data or
// an error occurs, Abandon is called and false is returned.
func (a *AsyncReader) SkipBytes(skip int) (ok bool) {
a.mu.Lock()
defer func() {
a.mu.Unlock()
if !ok {
a.Abandon()
}
}()
if a.err != nil {
return false
}
if skip < 0 {
// seek backwards if skip is inside current buffer
if a.cur != nil && a.cur.offset+skip >= 0 {
a.cur.offset += skip
return true
}
return false
}
// early return if skip is past the maximum buffer capacity
if skip >= (len(a.ready)+1)*BufferSize {
return false
}
refillTokens := 0
for {
if a.cur.isEmpty() {
if a.cur != nil {
a.putBuffer(a.cur)
refillTokens++
a.cur = nil
}
select {
case b, ok := <-a.ready:
if !ok {
return false
}
a.cur = b
default:
return false
}
}
n := min(len(a.cur.buffer()), skip)
a.cur.increment(n)
skip -= n
if skip == 0 {
for ; refillTokens > 0; refillTokens-- {
a.token <- struct{}{}
}
// If at end of buffer, store any error, if present
if a.cur.isEmpty() && a.cur.err != nil {
a.err = a.cur.err
}
return true
}
if a.cur.err != nil {
a.err = a.cur.err
return false
}
}
}
// StopBuffering will ensure that the underlying async reader is shut
// down so no more is read from the input.
//
// This does not free the memory so Abandon() or Close() need to be
// called on the input.
//
// This does not wait for Read/WriteTo to complete so can be called
// concurrently to those.
func (a *AsyncReader) StopBuffering() {
select {
case <-a.exit:
// Do nothing if reader routine already exited
return
default:
}
// Close and wait for go routine
close(a.exit)
<-a.exited
}
// Abandon will ensure that the underlying async reader is shut down
// and memory is returned. It does everything but close the input.
//
// It will NOT close the input supplied on New.
func (a *AsyncReader) Abandon() {
a.StopBuffering()
// take the lock to wait for Read/WriteTo to complete
a.mu.Lock()
defer a.mu.Unlock()
// Return any outstanding buffers to the Pool
if a.cur != nil {
a.putBuffer(a.cur)
a.cur = nil
}
for b := range a.ready {
a.putBuffer(b)
}
}
// Close will ensure that the underlying async reader is shut down.
// It will also close the input supplied on New.
func (a *AsyncReader) Close() (err error) {
a.Abandon()
if a.closed {
return nil
}
a.closed = true
return a.in.Close()
}
// Internal buffer
// If an error is present, it must be returned
// once all buffer content has been served.
type buffer struct {
buf []byte
err error
offset int
}
// isEmpty returns true is offset is at end of
// buffer, or
func (b *buffer) isEmpty() bool {
if b == nil {
return true
}
if len(b.buf)-b.offset <= 0 {
return true
}
return false
}
// read into start of the buffer from the supplied reader,
// resets the offset and updates the size of the buffer.
// Any error encountered during the read is returned.
func (b *buffer) read(rd io.Reader) error {
var n int
n, b.err = readers.ReadFill(rd, b.buf)
b.buf = b.buf[0:n]
b.offset = 0
return b.err
}
// Return the buffer at current offset
func (b *buffer) buffer() []byte {
return b.buf[b.offset:]
}
// increment the offset
func (b *buffer) increment(n int) {
b.offset += n
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/asyncreader/asyncreader_test.go | fs/asyncreader/asyncreader_test.go | package asyncreader
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"math/rand"
"strings"
"sync"
"testing"
"testing/iotest"
"time"
"github.com/rclone/rclone/lib/israce"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAsyncReader(t *testing.T) {
ctx := context.Background()
buf := io.NopCloser(bytes.NewBufferString("Testbuffer"))
ar, err := New(ctx, buf, 4)
require.NoError(t, err)
var dst = make([]byte, 100)
n, err := ar.Read(dst)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 10, n)
n, err = ar.Read(dst)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 0, n)
// Test read after error
n, err = ar.Read(dst)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 0, n)
err = ar.Close()
require.NoError(t, err)
// Test double close
err = ar.Close()
require.NoError(t, err)
// Test Close without reading everything
buf = io.NopCloser(bytes.NewBuffer(make([]byte, 50000)))
ar, err = New(ctx, buf, 4)
require.NoError(t, err)
err = ar.Close()
require.NoError(t, err)
}
func TestAsyncWriteTo(t *testing.T) {
ctx := context.Background()
buf := io.NopCloser(bytes.NewBufferString("Testbuffer"))
ar, err := New(ctx, buf, 4)
require.NoError(t, err)
var dst = &bytes.Buffer{}
n, err := io.Copy(dst, ar)
require.NoError(t, err)
assert.Equal(t, int64(10), n)
// Should still not return any errors
n, err = io.Copy(dst, ar)
require.NoError(t, err)
assert.Equal(t, int64(0), n)
err = ar.Close()
require.NoError(t, err)
}
func TestAsyncReaderErrors(t *testing.T) {
ctx := context.Background()
// test nil reader
_, err := New(ctx, nil, 4)
require.Error(t, err)
// invalid buffer number
buf := io.NopCloser(bytes.NewBufferString("Testbuffer"))
_, err = New(ctx, buf, 0)
require.Error(t, err)
_, err = New(ctx, buf, -1)
require.Error(t, err)
}
// Complex read tests, leveraged from "bufio".
type readMaker struct {
name string
fn func(io.Reader) io.Reader
}
var readMakers = []readMaker{
{"full", func(r io.Reader) io.Reader { return r }},
{"byte", iotest.OneByteReader},
{"half", iotest.HalfReader},
{"data+err", iotest.DataErrReader},
{"timeout", iotest.TimeoutReader},
}
// Call Read to accumulate the text of a file
func reads(buf io.Reader, m int) string {
var b [1000]byte
nb := 0
for {
n, err := buf.Read(b[nb : nb+m])
nb += n
if err == io.EOF {
break
} else if err != nil && err != iotest.ErrTimeout {
panic("Data: " + err.Error())
} else if err != nil {
break
}
}
return string(b[0:nb])
}
type bufReader struct {
name string
fn func(io.Reader) string
}
var bufreaders = []bufReader{
{"1", func(b io.Reader) string { return reads(b, 1) }},
{"2", func(b io.Reader) string { return reads(b, 2) }},
{"3", func(b io.Reader) string { return reads(b, 3) }},
{"4", func(b io.Reader) string { return reads(b, 4) }},
{"5", func(b io.Reader) string { return reads(b, 5) }},
{"7", func(b io.Reader) string { return reads(b, 7) }},
}
const minReadBufferSize = 16
var bufsizes = []int{
0, minReadBufferSize, 23, 32, 46, 64, 93, 128, 1024, 4096,
}
// Test various input buffer sizes, number of buffers and read sizes.
func TestAsyncReaderSizes(t *testing.T) {
ctx := context.Background()
var texts [31]string
str := ""
var all strings.Builder
for i := range len(texts) - 1 {
texts[i] = str + "\n"
all.WriteString(texts[i])
str += string(rune(i)%26 + 'a')
}
texts[len(texts)-1] = all.String()
for h := range len(texts) {
text := texts[h]
for i := range readMakers {
for j := range bufreaders {
for k := range bufsizes {
for l := 1; l < 10; l++ {
readmaker := readMakers[i]
bufreader := bufreaders[j]
bufsize := bufsizes[k]
read := readmaker.fn(strings.NewReader(text))
buf := bufio.NewReaderSize(read, bufsize)
ar, _ := New(ctx, io.NopCloser(buf), l)
s := bufreader.fn(ar)
// "timeout" expects the Reader to recover, AsyncReader does not.
if s != text && readmaker.name != "timeout" {
t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
readmaker.name, bufreader.name, bufsize, text, s)
}
err := ar.Close()
require.NoError(t, err)
}
}
}
}
}
}
// Test various input buffer sizes, number of buffers and read sizes.
func TestAsyncReaderWriteTo(t *testing.T) {
ctx := context.Background()
var texts [31]string
str := ""
var all strings.Builder
for i := range len(texts) - 1 {
texts[i] = str + "\n"
all.WriteString(texts[i])
str += string(rune(i)%26 + 'a')
}
texts[len(texts)-1] = all.String()
for h := range len(texts) {
text := texts[h]
for i := range readMakers {
for j := range bufreaders {
for k := range bufsizes {
for l := 1; l < 10; l++ {
readmaker := readMakers[i]
bufreader := bufreaders[j]
bufsize := bufsizes[k]
read := readmaker.fn(strings.NewReader(text))
buf := bufio.NewReaderSize(read, bufsize)
ar, _ := New(ctx, io.NopCloser(buf), l)
dst := &bytes.Buffer{}
_, err := ar.WriteTo(dst)
if err != nil && err != io.EOF && err != iotest.ErrTimeout {
t.Fatal("Copy:", err)
}
s := dst.String()
// "timeout" expects the Reader to recover, AsyncReader does not.
if s != text && readmaker.name != "timeout" {
t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
readmaker.name, bufreader.name, bufsize, text, s)
}
err = ar.Close()
require.NoError(t, err)
}
}
}
}
}
}
// Read an infinite number of zeros
type zeroReader struct {
closed bool
}
func (z *zeroReader) Read(p []byte) (n int, err error) {
if z.closed {
return 0, io.EOF
}
for i := range p {
p[i] = 0
}
return len(p), nil
}
func (z *zeroReader) Close() error {
if z.closed {
panic("double close on zeroReader")
}
z.closed = true
return nil
}
// Test closing and abandoning
func testAsyncReaderClose(t *testing.T, writeto bool) {
ctx := context.Background()
zr := &zeroReader{}
a, err := New(ctx, zr, 16)
require.NoError(t, err)
var copyN int64
var copyErr error
var wg sync.WaitGroup
started := make(chan struct{})
wg.Add(1)
go func() {
defer wg.Done()
close(started)
if writeto {
// exercise the WriteTo path
copyN, copyErr = a.WriteTo(io.Discard)
} else {
// exercise the Read path
buf := make([]byte, 64*1024)
for {
var n int
n, copyErr = a.Read(buf)
copyN += int64(n)
if copyErr != nil {
break
}
}
}
}()
// Do some copying
<-started
time.Sleep(100 * time.Millisecond)
// Abandon the copy
a.Abandon()
wg.Wait()
assert.Equal(t, ErrorStreamAbandoned, copyErr)
// t.Logf("Copied %d bytes, err %v", copyN, copyErr)
assert.True(t, copyN > 0)
}
func TestAsyncReaderCloseRead(t *testing.T) { testAsyncReaderClose(t, false) }
func TestAsyncReaderCloseWriteTo(t *testing.T) { testAsyncReaderClose(t, true) }
func TestAsyncReaderSkipBytes(t *testing.T) {
ctx := context.Background()
t.Parallel()
data := make([]byte, 15000)
buf := make([]byte, len(data))
r := rand.New(rand.NewSource(42))
n, err := r.Read(data)
require.NoError(t, err)
require.Equal(t, len(data), n)
initialReads := []int{0, 1, 100, 2048,
softStartInitial - 1, softStartInitial, softStartInitial + 1,
8000, len(data)}
skips := []int{-1000, -101, -100, -99, 0, 1, 2048,
softStartInitial - 1, softStartInitial, softStartInitial + 1,
8000, len(data), BufferSize, 2 * BufferSize}
for buffers := 1; buffers <= 5; buffers++ {
if israce.Enabled && buffers > 1 {
t.Skip("FIXME Skipping further tests with race detector until https://github.com/golang/go/issues/27070 is fixed.")
}
t.Run(fmt.Sprintf("%d", buffers), func(t *testing.T) {
for _, initialRead := range initialReads {
t.Run(fmt.Sprintf("%d", initialRead), func(t *testing.T) {
for _, skip := range skips {
t.Run(fmt.Sprintf("%d", skip), func(t *testing.T) {
ar, err := New(ctx, io.NopCloser(bytes.NewReader(data)), buffers)
require.NoError(t, err)
wantSkipFalse := false
buf = buf[:initialRead]
n, err := readers.ReadFill(ar, buf)
if initialRead >= len(data) {
wantSkipFalse = true
if initialRead > len(data) {
assert.Equal(t, err, io.EOF)
} else {
assert.True(t, err == nil || err == io.EOF)
}
assert.Equal(t, len(data), n)
assert.Equal(t, data, buf[:len(data)])
} else {
assert.NoError(t, err)
assert.Equal(t, initialRead, n)
assert.Equal(t, data[:initialRead], buf)
}
skipped := ar.SkipBytes(skip)
buf = buf[:1024]
n, err = readers.ReadFill(ar, buf)
offset := initialRead + skip
if skipped {
assert.False(t, wantSkipFalse)
l := len(buf)
if offset >= len(data) {
assert.Equal(t, err, io.EOF)
} else {
if offset+1024 >= len(data) {
l = len(data) - offset
}
assert.Equal(t, l, n)
assert.Equal(t, data[offset:offset+l], buf[:l])
}
} else {
if initialRead >= len(data) {
assert.Equal(t, err, io.EOF)
} else {
assert.True(t, err == ErrorStreamAbandoned || err == io.EOF)
}
}
})
}
})
}
})
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/filter/rules.go | fs/filter/rules.go | package filter
import (
"bufio"
"fmt"
"os"
"regexp"
"slices"
"strings"
"github.com/rclone/rclone/fs"
)
// RulesOpt is configuration for a rule set
type RulesOpt struct {
FilterRule []string `config:"filter"`
FilterFrom []string `config:"filter_from"`
ExcludeRule []string `config:"exclude"`
ExcludeFrom []string `config:"exclude_from"`
IncludeRule []string `config:"include"`
IncludeFrom []string `config:"include_from"`
}
// rule is one filter rule
type rule struct {
Include bool
Regexp *regexp.Regexp
}
// Match returns true if rule matches path
func (r *rule) Match(path string) bool {
return r.Regexp.MatchString(path)
}
// String the rule
func (r *rule) String() string {
c := "-"
if r.Include {
c = "+"
}
return fmt.Sprintf("%s %s", c, r.Regexp.String())
}
// rules is a slice of rules
type rules struct {
rules []rule
existing map[string]struct{}
}
type addFn func(Include bool, glob string) error
// add adds a rule if it doesn't exist already
func (rs *rules) add(Include bool, re *regexp.Regexp) {
if rs.existing == nil {
rs.existing = make(map[string]struct{})
}
newRule := rule{
Include: Include,
Regexp: re,
}
newRuleString := newRule.String()
if _, ok := rs.existing[newRuleString]; ok {
return // rule already exists
}
rs.rules = append(rs.rules, newRule)
rs.existing[newRuleString] = struct{}{}
}
// Add adds a filter rule with include or exclude status indicated
func (rs *rules) Add(Include bool, glob string) error {
re, err := GlobPathToRegexp(glob, false /* f.Opt.IgnoreCase */)
if err != nil {
return err
}
rs.add(Include, re)
return nil
}
type clearFn func()
// clear clears all the rules
func (rs *rules) clear() {
rs.rules = nil
rs.existing = nil
}
// len returns the number of rules
func (rs *rules) len() int {
return len(rs.rules)
}
// include returns whether this remote passes the filter rules.
func (rs *rules) include(remote string) bool {
for _, rule := range rs.rules {
if rule.Match(remote) {
return rule.Include
}
}
return true
}
// include returns whether this collection of strings remote passes
// the filter rules.
//
// the first rule is evaluated on all the remotes and if it matches
// then the result is returned. If not the next rule is tested and so
// on.
func (rs *rules) includeMany(remotes []string) bool {
for _, rule := range rs.rules {
if slices.ContainsFunc(remotes, rule.Match) {
return rule.Include
}
}
return true
}
// forEachLine calls fn on every line in the file pointed to by path
//
// It ignores empty lines and lines starting with '#' or ';' if raw is false
func forEachLine(path string, raw bool, fn func(string) error) (err error) {
var scanner *bufio.Scanner
if path == "-" {
scanner = bufio.NewScanner(os.Stdin)
} else {
in, err := os.Open(path)
if err != nil {
return err
}
scanner = bufio.NewScanner(in)
defer fs.CheckClose(in, &err)
}
for scanner.Scan() {
line := scanner.Text()
if !raw {
line = strings.TrimSpace(line)
if len(line) == 0 || line[0] == '#' || line[0] == ';' {
continue
}
}
err := fn(line)
if err != nil {
return err
}
}
return scanner.Err()
}
// AddRule adds a filter rule with include/exclude indicated by the prefix
//
// These are
//
// # Comment
// + glob
// - glob
// !
//
// '+' includes the glob, '-' excludes it and '!' resets the filter list
//
// Line comments may be introduced with '#' or ';'
func addRule(rule string, add addFn, clear clearFn) error {
switch {
case rule == "!":
clear()
return nil
case strings.HasPrefix(rule, "- "):
return add(false, rule[2:])
case strings.HasPrefix(rule, "+ "):
return add(true, rule[2:])
}
return fmt.Errorf("malformed rule %q", rule)
}
// AddRule adds a filter rule with include/exclude indicated by the prefix
//
// These are
//
// # Comment
// + glob
// - glob
// !
//
// '+' includes the glob, '-' excludes it and '!' resets the filter list
//
// Line comments may be introduced with '#' or ';'
func (rs *rules) AddRule(rule string) error {
return addRule(rule, rs.Add, rs.clear)
}
// parseRules adds all the rules from opt via add/clear.
//
// --include rules cause an implicit "- /**" to be appended, and
// combining --include with --exclude produces a warning since the
// order they are parsed in is not defined.
func parseRules(opt *RulesOpt, add addFn, clear clearFn) (err error) {
	addImplicitExclude := false
	foundExcludeRule := false

	// addAll adds each literal rule with the given sense
	addAll := func(globs []string, include bool) error {
		for _, glob := range globs {
			if err := add(include, glob); err != nil {
				return err
			}
		}
		return nil
	}
	// addAllFromFiles adds the rules read from each listed file with the given sense
	addAllFromFiles := func(files []string, include bool) error {
		for _, file := range files {
			err := forEachLine(file, false, func(line string) error {
				return add(include, line)
			})
			if err != nil {
				return err
			}
		}
		return nil
	}

	if len(opt.IncludeRule) > 0 {
		if err := addAll(opt.IncludeRule, true); err != nil {
			return err
		}
		addImplicitExclude = true
	}
	if len(opt.IncludeFrom) > 0 {
		if err := addAllFromFiles(opt.IncludeFrom, true); err != nil {
			return err
		}
		addImplicitExclude = true
	}
	if len(opt.ExcludeRule) > 0 {
		if err := addAll(opt.ExcludeRule, false); err != nil {
			return err
		}
		foundExcludeRule = true
	}
	if len(opt.ExcludeFrom) > 0 {
		if err := addAllFromFiles(opt.ExcludeFrom, false); err != nil {
			return err
		}
		foundExcludeRule = true
	}

	if addImplicitExclude && foundExcludeRule {
		fs.Errorf(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
	}

	for _, rule := range opt.FilterRule {
		if err := addRule(rule, add, clear); err != nil {
			return err
		}
	}
	for _, file := range opt.FilterFrom {
		err := forEachLine(file, false, func(line string) error {
			return addRule(line, add, clear)
		})
		if err != nil {
			return err
		}
	}

	// Includes imply excluding everything else
	if addImplicitExclude {
		if err := add(false, "/**"); err != nil {
			return err
		}
	}
	return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/filter/filter.go | fs/filter/filter.go | // Package filter controls the filtering of files
package filter
import (
"context"
"crypto/md5"
"encoding/binary"
"errors"
"fmt"
"math/rand/v2"
"path"
"slices"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/fs"
"golang.org/x/sync/errgroup"
"golang.org/x/text/unicode/norm"
)
// This is the globally active filter
//
// This is accessed through GetConfig and AddConfig
//
// It is initialised from the default Opt via mustNewFilter(nil).
var globalConfig = mustNewFilter(nil)
// OptionsInfo describes the Options in use
//
// These are registered as the "filter" option group in init() below.
// The Name of each entry matches the `config` tag on the
// corresponding field of Options.
var OptionsInfo = fs.Options{{
	Name:    "delete_excluded",
	Default: false,
	Help:    "Delete files on dest excluded from sync",
	Groups:  "Filter",
}, {
	Name:    "exclude_if_present",
	Default: []string{},
	Help:    "Exclude directories if filename is present",
	Groups:  "Filter",
}, {
	Name:    "files_from",
	Default: []string{},
	Help:    "Read list of source-file names from file (use - to read from stdin)",
	Groups:  "Filter",
}, {
	Name:    "files_from_raw",
	Default: []string{},
	Help:    "Read list of source-file names from file without any processing of lines (use - to read from stdin)",
	Groups:  "Filter",
}, {
	Name:    "min_age",
	Default: fs.DurationOff,
	Help:    "Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y",
	Groups:  "Filter",
}, {
	Name:    "max_age",
	Default: fs.DurationOff,
	Help:    "Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y",
	Groups:  "Filter",
}, {
	Name:    "min_size",
	Default: fs.SizeSuffix(-1),
	Help:    "Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P",
	Groups:  "Filter",
}, {
	Name:    "max_size",
	Default: fs.SizeSuffix(-1),
	Help:    "Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P",
	Groups:  "Filter",
}, {
	Name:    "ignore_case",
	Default: false,
	Help:    "Ignore case in filters (case insensitive)",
	Groups:  "Filter",
}, {
	Name:    "hash_filter",
	Default: "",
	Help:    "Partition filenames by hash k/n or randomly @/n",
	Groups:  "Filter",
}, {
	Name:     "filter",
	Default:  []string{},
	ShortOpt: "f",
	Help:     "Add a file filtering rule",
	Groups:   "Filter",
}, {
	Name:    "filter_from",
	Default: []string{},
	Help:    "Read file filtering patterns from a file (use - to read from stdin)",
	Groups:  "Filter",
}, {
	Name:    "exclude",
	Default: []string{},
	Help:    "Exclude files matching pattern",
	Groups:  "Filter",
}, {
	Name:    "exclude_from",
	Default: []string{},
	Help:    "Read file exclude patterns from file (use - to read from stdin)",
	Groups:  "Filter",
}, {
	Name:    "include",
	Default: []string{},
	Help:    "Include files matching pattern",
	Groups:  "Filter",
}, {
	Name:    "include_from",
	Default: []string{},
	Help:    "Read file include patterns from file (use - to read from stdin)",
	Groups:  "Filter",
}, {
	Name:    "metadata_filter",
	Default: []string{},
	Help:    "Add a metadata filtering rule",
	Groups:  "Filter,Metadata",
}, {
	Name:    "metadata_filter_from",
	Default: []string{},
	Help:    "Read metadata filtering patterns from a file (use - to read from stdin)",
	Groups:  "Filter,Metadata",
}, {
	Name:    "metadata_exclude",
	Default: []string{},
	Help:    "Exclude metadatas matching pattern",
	Groups:  "Filter,Metadata",
}, {
	Name:    "metadata_exclude_from",
	Default: []string{},
	Help:    "Read metadata exclude patterns from file (use - to read from stdin)",
	Groups:  "Filter,Metadata",
}, {
	Name:    "metadata_include",
	Default: []string{},
	Help:    "Include metadatas matching pattern",
	Groups:  "Filter,Metadata",
}, {
	Name:    "metadata_include_from",
	Default: []string{},
	Help:    "Read metadata include patterns from file (use - to read from stdin)",
	Groups:  "Filter,Metadata",
}}
// Options configures the filter
//
// The `config` tags correspond to the option names declared in
// OptionsInfo above.
type Options struct {
	DeleteExcluded bool `config:"delete_excluded"`
	RulesOpt            // embedded so we don't change the JSON API
	ExcludeFile    []string      `config:"exclude_if_present"`
	FilesFrom      []string      `config:"files_from"`
	FilesFromRaw   []string      `config:"files_from_raw"`
	MetaRules      RulesOpt      `config:"metadata"`
	MinAge         fs.Duration   `config:"min_age"`
	MaxAge         fs.Duration   `config:"max_age"`
	MinSize        fs.SizeSuffix `config:"min_size"`
	MaxSize        fs.SizeSuffix `config:"max_size"`
	IgnoreCase     bool          `config:"ignore_case"`
	HashFilter     string        `config:"hash_filter"`
}
// init registers the filter options as the "filter" global option
// group, with Reload as the callback to rebuild the active filter.
func init() {
	fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "filter", Opt: &Opt, Options: OptionsInfo, Reload: Reload})
}
// Opt is the default config for the filter
//
// It is registered with the global option system in init() and read
// back by Reload.
var Opt = Options{
	MinAge:  fs.DurationOff, // These have to be set here as the options are parsed once before the defaults are set
	MaxAge:  fs.DurationOff,
	MinSize: fs.SizeSuffix(-1),
	MaxSize: fs.SizeSuffix(-1),
}
// FilesMap describes the map of files to transfer
//
// It is used as a set of remote paths - the values carry no information.
type FilesMap map[string]struct{}
// Filter describes any filtering in operation
type Filter struct {
	Opt         Options   // the options this filter was built from
	ModTimeFrom time.Time // include only objects modified at or after this time (zero means no bound)
	ModTimeTo   time.Time // include only objects modified at or before this time (zero means no bound)
	fileRules   rules     // rules applied to file paths
	dirRules    rules     // rules applied to directory paths (matched with a trailing /)
	metaRules   rules     // rules applied to "key=value" metadata strings
	files       FilesMap  // files if filesFrom
	dirs        FilesMap  // dirs from filesFrom
	hashFilterN uint64    // if non 0 do hash filtering
	hashFilterK uint64    // select partition K/N
}
// NewFilter parses the command line options and creates a Filter
// object. If opt is nil, then DefaultOpt will be used
func NewFilter(opt *Options) (f *Filter, err error) {
	f = &Filter{}

	// Make a copy of the options
	if opt != nil {
		f.Opt = *opt
	} else {
		f.Opt = Opt
	}

	// Filter flags - note --min-age sets the upper modtime bound and
	// --max-age the lower one
	if f.Opt.MinAge.IsSet() {
		f.ModTimeTo = time.Now().Add(-time.Duration(f.Opt.MinAge))
		fs.Debugf(nil, "--min-age %v to %v", f.Opt.MinAge, f.ModTimeTo)
	}
	if f.Opt.MaxAge.IsSet() {
		f.ModTimeFrom = time.Now().Add(-time.Duration(f.Opt.MaxAge))
		if !f.ModTimeTo.IsZero() && f.ModTimeTo.Before(f.ModTimeFrom) {
			// NB use f.Opt here, not opt, as opt may be nil
			return nil, fmt.Errorf("filter: --min-age %q can't be larger than --max-age %q", f.Opt.MinAge, f.Opt.MaxAge)
		}
		fs.Debugf(nil, "--max-age %v to %v", f.Opt.MaxAge, f.ModTimeFrom)
	}
	if f.Opt.HashFilter != "" {
		f.hashFilterK, f.hashFilterN, err = parseHashFilter(f.Opt.HashFilter)
		if err != nil {
			return nil, err
		}
		fs.Debugf(nil, "Using --hash-filter %d/%d", f.hashFilterK, f.hashFilterN)
	}

	// Add the path and metadata filtering rules
	err = parseRules(&f.Opt.RulesOpt, f.Add, f.Clear)
	if err != nil {
		return nil, err
	}
	err = parseRules(&f.Opt.MetaRules, f.metaRules.Add, f.metaRules.clear)
	if err != nil {
		return nil, err
	}

	// --files-from and --files-from-raw are exclusive of all other filters
	inActive := f.InActive()
	for _, rule := range f.Opt.FilesFrom {
		if !inActive {
			return nil, errors.New("the usage of --files-from overrides all other filters, it should be used alone or with --files-from-raw")
		}
		f.initAddFile() // init to show --files-from set even if no files within
		err := forEachLine(rule, false, func(line string) error {
			return f.AddFile(line)
		})
		if err != nil {
			return nil, err
		}
	}
	for _, rule := range f.Opt.FilesFromRaw {
		// --files-from-raw can be used with --files-from, hence we do
		// not need to get the value of f.InActive again
		if !inActive {
			return nil, errors.New("the usage of --files-from-raw overrides all other filters, it should be used alone or with --files-from")
		}
		f.initAddFile() // init to show --files-from set even if no files within
		err := forEachLine(rule, true, func(line string) error {
			return f.AddFile(line)
		})
		if err != nil {
			return nil, err
		}
	}

	if fs.GetConfig(context.Background()).Dump&fs.DumpFilters != 0 {
		fmt.Println("--- start filters ---")
		fmt.Println(f.DumpFilters())
		fmt.Println("--- end filters ---")
	}
	return f, nil
}
// Parse the --hash-filter arguments into k/n
func parseHashFilter(hashFilter string) (k, n uint64, err error) {
slash := strings.IndexRune(hashFilter, '/')
if slash < 0 {
return 0, 0, fmt.Errorf("filter: --hash-filter: no / found")
}
kStr, nStr := hashFilter[:slash], hashFilter[slash+1:]
n, err = strconv.ParseUint(nStr, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("filter: --hash-filter: can't parse N=%q: %v", nStr, err)
}
if n == 0 {
return 0, 0, fmt.Errorf("filter: --hash-filter: N must be greater than 0")
}
if kStr == "@" {
k = rand.Uint64N(n)
} else {
k, err = strconv.ParseUint(kStr, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("filter: --hash-filter: can't parse K=%q: %v", kStr, err)
}
k %= n
}
return k, n, nil
}
// mustNewFilter builds a Filter from opt, panicking if the options
// cannot be parsed.
func mustNewFilter(opt *Options) *Filter {
	filter, err := NewFilter(opt)
	if err != nil {
		panic(err)
	}
	return filter
}
// addDirGlobs derives directory globs from the file glob passed in
// and adds each of them to the directory rules.
func (f *Filter) addDirGlobs(Include bool, glob string) error {
	for _, dirGlob := range globToDirGlobs(glob) {
		if dirGlob == "/" {
			// The root is always included so needs no rule
			continue
		}
		re, err := GlobPathToRegexp(dirGlob, f.Opt.IgnoreCase)
		if err != nil {
			return err
		}
		f.dirRules.add(Include, re)
	}
	return nil
}
// Add adds a filter rule with include or exclude status indicated
//
// A glob ending in "/" is a directory rule; a glob containing "**"
// applies to both files and directories.
func (f *Filter) Add(Include bool, glob string) error {
	isDirRule := strings.HasSuffix(glob, "/")
	isFileRule := !isDirRule
	// Make excluding "dir/" equivalent to excluding "dir/**"
	if isDirRule && !Include {
		glob += "**"
	}
	// "**" can match files as well as directories so such rules go in both lists
	if strings.Contains(glob, "**") {
		isDirRule, isFileRule = true, true
	}
	re, err := GlobPathToRegexp(glob, f.Opt.IgnoreCase)
	if err != nil {
		return err
	}
	if isFileRule {
		f.fileRules.add(Include, re)
		// If include rule work out what directories are needed to scan
		// if exclude rule, we can't rule anything out
		// Unless it is `*` which matches everything
		// NB ** and /** are DirRules
		if Include || glob == "*" {
			err = f.addDirGlobs(Include, glob)
			if err != nil {
				return err
			}
		}
	}
	if isDirRule {
		f.dirRules.add(Include, re)
	}
	return nil
}
// AddRule adds a filter rule with include/exclude indicated by the prefix
//
// These are
//
//	# Comment
//	+ glob
//	- glob
//	!
//
// '+' includes the glob, '-' excludes it and '!' resets the filter list
//
// Line comments may be introduced with '#' or ';'
func (f *Filter) AddRule(rule string) error {
	return addRule(rule, f.Add, f.Clear)
}
// initAddFile lazily creates f.files and f.dirs ready for AddFile
func (f *Filter) initAddFile() {
	if f.files != nil {
		// Already initialised
		return
	}
	f.files = make(FilesMap)
	f.dirs = make(FilesMap)
}
// AddFile adds a single file to the files from list
//
// All the parent directories of the file are recorded too so that
// IncludeDirectory can find them.
func (f *Filter) AddFile(file string) error {
	f.initAddFile()
	file = strings.Trim(file, "/")
	f.files[file] = struct{}{}
	// Walk up the path recording each parent directory, stopping at
	// the root or at a directory we've already seen (its parents are
	// then already recorded).
	for dir := path.Dir(file); dir != "."; dir = path.Dir(dir) {
		if _, found := f.dirs[dir]; found {
			break
		}
		f.dirs[dir] = struct{}{}
	}
	return nil
}
// Files returns all the files from the `--files-from` list
//
// It may be nil if the list is empty
//
// Note that this returns the internal map, not a copy.
func (f *Filter) Files() FilesMap {
	return f.files
}
// Clear clears all the filter rules
//
// The file, directory and metadata rules are all reset. The other
// options (sizes, ages, files-from lists) are left unchanged.
func (f *Filter) Clear() {
	f.fileRules.clear()
	f.dirRules.clear()
	f.metaRules.clear()
}
// InActive returns false if any filters are active
func (f *Filter) InActive() bool {
	switch {
	case f.files != nil,
		!f.ModTimeFrom.IsZero(),
		!f.ModTimeTo.IsZero(),
		f.Opt.MinSize >= 0,
		f.Opt.MaxSize >= 0,
		f.fileRules.len() != 0,
		f.dirRules.len() != 0,
		f.metaRules.len() != 0,
		len(f.Opt.ExcludeFile) != 0,
		f.hashFilterN != 0:
		// At least one kind of filtering is configured
		return false
	}
	return true
}
// IncludeRemote returns whether this remote passes the filter rules.
func (f *Filter) IncludeRemote(remote string) bool {
	// filesFrom takes precedence
	if f.files != nil {
		_, found := f.files[remote]
		return found
	}
	if f.hashFilterN != 0 {
		// Normalise (NFC) and lower-case the remote before hashing so
		// all the names which could be normalised together fall into
		// the same partition, even on case insensitive or unicode
		// normalising remotes.
		name := strings.ToLower(norm.NFC.String(remote))
		sum := md5.Sum([]byte(name))
		if binary.LittleEndian.Uint64(sum[:])%f.hashFilterN != f.hashFilterK {
			return false
		}
	}
	return f.fileRules.include(remote)
}
// ListContainsExcludeFile checks if exclude file is present in the list.
func (f *Filter) ListContainsExcludeFile(entries fs.DirEntries) bool {
	if len(f.Opt.ExcludeFile) == 0 {
		// No exclude files configured
		return false
	}
	for _, entry := range entries {
		// Only objects (not directories) count as exclude files
		if obj, ok := entry.(fs.Object); ok {
			if slices.Contains(f.Opt.ExcludeFile, path.Base(obj.Remote())) {
				return true
			}
		}
	}
	return false
}
// IncludeDirectory returns a function which checks whether this
// directory should be included in the sync or not.
func (f *Filter) IncludeDirectory(ctx context.Context, fs fs.Fs) func(string) (bool, error) {
	return func(remote string) (bool, error) {
		remote = strings.Trim(remote, "/")
		// A directory containing an exclude file is always rejected
		excl, err := f.DirContainsExcludeFile(ctx, fs, remote)
		if err != nil {
			return false, err
		}
		if excl {
			return false, nil
		}
		// filesFrom takes precedence over the directory rules
		if f.files != nil {
			_, found := f.dirs[remote]
			return found, nil
		}
		// Directory rules are matched with a trailing /
		return f.dirRules.include(remote + "/"), nil
	}
}
// DirContainsExcludeFile checks if one of the exclude files is
// present in the directory remote. If fremote is nil, it works
// properly only if ExcludeFile is empty (for testing).
func (f *Filter) DirContainsExcludeFile(ctx context.Context, fremote fs.Fs, remote string) (bool, error) {
	// Ranging over an empty slice is a no-op, so no len() guard is needed
	for _, excludeFile := range f.Opt.ExcludeFile {
		exists, err := fs.FileExists(ctx, fremote, path.Join(remote, excludeFile))
		if err != nil {
			return false, err
		}
		if exists {
			return true, nil
		}
	}
	return false, nil
}
// Include returns whether this object should be included into the
// sync or not and logs the reason for exclusion if not included
//
// metadata may be nil if there are no metadata rules in use.
func (f *Filter) Include(remote string, size int64, modTime time.Time, metadata fs.Metadata) bool {
	// filesFrom takes precedence
	if f.files != nil {
		_, include := f.files[remote]
		if !include {
			fs.Debugf(remote, "Excluded (FilesFrom Filter)")
		}
		return include
	}
	// Modification time window set by --max-age / --min-age
	if !f.ModTimeFrom.IsZero() && modTime.Before(f.ModTimeFrom) {
		fs.Debugf(remote, "Excluded (ModTime Filter)")
		return false
	}
	if !f.ModTimeTo.IsZero() && modTime.After(f.ModTimeTo) {
		fs.Debugf(remote, "Excluded (ModTime Filter)")
		return false
	}
	// Size bounds set by --min-size / --max-size (-1 means unset)
	if f.Opt.MinSize >= 0 && size < int64(f.Opt.MinSize) {
		fs.Debugf(remote, "Excluded (Size Filter)")
		return false
	}
	if f.Opt.MaxSize >= 0 && size > int64(f.Opt.MaxSize) {
		fs.Debugf(remote, "Excluded (Size Filter)")
		return false
	}
	if f.metaRules.len() > 0 {
		// Flatten the metadata into "key=value" strings for rule matching
		metadatas := make([]string, 0, len(metadata)+1)
		for key, value := range metadata {
			metadatas = append(metadatas, fmt.Sprintf("%s=%s", key, value))
		}
		if len(metadata) == 0 {
			// If there is no metadata, add a null one
			// otherwise the default action isn't taken
			metadatas = append(metadatas, "\x00=\x00")
		}
		if !f.metaRules.includeMany(metadatas) {
			fs.Debugf(remote, "Excluded (Metadata Filter)")
			return false
		}
	}
	// Finally apply the path filter rules
	include := f.IncludeRemote(remote)
	if !include {
		fs.Debugf(remote, "Excluded (Path Filter)")
	}
	return include
}
// IncludeObject returns whether this object should be included into
// the sync or not. This is a convenience function to avoid calling
// o.ModTime(), which is an expensive operation.
func (f *Filter) IncludeObject(ctx context.Context, o fs.Object) bool {
	// Only fetch the modification time if a time filter is configured
	modTime := time.Unix(0, 0)
	if !f.ModTimeFrom.IsZero() || !f.ModTimeTo.IsZero() {
		modTime = o.ModTime(ctx)
	}
	// Only fetch metadata if there are metadata rules
	var metadata fs.Metadata
	if f.metaRules.len() > 0 {
		var err error
		metadata, err = fs.GetMetadata(ctx, o)
		if err != nil {
			fs.Errorf(o, "Failed to read metadata: %v", err)
			metadata = nil
		}
	}
	return f.Include(o.Remote(), o.Size(), modTime, metadata)
}
// DumpFilters dumps the filters in textual form, 1 per line
func (f *Filter) DumpFilters() string {
	var out []string
	if !f.ModTimeFrom.IsZero() {
		out = append(out, fmt.Sprintf("Last-modified date must be equal or greater than: %s", f.ModTimeFrom.String()))
	}
	if !f.ModTimeTo.IsZero() {
		out = append(out, fmt.Sprintf("Last-modified date must be equal or less than: %s", f.ModTimeTo.String()))
	}
	if f.Opt.MinSize >= 0 {
		out = append(out, fmt.Sprintf("Minimum size is: %s", f.Opt.MinSize.ByteUnit()))
	}
	if f.Opt.MaxSize >= 0 {
		out = append(out, fmt.Sprintf("Maximum size is: %s", f.Opt.MaxSize.ByteUnit()))
	}
	out = append(out, "--- File filter rules ---")
	for _, r := range f.fileRules.rules {
		out = append(out, r.String())
	}
	out = append(out, "--- Directory filter rules ---")
	for _, r := range f.dirRules.rules {
		out = append(out, r.String())
	}
	// Only show the metadata section if there are metadata rules
	if f.metaRules.len() > 0 {
		out = append(out, "--- Metadata filter rules ---")
		for _, r := range f.metaRules.rules {
			out = append(out, r.String())
		}
	}
	return strings.Join(out, "\n")
}
// HaveFilesFrom returns true if --files-from has been supplied
//
// f.files is initialised by initAddFile as soon as a files-from list
// is configured, even if the list contains no files.
func (f *Filter) HaveFilesFrom() bool {
	return f.files != nil
}
// errFilesFromNotSet is returned by the ListR function made by MakeListR
// when --files-from hasn't been supplied.
var errFilesFromNotSet = errors.New("--files-from not set so can't use Filter.ListR")
// MakeListR makes function to return all the files set using --files-from
//
// The returned ListR ignores dir and lists exactly the files in
// f.files, looking each one up with NewObject using ci.Checkers
// concurrent workers and passing the results to callback. Files that
// NewObject reports as not found are skipped silently.
func (f *Filter) MakeListR(ctx context.Context, NewObject func(ctx context.Context, remote string) (fs.Object, error)) fs.ListRFn {
	return func(ctx context.Context, dir string, callback fs.ListRCallback) error {
		ci := fs.GetConfig(ctx)
		if !f.HaveFilesFrom() {
			return errFilesFromNotSet
		}
		var (
			checkers = ci.Checkers
			remotes  = make(chan string, checkers)
			g, gCtx  = errgroup.WithContext(ctx)
		)
		// Start the workers which turn remotes into objects and
		// deliver them to the callback one entry at a time
		for range checkers {
			g.Go(func() (err error) {
				var entries = make(fs.DirEntries, 1)
				for remote := range remotes {
					entries[0], err = NewObject(gCtx, remote)
					if err == fs.ErrorObjectNotFound {
						// Skip files that are not found
					} else if err != nil {
						return err
					} else {
						err = callback(entries)
						if err != nil {
							return err
						}
					}
				}
				return nil
			})
		}
		// Feed the remotes to the workers, stopping early if a worker
		// has failed (gCtx is cancelled by the errgroup)
	outer:
		for remote := range f.files {
			select {
			case remotes <- remote:
			case <-gCtx.Done():
				break outer
			}
		}
		close(remotes)
		return g.Wait()
	}
}
// UsesDirectoryFilters returns true if the filter uses directory
// filters and false if it doesn't.
//
// This is used in deciding whether to walk directories or use ListR
func (f *Filter) UsesDirectoryFilters() bool {
	if len(f.dirRules.rules) == 0 {
		return false
	}
	// A leading "include everything" rule means no effective directory filtering
	first := f.dirRules.rules[0]
	return !(first.Include && first.Regexp.String() == "^.*$")
}
// Context key for config
type configContextKeyType struct{}

// configContextKey is the key under which a *Filter is stored in a Context
var configContextKey = configContextKeyType{}
// GetConfig returns the global or context sensitive config
func GetConfig(ctx context.Context) *Filter {
	if ctx != nil {
		if c, ok := ctx.Value(configContextKey).(*Filter); ok {
			return c
		}
	}
	// Fall back to the globally active filter
	return globalConfig
}
// CopyConfig copies the global config (if any) from srcCtx into
// dstCtx returning the new context.
func CopyConfig(dstCtx, srcCtx context.Context) context.Context {
	if srcCtx == nil {
		return dstCtx
	}
	if c := srcCtx.Value(configContextKey); c != nil {
		return context.WithValue(dstCtx, configContextKey, c)
	}
	// No filter config in srcCtx - nothing to copy
	return dstCtx
}
// AddConfig returns a mutable config structure based on a shallow
// copy of that found in ctx and returns a new context with that added
// to it.
func AddConfig(ctx context.Context) (context.Context, *Filter) {
	copied := *GetConfig(ctx)
	return context.WithValue(ctx, configContextKey, &copied), &copied
}
// ReplaceConfig replaces the filter config in the ctx with the one
// passed in and returns a new context with that added to it.
func ReplaceConfig(ctx context.Context, f *Filter) context.Context {
	return context.WithValue(ctx, configContextKey, f)
}
// Context key for the "use filter" flag
type useFlagContextKeyType struct{}

// useFlagContextKey is the key under which the *bool flag is stored in a Context
var useFlagContextKey = useFlagContextKeyType{}
// GetUseFilter obtains the "use filter" flag from context
// The flag tells filter-aware backends (Drive) to constrain List using filter
func GetUseFilter(ctx context.Context) bool {
	if ctx == nil {
		return false
	}
	flag, ok := ctx.Value(useFlagContextKey).(*bool)
	if !ok {
		// Flag not set in this context
		return false
	}
	return *flag
}
// SetUseFilter returns a context having (re)set the "use filter" flag
func SetUseFilter(ctx context.Context, useFilter bool) context.Context {
	if GetUseFilter(ctx) == useFilter {
		// Minimize depth of nested contexts
		return ctx
	}
	flag := useFilter
	return context.WithValue(ctx, useFlagContextKey, &flag)
}
// Reload the filters from the flags
func Reload(ctx context.Context) (err error) {
fi := GetConfig(ctx)
newFilter, err := NewFilter(&Opt)
if err != nil {
return err
}
*fi = *newFilter
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/filter/filter_test.go | fs/filter/filter_test.go | package filter
import (
"context"
"fmt"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNewFilterDefault checks that a filter built from nil options
// has the documented defaults and reports itself inactive.
func TestNewFilterDefault(t *testing.T) {
	f, err := NewFilter(nil)
	require.NoError(t, err)
	assert.False(t, f.Opt.DeleteExcluded)
	assert.Equal(t, fs.SizeSuffix(-1), f.Opt.MinSize)
	assert.Equal(t, fs.SizeSuffix(-1), f.Opt.MaxSize)
	assert.Len(t, f.fileRules.rules, 0)
	assert.Len(t, f.dirRules.rules, 0)
	assert.Len(t, f.metaRules.rules, 0)
	assert.Nil(t, f.files)
	assert.True(t, f.InActive())
}
// TestParseHashFilter exercises parseHashFilter with valid and
// invalid K/N strings, including uint64 boundary values.
func TestParseHashFilter(t *testing.T) {
	for _, test := range []struct {
		hashFilter string
		n          uint64
		k          uint64
		err        string
	}{
		{hashFilter: "", err: "no / found"},
		{hashFilter: "17", err: "no / found"},
		{hashFilter: "-1/2", err: "can't parse K="},
		{hashFilter: "1/-2", err: "can't parse N="},
		{hashFilter: "0/0", err: "N must be greater than 0"},
		{hashFilter: "0/18446744073709551615", k: 0, n: 18446744073709551615},
		{hashFilter: "0/18446744073709551616", err: "can't parse N="},
		{hashFilter: "18446744073709551615/1", k: 0, n: 1},
		{hashFilter: "18446744073709551616/1", err: "can't parse K="},
		{hashFilter: "1/2", k: 1, n: 2},
		{hashFilter: "17/3", k: 2, n: 3},
		{hashFilter: "@/1", k: 0, n: 1},
	} {
		gotK, gotN, gotErr := parseHashFilter(test.hashFilter)
		if test.err != "" {
			assert.Error(t, gotErr)
			assert.ErrorContains(t, gotErr, test.err, test.hashFilter)
		} else {
			assert.Equal(t, test.k, gotK, test.hashFilter)
			assert.Equal(t, test.n, gotN, test.hashFilter)
			assert.NoError(t, gotErr, test.hashFilter)
		}
	}
}
// testFile creates a temp file with the contents and returns its
// path. The caller is responsible for removing the file.
func testFile(t *testing.T, contents string) string {
	tmp, err := os.CreateTemp("", "filter_test")
	require.NoError(t, err)
	defer func() {
		require.NoError(t, tmp.Close())
	}()
	_, err = tmp.Write([]byte(contents))
	require.NoError(t, err)
	return tmp.Name()
}
// TestNewFilterForbiddenMixOfFilesFromAndFilterRule checks that
// combining --files-from with filter rules is rejected.
func TestNewFilterForbiddenMixOfFilesFromAndFilterRule(t *testing.T) {
	Opt := Opt // shadow the package default so the test can't modify it

	// Set up the input
	Opt.FilterRule = []string{"- filter1", "- filter1b"}
	Opt.FilesFrom = []string{testFile(t, "#comment\nfiles1\nfiles2\n")}

	rm := func(p string) {
		err := os.Remove(p)
		if err != nil {
			t.Logf("error removing %q: %v", p, err)
		}
	}
	// Reset the input
	defer func() {
		rm(Opt.FilesFrom[0])
	}()

	_, err := NewFilter(&Opt)
	require.Error(t, err)
	require.Contains(t, err.Error(), "the usage of --files-from overrides all other filters")
}
// TestNewFilterForbiddenMixOfFilesFromRawAndFilterRule checks that
// combining --files-from-raw with filter rules is rejected.
func TestNewFilterForbiddenMixOfFilesFromRawAndFilterRule(t *testing.T) {
	Opt := Opt // shadow the package default so the test can't modify it

	// Set up the input
	Opt.FilterRule = []string{"- filter1", "- filter1b"}
	Opt.FilesFromRaw = []string{testFile(t, "#comment\nfiles1\nfiles2\n")}

	rm := func(p string) {
		err := os.Remove(p)
		if err != nil {
			t.Logf("error removing %q: %v", p, err)
		}
	}
	// Reset the input
	defer func() {
		rm(Opt.FilesFromRaw[0])
	}()

	_, err := NewFilter(&Opt)
	require.Error(t, err)
	require.Contains(t, err.Error(), "the usage of --files-from-raw overrides all other filters")
}
// TestNewFilterWithFilesFromAlone checks that --files-from on its own
// loads the listed files, skipping comment lines.
func TestNewFilterWithFilesFromAlone(t *testing.T) {
	Opt := Opt // shadow the package default so the test can't modify it

	// Set up the input
	Opt.FilesFrom = []string{testFile(t, "#comment\nfiles1\nfiles2\n")}

	rm := func(p string) {
		err := os.Remove(p)
		if err != nil {
			t.Logf("error removing %q: %v", p, err)
		}
	}
	// Reset the input
	defer func() {
		rm(Opt.FilesFrom[0])
	}()

	f, err := NewFilter(&Opt)
	require.NoError(t, err)
	assert.Len(t, f.files, 2)
	for _, name := range []string{"files1", "files2"} {
		_, ok := f.files[name]
		if !ok {
			t.Errorf("Didn't find file %q in f.files", name)
		}
	}
}
// TestNewFilterWithFilesFromRaw checks that --files-from-raw loads
// every line verbatim, including comment-looking lines.
func TestNewFilterWithFilesFromRaw(t *testing.T) {
	Opt := Opt // shadow the package default so the test can't modify it

	// Set up the input
	Opt.FilesFromRaw = []string{testFile(t, "#comment\nfiles1\nfiles2\n")}

	rm := func(p string) {
		err := os.Remove(p)
		if err != nil {
			t.Logf("error removing %q: %v", p, err)
		}
	}
	// Reset the input
	defer func() {
		rm(Opt.FilesFromRaw[0])
	}()

	f, err := NewFilter(&Opt)
	require.NoError(t, err)
	assert.Len(t, f.files, 3)
	for _, name := range []string{"#comment", "files1", "files2"} {
		_, ok := f.files[name]
		if !ok {
			t.Errorf("Didn't find file %q in f.files", name)
		}
	}
}
// TestNewFilterFullExceptFilesFromOpt builds a filter from every kind
// of rule option except --files-from and compares the DumpFilters
// output against a golden string.
func TestNewFilterFullExceptFilesFromOpt(t *testing.T) {
	Opt := Opt // shadow the package default so the test can't modify it

	mins := fs.SizeSuffix(100 * 1024)
	maxs := fs.SizeSuffix(1000 * 1024)

	// Set up the input
	Opt.DeleteExcluded = true
	Opt.FilterRule = []string{"- filter1", "- filter1b"}
	Opt.FilterFrom = []string{testFile(t, "#comment\n+ filter2\n- filter3\n")}
	Opt.ExcludeRule = []string{"exclude1"}
	Opt.ExcludeFrom = []string{testFile(t, "#comment\nexclude2\nexclude3\n")}
	Opt.IncludeRule = []string{"include1"}
	Opt.IncludeFrom = []string{testFile(t, "#comment\ninclude2\ninclude3\n")}
	Opt.MinSize = mins
	Opt.MaxSize = maxs

	rm := func(p string) {
		err := os.Remove(p)
		if err != nil {
			t.Logf("error removing %q: %v", p, err)
		}
	}
	// Reset the input
	defer func() {
		rm(Opt.FilterFrom[0])
		rm(Opt.ExcludeFrom[0])
		rm(Opt.IncludeFrom[0])
	}()

	f, err := NewFilter(&Opt)
	require.NoError(t, err)
	assert.True(t, f.Opt.DeleteExcluded)
	assert.Equal(t, f.Opt.MinSize, mins)
	assert.Equal(t, f.Opt.MaxSize, maxs)
	got := f.DumpFilters()
	// Golden output - includes first, then excludes, then filter
	// rules, then the implicit "- /**" from using includes
	want := `Minimum size is: 100 KiB
Maximum size is: 1000 KiB
--- File filter rules ---
+ (^|/)include1$
+ (^|/)include2$
+ (^|/)include3$
- (^|/)exclude1$
- (^|/)exclude2$
- (^|/)exclude3$
- (^|/)filter1$
- (^|/)filter1b$
+ (^|/)filter2$
- (^|/)filter3$
- ^.*$
--- Directory filter rules ---
+ ^.*$
- ^.*$`
	assert.Equal(t, want, got)
	assert.False(t, f.InActive())
}
// includeTest describes one Filter.Include test case
type includeTest struct {
	in      string // remote path passed to Include
	size    int64  // object size in bytes
	modTime int64  // modification time as Unix seconds
	want    bool   // expected result
}
// testInclude runs f.Include over each test case, reporting failures
// at the caller's line.
func testInclude(t *testing.T, f *Filter, tests []includeTest) {
	t.Helper()
	for _, test := range tests {
		when := time.Unix(test.modTime, 0)
		got := f.Include(test.in, test.size, when, nil)
		assert.Equal(t, test.want, got, fmt.Sprintf("in=%q, size=%v, modTime=%v", test.in, test.size, when))
	}
}
// includeDirTest describes one IncludeDirectory test case
type includeDirTest struct {
	in   string // directory remote passed to the include function
	want bool   // expected result
}
// testDirInclude runs f.IncludeDirectory over each test case,
// reporting failures at the caller's line.
func testDirInclude(t *testing.T, f *Filter, tests []includeDirTest) {
	t.Helper() // consistent with testInclude so failures point at the caller
	for _, test := range tests {
		got, err := f.IncludeDirectory(context.Background(), nil)(test.in)
		require.NoError(t, err)
		assert.Equal(t, test.want, got, test.in)
	}
}
// TestNewFilterIncludeFiles checks AddFile strips leading slashes and
// that only the added files pass Include.
func TestNewFilterIncludeFiles(t *testing.T) {
	f, err := NewFilter(nil)
	require.NoError(t, err)
	err = f.AddFile("file1.jpg")
	require.NoError(t, err)
	err = f.AddFile("/file2.jpg")
	require.NoError(t, err)
	assert.Equal(t, FilesMap{
		"file1.jpg": {},
		"file2.jpg": {},
	}, f.files)
	// Top-level files produce no parent directory entries
	assert.Equal(t, FilesMap{}, f.dirs)
	testInclude(t, f, []includeTest{
		{"file1.jpg", 0, 0, true},
		{"file2.jpg", 1, 0, true},
		{"potato/file2.jpg", 2, 0, false},
		{"file3.jpg", 3, 0, false},
	})
	assert.False(t, f.InActive())
}
// TestNewFilterIncludeFilesDirs checks that AddFile records all the
// parent directories of the added files and that IncludeDirectory
// admits exactly those directories.
func TestNewFilterIncludeFilesDirs(t *testing.T) {
	f, err := NewFilter(nil)
	require.NoError(t, err)
	for _, path := range []string{
		"path/to/dir/file1.png",
		"/path/to/dir/file2.png",
		"/path/to/file3.png",
		"/path/to/dir2/file4.png",
	} {
		err = f.AddFile(path)
		require.NoError(t, err)
	}
	assert.Equal(t, FilesMap{
		"path":         {},
		"path/to":      {},
		"path/to/dir":  {},
		"path/to/dir2": {},
	}, f.dirs)
	testDirInclude(t, f, []includeDirTest{
		{"path", true},
		{"path/to", true},
		{"path/to/", true},
		{"/path/to", true},
		{"/path/to/", true},
		{"path/to/dir", true},
		{"path/to/dir2", true},
		{"path/too", false},
		{"path/three", false},
		{"four", false},
	})
}
// TestNewFilterHaveFilesFrom checks HaveFilesFrom flips to true once
// a file has been added.
func TestNewFilterHaveFilesFrom(t *testing.T) {
	f, err := NewFilter(nil)
	require.NoError(t, err)
	assert.Equal(t, false, f.HaveFilesFrom())
	require.NoError(t, f.AddFile("file"))
	assert.Equal(t, true, f.HaveFilesFrom())
}
// TestNewFilterMakeListR checks the ListR made by MakeListR: the
// error when --files-from is unset, listing of the added files,
// skipping of not-found files, and error propagation from NewObject.
func TestNewFilterMakeListR(t *testing.T) {
	f, err := NewFilter(nil)
	require.NoError(t, err)

	// Check error if no files
	listR := f.MakeListR(context.Background(), nil)
	err = listR(context.Background(), "", nil)
	assert.EqualError(t, err, errFilesFromNotSet.Error())

	// Add some files
	for _, path := range []string{
		"path/to/dir/file1.png",
		"/path/to/dir/file2.png",
		"/path/to/file3.png",
		"/path/to/dir2/file4.png",
		"notfound",
	} {
		err = f.AddFile(path)
		require.NoError(t, err)
	}

	assert.Equal(t, 5, len(f.files))

	// NewObject function for MakeListR - mutex guards newObjects as it
	// is called from several checker goroutines
	newObjects := FilesMap{}
	var newObjectMu sync.Mutex
	NewObject := func(ctx context.Context, remote string) (fs.Object, error) {
		newObjectMu.Lock()
		defer newObjectMu.Unlock()
		if remote == "notfound" {
			return nil, fs.ErrorObjectNotFound
		} else if remote == "error" {
			return nil, assert.AnError
		}
		newObjects[remote] = struct{}{}
		return mockobject.New(remote), nil
	}

	// Callback for ListRFn - mutex guards listRObjects likewise
	listRObjects := FilesMap{}
	var callbackMu sync.Mutex
	listRcallback := func(entries fs.DirEntries) error {
		callbackMu.Lock()
		defer callbackMu.Unlock()
		for _, entry := range entries {
			listRObjects[entry.Remote()] = struct{}{}
		}
		return nil
	}

	// Make the listR and call it
	listR = f.MakeListR(context.Background(), NewObject)
	err = listR(context.Background(), "", listRcallback)
	require.NoError(t, err)

	// Check that the correct objects were created and listed -
	// "notfound" should have been silently skipped
	want := FilesMap{
		"path/to/dir/file1.png":  {},
		"path/to/dir/file2.png":  {},
		"path/to/file3.png":      {},
		"path/to/dir2/file4.png": {},
	}
	assert.Equal(t, want, newObjects)
	assert.Equal(t, want, listRObjects)

	// Now check an error is returned from NewObject
	require.NoError(t, f.AddFile("error"))
	err = listR(context.Background(), "", listRcallback)
	require.EqualError(t, err, assert.AnError.Error())

	// The checker will exit by the error above
	ci := fs.GetConfig(context.Background())
	ci.Checkers = 1

	// Now check an error is returned from NewObject
	require.NoError(t, f.AddFile("error"))
	err = listR(context.Background(), "", listRcallback)
	require.EqualError(t, err, assert.AnError.Error())
}
// TestNewFilterMinSize checks the MinSize bound is inclusive.
func TestNewFilterMinSize(t *testing.T) {
	f, err := NewFilter(nil)
	require.NoError(t, err)
	f.Opt.MinSize = 100
	testInclude(t, f, []includeTest{
		{"file1.jpg", 100, 0, true},
		{"file2.jpg", 101, 0, true},
		{"potato/file2.jpg", 99, 0, false},
	})
	assert.False(t, f.InActive())
}
// TestNewFilterMaxSize checks the MaxSize bound is inclusive.
func TestNewFilterMaxSize(t *testing.T) {
	f, err := NewFilter(nil)
	require.NoError(t, err)
	f.Opt.MaxSize = 100
	testInclude(t, f, []includeTest{
		{"file1.jpg", 100, 0, true},
		{"file2.jpg", 101, 0, false},
		{"potato/file2.jpg", 99, 0, true},
	})
	assert.False(t, f.InActive())
}
// TestNewFilterMinAndMaxAge checks that both modtime bounds apply
// together and that both endpoints are inclusive.
func TestNewFilterMinAndMaxAge(t *testing.T) {
	f, err := NewFilter(nil)
	require.NoError(t, err)
	f.ModTimeFrom = time.Unix(1440000002, 0)
	f.ModTimeTo = time.Unix(1440000003, 0)
	testInclude(t, f, []includeTest{
		{"file1.jpg", 100, 1440000000, false},
		{"file2.jpg", 101, 1440000001, false},
		{"file3.jpg", 102, 1440000002, true},
		{"potato/file1.jpg", 98, 1440000003, true},
		{"potato/file2.jpg", 99, 1440000004, false},
	})
	assert.False(t, f.InActive())
}
func TestNewFilterMinAge(t *testing.T) {
f, err := NewFilter(nil)
require.NoError(t, err)
f.ModTimeTo = time.Unix(1440000002, 0)
testInclude(t, f, []includeTest{
{"file1.jpg", 100, 1440000000, true},
{"file2.jpg", 101, 1440000001, true},
{"file3.jpg", 102, 1440000002, true},
{"potato/file1.jpg", 98, 1440000003, false},
{"potato/file2.jpg", 99, 1440000004, false},
})
assert.False(t, f.InActive())
}
func TestNewFilterMaxAge(t *testing.T) {
f, err := NewFilter(nil)
require.NoError(t, err)
f.ModTimeFrom = time.Unix(1440000002, 0)
testInclude(t, f, []includeTest{
{"file1.jpg", 100, 1440000000, false},
{"file2.jpg", 101, 1440000001, false},
{"file3.jpg", 102, 1440000002, true},
{"potato/file1.jpg", 98, 1440000003, true},
{"potato/file2.jpg", 99, 1440000004, true},
})
assert.False(t, f.InActive())
}
// TestNewFilterMatches exercises the core include/exclude rule engine:
// rule ordering, the "!" clear directive, anchored globs, the
// difference between * and **, and the derived directory filters.
func TestNewFilterMatches(t *testing.T) {
	f, err := NewFilter(nil)
	require.NoError(t, err)
	add := func(s string) {
		err := f.AddRule(s)
		require.NoError(t, err)
	}
	add("+ cleared")
	add("!") // clears all rules added so far - "cleared" is not included below
	add("- /file1.jpg")
	add("+ /file2.png")
	add("+ /*.jpg")
	add("- /*.png")
	add("- /potato")
	add("+ /sausage1")
	add("+ /sausage2*")
	add("+ /sausage3**")
	add("+ /a/*.jpg")
	add("- *")
	// rows: {remote, size, modtime, want included}
	testInclude(t, f, []includeTest{
		{"cleared", 100, 0, false},
		{"file1.jpg", 100, 0, false},
		{"file2.png", 100, 0, true},
		{"FILE2.png", 100, 0, false}, // matching is case sensitive by default
		{"afile2.png", 100, 0, false},
		{"file3.jpg", 101, 0, true},
		{"file4.png", 101, 0, false},
		{"potato", 101, 0, false},
		{"sausage1", 101, 0, true},
		{"sausage1/potato", 101, 0, false},
		{"sausage2potato", 101, 0, true},
		{"sausage2/potato", 101, 0, false}, // * does not cross /
		{"sausage3/potato", 101, 0, true},  // ** does cross /
		{"a/one.jpg", 101, 0, true},
		{"a/one.png", 101, 0, false},
		{"unicorn", 99, 0, false},
	})
	// Directory inclusion derived from the file rules above.
	testDirInclude(t, f, []includeDirTest{
		{"sausage1", false},
		{"sausage2", false},
		{"sausage2/sub", false},
		{"sausage2/sub/dir", false},
		{"sausage3", true},
		{"SAUSAGE3", false},
		{"sausage3/sub", true},
		{"sausage3/sub/dir", true},
		{"sausage4", false},
		{"a", true},
	})
	assert.False(t, f.InActive())
}

// TestNewFilterMatchesIgnoreCase checks that Opt.IgnoreCase makes both
// file and directory matching case insensitive.
func TestNewFilterMatchesIgnoreCase(t *testing.T) {
	f, err := NewFilter(nil)
	require.NoError(t, err)
	f.Opt.IgnoreCase = true
	add := func(s string) {
		err := f.AddRule(s)
		require.NoError(t, err)
	}
	add("+ /file2.png")
	add("+ /sausage3**")
	add("- *")
	testInclude(t, f, []includeTest{
		{"file2.png", 100, 0, true},
		{"FILE2.png", 100, 0, true},
	})
	testDirInclude(t, f, []includeDirTest{
		{"sausage3", true},
		{"SAUSAGE3", true},
	})
	assert.False(t, f.InActive())
}

// TestNewFilterMatchesRegexp checks {{ }} regexp rules, including an
// inline (?i) flag which affects only the rule containing it.
func TestNewFilterMatchesRegexp(t *testing.T) {
	f, err := NewFilter(nil)
	require.NoError(t, err)
	add := func(s string) {
		err := f.AddRule(s)
		require.NoError(t, err)
	}
	add(`+ /{{file\d+\.png}}`)
	add(`+ *.{{(?i)jpg}}`)
	add(`- *`)
	testInclude(t, f, []includeTest{
		{"file2.png", 100, 0, true},
		{"sub/file2.png", 100, 0, false}, // first rule is rooted with /
		{"file123.png", 100, 0, true},
		{"File123.png", 100, 0, false},
		{"something.jpg", 100, 0, true},
		{"deep/path/something.JPG", 100, 0, true}, // (?i) applies here
		{"something.gif", 100, 0, false},
	})
	testDirInclude(t, f, []includeDirTest{
		{"anything at all", true},
	})
	assert.False(t, f.InActive())
}
// TestNewFilterHashFilter checks Opt.HashFilter ("i/n") which
// deterministically partitions file names into n shards by hashing the
// name, applied after the other rules (here "- *.bin").
//
// The expectations encode which of the 4 shards each name hashes into.
// They also show (per the expected values) that: shard 0/4 and 4/4
// select the same files, that different unicode forms of the same
// letter land in the same shard, and that case differences do not
// change the shard.
func TestNewFilterHashFilter(t *testing.T) {
	const e1 = "filé1.jpg" // one of the unicode E characters
	const e2 = "filé1.jpg" // a different unicode E character
	assert.NotEqual(t, e1, e2)
	for i := 0; i <= 4; i++ {
		opt := Opt // start from the package default options
		opt.HashFilter = fmt.Sprintf("%d/4", i)
		opt.ExcludeRule = []string{"*.bin"}
		f, err := NewFilter(&opt)
		require.NoError(t, err)
		t.Run(opt.HashFilter, func(t *testing.T) {
			testInclude(t, f, []includeTest{
				{"file1.jpg", 0, 0, i == 0 || i == 4},
				{"FILE1.jpg", 0, 0, i == 0 || i == 4},
				{"file2.jpg", 1, 0, i == 2},
				{"File2.jpg", 1, 0, i == 2},
				{"file3.jpg", 2, 0, i == 1},
				{"file4.jpg", 3, 0, i == 2},
				{"file5.jpg", 4, 0, i == 0 || i == 4},
				{"file6.jpg", 5, 0, i == 1},
				{"file7.jpg", 6, 0, i == 3},
				{"file8.jpg", 7, 0, i == 3},
				{"file9.jpg", 7, 0, i == 1},
				{e1, 0, 0, i == 3},
				{e2, 0, 0, i == 3},
				{"hello" + e1, 0, 0, i == 2},
				{"HELLO" + e2, 0, 0, i == 2},
				{"hello1" + e1, 0, 0, i == 1},
				{"Hello1" + e2, 0, 0, i == 1},
				{"exclude.bin", 8, 0, false}, // excluded by rule, regardless of shard
			})
		})
		assert.False(t, f.InActive())
	}
}
// includeTestMetadata describes one metadata filtering case: a remote
// name (used only in the failure message), the metadata to filter on,
// and whether the file should be included.
type includeTestMetadata struct {
	in       string
	metadata fs.Metadata
	want     bool
}

// testIncludeMetadata runs Include for each case with zero size and
// zero modtime so that only the metadata rules influence the result.
func testIncludeMetadata(t *testing.T, f *Filter, tests []includeTestMetadata) {
	for _, test := range tests {
		got := f.Include(test.in, 0, time.Time{}, test.metadata)
		assert.Equal(t, test.want, got, fmt.Sprintf("in=%q, metadata=%+v", test.in, test.metadata))
	}
}
// TestNewFilterMetadataInclude checks include-first metadata rules:
// a file is included only if some key=value pair matches "t*=t*".
// Per the table, matching is case sensitive and nil/empty metadata
// cannot match an include rule.
func TestNewFilterMetadataInclude(t *testing.T) {
	f, err := NewFilter(nil)
	require.NoError(t, err)
	add := func(s string) {
		err := f.metaRules.AddRule(s)
		require.NoError(t, err)
	}
	add(`+ t*=t*`)
	add(`- *`)
	testIncludeMetadata(t, f, []includeTestMetadata{
		{"nil", nil, false},
		{"empty", fs.Metadata{}, false},
		{"ok1", fs.Metadata{"thing": "thang"}, true},
		{"ok2", fs.Metadata{"thing1": "thang1"}, true},
		{"missing", fs.Metadata{"Thing1": "Thang1"}, false}, // case sensitive
	})
	assert.False(t, f.InActive())
}

// TestNewFilterMetadataExclude checks exclude-first metadata rules:
// everything is included except files carrying thing=thang. Note that
// nil/empty metadata is included since the exclude rule cannot match.
func TestNewFilterMetadataExclude(t *testing.T) {
	f, err := NewFilter(nil)
	require.NoError(t, err)
	add := func(s string) {
		err := f.metaRules.AddRule(s)
		require.NoError(t, err)
	}
	add(`- thing=thang`)
	add(`+ *`)
	testIncludeMetadata(t, f, []includeTestMetadata{
		{"nil", nil, true},
		{"empty", fs.Metadata{}, true},
		{"ok1", fs.Metadata{"thing": "thang"}, false},
		{"missing1", fs.Metadata{"thing1": "thang1"}, true},
	})
	assert.False(t, f.InActive())
}
// TestFilterAddDirRuleOrFileRule checks how Add translates a single
// include/exclude glob into separate file and directory rule sets, as
// rendered by DumpFilters. Note from the expectations: a trailing /
// makes a directory-only rule, and an include generates extra
// directory rules so parent directories are traversed.
func TestFilterAddDirRuleOrFileRule(t *testing.T) {
	for _, test := range []struct {
		included bool
		glob     string
		want     string
	}{
		{
			false,
			"potato",
			`--- File filter rules ---
- (^|/)potato$
--- Directory filter rules ---`,
		},
		{
			true,
			"potato",
			`--- File filter rules ---
+ (^|/)potato$
--- Directory filter rules ---
+ ^.*$`,
		},
		{
			false,
			"potato/",
			`--- File filter rules ---
- (^|/)potato/.*$
--- Directory filter rules ---
- (^|/)potato/.*$`,
		},
		{
			true,
			"potato/",
			`--- File filter rules ---
--- Directory filter rules ---
+ (^|/)potato/$`,
		},
		{
			false,
			"*",
			`--- File filter rules ---
- (^|/)[^/]*$
--- Directory filter rules ---
- ^.*$`,
		},
		{
			true,
			"*",
			`--- File filter rules ---
+ (^|/)[^/]*$
--- Directory filter rules ---
+ ^.*$`,
		},
		{
			false,
			".*{,/**}",
			`--- File filter rules ---
- (^|/)\.[^/]*(|/.*)$
--- Directory filter rules ---
- (^|/)\.[^/]*(|/.*)$`,
		},
		{
			true,
			"a/b/c/d",
			`--- File filter rules ---
+ (^|/)a/b/c/d$
--- Directory filter rules ---
+ (^|/)a/b/c/$
+ (^|/)a/b/$
+ (^|/)a/$`,
		},
	} {
		f, err := NewFilter(nil)
		require.NoError(t, err)
		err = f.Add(test.included, test.glob)
		require.NoError(t, err)
		got := f.DumpFilters()
		assert.Equal(t, test.want, got, fmt.Sprintf("Add(%v, %q)", test.included, test.glob))
	}
}
// testFilterForEachLine checks forEachLine on a fixture file. With
// raw=false comment and blank lines are dropped; with raw=true lines
// are passed through verbatim. With useStdin=true the file is fed via
// os.Stdin using the special "-" file name.
//
// NOTE(review): the raw-mode expectation below depends on the exact
// leading/trailing whitespace and blank lines in the fixture literal -
// confirm that whitespace survived any reformatting of this file.
func testFilterForEachLine(t *testing.T, useStdin, raw bool) {
	file := testFile(t, `; comment
one
# another comment
two
# indented comment
three
four
five
six `)
	defer func() {
		err := os.Remove(file)
		require.NoError(t, err)
	}()
	lines := []string{}
	fileName := file
	if useStdin {
		// Redirect os.Stdin to the fixture for the duration of the test
		in, err := os.Open(file)
		require.NoError(t, err)
		oldStdin := os.Stdin
		os.Stdin = in
		defer func() {
			os.Stdin = oldStdin
			_ = in.Close()
		}()
		fileName = "-"
	}
	err := forEachLine(fileName, raw, func(s string) error {
		lines = append(lines, s)
		return nil
	})
	require.NoError(t, err)
	if raw {
		assert.Equal(t, "; comment,one,# another comment,,,two, # indented comment,three ,four ,five, six ",
			strings.Join(lines, ","))
	} else {
		assert.Equal(t, "one,two,three,four,five,six", strings.Join(lines, ","))
	}
}

// TestFilterForEachLine checks reading a file with filtering.
func TestFilterForEachLine(t *testing.T) {
	testFilterForEachLine(t, false, false)
}

// TestFilterForEachLineStdin checks reading stdin with filtering.
func TestFilterForEachLineStdin(t *testing.T) {
	testFilterForEachLine(t, true, false)
}

// TestFilterForEachLineWithRaw checks reading a file unfiltered.
func TestFilterForEachLineWithRaw(t *testing.T) {
	testFilterForEachLine(t, false, true)
}

// TestFilterForEachLineStdinWithRaw checks reading stdin unfiltered.
func TestFilterForEachLineStdinWithRaw(t *testing.T) {
	testFilterForEachLine(t, true, true)
}
// TestFilterMatchesFromDocs checks the glob matching examples given in
// the filtering documentation. Each case is run as a single include
// rule for the glob followed by "- *".
func TestFilterMatchesFromDocs(t *testing.T) {
	for _, test := range []struct {
		glob       string
		included   bool
		file       string
		ignoreCase bool
	}{
		{"file.jpg", true, "file.jpg", false},
		{"file.jpg", true, "directory/file.jpg", false},
		{"file.jpg", false, "afile.jpg", false},
		{"file.jpg", false, "directory/afile.jpg", false},
		{"/file.jpg", true, "file.jpg", false},
		{"/file.jpg", false, "afile.jpg", false},
		{"/file.jpg", false, "directory/file.jpg", false},
		{"*.jpg", true, "file.jpg", false},
		{"*.jpg", true, "directory/file.jpg", false},
		{"*.jpg", false, "file.jpg/anotherfile.png", false},
		{"dir/**", true, "dir/file.jpg", false},
		{"dir/**", true, "dir/dir1/dir2/file.jpg", false},
		{"dir/**", false, "directory/file.jpg", false},
		{"dir/**", false, "adir/file.jpg", false},
		{"l?ss", true, "less", false},
		{"l?ss", true, "lass", false},
		{"l?ss", false, "floss", false},
		{"h[ae]llo", true, "hello", false},
		{"h[ae]llo", true, "hallo", false},
		{"h[ae]llo", false, "hullo", false},
		{"{one,two}_potato", true, "one_potato", false},
		{"{one,two}_potato", true, "two_potato", false},
		{"{one,two}_potato", false, "three_potato", false},
		{"{one,two}_potato", false, "_potato", false},
		{"\\*.jpg", true, "*.jpg", false},
		{"\\\\.jpg", true, "\\.jpg", false},
		{"\\[one\\].jpg", true, "[one].jpg", false},
		{"potato", true, "potato", false},
		{"potato", false, "POTATO", false},
		{"potato", true, "potato", true},
		{"potato", true, "POTATO", true}, // IgnoreCase makes this match
	} {
		f, err := NewFilter(nil)
		require.NoError(t, err)
		if test.ignoreCase {
			f.Opt.IgnoreCase = true
		}
		err = f.Add(true, test.glob)
		require.NoError(t, err)
		err = f.Add(false, "*")
		require.NoError(t, err)
		included := f.Include(test.file, 0, time.Unix(0, 0), nil)
		if included != test.included {
			t.Errorf("%q match %q: want %v got %v", test.glob, test.file, test.included, included)
		}
	}
}
// TestNewFilterUsesDirectoryFilters checks UsesDirectoryFilters. Per
// the expectations it reports true only when a rule constrains whole
// directories (contains a / or **), not when rules match file names
// only.
func TestNewFilterUsesDirectoryFilters(t *testing.T) {
	for i, test := range []struct {
		rules []string
		want  bool
	}{
		{
			rules: []string{},
			want:  false,
		},
		{
			rules: []string{
				"+ *",
			},
			want: false,
		},
		{
			rules: []string{
				"+ *.jpg",
				"- *",
			},
			want: false,
		},
		{
			rules: []string{
				"- *.jpg",
			},
			want: false,
		},
		{
			rules: []string{
				"- *.jpg",
				"+ *",
			},
			want: false,
		},
		{
			rules: []string{
				"+ dir/*.jpg",
				"- *",
			},
			want: true,
		},
		{
			rules: []string{
				"+ dir/**",
			},
			want: true,
		},
		{
			rules: []string{
				"- dir/**",
			},
			want: true,
		},
		{
			rules: []string{
				"- /dir/**",
			},
			want: true,
		},
	} {
		what := fmt.Sprintf("#%d", i)
		f, err := NewFilter(nil)
		require.NoError(t, err)
		for _, rule := range test.rules {
			err := f.AddRule(rule)
			require.NoError(t, err, what)
		}
		got := f.UsesDirectoryFilters()
		assert.Equal(t, test.want, got, fmt.Sprintf("%s: %s", what, f.DumpFilters()))
	}
}
// TestGetConfig checks fetching and storing filter config in a
// context.Context: the global config is returned for a nil or empty
// context, AddConfig stores a fresh config, and ReplaceConfig swaps in
// a caller supplied filter.
func TestGetConfig(t *testing.T) {
	ctx := context.Background()

	// Check nil
	//lint:ignore SA1012 false positive when running staticcheck, we want to test passing a nil Context and therefore ignore lint suggestion to use context.TODO
	//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1012
	config := GetConfig(nil)
	assert.Equal(t, globalConfig, config)

	// Check empty config
	config = GetConfig(ctx)
	assert.Equal(t, globalConfig, config)

	// Check adding a config
	ctx2, config2 := AddConfig(ctx)
	require.NoError(t, config2.AddRule("+ *.jpg"))
	assert.NotEqual(t, config2, config)

	// Check can get config back
	config2ctx := GetConfig(ctx2)
	assert.Equal(t, config2, config2ctx)

	// Check ReplaceConfig - a freshly made default filter compares
	// equal to the global default config
	f, err := NewFilter(nil)
	require.NoError(t, err)
	ctx3 := ReplaceConfig(ctx, f)
	assert.Equal(t, globalConfig, GetConfig(ctx3))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/filter/glob.go | fs/filter/glob.go | // rsync style glob parser
package filter
import (
"bytes"
"fmt"
"regexp"
"strings"
"github.com/rclone/rclone/fs"
)
// GlobPathToRegexp converts an rsync style glob path to a regexp
//
// It is a convenience wrapper around globToRegexp with path mode and
// anchoring enabled.
func GlobPathToRegexp(glob string, ignoreCase bool) (*regexp.Regexp, error) {
	return globToRegexp(glob, true, true, ignoreCase)
}

// GlobStringToRegexp converts an rsync style glob string to a regexp
//
// Without adding of anchors but with ignoring of case, i.e. called
// `GlobStringToRegexp(glob, false, true)`, it takes a lenient approach
// where the glob "sum" would match "CheckSum", more similar to text
// search functions than strict glob filtering.
//
// With adding of anchors and not ignoring case, i.e. called
// `GlobStringToRegexp(glob, true, false)`, it uses a strict glob
// interpretation where the previous example would have to be changed to
// "*Sum" to match "CheckSum".
func GlobStringToRegexp(glob string, addAnchors bool, ignoreCase bool) (*regexp.Regexp, error) {
	return globToRegexp(glob, false, addAnchors, ignoreCase)
}
// globToRegexp converts an rsync style glob to a regexp
//
// Set pathMode true for matching of path/file names, e.g.
// special treatment of path separator `/` and double asterisk `**`,
// see filtering.md for details.
//
// Set addAnchors true to add start of string `^` and end of string `$` anchors.
func globToRegexp(glob string, pathMode bool, addAnchors bool, ignoreCase bool) (*regexp.Regexp, error) {
	var re bytes.Buffer
	// Case insensitivity is an inline regexp flag so character classes
	// keep working unmodified.
	if ignoreCase {
		_, _ = re.WriteString("(?i)")
	}
	if addAnchors {
		if pathMode {
			// A leading / roots the glob at the start of the path;
			// otherwise it may match after any path separator.
			if strings.HasPrefix(glob, "/") {
				glob = glob[1:]
				_ = re.WriteByte('^')
			} else {
				_, _ = re.WriteString("(^|/)")
			}
		} else {
			_, _ = re.WriteString("^")
		}
	}
	// consecutiveStars counts a pending run of '*'. The run is only
	// translated once it ends, so `*` and `**` can be distinguished.
	consecutiveStars := 0
	insertStars := func() error {
		if consecutiveStars > 0 {
			if pathMode {
				switch consecutiveStars {
				case 1:
					_, _ = re.WriteString(`[^/]*`) // * - within one path segment
				case 2:
					_, _ = re.WriteString(`.*`) // ** - may cross /
				default:
					return fmt.Errorf("too many stars in %q", glob)
				}
			} else {
				switch consecutiveStars {
				case 1:
					_, _ = re.WriteString(`.*`)
				default:
					return fmt.Errorf("too many stars in %q", glob)
				}
			}
		}
		consecutiveStars = 0
		return nil
	}
	// overwriteLastChar rewrites the byte just emitted - used because
	// the {{ and }} delimiters are only recognised one rune late.
	overwriteLastChar := func(c byte) {
		buf := re.Bytes()
		buf[len(buf)-1] = c
	}
	// Parser state
	inBraces := false    // inside { ... } alternation
	inBrackets := 0      // nesting depth of [ ... ] character classes
	slashed := false     // previous rune was a backslash escape
	inRegexp := false    // inside {{ ... }}
	inRegexpEnd := false // have received }} waiting for more
	var next, last rune
	for _, c := range glob {
		next, last = c, next
		if slashed {
			// Emit the escaped rune verbatim
			_, _ = re.WriteRune(c)
			slashed = false
			continue
		}
		if inRegexpEnd {
			if c == '}' {
				// Regexp is ending with }} choose longest segment
				// Replace final ) with }
				overwriteLastChar('}')
				_ = re.WriteByte(')')
				continue
			} else {
				inRegexpEnd = false
			}
		}
		if inRegexp {
			// Pass regexp text straight through until the closing }}
			if c == '}' && last == '}' {
				inRegexp = false
				inRegexpEnd = true
				// Replace final } with )
				overwriteLastChar(')')
			} else {
				_, _ = re.WriteRune(c)
			}
			continue
		}
		if c != '*' {
			// A non-star terminates any pending run of stars
			err := insertStars()
			if err != nil {
				return nil, err
			}
		}
		if inBrackets > 0 {
			// Inside [...] everything is passed through unchanged
			_, _ = re.WriteRune(c)
			if c == '[' {
				inBrackets++
			}
			if c == ']' {
				inBrackets--
			}
			continue
		}
		switch c {
		case '\\':
			_, _ = re.WriteRune(c)
			slashed = true
		case '*':
			consecutiveStars++
		case '?':
			if pathMode {
				_, _ = re.WriteString(`[^/]`) // ? matches any char except /
			} else {
				_, _ = re.WriteString(`.`)
			}
		case '[':
			_, _ = re.WriteRune(c)
			inBrackets++
		case ']':
			return nil, fmt.Errorf("mismatched ']' in glob %q", glob)
		case '{':
			if inBraces {
				if last == '{' {
					// {{ starts a literal regexp section
					inRegexp = true
					inBraces = false
				} else {
					return nil, fmt.Errorf("can't nest '{' '}' in glob %q", glob)
				}
			} else {
				inBraces = true
				_ = re.WriteByte('(')
			}
		case '}':
			if !inBraces {
				return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
			}
			_ = re.WriteByte(')')
			inBraces = false
		case ',':
			if inBraces {
				_ = re.WriteByte('|') // {a,b} -> (a|b)
			} else {
				_, _ = re.WriteRune(c)
			}
		case '.', '+', '(', ')', '|', '^', '$': // regexp meta characters not dealt with above
			_ = re.WriteByte('\\')
			_, _ = re.WriteRune(c)
		default:
			_, _ = re.WriteRune(c)
		}
	}
	// Flush a trailing run of stars and reject unterminated constructs
	err := insertStars()
	if err != nil {
		return nil, err
	}
	if inBrackets > 0 {
		return nil, fmt.Errorf("mismatched '[' and ']' in glob %q", glob)
	}
	if inBraces {
		return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
	}
	if inRegexp {
		return nil, fmt.Errorf("mismatched '{{' and '}}' in glob %q", glob)
	}
	if addAnchors {
		_ = re.WriteByte('$')
	}
	result, err := regexp.Compile(re.String())
	if err != nil {
		return nil, fmt.Errorf("bad glob pattern %q (regexp %q): %w", glob, re.String(), err)
	}
	return result, nil
}
var (
	// tooHardRe matches globs which globToDirGlobs can't decompose:
	//   / or ** inside {}
	//   {{ regexp }} rules
	tooHardRe = regexp.MustCompile(`({[^{}]*(\*\*|/)[^{}]*})|\{\{|\}\}`)
	// squashSlash matches runs of 2 or more / for squashing into one
	squashSlash = regexp.MustCompile(`/{2,}`)
)

// globToDirGlobs takes a file glob and turns it into a series of
// directory globs. When matched with a directory (with a trailing /)
// this should answer the question as to whether this glob could be in
// this directory.
func globToDirGlobs(glob string) (out []string) {
	if tooHardRe.MatchString(glob) {
		// Can't figure this one out so return any directory might match
		fs.Infof(nil, "Can't figure out directory filters from %q: looking in all directories", glob)
		out = append(out, "/**")
		return out
	}

	// Get rid of multiple /s
	glob = squashSlash.ReplaceAllString(glob, "/")

	// Repeatedly chop the glob at its rightmost / or ** and emit a
	// directory glob for each prefix, most specific first.
	// Split on / or **
	// (** can contain /)
	for {
		i := strings.LastIndex(glob, "/")
		j := strings.LastIndex(glob, "**")
		what := ""
		if j > i {
			i = j
			what = "**" // keep the ** so the dir glob can span subdirs
		}
		if i < 0 {
			// No separators left - a bare file name could be anywhere
			if len(out) == 0 {
				out = append(out, "/**")
			}
			break
		}
		glob = glob[:i]
		newGlob := glob + what + "/"
		// Avoid emitting the same dir glob twice in a row
		if len(out) == 0 || out[len(out)-1] != newGlob {
			out = append(out, newGlob)
		}
	}
	return out
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/filter/glob_test.go | fs/filter/glob_test.go | package filter
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestGlobStringToRegexp checks glob to regexp conversion in string
// mode (no special / handling) for every combination of the addAnchors
// and ignoreCase flags. Columns: input glob, expected regexp (without
// the (?i)/anchor prefixes which are checked separately), expected
// error substring.
func TestGlobStringToRegexp(t *testing.T) {
	for _, test := range []struct {
		in    string
		want  string
		error string
	}{
		{``, ``, ``},
		{`potato`, `potato`, ``},
		{`potato,sausage`, `potato,sausage`, ``},
		{`/potato`, `/potato`, ``},
		{`potato?sausage`, `potato.sausage`, ``},
		{`potat[oa]`, `potat[oa]`, ``},
		{`potat[a-z]or`, `potat[a-z]or`, ``},
		{`potat[[:alpha:]]or`, `potat[[:alpha:]]or`, ``},
		{`'.' '+' '(' ')' '|' '^' '$'`, `'\.' '\+' '\(' '\)' '\|' '\^' '\$'`, ``},
		{`*.jpg`, `.*\.jpg`, ``},
		{`a{b,c,d}e`, `a(b|c|d)e`, ``},
		{`potato**`, ``, `too many stars`},
		{`potato**sausage`, ``, `too many stars`},
		{`*.p[lm]`, `.*\.p[lm]`, ``},
		{`[\[\]]`, `[\[\]]`, ``},
		{`***potato`, ``, `too many stars`},
		{`***`, ``, `too many stars`},
		{`ab]c`, ``, `mismatched ']'`},
		{`ab[c`, ``, `mismatched '[' and ']'`},
		{`ab{x{cd`, ``, `can't nest`},
		{`ab{}}cd`, ``, `mismatched '{' and '}'`},
		{`ab}c`, ``, `mismatched '{' and '}'`},
		{`ab{c`, ``, `mismatched '{' and '}'`},
		{`*.{jpg,png,gif}`, `.*\.(jpg|png|gif)`, ``},
		{`[a--b]`, ``, `bad glob pattern`},
		{`a\*b`, `a\*b`, ``},
		{`a\\b`, `a\\b`, ``},
		{`a{{.*}}b`, `a(.*)b`, ``},
		{`a{{.*}`, ``, `mismatched '{{' and '}}'`},
		{`{{regexp}}`, `(regexp)`, ``},
		{`\{{{regexp}}`, `\{(regexp)`, ``},
		{`/{{regexp}}`, `/(regexp)`, ``},
		{`/{{\d{8}}}`, `/(\d{8})`, ``},
		{`/{{\}}}`, `/(\})`, ``},
		{`{{(?i)regexp}}`, `((?i)regexp)`, ``},
	} {
		for _, ignoreCase := range []bool{false, true} {
			for _, addAnchors := range []bool{false, true} {
				gotRe, err := GlobStringToRegexp(test.in, addAnchors, ignoreCase)
				if test.error == "" {
					require.NoError(t, err, test.in)
					// Build the expected prefix/suffix from the flags
					prefix := ""
					suffix := ""
					if ignoreCase {
						prefix += "(?i)"
					}
					if addAnchors {
						prefix += "^"
						suffix += "$"
					}
					got := gotRe.String()
					assert.Equal(t, prefix+test.want+suffix, got, test.in)
				} else {
					require.Error(t, err, test.in)
					assert.Contains(t, err.Error(), test.error, test.in)
					assert.Nil(t, gotRe)
				}
			}
		}
	}
}
// TestGlobPathToRegexp checks glob to regexp conversion in path mode:
// globs are anchored (rooted with ^ when starting with /, otherwise
// prefixed (^|/)), * stays within a path segment and ** crosses /.
// Columns: input glob, expected regexp (without any (?i) prefix),
// expected error substring.
func TestGlobPathToRegexp(t *testing.T) {
	for _, test := range []struct {
		in    string
		want  string
		error string
	}{
		{``, `(^|/)$`, ``},
		{`potato`, `(^|/)potato$`, ``},
		{`potato,sausage`, `(^|/)potato,sausage$`, ``},
		{`/potato`, `^potato$`, ``},
		{`potato?sausage`, `(^|/)potato[^/]sausage$`, ``},
		{`potat[oa]`, `(^|/)potat[oa]$`, ``},
		{`potat[a-z]or`, `(^|/)potat[a-z]or$`, ``},
		{`potat[[:alpha:]]or`, `(^|/)potat[[:alpha:]]or$`, ``},
		{`'.' '+' '(' ')' '|' '^' '$'`, `(^|/)'\.' '\+' '\(' '\)' '\|' '\^' '\$'$`, ``},
		{`*.jpg`, `(^|/)[^/]*\.jpg$`, ``},
		{`a{b,c,d}e`, `(^|/)a(b|c|d)e$`, ``},
		{`potato**`, `(^|/)potato.*$`, ``},
		{`potato**sausage`, `(^|/)potato.*sausage$`, ``},
		{`*.p[lm]`, `(^|/)[^/]*\.p[lm]$`, ``},
		{`[\[\]]`, `(^|/)[\[\]]$`, ``},
		{`***potato`, ``, `too many stars`},
		{`***`, ``, `too many stars`},
		{`ab]c`, ``, `mismatched ']'`},
		{`ab[c`, ``, `mismatched '[' and ']'`},
		{`ab{x{cd`, ``, `can't nest`},
		{`ab{}}cd`, ``, `mismatched '{' and '}'`},
		{`ab}c`, ``, `mismatched '{' and '}'`},
		{`ab{c`, ``, `mismatched '{' and '}'`},
		{`*.{jpg,png,gif}`, `(^|/)[^/]*\.(jpg|png|gif)$`, ``},
		{`[a--b]`, ``, `bad glob pattern`},
		{`a\*b`, `(^|/)a\*b$`, ``},
		{`a\\b`, `(^|/)a\\b$`, ``},
		{`a{{.*}}b`, `(^|/)a(.*)b$`, ``},
		{`a{{.*}`, ``, `mismatched '{{' and '}}'`},
		{`{{regexp}}`, `(^|/)(regexp)$`, ``},
		{`\{{{regexp}}`, `(^|/)\{(regexp)$`, ``},
		{`/{{regexp}}`, `^(regexp)$`, ``},
		{`/{{\d{8}}}`, `^(\d{8})$`, ``},
		{`/{{\}}}`, `^(\})$`, ``},
		{`{{(?i)regexp}}`, `(^|/)((?i)regexp)$`, ``},
	} {
		for _, ignoreCase := range []bool{false, true} {
			gotRe, err := GlobPathToRegexp(test.in, ignoreCase)
			if test.error == "" {
				require.NoError(t, err, test.in)
				prefix := ""
				if ignoreCase {
					prefix = "(?i)"
				}
				got := gotRe.String()
				assert.Equal(t, prefix+test.want, got, test.in)
			} else {
				require.Error(t, err, test.in)
				assert.Contains(t, err.Error(), test.error, test.in)
				assert.Nil(t, gotRe)
			}
		}
	}
}
// TestGlobToDirGlobs checks the decomposition of a file glob into the
// directory globs which could contain matching files, most specific
// first. "Too hard" globs (regexp rules, / or ** inside {}) fall back
// to "/**".
func TestGlobToDirGlobs(t *testing.T) {
	for _, test := range []struct {
		in   string
		want []string
	}{
		{`*`, []string{"/**"}},
		{`/*`, []string{"/"}},
		{`*.jpg`, []string{"/**"}},
		{`/*.jpg`, []string{"/"}},
		{`//*.jpg`, []string{"/"}},
		{`///*.jpg`, []string{"/"}},
		{`/a/*.jpg`, []string{"/a/", "/"}},
		{`/a//*.jpg`, []string{"/a/", "/"}},
		{`/a///*.jpg`, []string{"/a/", "/"}},
		{`/a/b/*.jpg`, []string{"/a/b/", "/a/", "/"}},
		{`a/*.jpg`, []string{"a/"}},
		{`a/b/*.jpg`, []string{"a/b/", "a/"}},
		{`*/*/*.jpg`, []string{"*/*/", "*/"}},
		{`a/b/`, []string{"a/b/", "a/"}},
		{`a/b`, []string{"a/"}},
		{`a/b/*.{jpg,png,gif}`, []string{"a/b/", "a/"}},
		{`/a/{jpg,png,gif}/*.{jpg,png,gif}`, []string{"/a/{jpg,png,gif}/", "/a/", "/"}},
		{`a/{a,a*b,a**c}/d/`, []string{"/**"}},
		{`/a/{a,a*b,a/c,d}/d/`, []string{"/**"}},
		{`/a/{{.*}}/d/`, []string{"/**"}},
		{`**`, []string{"**/"}},
		{`a**`, []string{"a**/"}},
		{`a**b`, []string{"a**/"}},
		{`a**b**c**d`, []string{"a**b**c**/", "a**b**/", "a**/"}},
		{`a**b/c**d`, []string{"a**b/c**/", "a**b/", "a**/"}},
		{`/A/a**b/B/c**d/C/`, []string{"/A/a**b/B/c**d/C/", "/A/a**b/B/c**d/", "/A/a**b/B/c**/", "/A/a**b/B/", "/A/a**b/", "/A/a**/", "/A/", "/"}},
		{`/var/spool/**/ncw`, []string{"/var/spool/**/", "/var/spool/", "/var/", "/"}},
		{`var/spool/**/ncw/`, []string{"var/spool/**/ncw/", "var/spool/**/", "var/spool/", "var/"}},
		{"/file1.jpg", []string{`/`}},
		{"/file2.png", []string{`/`}},
		{"/*.jpg", []string{`/`}},
		{"/*.png", []string{`/`}},
		{"/potato", []string{`/`}},
		{"/sausage1", []string{`/`}},
		{"/sausage2*", []string{`/`}},
		{"/sausage3**", []string{`/sausage3**/`, "/"}},
		{"/a/*.jpg", []string{`/a/`, "/"}},
	} {
		// Every test input must itself be a valid path glob
		_, err := GlobPathToRegexp(test.in, false)
		assert.NoError(t, err)
		got := globToDirGlobs(test.in)
		assert.Equal(t, test.want, got, test.in)
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/filter/filterflags/filterflags.go | fs/filter/filterflags/filterflags.go | // Package filterflags implements command line flags to set up a filter
package filterflags
import (
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/filter"
"github.com/spf13/pflag"
)
// AddFlags adds the filtering flags (include/exclude rules etc.)
// defined by filter.OptionsInfo to the given flag set. These are the
// non-filesystem-specific flags.
func AddFlags(flagSet *pflag.FlagSet) {
	flags.AddFlagsFromOptions(flagSet, "", filter.OptionsInfo)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/logger/logger_test.go | fs/logger/logger_test.go | //go:build !plan9
package logger_test
import (
"path/filepath"
"testing"
"github.com/rclone/rclone/fs/logger"
"github.com/rogpeppe/go-internal/testscript"
)
// TestMain drives the tests
//
// It registers an in-process "rclone" command with the testscript
// package so the scripts under testdata/script can run it.
func TestMain(m *testing.M) {
	// This enables the testscript package. See:
	// https://bitfieldconsulting.com/golang/cli-testing
	// https://pkg.go.dev/github.com/rogpeppe/go-internal@v1.11.0/testscript
	testscript.Main(m, map[string]func(){
		"rclone": logger.Main,
	})
}

// TestLogger runs every script in testdata/script, exporting SRC and
// DST environment variables for the scripts to use ($WORK is
// presumably expanded by testscript to the per-script work directory -
// see the testscript docs).
func TestLogger(t *testing.T) {
	// Usage: https://bitfieldconsulting.com/golang/cli-testing
	testscript.Run(t, testscript.Params{
		Dir: "testdata/script",
		Setup: func(env *testscript.Env) error {
			env.Setenv("SRC", filepath.Join("$WORK", "src"))
			env.Setenv("DST", filepath.Join("$WORK", "dst"))
			return nil
		},
	})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/logger/logger.go | fs/logger/logger.go | // Package logger implements testing for the sync (and bisync) logger
package logger
import (
_ "github.com/rclone/rclone/backend/all" // import all backends
"github.com/rclone/rclone/cmd"
_ "github.com/rclone/rclone/cmd/all" // import all commands
_ "github.com/rclone/rclone/lib/plugin" // import plugins
)
// Main enables the testscript package. See:
// https://bitfieldconsulting.com/golang/cli-testing
// https://pkg.go.dev/github.com/rogpeppe/go-internal@v1.11.0/testscript
//
// It runs the full rclone command line (all backends, commands and
// plugins are imported above for their registration side effects).
func Main() {
	cmd.Main()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/hash/hash.go | fs/hash/hash.go | // Package hash provides hash utilities for Fs.
package hash
import (
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"hash"
"hash/crc32"
"io"
"strings"
"github.com/jzelinskie/whirlpool"
"github.com/zeebo/blake3"
"github.com/zeebo/xxh3"
)
// Type indicates a standard hashing algorithm
type Type int

// hashDefinition describes one registered hash function.
type hashDefinition struct {
	width    int              // width of the hex digest in characters
	name     string           // canonical lower case name, e.g. "md5"
	alias    string           // display alias, e.g. "MD5"
	newFunc  func() hash.Hash // constructor for a fresh hasher
	hashType Type             // bit assigned to this hash for use in a Set
}

// Registries filled in via RegisterHash.
var (
	type2hash  = map[Type]*hashDefinition{}   // lookup by Type bit
	name2hash  = map[string]*hashDefinition{} // lookup by lower case name
	alias2hash = map[string]*hashDefinition{} // lookup by alias
	supported  = []Type{}                     // all registered types in registration order
)

// RegisterHash adds a new Hash to the list and returns its Type
//
// Each registration is assigned the next free bit
// (1 << number of hashes registered so far) so Types combine into a Set.
func RegisterHash(name, alias string, width int, newFunc func() hash.Hash) Type {
	hashType := Type(1 << len(supported))
	supported = append(supported, hashType)

	definition := &hashDefinition{
		name:     name,
		alias:    alias,
		width:    width,
		newFunc:  newFunc,
		hashType: hashType,
	}
	type2hash[hashType] = definition
	name2hash[name] = definition
	alias2hash[alias] = definition

	return hashType
}
// SupportOnly makes the hash package only support the types passed
// in. Used for testing.
//
// It returns the previously supported types.
//
// NOTE(review): the parameter name shadows the builtin new() inside
// this function.
func SupportOnly(new []Type) (old []Type) {
	old = supported
	supported = new
	return old
}

// ErrUnsupported should be returned by filesystem,
// if it is requested to deliver an unsupported hash type.
var ErrUnsupported = errors.New("hash type not supported")

// The hash Types registered in init below. Each is a single bit so
// they can be combined in a Set; None (the zero value) means no hashes.
var (
	// None indicates no hashes are supported
	None Type
	// MD5 indicates MD5 support
	MD5 Type
	// SHA1 indicates SHA-1 support
	SHA1 Type
	// Whirlpool indicates Whirlpool support
	Whirlpool Type
	// CRC32 indicates CRC-32 support
	CRC32 Type
	// SHA256 indicates SHA-256 support
	SHA256 Type
	// SHA512 indicates SHA-512 support
	SHA512 Type
	// BLAKE3 indicates BLAKE3 support
	BLAKE3 Type
	// XXH3 indicates XXH3 support, also known as XXH3-64, a variant of xxHash
	XXH3 Type
	// XXH128 indicates XXH128 support, also known as XXH3-128, a variant of xxHash
	XXH128 Type
)
// xxh128Hasher adapts xxh3.Hasher so that Sum reports the 128 bit
// digest (XXH3-128) instead of the embedded 64 bit sum.
type xxh128Hasher struct {
	xxh3.Hasher
}

// Sum appends the 128 bit digest of the data written so far to b and
// returns the resulting slice, overriding the embedded 64 bit Sum.
//
// Appending to b (rather than returning the digest alone) is required
// by the hash.Hash interface contract: "Sum appends the current hash
// to b". The previous implementation ignored b; callers in this
// package pass nil so their behaviour is unchanged.
func (h *xxh128Hasher) Sum(b []byte) []byte {
	buf := h.Sum128().Bytes()
	return append(b, buf[:]...)
}
// init registers the standard hashes. The registration order fixes the
// bit assigned to each Type, so new hashes must be appended at the end.
func init() {
	MD5 = RegisterHash("md5", "MD5", 32, md5.New)
	SHA1 = RegisterHash("sha1", "SHA-1", 40, sha1.New)
	Whirlpool = RegisterHash("whirlpool", "Whirlpool", 128, whirlpool.New)
	CRC32 = RegisterHash("crc32", "CRC-32", 8, func() hash.Hash { return crc32.NewIEEE() })
	SHA256 = RegisterHash("sha256", "SHA-256", 64, sha256.New)
	SHA512 = RegisterHash("sha512", "SHA-512", 128, sha512.New)
	BLAKE3 = RegisterHash("blake3", "BLAKE3", 64, func() hash.Hash { return blake3.New() })
	XXH3 = RegisterHash("xxh3", "XXH3", 16, func() hash.Hash { return xxh3.New() })
	XXH128 = RegisterHash("xxh128", "XXH128", 32, func() hash.Hash { return &xxh128Hasher{} })
}

// Supported returns a set of all the supported hashes by
// HashStream and MultiHasher.
func Supported() Set {
	return NewHashSet(supported...)
}

// Width returns the width in characters for any HashType
//
// With base64Encoded true it returns the width of the URL-safe base64
// encoding of the raw digest (width/2 bytes) instead of the hex width.
// Unknown types return 0.
func Width(hashType Type, base64Encoded bool) int {
	if hash := type2hash[hashType]; hash != nil {
		if base64Encoded {
			// width is hex characters, so the raw digest is width/2 bytes
			return base64.URLEncoding.EncodedLen(hash.width / 2)
		}
		return hash.width
	}
	return 0
}
// Stream will calculate hashes of all supported hash types.
func Stream(r io.Reader) (map[Type]string, error) {
	return StreamTypes(r, Supported())
}

// StreamTypes will calculate hashes of the requested hash types.
//
// The reader is consumed exactly once and fanned out to all hashers
// through an io.MultiWriter; results are returned hex encoded.
func StreamTypes(r io.Reader, set Set) (map[Type]string, error) {
	hashers, err := fromTypes(set)
	if err != nil {
		return nil, err
	}

	_, err = io.Copy(toMultiWriter(hashers), r)
	if err != nil {
		return nil, err
	}
	var ret = make(map[Type]string)
	for k, v := range hashers {
		ret[k] = hex.EncodeToString(v.Sum(nil))
	}
	return ret, nil
}
// String returns a string representation of the hash type.
// The function will panic if the hash type is unknown.
func (h Type) String() string {
	if h == None {
		return "none"
	}
	if hash := type2hash[h]; hash != nil {
		return hash.name
	}
	panic(fmt.Sprintf("internal error: unknown hash type: 0x%x", int(h)))
}

// Set a Type from a flag.
// Both name and alias are accepted.
//
// Name matching is case insensitive (the input is lower cased first);
// alias matching is exact.
func (h *Type) Set(s string) error {
	if s == "none" || s == "None" {
		*h = None
		return nil
	}
	if hash := name2hash[strings.ToLower(s)]; hash != nil {
		*h = hash.hashType
		return nil
	}
	if hash := alias2hash[s]; hash != nil {
		*h = hash.hashType
		return nil
	}
	return fmt.Errorf("unknown hash type %q", s)
}

// Type of the value (flag.Value style method).
func (h Type) Type() string {
	return "string"
}
// fromTypes will return hashers for all the requested types.
// The types must be a subset of SupportedHashes,
// and this function must support all types.
func fromTypes(set Set) (map[Type]hash.Hash, error) {
	if !set.SubsetOf(Supported()) {
		return nil, fmt.Errorf("requested set %08x contains unknown hash types", int(set))
	}
	hashers := map[Type]hash.Hash{}
	for _, t := range set.Array() {
		hash := type2hash[t]
		if hash == nil {
			// Can't happen after the SubsetOf check above
			panic(fmt.Sprintf("internal error: Unsupported hash type %v", t))
		}
		hashers[t] = hash.newFunc()
	}

	return hashers, nil
}

// toMultiWriter will return a set of hashers into a
// single multiwriter, where one write will update all
// the hashers.
func toMultiWriter(h map[Type]hash.Hash) io.Writer {
	// Convert to a slice
	var w = make([]io.Writer, 0, len(h))
	for _, v := range h {
		w = append(w, v)
	}
	return io.MultiWriter(w...)
}
// A MultiHasher will construct various hashes on
// all incoming writes.
type MultiHasher struct {
w io.Writer
size int64
h map[Type]hash.Hash // Hashes
}
// NewMultiHasher will return a hash writer that will write all
// supported hash types.
func NewMultiHasher() *MultiHasher {
h, err := NewMultiHasherTypes(Supported())
if err != nil {
panic("internal error: could not create multihasher")
}
return h
}
// NewMultiHasherTypes will return a hash writer that will write
// the requested hash types.
func NewMultiHasherTypes(set Set) (*MultiHasher, error) {
hashers, err := fromTypes(set)
if err != nil {
return nil, err
}
m := MultiHasher{h: hashers, w: toMultiWriter(hashers)}
return &m, nil
}
func (m *MultiHasher) Write(p []byte) (n int, err error) {
n, err = m.w.Write(p)
m.size += int64(n)
return n, err
}
// Sums returns the sums of all accumulated hashes as hex encoded
// strings.
func (m *MultiHasher) Sums() map[Type]string {
dst := make(map[Type]string)
for k, v := range m.h {
dst[k] = hex.EncodeToString(v.Sum(nil))
}
return dst
}
// Sum returns the specified hash from the multihasher
func (m *MultiHasher) Sum(hashType Type) ([]byte, error) {
	hasher, found := m.h[hashType]
	if !found {
		return nil, ErrUnsupported
	}
	return hasher.Sum(nil), nil
}
// SumString returns the specified hash from the multihasher as a hex or base64 encoded string
func (m *MultiHasher) SumString(hashType Type, base64Encoded bool) (string, error) {
	sum, err := m.Sum(hashType)
	switch {
	case err != nil:
		return "", err
	case base64Encoded:
		return base64.URLEncoding.EncodeToString(sum), nil
	default:
		return hex.EncodeToString(sum), nil
	}
}
// Size returns the total number of bytes written to the MultiHasher.
func (m *MultiHasher) Size() int64 {
	return m.size
}
// A Set indicates one or more hash types, stored as a bitmask with
// one bit per Type.
type Set int
// NewHashSet will create a new hash set with the hash types supplied
func NewHashSet(t ...Type) Set {
	set := Set(None)
	return set.Add(t...)
}
// Add one or more hash types to the set.
// Returns the modified hash set.
func (h *Set) Add(t ...Type) Set {
	for _, hashType := range t {
		*h |= Set(hashType)
	}
	return *h
}
// Contains returns true if the set contains the given hash type.
func (h Set) Contains(t Type) bool {
	return int(h)&int(t) != 0
}
// Overlap returns the overlapping hash types
func (h Set) Overlap(t Set) Set {
	// Sets are bitmasks, so the overlap is a plain bitwise AND
	return h & t
}
// SubsetOf will return true if all types of h
// is present in the set c
func (h Set) SubsetOf(c Set) bool {
	// h is a subset of c iff h has no bits set outside of c
	return h&^c == 0
}
// GetOne will return a hash type.
// Currently the first is returned, but it could be
// improved to return the strongest.
func (h Set) GetOne() Type {
	v := int(h)
	for i := uint(0); v != 0; i++ {
		if v&1 != 0 {
			// Return the lowest set bit as a Type
			return Type(1 << i)
		}
		v >>= 1
	}
	return None
}
// Array returns an array of all hash types in the set
func (h Set) Array() (ht []Type) {
	// Walk the bits from least to most significant, emitting a Type
	// for every bit that is set.
	for v, i := int(h), uint(0); v != 0; v, i = v>>1, i+1 {
		if v&1 != 0 {
			ht = append(ht, Type(1<<i))
		}
	}
	return ht
}
// Count returns the number of hash types in the set
func (h Set) Count() int {
	// Kernighan's bit count: v &= v - 1 clears the lowest set bit,
	// so the loop runs once per set bit.
	count := 0
	for v := uint64(h); v != 0; v &= v - 1 {
		count++
	}
	return count
}
// String returns a string representation of the hash set.
// The function will panic if it contains an unknown type.
func (h Set) String() string {
	types := h.Array()
	names := make([]string, 0, len(types))
	for _, t := range types {
		names = append(names, t.String())
	}
	return "[" + strings.Join(names, ", ") + "]"
}
// Equals checks to see if src == dst, but ignores empty strings
// and returns true if either is empty.
func Equals(src, dst string) bool {
	// Only compare when both sums are actually present
	bothPresent := src != "" && dst != ""
	return !bothPresent || src == dst
}
// HelpString returns help message with supported hashes
func HelpString(indent int) string {
	pad := strings.Repeat(" ", indent)
	var b strings.Builder
	fmt.Fprintf(&b, "%sSupported hashes are:\n", pad)
	for _, h := range supported {
		fmt.Fprintf(&b, "%s- %v\n", pad, h.String())
	}
	return b.String()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/hash/hash_test.go | fs/hash/hash_test.go | package hash_test
import (
"bytes"
"fmt"
"io"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check at compile time that hash.Type satisfies the pflag.Value interface
var _ pflag.Value = (*hash.Type)(nil)
// TestHashSet exercises the hash.Set bitmask operations: Add, Count,
// GetOne, Array, Overlap, SubsetOf and Contains.
func TestHashSet(t *testing.T) {
	var h hash.Set
	// The zero value is the empty set
	assert.Equal(t, 0, h.Count())
	a := h.Array()
	assert.Len(t, a, 0)
	h = h.Add(hash.MD5)
	fs.Log(nil, fmt.Sprint(h))
	assert.Equal(t, 1, h.Count())
	assert.Equal(t, hash.MD5, h.GetOne())
	a = h.Array()
	assert.Len(t, a, 1)
	assert.Equal(t, a[0], hash.MD5)
	// Test overlap, with all hashes
	h = h.Overlap(hash.Supported())
	assert.Equal(t, 1, h.Count())
	assert.Equal(t, hash.MD5, h.GetOne())
	assert.True(t, h.SubsetOf(hash.Supported()))
	assert.True(t, h.SubsetOf(hash.NewHashSet(hash.MD5)))
	h = h.Add(hash.SHA1)
	assert.Equal(t, 2, h.Count())
	// GetOne returns an arbitrary member of the set
	one := h.GetOne()
	if !(one == hash.MD5 || one == hash.SHA1) {
		t.Fatalf("expected to be either MD5 or SHA1, got %v", one)
	}
	assert.True(t, h.SubsetOf(hash.Supported()))
	assert.False(t, h.SubsetOf(hash.NewHashSet(hash.MD5)))
	assert.False(t, h.SubsetOf(hash.NewHashSet(hash.SHA1)))
	assert.True(t, h.SubsetOf(hash.NewHashSet(hash.MD5, hash.SHA1)))
	a = h.Array()
	assert.Len(t, a, 2)
	ol := h.Overlap(hash.NewHashSet(hash.MD5))
	assert.Equal(t, 1, ol.Count())
	assert.True(t, ol.Contains(hash.MD5))
	assert.False(t, ol.Contains(hash.SHA1))
	ol = h.Overlap(hash.NewHashSet(hash.MD5, hash.SHA1))
	assert.Equal(t, 2, ol.Count())
	assert.True(t, ol.Contains(hash.MD5))
	assert.True(t, ol.Contains(hash.SHA1))
}
// hashTest describes one test case: an input and the expected digest
// for each hash type.
type hashTest struct {
	input  []byte
	output map[hash.Type]string
}
// hashTestSet holds the test vectors. The second entry checks the
// well-known digests of the empty input.
var hashTestSet = []hashTest{
	{
		input: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
		output: map[hash.Type]string{
			hash.MD5:       "bf13fc19e5151ac57d4252e0e0f87abe",
			hash.SHA1:      "3ab6543c08a75f292a5ecedac87ec41642d12166",
			hash.Whirlpool: "eddf52133d4566d763f716e853d6e4efbabd29e2c2e63f56747b1596172851d34c2df9944beb6640dbdbe3d9b4eb61180720a79e3d15baff31c91e43d63869a4",
			hash.CRC32:     "a6041d7e",
			hash.SHA256:    "c839e57675862af5c21bd0a15413c3ec579e0d5522dab600bc6c3489b05b8f54",
			hash.SHA512:    "008e7e9b5d94d37bf5e07c955890f730f137a41b8b0db16cb535a9b4cb5632c2bccff31685ec470130fe10e2258a0ab50ab587472258f3132ccf7d7d59fb91db",
			hash.BLAKE3:    "0a7276a407a3be1b4d31488318ee05a335aad5a3b82c4420e592a8178c9e86bb",
			hash.XXH3:      "4b83b0c51c543525",
			hash.XXH128:    "438de241a57d684214f67657f7aad93b",
		},
	},
	// Empty data set
	{
		input: []byte{},
		output: map[hash.Type]string{
			hash.MD5:       "d41d8cd98f00b204e9800998ecf8427e",
			hash.SHA1:      "da39a3ee5e6b4b0d3255bfef95601890afd80709",
			hash.Whirlpool: "19fa61d75522a4669b44e39c1d2e1726c530232130d407f89afee0964997f7a73e83be698b288febcf88e3e03c4f0757ea8964e59b63d93708b138cc42a66eb3",
			hash.CRC32:     "00000000",
			hash.SHA256:    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			hash.SHA512:    "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e",
			hash.BLAKE3:    "af1349b9f5f9a1a6a0404dea36dcc9499bcb25c9adc112b7cc9a93cae41f3262",
			hash.XXH3:      "2d06800538d394c2",
			hash.XXH128:    "99aa06d3014798d86001c324468d497f",
		},
	},
}
// TestMultiHasher checks that a default MultiHasher produces the
// expected digest for every supported hash type, in both directions.
func TestMultiHasher(t *testing.T) {
	for _, test := range hashTestSet {
		mh := hash.NewMultiHasher()
		n, err := io.Copy(mh, bytes.NewBuffer(test.input))
		require.NoError(t, err)
		assert.Len(t, test.input, int(n))
		sums := mh.Sums()
		for k, v := range sums {
			expect, ok := test.output[k]
			require.True(t, ok, "test output for hash not found")
			assert.Equal(t, expect, v)
		}
		// Test that all are present
		for k, v := range test.output {
			expect, ok := sums[k]
			require.True(t, ok, "test output for hash not found")
			assert.Equal(t, expect, v)
		}
	}
}
// TestMultiHasherTypes checks that a MultiHasher restricted to a
// single type (SHA1) produces exactly one, correct, sum.
func TestMultiHasherTypes(t *testing.T) {
	h := hash.SHA1
	for _, test := range hashTestSet {
		mh, err := hash.NewMultiHasherTypes(hash.NewHashSet(h))
		if err != nil {
			t.Fatal(err)
		}
		n, err := io.Copy(mh, bytes.NewBuffer(test.input))
		require.NoError(t, err)
		assert.Len(t, test.input, int(n))
		sums := mh.Sums()
		assert.Len(t, sums, 1)
		assert.Equal(t, sums[h], test.output[h])
	}
}
// TestHashStream checks hash.Stream returns all supported sums for a
// reader, comparing both ways so no hash type is missing or extra.
func TestHashStream(t *testing.T) {
	for _, test := range hashTestSet {
		sums, err := hash.Stream(bytes.NewBuffer(test.input))
		require.NoError(t, err)
		for k, v := range sums {
			expect, ok := test.output[k]
			require.True(t, ok)
			assert.Equal(t, v, expect)
		}
		// Test that all are present
		for k, v := range test.output {
			expect, ok := sums[k]
			require.True(t, ok)
			assert.Equal(t, v, expect)
		}
	}
}
// TestHashStreamTypes checks hash.StreamTypes with a single requested
// hash type (SHA1).
func TestHashStreamTypes(t *testing.T) {
	h := hash.SHA1
	for _, test := range hashTestSet {
		sums, err := hash.StreamTypes(bytes.NewBuffer(test.input), hash.NewHashSet(h))
		require.NoError(t, err)
		assert.Len(t, sums, 1)
		assert.Equal(t, sums[h], test.output[h])
	}
}
// TestHashSetStringer checks Set.String renders sorted, bracketed
// lists, including the empty set.
func TestHashSetStringer(t *testing.T) {
	h := hash.NewHashSet(hash.SHA1, hash.MD5)
	assert.Equal(t, "[md5, sha1]", h.String())
	h = hash.NewHashSet(hash.SHA1)
	assert.Equal(t, "[sha1]", h.String())
	h = hash.NewHashSet()
	assert.Equal(t, "[]", h.String())
}
// TestHashStringer checks the string names of individual hash types.
func TestHashStringer(t *testing.T) {
	h := hash.MD5
	assert.Equal(t, "md5", h.String())
	h = hash.SHA1
	assert.Equal(t, "sha1", h.String())
	h = hash.None
	assert.Equal(t, "none", h.String())
}
// TestHashSetter checks Type.Set parses the accepted (case-insensitive)
// spellings of hash names and rejects unknown ones.
func TestHashSetter(t *testing.T) {
	var ht hash.Type
	assert.NoError(t, ht.Set("none"))
	assert.Equal(t, hash.None, ht)
	assert.NoError(t, ht.Set("None"))
	assert.Equal(t, hash.None, ht)
	assert.NoError(t, ht.Set("md5"))
	assert.Equal(t, hash.MD5, ht)
	assert.NoError(t, ht.Set("MD5"))
	assert.Equal(t, hash.MD5, ht)
	assert.NoError(t, ht.Set("sha1"))
	assert.Equal(t, hash.SHA1, ht)
	assert.NoError(t, ht.Set("SHA-1"))
	assert.Equal(t, hash.SHA1, ht)
	assert.NoError(t, ht.Set("SHA1"))
	assert.Equal(t, hash.SHA1, ht)
	assert.NoError(t, ht.Set("Sha1"))
	assert.Equal(t, hash.SHA1, ht)
	// "Sha-1" is not one of the accepted spellings
	assert.Error(t, ht.Set("Sha-1"))
}
// TestHashTypeStability pins the numeric values of the hash type
// constants, which must not change between releases.
func TestHashTypeStability(t *testing.T) {
	assert.Equal(t, hash.Type(0), hash.None)
	assert.Equal(t, hash.Type(1), hash.MD5)
	assert.Equal(t, hash.Type(2), hash.SHA1)
	assert.True(t, hash.Supported().Contains(hash.MD5))
	assert.True(t, hash.Supported().Contains(hash.SHA1))
	assert.False(t, hash.Supported().Contains(hash.None))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/dirtree/dirtree_test.go | fs/dirtree/dirtree_test.go | package dirtree
import (
"fmt"
"testing"
"github.com/rclone/rclone/fstest/mockdir"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNew checks that a fresh DirTree renders as the empty string.
func TestNew(t *testing.T) {
	dt := New()
	assert.Equal(t, "", dt.String())
}
// TestParentDir checks parentDir including the top-level and empty cases.
func TestParentDir(t *testing.T) {
	assert.Equal(t, "root/parent", parentDir("root/parent/file"))
	assert.Equal(t, "parent", parentDir("parent/file"))
	assert.Equal(t, "", parentDir("parent"))
	assert.Equal(t, "", parentDir(""))
}
// TestDirTreeAdd checks Add places objects under their parent
// directory without creating intermediate parents.
func TestDirTreeAdd(t *testing.T) {
	dt := New()
	o := mockobject.New("potato")
	dt.Add(o)
	assert.Equal(t, `/
potato
`, dt.String())
	o = mockobject.New("dir/subdir/sausage")
	dt.Add(o)
	assert.Equal(t, `/
potato
dir/subdir/
sausage
`, dt.String())
}
// TestDirTreeAddDir checks AddDir creates the directory's own entry as
// well as listing it in its parent, and that the root dir is a no-op.
func TestDirTreeAddDir(t *testing.T) {
	dt := New()
	d := mockdir.New("potato")
	dt.Add(d)
	assert.Equal(t, `/
potato/
`, dt.String())
	d = mockdir.New("dir/subdir/sausage")
	dt.AddDir(d)
	assert.Equal(t, `/
potato/
dir/subdir/
sausage/
dir/subdir/sausage/
`, dt.String())
	// Adding the root directory should change nothing
	d = mockdir.New("")
	dt.AddDir(d)
	assert.Equal(t, `/
potato/
dir/subdir/
sausage/
dir/subdir/sausage/
`, dt.String())
}
// TestDirTreeAddEntry checks AddEntry creates all missing parent
// directories for both directory and object entries.
func TestDirTreeAddEntry(t *testing.T) {
	dt := New()
	d := mockdir.New("dir/subdir/sausagedir")
	dt.AddEntry(d)
	o := mockobject.New("dir/subdir2/sausage2")
	dt.AddEntry(o)
	assert.Equal(t, `/
dir/
dir/
subdir/
subdir2/
dir/subdir/
sausagedir/
dir/subdir/sausagedir/
dir/subdir2/
sausage2
`, dt.String())
}
// TestDirTreeFind checks Find returns the parent path with a nil entry
// when absent, and the entry itself once added.
func TestDirTreeFind(t *testing.T) {
	dt := New()
	parent, foundObj := dt.Find("dir/subdir/sausage")
	assert.Equal(t, "dir/subdir", parent)
	assert.Nil(t, foundObj)
	o := mockobject.New("dir/subdir/sausage")
	dt.Add(o)
	parent, foundObj = dt.Find("dir/subdir/sausage")
	assert.Equal(t, "dir/subdir", parent)
	assert.Equal(t, o, foundObj)
}
// TestDirTreeCheckParent checks checkParent creates the chain of
// missing parent directories up to the root.
func TestDirTreeCheckParent(t *testing.T) {
	dt := New()
	o := mockobject.New("dir/subdir/sausage")
	dt.Add(o)
	assert.Equal(t, `dir/subdir/
sausage
`, dt.String())
	dt.checkParent("", "dir/subdir", nil)
	assert.Equal(t, `/
dir/
dir/
subdir/
dir/subdir/
sausage
`, dt.String())
}
// TestDirTreeCheckParents checks CheckParents fills in parents for
// every directory in the tree.
func TestDirTreeCheckParents(t *testing.T) {
	dt := New()
	dt.Add(mockobject.New("dir/subdir/sausage"))
	dt.Add(mockobject.New("dir/subdir2/sausage2"))
	dt.CheckParents("")
	dt.Sort() // sort since the exact order of adding parents is not defined
	assert.Equal(t, `/
dir/
dir/
subdir/
subdir2/
dir/subdir/
sausage
dir/subdir2/
sausage2
`, dt.String())
}
// TestDirTreeSort checks Sort orders entries within each directory.
func TestDirTreeSort(t *testing.T) {
	dt := New()
	dt.Add(mockobject.New("dir/subdir/B"))
	dt.Add(mockobject.New("dir/subdir/A"))
	// Before sorting the entries are in insertion order
	assert.Equal(t, `dir/subdir/
B
A
`, dt.String())
	dt.Sort()
	assert.Equal(t, `dir/subdir/
A
B
`, dt.String())
}
// TestDirTreeDirs checks Dirs returns all directory paths in sorted order.
func TestDirTreeDirs(t *testing.T) {
	dt := New()
	dt.Add(mockobject.New("dir/subdir/sausage"))
	dt.Add(mockobject.New("dir/subdir2/sausage2"))
	dt.CheckParents("")
	assert.Equal(t, []string{
		"",
		"dir",
		"dir/subdir",
		"dir/subdir2",
	}, dt.Dirs())
}
// TestDirTreePrune checks Prune removes the named directory, all of its
// descendants and its entry in the parent, leaving siblings intact.
func TestDirTreePrune(t *testing.T) {
	dt := New()
	dt.Add(mockobject.New("file"))
	dt.Add(mockobject.New("dir/subdir/sausage"))
	dt.Add(mockobject.New("dir/subdir2/sausage2"))
	dt.Add(mockobject.New("dir/file"))
	dt.Add(mockobject.New("dir2/file"))
	dt.CheckParents("")
	err := dt.Prune(map[string]bool{
		"dir": true,
	})
	require.NoError(t, err)
	assert.Equal(t, `/
file
dir2/
dir2/
file
`, dt.String())
}
// BenchmarkCheckParents measures CheckParents over trees of increasing
// size (100 to 1,000,000 files, one directory per file).
func BenchmarkCheckParents(b *testing.B) {
	for _, N := range []int{1e2, 1e3, 1e4, 1e5, 1e6} {
		b.Run(fmt.Sprintf("%d", N), func(b *testing.B) {
			// Tree construction is setup, so keep it out of the timing
			b.StopTimer()
			dt := New()
			for i := range N {
				remote := fmt.Sprintf("dir%09d/file%09d.txt", i, 1)
				o := mockobject.New(remote)
				dt.Add(o)
			}
			b.StartTimer()
			for b.Loop() {
				dt.CheckParents("")
			}
		})
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/dirtree/dirtree.go | fs/dirtree/dirtree.go | // Package dirtree contains the DirTree type which is used for
// building filesystem hierarchies in memory.
package dirtree
import (
"bytes"
"fmt"
"path"
"sort"
"time"
"github.com/rclone/rclone/fs"
)
// DirTree is a map of directories to entries, keyed by the directory
// path ("" is the root).
type DirTree map[string]fs.DirEntries
// New returns a fresh DirTree
func New() DirTree {
	return DirTree{}
}
// parentDir finds the parent directory of path
func parentDir(entryPath string) string {
	// path.Dir returns "." for paths with no directory component;
	// a DirTree uses "" for the root instead.
	if dirPath := path.Dir(entryPath); dirPath != "." {
		return dirPath
	}
	return ""
}
// Add an entry to the tree
// it doesn't create parents
func (dt DirTree) Add(entry fs.DirEntry) {
	parent := parentDir(entry.Remote())
	dt[parent] = append(dt[parent], entry)
}
// AddDir adds a directory entry to the tree
// this creates the directory itself if required
// it doesn't create parents
func (dt DirTree) AddDir(entry fs.DirEntry) {
	dirPath := entry.Remote()
	// The root has no parent entry and always exists
	if dirPath == "" {
		return
	}
	dt.Add(entry)
	// create the directory itself if it doesn't exist already
	if _, found := dt[dirPath]; !found {
		dt[dirPath] = nil
	}
}
// AddEntry adds the entry and creates the parents for it regardless
// of whether it is a file or a directory.
func (dt DirTree) AddEntry(entry fs.DirEntry) {
	switch entry.(type) {
	case fs.Directory:
		dt.AddDir(entry)
	case fs.Object:
		dt.Add(entry)
	default:
		panic("unknown entry type")
	}
	// Make sure every ancestor directory exists
	dt.checkParent("", parentDir(entry.Remote()), nil)
}
// Find returns the DirEntry for filePath or nil if not found
//
// Note that Find does an O(N) search of the parent directory's
// entries, so it can be slow on large directories.
func (dt DirTree) Find(filePath string) (parentPath string, entry fs.DirEntry) {
	parentPath = parentDir(filePath)
	for _, entry := range dt[parentPath] {
		if entry.Remote() == filePath {
			return parentPath, entry
		}
	}
	// Not found - return the parent path with a nil entry
	return parentPath, nil
}
// checkParent checks that dirPath has a *Dir in its parent
//
// It walks up from dirPath towards root, inserting a new fs.Dir entry
// into each parent that doesn't already contain one.
//
// If dirs is not nil it must contain entries for every *Dir found in
// the tree. It is used to speed up the checking when calling this
// repeatedly.
func (dt DirTree) checkParent(root, dirPath string, dirs map[string]struct{}) {
	var parentPath string
	for {
		// Stop when we reach the root of the walk
		if dirPath == root {
			return
		}
		// Can rely on dirs to have all directories in it so
		// we don't need to call Find.
		if dirs != nil {
			if _, found := dirs[dirPath]; found {
				return
			}
			parentPath = parentDir(dirPath)
		} else {
			// Slow path: O(N) scan of the parent's entries
			var entry fs.DirEntry
			parentPath, entry = dt.Find(dirPath)
			if entry != nil {
				return
			}
		}
		// Create the missing directory entry in the parent
		dt[parentPath] = append(dt[parentPath], fs.NewDir(dirPath, time.Now()))
		if dirs != nil {
			dirs[dirPath] = struct{}{}
		}
		// Continue checking one level up
		dirPath = parentPath
	}
}
// CheckParents checks every directory in the tree has *Dir in its parent
func (dt DirTree) CheckParents(root string) {
	// Collect every known directory up front so checkParent can use
	// a fast set lookup instead of a linear Find.
	dirs := make(map[string]struct{})
	for _, entries := range dt {
		for _, entry := range entries {
			if _, isDir := entry.(fs.Directory); isDir {
				dirs[entry.Remote()] = struct{}{}
			}
		}
	}
	for dirPath := range dt {
		dt.checkParent(root, dirPath, dirs)
	}
}
// Sort sorts all the Entries in each directory in place, using a
// stable sort so equal entries keep their relative order.
func (dt DirTree) Sort() {
	for _, entries := range dt {
		sort.Stable(entries)
	}
}
// Dirs returns the directories in sorted order
func (dt DirTree) Dirs() []string {
	dirNames := make([]string, 0, len(dt))
	for dirPath := range dt {
		dirNames = append(dirNames, dirPath)
	}
	sort.Strings(dirNames)
	return dirNames
}
// Prune remove directories from a directory tree. dirNames contains
// all directories to remove as keys, with true as values. dirNames
// will be modified in the function.
func (dt DirTree) Prune(dirNames map[string]bool) error {
	// We use map[string]bool to avoid recursion (and potential
	// stack exhaustion).
	// First we need delete directories from their parents.
	for dName, remove := range dirNames {
		if !remove {
			// Currently all values should be
			// true, therefore this should not
			// happen. But this makes function
			// more predictable.
			fs.Infof(dName, "Directory in the map for prune, but the value is false")
			continue
		}
		if dName == "" {
			// if dName is root, do nothing (no parent exist)
			continue
		}
		parent := parentDir(dName)
		// It may happen that dt does not have a dName key,
		// since directory was excluded based on a filter. In
		// such case the loop will be skipped.
	removeFromParent:
		for i, entry := range dt[parent] {
			switch x := entry.(type) {
			case fs.Directory:
				if x.Remote() == dName {
					// the slice is not sorted yet
					// to delete item
					// a) replace it with the last one
					dt[parent][i] = dt[parent][len(dt[parent])-1]
					// b) remove last
					dt[parent] = dt[parent][:len(dt[parent])-1]
					// Stop iterating now the slice has been
					// modified - a directory appears at most
					// once in its parent. A plain "break"
					// here would only exit the switch, not
					// the loop, so break out via the label.
					break removeFromParent
				}
			case fs.Object:
				// do nothing
			default:
				return fmt.Errorf("unknown object type %T", entry)
			}
		}
	}
	for len(dirNames) > 0 {
		// According to golang specs, if new keys were added
		// during range iteration, they may be skipped.
		for dName, remove := range dirNames {
			if !remove {
				fs.Infof(dName, "Directory in the map for prune, but the value is false")
				continue
			}
			// First, add all subdirectories to dirNames.
			// It may happen that dt[dName] does not exist.
			// If so, the loop will be skipped.
			for _, entry := range dt[dName] {
				switch x := entry.(type) {
				case fs.Directory:
					excludeDir := x.Remote()
					dirNames[excludeDir] = true
				case fs.Object:
					// do nothing
				default:
					return fmt.Errorf("unknown object type %T", entry)
				}
			}
			// Then remove current directory from DirTree
			delete(dt, dName)
			// and from dirNames
			delete(dirNames, dName)
		}
	}
	return nil
}
// String emits a simple representation of the DirTree: each directory
// path followed by its entries, with directories suffixed by "/".
func (dt DirTree) String() string {
	out := new(bytes.Buffer)
	for _, dir := range dt.Dirs() {
		_, _ = fmt.Fprintf(out, "%s/\n", dir)
		for _, entry := range dt[dir] {
			flag := ""
			if _, ok := entry.(fs.Directory); ok {
				flag = "/"
			}
			_, _ = fmt.Fprintf(out, " %s%s\n", path.Base(entry.Remote()), flag)
		}
	}
	return out.String()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/cache/cache.go | fs/cache/cache.go | // Package cache implements the Fs cache
package cache
import (
"context"
"runtime"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/lib/cache"
)
var (
	once sync.Once // guards one-time creation of c
	c    *cache.Cache
	mu             sync.Mutex                  // mutex to protect remap and childParentMap
	remap          = map[string]string{}       // map user supplied names to canonical names - [fsString]canonicalName
	childParentMap = map[string]string{}       // tracks a one-to-many relationship between parent dirs and their direct children files - [child]parent
)
// createOnFirstUse creates the cache just once, configuring expiry from
// the global config and a finalizer which shuts down expiring backends.
func createOnFirstUse() {
	once.Do(func() {
		ci := fs.GetConfig(context.Background())
		c = cache.New()
		c.SetExpireDuration(time.Duration(ci.FsCacheExpireDuration))
		c.SetExpireInterval(time.Duration(ci.FsCacheExpireInterval))
		// Shut down backends cleanly when they expire from the cache
		c.SetFinalizer(func(value any) {
			if s, ok := value.(fs.Shutdowner); ok {
				_ = fs.CountError(context.Background(), s.Shutdown(context.Background()))
			}
		})
	})
}
// Canonicalize looks up fsString in the mapping from user supplied
// names to canonical names and return the canonical form
func Canonicalize(fsString string) string {
	createOnFirstUse()
	mu.Lock()
	canonicalName, found := remap[fsString]
	mu.Unlock()
	if !found {
		// No mapping - the string is already canonical as far as we know
		return fsString
	}
	fs.Debugf(nil, "fs cache: switching user supplied name %q for canonical name %q", fsString, canonicalName)
	return canonicalName
}
// addMapping puts in a mapping from fsString => canonicalName if they
// are different
func addMapping(fsString, canonicalName string) {
	if canonicalName == fsString {
		return
	}
	mu.Lock()
	defer mu.Unlock()
	remap[fsString] = canonicalName
}
// addChild tracks known file (child) to directory (parent) relationships.
// Note that the canonicalName of a child will always equal that of its parent,
// but not everything with an equal canonicalName is a child.
// It could be an alias or overridden version of a directory.
func addChild(child, parent string) {
	if child == parent {
		return
	}
	mu.Lock()
	defer mu.Unlock()
	childParentMap[child] = parent
}
// isChild returns true if name is definitely known to be a child
// (i.e. a file, not a dir).
// It returns false if name is a dir or if we don't know.
func isChild(child string) bool {
	mu.Lock()
	defer mu.Unlock()
	_, found := childParentMap[child]
	return found
}
// getError ensures that we return fs.ErrorIsFile when necessary:
// real errors pass through, and a nil/ErrorIsFile result is mapped
// according to whether fsString is a known file.
func getError(fsString string, err error) error {
	switch {
	case err != nil && err != fs.ErrorIsFile:
		return err
	case isChild(fsString):
		return fs.ErrorIsFile
	default:
		return nil
	}
}
// GetFn gets an fs.Fs named fsString either from the cache or creates
// it afresh with the create function
//
// The Fs is cached under its canonical name; fsString => canonical
// name mappings and file => parent mappings are recorded so future
// lookups (and their fs.ErrorIsFile status) resolve correctly.
func GetFn(ctx context.Context, fsString string, create func(ctx context.Context, fsString string) (fs.Fs, error)) (f fs.Fs, err error) {
	createOnFirstUse()
	canonicalFsString := Canonicalize(fsString)
	created := false
	value, err := c.Get(canonicalFsString, func(canonicalFsString string) (f any, ok bool, err error) {
		f, err = create(ctx, fsString) // always create the backend with the original non-canonicalised string
		// fs.ErrorIsFile is still a successful creation (of the parent)
		ok = err == nil || err == fs.ErrorIsFile
		created = ok
		return f, ok, err
	})
	f, ok := value.(fs.Fs)
	if err != nil && err != fs.ErrorIsFile {
		if ok {
			return f, err // for possible future uses of PutErr
		}
		return nil, err
	}
	// Check we stored the Fs at the canonical name
	if created {
		canonicalName := fs.ConfigString(f)
		if canonicalName != canonicalFsString {
			if err == nil { // it's a dir
				fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", canonicalFsString, canonicalName)
				value, found := c.Rename(canonicalFsString, canonicalName)
				if found {
					f = value.(fs.Fs)
				}
				addMapping(canonicalFsString, canonicalName)
			} else { // it's a file
				// the fs we cache is always the file's parent, never the file,
				// but we use the childParentMap to return the correct error status based on the fsString passed in.
				fs.Debugf(nil, "fs cache: renaming child cache item %q to be canonical for parent %q", canonicalFsString, canonicalName)
				value, found := c.Rename(canonicalFsString, canonicalName) // rename the file entry to parent
				if found {
					f = value.(fs.Fs) // if parent already exists, use it
				}
				Put(canonicalName, f)                        // force err == nil for the cache
				addMapping(canonicalFsString, canonicalName) // note the fsString-canonicalName connection for future lookups
				addChild(fsString, canonicalName)            // note the file-directory connection for future lookups
			}
		}
	}
	return f, getError(fsString, err) // ensure fs.ErrorIsFile is returned when necessary
}
// Pin f into the cache until Unpin is called, keyed by f's canonical
// config string.
func Pin(f fs.Fs) {
	createOnFirstUse()
	c.Pin(fs.ConfigString(f))
}
// PinUntilFinalized pins f into the cache until x is garbage collected
//
// This calls runtime.SetFinalizer on x so it shouldn't have a
// finalizer already.
func PinUntilFinalized(f fs.Fs, x any) {
	Pin(f)
	// Unpin when x is collected - the finalizer receives x but doesn't need it
	runtime.SetFinalizer(x, func(_ any) {
		Unpin(f)
	})
}
// Unpin f from the cache, keyed by f's canonical config string.
func Unpin(f fs.Fs) {
	createOnFirstUse()
	c.Unpin(fs.ConfigString(f))
}
// To avoid circular dependencies these are filled in by fs/rc/jobs/job.go
var (
	// JobGetJobID for internal use only - returns the rc job ID in ctx, if any
	JobGetJobID func(context.Context) (int64, bool)
	// JobOnFinish for internal use only - registers a callback run when the job ends
	JobOnFinish func(int64, func()) (func(), error)
)
// Get gets an fs.Fs named fsString either from the cache or creates it afresh
func Get(ctx context.Context, fsString string) (f fs.Fs, err error) {
	// If we are making a long lived backend which lives longer
	// than this request, we want to disconnect it from the
	// current context and in particular any WithCancel contexts,
	// but we want to preserve the config embedded in the context.
	newCtx := context.Background()
	newCtx = fs.CopyConfig(newCtx, ctx)
	newCtx = filter.CopyConfig(newCtx, ctx)
	f, err = GetFn(newCtx, fsString, fs.NewFs)
	if f == nil || (err != nil && err != fs.ErrorIsFile) {
		return f, err
	}
	// If this is part of an rc job then pin the backend until it finishes
	if JobOnFinish != nil && JobGetJobID != nil {
		if jobID, ok := JobGetJobID(ctx); ok {
			// fs.Debugf(f, "Pin for job %d", jobID)
			Pin(f)
			_, _ = JobOnFinish(jobID, func() {
				// fs.Debugf(f, "Unpin for job %d", jobID)
				Unpin(f)
			})
		}
	}
	return f, err
}
// GetArr gets []fs.Fs from []fsStrings either from the cache or
// creates them afresh.
//
// On the first error it returns the fs.Fs created so far along with
// that error.
func GetArr(ctx context.Context, fsStrings []string) ([]fs.Fs, error) {
	// The previous signature declared named results (f, err) which were
	// never used - the body built and returned a separate fArr variable.
	var fArr []fs.Fs
	for _, fsString := range fsStrings {
		f, err := GetFn(ctx, fsString, fs.NewFs)
		if err != nil {
			return fArr, err
		}
		fArr = append(fArr, f)
	}
	return fArr, nil
}
// PutErr puts an fs.Fs named fsString into the cache with err
//
// The Fs is stored under its canonical name, with an fsString =>
// canonical mapping added; fs.ErrorIsFile marks fsString as a file.
func PutErr(fsString string, f fs.Fs, err error) {
	createOnFirstUse()
	canonicalName := fs.ConfigString(f)
	c.PutErr(canonicalName, f, err)
	addMapping(fsString, canonicalName)
	if err == fs.ErrorIsFile {
		addChild(fsString, canonicalName)
	}
}
// Put puts an fs.Fs named fsString into the cache (with a nil error).
func Put(fsString string, f fs.Fs) {
	PutErr(fsString, f, nil)
}
// ClearConfig deletes all entries which were based on the config name passed in
//
// Returns number of entries deleted
func ClearConfig(name string) (deleted int) {
	createOnFirstUse()
	// Drop the name mappings first, then the cache entries themselves
	ClearMappingsPrefix(name)
	return c.DeletePrefix(name + ":")
}
// Clear removes everything from the cache, including the name mappings.
func Clear() {
	createOnFirstUse()
	c.Clear()
	ClearMappings()
}
// Entries returns the number of entries in the cache
func Entries() int {
	createOnFirstUse()
	return c.Entries()
}
// ClearMappings removes everything from remap and childParentMap
func ClearMappings() {
	mu.Lock()
	defer mu.Unlock()
	remap = make(map[string]string)
	childParentMap = make(map[string]string)
}
// ClearMappingsPrefix deletes all mappings to parents with given prefix
//
// Returns number of entries deleted
func ClearMappingsPrefix(prefix string) (deleted int) {
	mu.Lock()
	defer mu.Unlock()
	// Deleting from a map while ranging over it is safe in Go
	prune := func(mapping map[string]string) {
		for key, val := range mapping {
			if strings.HasPrefix(val, prefix) {
				delete(mapping, key)
				deleted++
			}
		}
	}
	prune(remap)
	prune(childParentMap)
	return deleted
}
// EntriesWithPinCount returns the number of pinned and unpinned entries in the cache
//
// Each entry is counted only once, regardless of entry.pinCount
func EntriesWithPinCount() (pinned, unpinned int) {
	createOnFirstUse()
	return c.EntriesWithPinCount()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/cache/cache_test.go | fs/cache/cache_test.go | package cache
import (
"context"
"errors"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/mockfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
	called      = 0                      // counts calls into the mock create function
	errSentinel = errors.New("an error") // returned by the mock for "mock:/error"
)
// mockNewFs returns a create function for GetFn which serves mock
// filesystems: "mock:/" is a dir, the file.txt variants return
// fs.ErrorIsFile with the parent Fs, and "mock:/error" fails.
// It asserts create is called at most once per test and registers
// a cleanup which clears the cache.
func mockNewFs(t *testing.T) func(ctx context.Context, path string) (fs.Fs, error) {
	called = 0
	create := func(ctx context.Context, path string) (f fs.Fs, err error) {
		assert.Equal(t, 0, called)
		called++
		switch path {
		case "mock:/":
			return mockfs.NewFs(ctx, "mock", "/", nil)
		case "mock:/file.txt", "mock:file.txt", "mock:/file2.txt", "mock:file2.txt":
			fMock, err := mockfs.NewFs(ctx, "mock", "/", nil)
			require.NoError(t, err)
			return fMock, fs.ErrorIsFile
		case "mock:/error":
			return nil, errSentinel
		}
		t.Fatalf("Unknown path %q", path)
		panic("unreachable")
	}
	t.Cleanup(Clear)
	return create
}
// TestGet checks a second lookup of the same remote hits the cache.
func TestGet(t *testing.T) {
	create := mockNewFs(t)
	assert.Equal(t, 0, Entries())
	f, err := GetFn(context.Background(), "mock:/", create)
	require.NoError(t, err)
	assert.Equal(t, 1, Entries())
	f2, err := GetFn(context.Background(), "mock:/", create)
	require.NoError(t, err)
	assert.Equal(t, f, f2)
}
// TestGetFile checks that looking up a file caches its parent Fs and
// consistently returns fs.ErrorIsFile for the file name.
func TestGetFile(t *testing.T) {
	defer ClearMappings()
	create := mockNewFs(t)
	assert.Equal(t, 0, Entries())
	f, err := GetFn(context.Background(), "mock:/file.txt", create)
	require.Equal(t, fs.ErrorIsFile, err)
	require.NotNil(t, f)
	assert.Equal(t, 1, Entries())
	f2, err := GetFn(context.Background(), "mock:/file.txt", create)
	require.Equal(t, fs.ErrorIsFile, err)
	require.NotNil(t, f2)
	assert.Equal(t, f, f2)
	// check it is also found when referred to by parent name
	f2, err = GetFn(context.Background(), "mock:/", create)
	require.Nil(t, err)
	require.NotNil(t, f2)
	assert.Equal(t, f, f2)
}
// TestGetFile2 is TestGetFile with the "mock:file.txt" spelling
// (no leading slash) of the file remote.
func TestGetFile2(t *testing.T) {
	defer ClearMappings()
	create := mockNewFs(t)
	assert.Equal(t, 0, Entries())
	f, err := GetFn(context.Background(), "mock:file.txt", create)
	require.Equal(t, fs.ErrorIsFile, err)
	require.NotNil(t, f)
	assert.Equal(t, 1, Entries())
	f2, err := GetFn(context.Background(), "mock:file.txt", create)
	require.Equal(t, fs.ErrorIsFile, err)
	require.NotNil(t, f2)
	assert.Equal(t, f, f2)
	// check it is also found when referred to by parent name
	f2, err = GetFn(context.Background(), "mock:/", create)
	require.Nil(t, err)
	require.NotNil(t, f2)
	assert.Equal(t, f, f2)
}
// TestGetError checks a failed create is not cached.
func TestGetError(t *testing.T) {
	create := mockNewFs(t)
	assert.Equal(t, 0, Entries())
	f, err := GetFn(context.Background(), "mock:/error", create)
	require.Equal(t, errSentinel, err)
	require.Equal(t, nil, f)
	assert.Equal(t, 0, Entries())
}
// TestPutErr checks PutErr stores the Fs with its error and that the
// entry is shared via canonicalisation.
func TestPutErr(t *testing.T) {
	create := mockNewFs(t)
	f, err := mockfs.NewFs(context.Background(), "mock", "", nil)
	require.NoError(t, err)
	assert.Equal(t, 0, Entries())
	PutErr("mock:/", f, fs.ErrorNotFoundInConfigFile)
	assert.Equal(t, 1, Entries())
	fNew, err := GetFn(context.Background(), "mock:/", create)
	require.True(t, errors.Is(err, fs.ErrorNotFoundInConfigFile))
	require.Equal(t, f, fNew)
	assert.Equal(t, 1, Entries())
	// Check canonicalisation
	PutErr("mock:/file.txt", f, fs.ErrorNotFoundInConfigFile)
	fNew, err = GetFn(context.Background(), "mock:/file.txt", create)
	require.True(t, errors.Is(err, fs.ErrorNotFoundInConfigFile))
	require.Equal(t, f, fNew)
	assert.Equal(t, 1, Entries())
}
// TestPut checks Put stores an Fs and that differently-spelled names
// canonicalise to the same single cache entry.
func TestPut(t *testing.T) {
	create := mockNewFs(t)
	f, err := mockfs.NewFs(context.Background(), "mock", "/alien", nil)
	require.NoError(t, err)
	assert.Equal(t, 0, Entries())
	Put("mock:/alien", f)
	assert.Equal(t, 1, Entries())
	fNew, err := GetFn(context.Background(), "mock:/alien", create)
	require.NoError(t, err)
	require.Equal(t, f, fNew)
	assert.Equal(t, 1, Entries())
	// Check canonicalisation
	Put("mock:/alien/", f)
	fNew, err = GetFn(context.Background(), "mock:/alien/", create)
	require.NoError(t, err)
	require.Equal(t, f, fNew)
	assert.Equal(t, 1, Entries())
}
// TestPin checks Pin/Unpin don't blow up for cached and uncached Fs.
func TestPin(t *testing.T) {
	create := mockNewFs(t)
	// Test pinning and unpinning nonexistent
	f, err := mockfs.NewFs(context.Background(), "mock", "/alien", nil)
	require.NoError(t, err)
	Pin(f)
	Unpin(f)
	// Now test pinning an existing
	f2, err := GetFn(context.Background(), "mock:/", create)
	require.NoError(t, err)
	Pin(f2)
	Unpin(f2)
}
// TestPinFile checks pinning of file remotes: children share their
// parent's single cache entry, and pin counts track that one entry.
func TestPinFile(t *testing.T) {
	defer ClearMappings()
	create := mockNewFs(t)
	// Test pinning and unpinning nonexistent
	f, err := mockfs.NewFs(context.Background(), "mock", "/file.txt", nil)
	require.NoError(t, err)
	Pin(f)
	Unpin(f)
	// Now test pinning an existing
	f2, err := GetFn(context.Background(), "mock:/file.txt", create)
	require.Equal(t, fs.ErrorIsFile, err)
	assert.Equal(t, 1, len(childParentMap))
	Pin(f2)
	assert.Equal(t, 1, Entries())
	pinned, unpinned := EntriesWithPinCount()
	assert.Equal(t, 1, pinned)
	assert.Equal(t, 0, unpinned)
	Unpin(f2)
	assert.Equal(t, 1, Entries())
	pinned, unpinned = EntriesWithPinCount()
	assert.Equal(t, 0, pinned)
	assert.Equal(t, 1, unpinned)
	// try a different child of the same parent, and parent
	// should not add additional cache items
	called = 0 // this one does create() because we haven't seen it before and don't yet know it's a file
	f3, err := GetFn(context.Background(), "mock:/file2.txt", create)
	assert.Equal(t, fs.ErrorIsFile, err)
	assert.Equal(t, 1, Entries())
	assert.Equal(t, 2, len(childParentMap))
	parent, err := GetFn(context.Background(), "mock:/", create)
	assert.NoError(t, err)
	assert.Equal(t, 1, Entries())
	assert.Equal(t, 2, len(childParentMap))
	Pin(f3)
	assert.Equal(t, 1, Entries())
	pinned, unpinned = EntriesWithPinCount()
	assert.Equal(t, 1, pinned)
	assert.Equal(t, 0, unpinned)
	Unpin(f3)
	assert.Equal(t, 1, Entries())
	pinned, unpinned = EntriesWithPinCount()
	assert.Equal(t, 0, pinned)
	assert.Equal(t, 1, unpinned)
	Pin(parent)
	assert.Equal(t, 1, Entries())
	pinned, unpinned = EntriesWithPinCount()
	assert.Equal(t, 1, pinned)
	assert.Equal(t, 0, unpinned)
	Unpin(parent)
	assert.Equal(t, 1, Entries())
	pinned, unpinned = EntriesWithPinCount()
	assert.Equal(t, 0, pinned)
	assert.Equal(t, 1, unpinned)
	// all 3 should have equal configstrings
	assert.Equal(t, fs.ConfigString(f2), fs.ConfigString(f3))
	assert.Equal(t, fs.ConfigString(f2), fs.ConfigString(parent))
}
// TestClearConfig checks that ClearConfig removes all cache entries for
// the named config section and reports how many it deleted.
func TestClearConfig(t *testing.T) {
	create := mockNewFs(t)
	assert.Equal(t, 0, Entries())
	_, err := GetFn(context.Background(), "mock:/file.txt", create)
	require.Equal(t, fs.ErrorIsFile, err)
	assert.Equal(t, 1, Entries())
	assert.Equal(t, 1, ClearConfig("mock"))
	assert.Equal(t, 0, Entries())
}
// TestClear checks that Clear empties the whole cache.
func TestClear(t *testing.T) {
	create := mockNewFs(t)
	// Create something
	_, err := GetFn(context.Background(), "mock:/", create)
	require.NoError(t, err)
	assert.Equal(t, 1, Entries())
	Clear()
	assert.Equal(t, 0, Entries())
}
// TestEntries checks that Entries reflects the number of cached remotes.
func TestEntries(t *testing.T) {
	create := mockNewFs(t)
	assert.Equal(t, 0, Entries())
	// Create something
	_, err := GetFn(context.Background(), "mock:/", create)
	require.NoError(t, err)
	assert.Equal(t, 1, Entries())
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/march/march.go | fs/march/march.go | // Package march traverses two directories in lock step
package march
import (
"cmp"
"context"
"fmt"
"path"
"slices"
"strings"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/transform"
"golang.org/x/text/unicode/norm"
)
// matchTransformFn converts a name into a form which is used for
// comparison in matchListings (e.g. unicode NFC normalisation or
// lower-casing for case insensitive destinations - see (*March).init).
type matchTransformFn func(name string) string

// listDirFn lists a single directory into callback, returning err.
// Implementations are created by (*March).makeListDir.
type listDirFn func(dir string, callback fs.ListRCallback) (err error)
// March holds the data used to traverse two Fs simultaneously,
// calling Callback for each match
type March struct {
	// parameters
	Ctx                    context.Context // context for background goroutines
	Fdst                   fs.Fs           // destination Fs
	Fsrc                   fs.Fs           // source Fs
	Dir                    string          // directory
	NoTraverse             bool            // don't traverse the destination
	SrcIncludeAll          bool            // include all files in the src, ignoring filters
	DstIncludeAll          bool            // include all files in the destination, ignoring filters
	Callback               Marcher         // object to call with results
	NoCheckDest            bool            // transfer all objects regardless without checking dst
	NoUnicodeNormalization bool            // don't normalize unicode characters in filenames
	// internal state
	srcListDir listDirFn          // function to call to list a directory in the src
	dstListDir listDirFn          // function to call to list a directory in the dst
	transforms []matchTransformFn // name transforms applied before comparing listings
}
// Marcher is called on each match
type Marcher interface {
	// SrcOnly is called for a DirEntry found only in the source.
	// Return recurse=true to descend into it if it is a directory.
	SrcOnly(src fs.DirEntry) (recurse bool)
	// DstOnly is called for a DirEntry found only in the destination.
	// Return recurse=true to descend into it if it is a directory.
	DstOnly(dst fs.DirEntry) (recurse bool)
	// Match is called for a DirEntry found both in the source and destination.
	// Return recurse=true to descend into matching directories.
	Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool)
}
// init sets up a march over opt.Fsrc, and opt.Fdst calling back callback for each match
// Note: this will flag filter-aware backends on the source side
func (m *March) init(ctx context.Context) {
	ci := fs.GetConfig(ctx)
	m.srcListDir = m.makeListDir(ctx, m.Fsrc, m.SrcIncludeAll, m.srcKey)
	// With NoTraverse the destination is never listed, so no dst lister
	// is needed.
	if !m.NoTraverse {
		m.dstListDir = m.makeListDir(ctx, m.Fdst, m.DstIncludeAll, m.dstKey)
	}
	// Now create the matching transform
	// ..normalise the UTF8 first
	if !m.NoUnicodeNormalization {
		m.transforms = append(m.transforms, norm.NFC.String)
	}
	// ..if destination is caseInsensitive then make it lower case
	// case Insensitive | src | dst | lower case compare |
	//                  | No  | No  | No                 |
	//                  | Yes | No  | No                 |
	//                  | No  | Yes | Yes                |
	//                  | Yes | Yes | Yes                |
	if m.Fdst.Features().CaseInsensitive || ci.IgnoreCaseSync {
		m.transforms = append(m.transforms, strings.ToLower)
	}
}
// srcOrDstKey turns a directory entry into a sort key using the defined
// transforms.
//
// If isSrc is set then any configured path transforms (renames) are
// applied first via the transform package. A trailing "D" (directory) or
// "F" (file) is appended so identically named files and directories get
// distinct keys which sort consistently with directories first
// ("D" < "F").
//
// A nil entry yields the empty key (used by matchListings for closed
// channels).
func (m *March) srcOrDstKey(entry fs.DirEntry, isSrc bool) string {
	if entry == nil {
		return ""
	}
	name := path.Base(entry.Remote())
	_, isDirectory := entry.(fs.Directory)
	if isSrc {
		name = transform.Path(m.Ctx, name, isDirectory)
	}
	// Note: the loop variable must not be called "transform" as that
	// would shadow the transform package used above.
	for _, transformFn := range m.transforms {
		name = transformFn(name)
	}
	// Suffix entries to make identically named files and
	// directories sort consistently with directories first.
	if isDirectory {
		name += "D"
	} else {
		name += "F"
	}
	return name
}
// srcKey turns a source directory entry into a sort key, applying any
// configured renames and the matching transforms.
func (m *March) srcKey(entry fs.DirEntry) string {
	const isSrc = true
	return m.srcOrDstKey(entry, isSrc)
}
// dstKey turns a destination directory entry into a sort key using only
// the matching transforms (no renames are applied on the dst side).
func (m *March) dstKey(entry fs.DirEntry) string {
	const isSrc = false
	return m.srcOrDstKey(entry, isSrc)
}
// makeListDir constructs a listing function for the given fs
// and includeAll flags for marching through the file system.
// Note: this will optionally flag filter-aware backends!
//
// In the normal case it returns a function which lists a single
// directory on demand. When --fast-list is usable, or --files-from with
// --no-traverse is set, it instead returns a closure which does one
// recursive listing on first use and then serves directories out of the
// resulting in-memory dirtree.
func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool, keyFn list.KeyFn) listDirFn {
	ci := fs.GetConfig(ctx)
	fi := filter.GetConfig(ctx)
	if !(ci.UseListR && f.Features().ListR != nil) && // !--fast-list active and
		!(ci.NoTraverse && fi.HaveFilesFrom()) { // !(--files-from and --no-traverse)
		return func(dir string, callback fs.ListRCallback) (err error) {
			dirCtx := filter.SetUseFilter(m.Ctx, f.Features().FilterAware && !includeAll) // make filter-aware backends constrain List
			return list.DirSortedFn(dirCtx, f, includeAll, dir, callback, keyFn)
		}
	}

	// This returns a closure for use when --fast-list is active or for when
	// --files-from and --no-traverse is set
	var (
		mu      sync.Mutex      // protects the variables below
		started bool            // set once the recursive listing has run
		dirs    dirtree.DirTree // the full tree, directories removed as served
		dirsErr error           // sticky error from the recursive listing
	)
	return func(dir string, callback fs.ListRCallback) (err error) {
		mu.Lock()
		// Lazily build the whole tree on first call
		if !started {
			dirCtx := filter.SetUseFilter(m.Ctx, f.Features().FilterAware && !includeAll) // make filter-aware backends constrain List
			dirs, dirsErr = walk.NewDirTree(dirCtx, f, m.Dir, includeAll, ci.MaxDepth)
			started = true
		}
		if dirsErr != nil {
			mu.Unlock()
			return dirsErr
		}
		entries, ok := dirs[dir]
		if !ok {
			mu.Unlock()
			return fs.ErrorDirNotFound
		}
		// Each directory is served at most once
		delete(dirs, dir)
		mu.Unlock()
		// We use a stable sort here just in case there are
		// duplicates. Assuming the remote delivers the entries in a
		// consistent order, this will give the best user experience
		// in syncing as it will use the first entry for the sync
		// comparison.
		slices.SortStableFunc(entries, func(a, b fs.DirEntry) int {
			return cmp.Compare(keyFn(a), keyFn(b))
		})
		return callback(entries)
	}
}
// listDirJob describe a directory listing that needs to be done
type listDirJob struct {
	srcRemote string // remote directory to list on the source
	dstRemote string // remote directory to list on the destination
	srcDepth  int    // remaining recursion depth on the source side
	dstDepth  int    // remaining recursion depth on the destination side
	noSrc     bool   // if set, don't list the source (dst-only subtree)
	noDst     bool   // if set, don't list the destination (src-only subtree)
}
// Run starts the matching process off
//
// It runs ci.Checkers worker goroutines which each process listDirJobs,
// feeding newly discovered subdirectories back into the job channel.
// It returns the first error encountered; if more than one job failed
// the error is wrapped with the total count.
func (m *March) Run(ctx context.Context) error {
	ci := fs.GetConfig(ctx)
	fi := filter.GetConfig(ctx)
	m.init(ctx)

	srcDepth := ci.MaxDepth
	if srcDepth < 0 {
		srcDepth = fs.MaxLevel
	}
	// With --delete-excluded the destination must be traversed fully so
	// excluded files can be found and deleted.
	dstDepth := srcDepth
	if fi.Opt.DeleteExcluded {
		dstDepth = fs.MaxLevel
	}

	var mu sync.Mutex // Protects vars below
	var jobError error
	var errCount int

	// Start some directory listing go routines
	var wg sync.WaitGroup         // sync closing of go routines
	var traversing sync.WaitGroup // running directory traversals
	checkers := ci.Checkers
	in := make(chan listDirJob, checkers)
	for range checkers {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case <-m.Ctx.Done():
					return
				case job, ok := <-in:
					if !ok {
						return
					}
					jobs, err := m.processJob(job)
					if err != nil {
						mu.Lock()
						// Keep reference only to the first encountered error
						if jobError == nil {
							jobError = err
						}
						errCount++
						mu.Unlock()
					}
					if len(jobs) > 0 {
						traversing.Add(len(jobs))
						go func() {
							// Now we have traversed this directory, send these
							// jobs off for traversal in the background
							for _, newJob := range jobs {
								select {
								case <-m.Ctx.Done():
									// discard job if finishing
									traversing.Done()
								case in <- newJob:
								}
							}
						}()
					}
					traversing.Done()
				}
			}
		}()
	}

	// Start the process by seeding the root job
	traversing.Add(1)
	in <- listDirJob{
		srcRemote: m.Dir,
		srcDepth:  srcDepth - 1,
		dstRemote: m.Dir,
		dstDepth:  dstDepth - 1,
		noDst:     m.NoCheckDest,
	}
	go func() {
		// when the context is cancelled discard the remaining jobs
		<-m.Ctx.Done()
		for range in {
			traversing.Done()
		}
	}()
	traversing.Wait()
	close(in)
	wg.Wait()
	if errCount > 1 {
		return fmt.Errorf("march failed with %d error(s): first error: %w", errCount, jobError)
	}
	return jobError
}
// aborting reports whether the context governing this march has been
// cancelled, without blocking.
func (m *March) aborting() bool {
	select {
	case <-m.Ctx.Done():
		return true
	default:
		return false
	}
}
// Process the two listings, matching up the items in the two slices
// using the transform function on each name first.
//
// Into srcOnly go Entries which only exist in the srcList
// Into dstOnly go Entries which only exist in the dstList
// Into match go matchPair's of src and dst which have the same name
//
// This checks for duplicates and checks the list is sorted.
//
// Both channels must deliver entries in key order (see srcKey/dstKey).
// A nil entry received from a channel is skipped (the channels may carry
// nil placeholders - see processJob's NoTraverse path); a closed channel
// delivers nil with ok=false which ends that side.
func (m *March) matchListings(srcChan, dstChan <-chan fs.DirEntry, srcOnly, dstOnly func(fs.DirEntry), match func(dst, src fs.DirEntry)) error {
	var (
		srcPrev, dstPrev         fs.DirEntry // previous entries for duplicate/order detection
		srcPrevName, dstPrevName string      // their transformed keys
		src, dst                 fs.DirEntry // current entries, nil once consumed
		srcHasMore, dstHasMore   = true, true
		srcName, dstName         string
	)
	// srcDone marks the current src entry as consumed
	srcDone := func() {
		srcPrevName = srcName
		srcPrev = src
		src = nil
		srcName = ""
	}
	// dstDone marks the current dst entry as consumed
	dstDone := func() {
		dstPrevName = dstName
		dstPrev = dst
		dst = nil
		dstName = ""
	}
	for {
		if m.aborting() {
			return m.Ctx.Err()
		}
		// Reload src and dst if needed - we set them to nil if used
		if src == nil {
			src, srcHasMore = <-srcChan
			srcName = m.srcKey(src)
		}
		if dst == nil {
			dst, dstHasMore = <-dstChan
			dstName = m.dstKey(dst)
		}
		if !srcHasMore && !dstHasMore {
			break
		}
		// Drop duplicates (same key and same entry type as previous)
		if src != nil && srcPrev != nil {
			if srcName == srcPrevName && fs.DirEntryType(srcPrev) == fs.DirEntryType(src) {
				fs.Logf(src, "Duplicate %s found in source - ignoring", fs.DirEntryType(src))
				srcDone() // skip the src and retry the dst
				continue
			} else if srcName < srcPrevName {
				// this should never happen since we sort the listings
				panic("Out of order listing in source")
			}
		}
		if dst != nil && dstPrev != nil {
			if dstName == dstPrevName && fs.DirEntryType(dst) == fs.DirEntryType(dstPrev) {
				fs.Logf(dst, "Duplicate %s found in destination - ignoring", fs.DirEntryType(dst))
				dstDone() // skip the dst and retry the src
				continue
			} else if dstName < dstPrevName {
				// this should never happen since we sort the listings
				panic("Out of order listing in destination")
			}
		}
		switch {
		case src != nil && dst != nil:
			// we can't use CompareDirEntries because srcName, dstName could
			// be different from src.Remote() or dst.Remote()
			srcType := fs.DirEntryType(src)
			dstType := fs.DirEntryType(dst)
			if srcName > dstName || (srcName == dstName && srcType > dstType) {
				dstOnly(dst)
				dstDone()
			} else if srcName < dstName || (srcName == dstName && srcType < dstType) {
				srcOnly(src)
				srcDone()
			} else {
				match(dst, src)
				dstDone()
				srcDone()
			}
		case src == nil:
			dstOnly(dst)
			dstDone()
		case dst == nil:
			srcOnly(src)
			srcDone()
		}
	}
	return nil
}
// processJob processes a listDirJob listing the source and
// destination directories, comparing them and returning a slice of
// more jobs
//
// returns errors using processError
func (m *March) processJob(job listDirJob) ([]listDirJob, error) {
	var (
		jobs                   []listDirJob
		srcChan                = make(chan fs.DirEntry, 100)
		dstChan                = make(chan fs.DirEntry, 100)
		srcListErr, dstListErr error
		wg                     sync.WaitGroup
		ci                     = fs.GetConfig(m.Ctx)
	)

	// List the src and dst directories
	if !job.noSrc {
		srcChan := srcChan // duplicate this as we may override it later
		wg.Add(1)
		go func() {
			defer wg.Done()
			srcListErr = m.srcListDir(job.srcRemote, func(entries fs.DirEntries) error {
				for _, entry := range entries {
					srcChan <- entry
				}
				return nil
			})
			close(srcChan)
		}()
	} else {
		close(srcChan)
	}
	// startedDst records whether something will close dstChan
	startedDst := false
	if !m.NoTraverse && !job.noDst {
		startedDst = true
		wg.Add(1)
		go func() {
			defer wg.Done()
			dstListErr = m.dstListDir(job.dstRemote, func(entries fs.DirEntries) error {
				for _, entry := range entries {
					dstChan <- entry
				}
				return nil
			})
			close(dstChan)
		}()
	}
	// If NoTraverse is set, then try to find a matching object
	// for each item in the srcList to head dst object
	if m.NoTraverse && !m.NoCheckDest {
		startedDst = true
		workers := ci.Checkers
		// Interpose a new srcChan so entries can be forwarded while the
		// matching dst object is looked up concurrently.
		originalSrcChan := srcChan
		srcChan = make(chan fs.DirEntry, 100)
		type matchTask struct {
			src      fs.DirEntry        // src object to find in destination
			dstMatch chan<- fs.DirEntry // channel to receive matching dst object or nil
		}
		matchTasks := make(chan matchTask, workers)
		dstMatches := make(chan (<-chan fs.DirEntry), workers)
		// Create the tasks from the originalSrcChan. These are put into matchTasks for
		// processing and dstMatches so they can be retrieved in order.
		go func() {
			for src := range originalSrcChan {
				srcChan <- src
				dstMatch := make(chan fs.DirEntry, 1)
				matchTasks <- matchTask{
					src:      src,
					dstMatch: dstMatch,
				}
				dstMatches <- dstMatch
			}
			close(matchTasks)
		}()
		// Get the tasks from the queue and find a matching object.
		var workerWg sync.WaitGroup
		for range workers {
			workerWg.Add(1)
			go func() {
				defer workerWg.Done()
				for t := range matchTasks {
					// Can't match directories with NewObject
					if _, ok := t.src.(fs.Object); !ok {
						t.dstMatch <- nil
						continue
					}
					leaf := path.Base(t.src.Remote())
					dst, err := m.Fdst.NewObject(m.Ctx, path.Join(job.dstRemote, leaf))
					if err != nil {
						dst = nil
					}
					t.dstMatch <- dst
				}
			}()
		}
		// Close dstResults when all the workers have finished
		go func() {
			workerWg.Wait()
			close(dstMatches)
		}()
		// Read the matches in order and send them to dstChan if found.
		wg.Add(1)
		go func() {
			defer wg.Done()
			for dstMatch := range dstMatches {
				dst := <-dstMatch
				// Note that dst may be nil here
				// We send these on so we don't deadlock the reader
				dstChan <- dst
			}
			close(srcChan)
			close(dstChan)
		}()
	}
	if !startedDst {
		close(dstChan)
	}

	// Work out what to do and do it
	err := m.matchListings(srcChan, dstChan, func(src fs.DirEntry) {
		recurse := m.Callback.SrcOnly(src)
		if recurse && job.srcDepth > 0 {
			jobs = append(jobs, listDirJob{
				srcRemote: src.Remote(),
				dstRemote: src.Remote(),
				srcDepth:  job.srcDepth - 1,
				noDst:     true,
			})
		}
	}, func(dst fs.DirEntry) {
		recurse := m.Callback.DstOnly(dst)
		if recurse && job.dstDepth > 0 {
			jobs = append(jobs, listDirJob{
				srcRemote: dst.Remote(),
				dstRemote: dst.Remote(),
				dstDepth:  job.dstDepth - 1,
				noSrc:     true,
			})
		}
	}, func(dst, src fs.DirEntry) {
		recurse := m.Callback.Match(m.Ctx, dst, src)
		if recurse && job.srcDepth > 0 && job.dstDepth > 0 {
			jobs = append(jobs, listDirJob{
				srcRemote: src.Remote(),
				dstRemote: dst.Remote(),
				srcDepth:  job.srcDepth - 1,
				dstDepth:  job.dstDepth - 1,
			})
		}
	})
	if err != nil {
		return nil, err
	}

	// Wait for listings to complete and report errors
	wg.Wait()
	if srcListErr != nil {
		if job.srcRemote != "" {
			fs.Errorf(job.srcRemote, "error reading source directory: %v", srcListErr)
		} else {
			fs.Errorf(m.Fsrc, "error reading source root directory: %v", srcListErr)
		}
		srcListErr = fs.CountError(m.Ctx, srcListErr)
		return nil, srcListErr
	}
	if dstListErr == fs.ErrorDirNotFound {
		// Copy the stuff anyway
	} else if dstListErr != nil {
		if job.dstRemote != "" {
			fs.Errorf(job.dstRemote, "error reading destination directory: %v", dstListErr)
		} else {
			fs.Errorf(m.Fdst, "error reading destination root directory: %v", dstListErr)
		}
		dstListErr = fs.CountError(m.Ctx, dstListErr)
		return nil, dstListErr
	}

	return jobs, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/march/march_test.go | fs/march/march_test.go | // Internal tests for march
package march
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/mockdir"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/text/unicode/norm"
)
// Some times used in the tests
var (
t1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
)
// TestMain drives the tests through the rclone fstest harness.
func TestMain(m *testing.M) {
	fstest.TestMain(m)
}
// marchTester is a Marcher implementation for testing which records the
// entries passed to each callback and collects errors like sync does.
type marchTester struct {
	ctx    context.Context // internal context for controlling go-routines
	cancel func()          // cancel the context

	srcOnly    fs.DirEntries // entries seen by SrcOnly
	dstOnly    fs.DirEntries // entries seen by DstOnly
	match      fs.DirEntries // src entries seen by Match
	entryMutex sync.Mutex    // protects the three slices above

	errorMu    sync.Mutex // Mutex covering the error variables
	err        error      // last ordinary (retryable) error
	noRetryErr error      // last no-retry error
	fatalErr   error      // last fatal error
	noTraverse bool       // whether the march runs with NoTraverse
}
// DstOnly records an entry which is in the destination only, returning
// true (recurse) for directories and false for objects.
func (mt *marchTester) DstOnly(dst fs.DirEntry) (recurse bool) {
	mt.entryMutex.Lock()
	mt.dstOnly = append(mt.dstOnly, dst)
	mt.entryMutex.Unlock()

	if _, isObject := dst.(fs.Object); isObject {
		return false
	}
	if _, isDir := dst.(fs.Directory); isDir {
		return true
	}
	panic("Bad object in DirEntries")
}
// SrcOnly records an entry which is in the source only, returning true
// (recurse) for directories and false for objects.
func (mt *marchTester) SrcOnly(src fs.DirEntry) (recurse bool) {
	mt.entryMutex.Lock()
	mt.srcOnly = append(mt.srcOnly, src)
	mt.entryMutex.Unlock()

	if _, isObject := src.(fs.Object); isObject {
		return false
	}
	if _, isDir := src.(fs.Directory); isDir {
		return true
	}
	panic("Bad object in DirEntries")
}
// Match is called when src and dst are present, so sync src to dst
//
// It records the src entry and returns true only when both sides are
// directories; a directory matched against a file is recorded as an
// error.
func (mt *marchTester) Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) {
	mt.entryMutex.Lock()
	mt.match = append(mt.match, src)
	mt.entryMutex.Unlock()

	switch src.(type) {
	case fs.Object:
		return false
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		_, ok := dst.(fs.Directory)
		if ok {
			return true
		}
		// FIXME src is dir, dst is file
		err := errors.New("can't overwrite file with directory")
		fs.Errorf(dst, "%v", err)
		mt.processError(err)
	default:
		panic("Bad object in DirEntries")
	}
	return false
}
// processError files err into the matching error slot (fatal, no-retry
// or plain), cancelling the march on the first fatal error seen.
func (mt *marchTester) processError(err error) {
	if err == nil {
		return
	}
	mt.errorMu.Lock()
	defer mt.errorMu.Unlock()

	if fserrors.IsFatalError(err) {
		if !mt.aborting() {
			fs.Errorf(nil, "Cancelling sync due to fatal error: %v", err)
			mt.cancel()
		}
		mt.fatalErr = err
	} else if fserrors.IsNoRetryError(err) {
		mt.noRetryErr = err
	} else {
		mt.err = err
	}
}
// currentError returns the most significant recorded error: fatal
// first, then plain, then no-retry; nil if nothing was recorded.
func (mt *marchTester) currentError() error {
	mt.errorMu.Lock()
	defer mt.errorMu.Unlock()
	for _, err := range []error{mt.fatalErr, mt.err, mt.noRetryErr} {
		if err != nil {
			return err
		}
	}
	return nil
}
// aborting reports whether the test's context has been cancelled.
func (mt *marchTester) aborting() bool {
	return mt.ctx.Err() != nil
}
// TestMarch runs a March between a local source and a remote
// destination seeded from each test case's file lists, then checks that
// the SrcOnly/DstOnly/Match callbacks saw exactly the expected entries.
// Variants cover plain traversal, --no-traverse and --fast-list.
func TestMarch(t *testing.T) {
	for _, test := range []struct {
		what        string   // test case name
		fileSrcOnly []string // files to create only in the source
		dirSrcOnly  []string // directories expected only in the source
		fileDstOnly []string // files to create only in the destination
		dirDstOnly  []string // directories expected only in the destination
		fileMatch   []string // files created on both sides
		dirMatch    []string // directories expected on both sides
		noTraverse  bool     // run with NoTraverse
		fastList    bool     // run with --fast-list (UseListR)
	}{
		{
			what:        "source only",
			fileSrcOnly: []string{"test", "test2", "test3", "sub dir/test4"},
			dirSrcOnly:  []string{"sub dir"},
		},
		{
			what:      "identical",
			fileMatch: []string{"test", "test2", "sub dir/test3", "sub dir/sub sub dir/test4"},
			dirMatch:  []string{"sub dir", "sub dir/sub sub dir"},
		},
		{
			what:        "typical sync",
			fileSrcOnly: []string{"srcOnly", "srcOnlyDir/sub"},
			dirSrcOnly:  []string{"srcOnlyDir"},
			fileMatch:   []string{"match", "matchDir/match file"},
			dirMatch:    []string{"matchDir"},
			fileDstOnly: []string{"dstOnly", "dstOnlyDir/sub"},
			dirDstOnly:  []string{"dstOnlyDir"},
		},
		{
			what:        "no traverse source only",
			fileSrcOnly: []string{"test", "test2", "test3", "sub dir/test4"},
			dirSrcOnly:  []string{"sub dir"},
			noTraverse:  true,
		},
		{
			what:       "no traverse identical",
			fileMatch:  []string{"test", "test2", "sub dir/test3", "sub dir/sub sub dir/test4"},
			noTraverse: true,
		},
		{
			what:        "no traverse typical sync",
			fileSrcOnly: []string{"srcOnly", "srcOnlyDir/sub"},
			fileMatch:   []string{"match", "matchDir/match file"},
			noTraverse:  true,
		},
		{
			what:        "fast list source only",
			fileSrcOnly: []string{"test", "test2", "test3", "sub dir/test4"},
			dirSrcOnly:  []string{"sub dir"},
			fastList:    true,
		},
		{
			what:      "fast list identical",
			fileMatch: []string{"test", "test2", "sub dir/test3", "sub dir/sub sub dir/test4"},
			dirMatch:  []string{"sub dir", "sub dir/sub sub dir"},
			fastList:  true,
		},
		{
			what:        "fast list typical sync",
			fileSrcOnly: []string{"srcOnly", "srcOnlyDir/sub"},
			dirSrcOnly:  []string{"srcOnlyDir"},
			fileMatch:   []string{"match", "matchDir/match file"},
			dirMatch:    []string{"matchDir"},
			fileDstOnly: []string{"dstOnly", "dstOnlyDir/sub"},
			dirDstOnly:  []string{"dstOnlyDir"},
			fastList:    true,
		},
	} {
		t.Run(fmt.Sprintf("TestMarch-%s", test.what), func(t *testing.T) {
			r := fstest.NewRun(t)

			var srcOnly []fstest.Item
			var dstOnly []fstest.Item
			var match []fstest.Item

			ctx, cancel := context.WithCancel(context.Background())

			// Seed the two sides according to the test case
			for _, f := range test.fileSrcOnly {
				srcOnly = append(srcOnly, r.WriteFile(f, "hello world", t1))
			}
			for _, f := range test.fileDstOnly {
				dstOnly = append(dstOnly, r.WriteObject(ctx, f, "hello world", t1))
			}
			for _, f := range test.fileMatch {
				match = append(match, r.WriteBoth(ctx, f, "hello world", t1))
			}
			ctx, ci := fs.AddConfig(ctx)
			ci.UseListR = test.fastList

			fi := filter.GetConfig(ctx)

			// Local backend doesn't implement ListR, so monkey patch it for this test
			if test.fastList && r.Flocal.Features().ListR == nil {
				r.Flocal.Features().ListR = func(ctx context.Context, dir string, callback fs.ListRCallback) error {
					r.Flocal.Features().ListR = nil // disable ListR to avoid infinite recursion
					return walk.ListR(ctx, r.Flocal, dir, true, -1, walk.ListAll, callback)
				}
				defer func() {
					r.Flocal.Features().ListR = nil
				}()
			}

			mt := &marchTester{
				ctx:        ctx,
				cancel:     cancel,
				noTraverse: test.noTraverse,
			}
			m := &March{
				Ctx:           ctx,
				Fdst:          r.Fremote,
				Fsrc:          r.Flocal,
				Dir:           "",
				NoTraverse:    test.noTraverse,
				Callback:      mt,
				DstIncludeAll: fi.Opt.DeleteExcluded,
			}

			mt.processError(m.Run(ctx))
			mt.cancel()
			err := mt.currentError()
			require.NoError(t, err)

			precision := fs.GetModifyWindow(ctx, r.Fremote, r.Flocal)
			fstest.CompareItems(t, mt.srcOnly, srcOnly, test.dirSrcOnly, precision, "srcOnly")
			fstest.CompareItems(t, mt.dstOnly, dstOnly, test.dirDstOnly, precision, "dstOnly")
			fstest.CompareItems(t, mt.match, match, test.dirMatch, precision, "match")
		})
	}
}
// matchPair is a matched pair of direntries returned by matchListings
type matchPair struct {
	src, dst fs.DirEntry // the source and destination entries that matched
}
// TestMatchListings feeds pairs of src/dst entries through
// (*March).matchListings (via list.Sorter channels) and checks the
// srcOnly/dstOnly/match callbacks receive exactly the expected entries,
// including duplicate suppression, case and unicode transforms, and
// file-vs-directory disambiguation. Each case is run twice to check the
// result is stable.
func TestMatchListings(t *testing.T) {
	var (
		a    = mockobject.Object("a")
		A    = mockobject.Object("A")
		b    = mockobject.Object("b")
		c    = mockobject.Object("c")
		d    = mockobject.Object("d")
		uE1  = mockobject.Object("é") // one of the unicode E characters
		uE2  = mockobject.Object("é") // a different unicode E character
		dirA = mockdir.New("A")
		dirb = mockdir.New("b")
	)
	for _, test := range []struct {
		what       string
		input      fs.DirEntries // pairs of input src, dst
		srcOnly    fs.DirEntries
		dstOnly    fs.DirEntries
		matches    []matchPair // pairs of output
		transforms []matchTransformFn
	}{
		{
			what: "only src or dst",
			input: fs.DirEntries{
				a, nil,
				b, nil,
				c, nil,
				d, nil,
			},
			srcOnly: fs.DirEntries{
				a, b, c, d,
			},
		},
		{
			what: "typical sync #1",
			input: fs.DirEntries{
				a, nil,
				b, b,
				nil, c,
				nil, d,
			},
			srcOnly: fs.DirEntries{
				a,
			},
			dstOnly: fs.DirEntries{
				c, d,
			},
			matches: []matchPair{
				{b, b},
			},
		},
		{
			what: "typical sync #2",
			input: fs.DirEntries{
				a, a,
				b, b,
				nil, c,
				d, d,
			},
			dstOnly: fs.DirEntries{
				c,
			},
			matches: []matchPair{
				{a, a},
				{b, b},
				{d, d},
			},
		},
		{
			what: "One duplicate",
			input: fs.DirEntries{
				A, A,
				a, a,
				a, nil,
				b, b,
			},
			matches: []matchPair{
				{A, A},
				{a, a},
				{b, b},
			},
		},
		{
			what: "Two duplicates",
			input: fs.DirEntries{
				a, a,
				a, a,
				a, nil,
			},
			matches: []matchPair{
				{a, a},
			},
		},
		{
			what: "Case insensitive duplicate - no transform",
			input: fs.DirEntries{
				a, a,
				A, A,
			},
			matches: []matchPair{
				{A, A},
				{a, a},
			},
		},
		{
			what: "Case insensitive duplicate - transform to lower case",
			input: fs.DirEntries{
				a, A,
				A, a,
			},
			matches: []matchPair{
				{a, A}, // the first duplicate will be returned with a stable sort
			},
			transforms: []matchTransformFn{strings.ToLower},
		},
		{
			what: "Unicode near-duplicate that becomes duplicate with normalization",
			input: fs.DirEntries{
				uE1, uE1,
				uE2, uE2,
			},
			matches: []matchPair{
				{uE1, uE1},
			},
			transforms: []matchTransformFn{norm.NFC.String},
		},
		{
			what: "Unicode near-duplicate with no normalization",
			input: fs.DirEntries{
				uE1, uE1,
				uE2, uE2,
			},
			matches: []matchPair{
				{uE1, uE1},
				{uE2, uE2},
			},
		},
		{
			what: "File and directory are not duplicates - srcOnly",
			input: fs.DirEntries{
				dirA, nil,
				A, nil,
			},
			srcOnly: fs.DirEntries{
				dirA,
				A,
			},
		},
		{
			what: "File and directory are not duplicates - matches",
			input: fs.DirEntries{
				dirA, dirA,
				A, A,
			},
			matches: []matchPair{
				{dirA, dirA},
				{A, A},
			},
		},
		{
			what: "Sync with directory #1",
			input: fs.DirEntries{
				dirA, nil,
				A, nil,
				b, b,
				nil, c,
				nil, d,
			},
			srcOnly: fs.DirEntries{
				dirA,
				A,
			},
			dstOnly: fs.DirEntries{
				c, d,
			},
			matches: []matchPair{
				{b, b},
			},
		},
		{
			what: "Sync with 2 directories",
			input: fs.DirEntries{
				dirA, dirA,
				A, nil,
				nil, dirb,
				nil, b,
			},
			srcOnly: fs.DirEntries{
				A,
			},
			dstOnly: fs.DirEntries{
				dirb,
				b,
			},
			matches: []matchPair{
				{dirA, dirA},
			},
		},
		{
			what: "Sync with duplicate files and dirs",
			input: fs.DirEntries{
				dirA, A,
				A, dirA,
			},
			matches: []matchPair{
				{dirA, dirA},
				{A, A},
			},
		},
	} {
		t.Run(fmt.Sprintf("TestMatchListings-%s", test.what), func(t *testing.T) {
			ctx := context.Background()
			var wg sync.WaitGroup

			// Skeleton March for testing
			m := March{
				Ctx:        context.Background(),
				transforms: test.transforms,
			}

			// Make a channel to send the source (0) or dest (1) using a list.Sorter
			makeChan := func(offset int) <-chan fs.DirEntry {
				out := make(chan fs.DirEntry)
				key := m.dstKey
				if offset == 0 {
					key = m.srcKey
				}
				ls, err := list.NewSorter(ctx, nil, list.SortToChan(out), key)
				require.NoError(t, err)
				wg.Add(1)
				go func() {
					defer wg.Done()
					// every second entry belongs to this side
					for i := 0; i < len(test.input); i += 2 {
						entry := test.input[i+offset]
						if entry != nil {
							require.NoError(t, ls.Add(fs.DirEntries{entry}))
						}
					}
					require.NoError(t, ls.Send())
					ls.CleanUp()
					close(out)
				}()
				return out
			}

			var srcOnly fs.DirEntries
			srcOnlyFn := func(entry fs.DirEntry) {
				srcOnly = append(srcOnly, entry)
			}

			var dstOnly fs.DirEntries
			dstOnlyFn := func(entry fs.DirEntry) {
				dstOnly = append(dstOnly, entry)
			}

			var matches []matchPair
			matchFn := func(dst, src fs.DirEntry) {
				matches = append(matches, matchPair{dst: dst, src: src})
			}

			err := m.matchListings(makeChan(0), makeChan(1), srcOnlyFn, dstOnlyFn, matchFn)
			require.NoError(t, err)
			wg.Wait()
			assert.Equal(t, test.srcOnly, srcOnly, test.what, "srcOnly differ")
			assert.Equal(t, test.dstOnly, dstOnly, test.what, "dstOnly differ")
			assert.Equal(t, test.matches, matches, test.what, "matches differ")

			// now swap src and dst
			srcOnly, dstOnly, matches = nil, nil, nil
			err = m.matchListings(makeChan(0), makeChan(1), srcOnlyFn, dstOnlyFn, matchFn)
			require.NoError(t, err)
			wg.Wait()
			assert.Equal(t, test.srcOnly, srcOnly, test.what, "srcOnly differ")
			assert.Equal(t, test.dstOnly, dstOnly, test.what, "dstOnly differ")
			assert.Equal(t, test.matches, matches, test.what, "matches differ")
		})
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/list/helpers_test.go | fs/list/helpers_test.go | package list
import (
"context"
"errors"
"fmt"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// mockCallback is a no-op fs.ListRCallback used where a test only needs
// a valid callback value.
func mockCallback(_ fs.DirEntries) error {
	return nil
}
// TestNewListRHelper checks that NewHelper stores the supplied callback
// and starts with an empty entries buffer.
func TestNewListRHelper(t *testing.T) {
	callback := mockCallback
	helper := NewHelper(callback)
	assert.NotNil(t, helper)
	// compare function pointers as funcs aren't comparable directly
	assert.Equal(t, fmt.Sprintf("%p", callback), fmt.Sprintf("%p", helper.callback))
	assert.Empty(t, helper.entries)
}
// TestListRHelperAdd checks that Add buffers entries without invoking
// the callback before the batch is full, and that nil entries are
// ignored.
func TestListRHelperAdd(t *testing.T) {
	callbackInvoked := false
	callback := func(entries fs.DirEntries) error {
		callbackInvoked = true
		return nil
	}

	helper := NewHelper(callback)
	entry := mockobject.Object("A")
	require.NoError(t, helper.Add(entry))
	assert.Len(t, helper.entries, 1)
	assert.False(t, callbackInvoked, "Callback should not be invoked before reaching 100 entries")

	// Check adding a nil entry doesn't change anything
	require.NoError(t, helper.Add(nil))
	assert.Len(t, helper.entries, 1)
	assert.False(t, callbackInvoked, "Callback should not be invoked before reaching 100 entries")
}
// TestListRHelperSend checks that the helper flushes automatically once
// 100 entries have been added: the callback receives the full batch and
// the internal buffer is emptied.
func TestListRHelperSend(t *testing.T) {
	entry := mockobject.Object("A")
	callbackInvoked := false
	callback := func(entries fs.DirEntries) error {
		callbackInvoked = true
		// use assert.Len for consistency with the other tests here
		assert.Len(t, entries, 100)
		for _, obj := range entries {
			assert.Equal(t, entry, obj)
		}
		return nil
	}

	helper := NewHelper(callback)

	// Add 100 entries to force the callback to be invoked
	for range 100 {
		require.NoError(t, helper.Add(entry))
	}
	assert.Len(t, helper.entries, 0)
	assert.True(t, callbackInvoked, "Callback should be invoked after 100 entries")
}
// TestListRHelperFlush checks that Flush sends any buffered entries to
// the callback immediately and clears the buffer.
func TestListRHelperFlush(t *testing.T) {
	entry := mockobject.Object("A")
	callbackInvoked := false
	callback := func(entries fs.DirEntries) error {
		callbackInvoked = true
		// use assert.Len for consistency with the other tests here
		assert.Len(t, entries, 1)
		for _, obj := range entries {
			assert.Equal(t, entry, obj)
		}
		return nil
	}

	helper := NewHelper(callback)
	require.NoError(t, helper.Add(entry))
	assert.False(t, callbackInvoked, "Callback should not have been invoked yet")
	require.NoError(t, helper.Flush())
	assert.True(t, callbackInvoked, "Callback should be invoked on flush")
	assert.Len(t, helper.entries, 0, "Entries should be cleared after flush")
}
// mockListPfs is a minimal fs.ListPer implementation which serves its
// canned entries in batches of 2, optionally failing with err once
// errorAfter entries have been delivered.
type mockListPfs struct {
	t          *testing.T
	entries    fs.DirEntries // entries to deliver, two per callback
	err        error         // error to return after errorAfter entries (nil for success)
	errorAfter int           // number of entries delivered before returning err
}

// ListP delivers the canned entries to callback in pairs, checking the
// requested directory and honouring the configured error injection.
func (f *mockListPfs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	assert.Equal(f.t, "dir", dir)
	count := 0
	for entries := f.entries; len(entries) > 0; entries = entries[2:] {
		err = callback(entries[:2])
		if err != nil {
			return err
		}
		count += 2
		if f.err != nil && count >= f.errorAfter {
			return f.err
		}
	}
	return nil
}

// check interface
var _ fs.ListPer = (*mockListPfs)(nil)
// TestListWithListP checks that WithListP collects everything a ListP
// implementation delivers, and that on error it returns both the error
// and the entries received up to that point.
func TestListWithListP(t *testing.T) {
	ctx := context.Background()
	var entries fs.DirEntries
	// 26 single-letter entries "A".."Z"
	for i := range 26 {
		entries = append(entries, mockobject.New(fmt.Sprintf("%c", 'A'+i)))
	}
	t.Run("NoError", func(t *testing.T) {
		f := &mockListPfs{
			t:       t,
			entries: entries,
		}
		gotEntries, err := WithListP(ctx, "dir", f)
		require.NoError(t, err)
		assert.Equal(t, entries, gotEntries)
	})
	t.Run("Error", func(t *testing.T) {
		f := &mockListPfs{t: t,
			entries:    entries,
			err:        errors.New("BOOM"),
			errorAfter: 10,
		}
		gotEntries, err := WithListP(ctx, "dir", f)
		assert.Equal(t, f.err, err)
		// entries delivered before the injected error are still returned
		assert.Equal(t, entries[:10], gotEntries)
	})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/list/sorter_test.go | fs/list/sorter_test.go | package list
import (
"cmp"
"context"
"fmt"
"slices"
"strings"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/mockdir"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestSorter checks the basic in-memory Sorter lifecycle: Add
// accumulates entries, Send sorts and delivers them, CleanUp clears
// the internal state.
func TestSorter(t *testing.T) {
ctx := context.Background()
da := mockdir.New("a")
oA := mockobject.Object("A")
// "A" sorts before "a" so the object must arrive first
callback := func(entries fs.DirEntries) error {
require.Equal(t, fs.DirEntries{oA, da}, entries)
return nil
}
ls, err := NewSorter(ctx, nil, callback, nil)
require.NoError(t, err)
assert.Equal(t, fmt.Sprintf("%p", callback), fmt.Sprintf("%p", ls.callback))
assert.Equal(t, fmt.Sprintf("%p", identityKeyFn), fmt.Sprintf("%p", ls.keyFn))
assert.Equal(t, fs.DirEntries(nil), ls.entries)
// Test Add
err = ls.Add(fs.DirEntries{da})
require.NoError(t, err)
assert.Equal(t, fs.DirEntries{da}, ls.entries)
err = ls.Add(fs.DirEntries{oA})
require.NoError(t, err)
assert.Equal(t, fs.DirEntries{da, oA}, ls.entries)
// Test Send
err = ls.Send()
require.NoError(t, err)
// Test Cleanup
ls.CleanUp()
assert.Equal(t, fs.DirEntries(nil), ls.entries)
}
// TestSorterIdentity checks that entries are sorted by Remote when no
// key function is supplied to NewSorter.
func TestSorterIdentity(t *testing.T) {
ctx := context.Background()
cmpFn := func(a, b fs.DirEntry) int {
return cmp.Compare(a.Remote(), b.Remote())
}
callback := func(entries fs.DirEntries) error {
assert.True(t, slices.IsSortedFunc(entries, cmpFn))
assert.Equal(t, "a", entries[0].Remote())
return nil
}
ls, err := NewSorter(ctx, nil, callback, nil)
require.NoError(t, err)
defer ls.CleanUp()
// Add things in reverse alphabetical order
for i := 'z'; i >= 'a'; i-- {
err = ls.Add(fs.DirEntries{mockobject.Object(string(i))})
require.NoError(t, err)
}
// Not sorted yet - Send does the sorting
assert.Equal(t, "z", ls.entries[0].Remote())
assert.False(t, slices.IsSortedFunc(ls.entries, cmpFn))
// Check they get sorted
err = ls.Send()
require.NoError(t, err)
}
// TestSorterKeyFn checks that a custom key function controls the sort
// order - here the key reverses the alphabet so "z" sorts first.
func TestSorterKeyFn(t *testing.T) {
ctx := context.Background()
// Map the first byte so that 'z' -> smallest key, 'a' -> largest
keyFn := func(entry fs.DirEntry) string {
s := entry.Remote()
return string('z' - s[0])
}
cmpFn := func(a, b fs.DirEntry) int {
return cmp.Compare(keyFn(a), keyFn(b))
}
callback := func(entries fs.DirEntries) error {
assert.True(t, slices.IsSortedFunc(entries, cmpFn))
assert.Equal(t, "z", entries[0].Remote())
return nil
}
ls, err := NewSorter(ctx, nil, callback, keyFn)
require.NoError(t, err)
defer ls.CleanUp()
// Add things in reverse sorted order
for i := 'a'; i <= 'z'; i++ {
err = ls.Add(fs.DirEntries{mockobject.Object(string(i))})
require.NoError(t, err)
}
assert.Equal(t, "a", ls.entries[0].Remote())
assert.False(t, slices.IsSortedFunc(ls.entries, cmpFn))
// Check they get sorted
err = ls.Send()
require.NoError(t, err)
}
// testFs implements enough of the fs.Fs interface for Sorter
type testFs struct {
t *testing.T
// entriesMap holds the expected entries keyed by remote path
entriesMap map[string]fs.DirEntry
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
//
// It also fails the test if remote is unknown or is not an object.
func (f *testFs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
entry, ok := f.entriesMap[remote]
assert.True(f.t, ok, "entry not found")
if !ok {
return nil, fs.ErrorObjectNotFound
}
obj, ok := entry.(fs.Object)
assert.True(f.t, ok, "expected entry to be object: %#v", entry)
if !ok {
return nil, fs.ErrorObjectNotFound
}
return obj, nil
}
// String outputs info about the Fs
func (f *testFs) String() string {
return "testFs"
}
// used to sort the entries case insensitively
func keyCaseInsensitive(entry fs.DirEntry) string {
return strings.ToLower(entry.Remote())
}
// Test the external sorting
//
// testSorterExt drives the Sorter with N entries and the given
// cutoff, checking that external (on disk) sorting kicks in exactly
// when wantExtSort says it should and that the callback receives
// every entry exactly once, in key order, with the correct type.
func testSorterExt(t *testing.T, cutoff, N int, wantExtSort bool, keyFn KeyFn) {
	ctx := context.Background()
	ctx, ci := fs.AddConfig(ctx)
	ci.ListCutoff = cutoff

	// Make the directory entries - even indexes are objects, odd are
	// directories, and every third entry gets an upper case prefix to
	// give a case-insensitive keyFn something to reorder.
	entriesMap := make(map[string]fs.DirEntry, N)
	for i := range N {
		remote := fmt.Sprintf("%010d", i)
		prefix := "a"
		if i%3 == 0 {
			prefix = "A"
		}
		remote = prefix + remote
		if i%2 == 0 {
			entriesMap[remote] = mockobject.New(remote)
		} else {
			entriesMap[remote] = mockdir.New(remote)
		}
	}
	assert.Equal(t, N, len(entriesMap))
	f := &testFs{t: t, entriesMap: entriesMap}

	// In the callback delete entries from the map when they are
	// found
	prevKey := ""
	callback := func(entries fs.DirEntries) error {
		for _, gotEntry := range entries {
			remote := gotEntry.Remote()
			key := remote
			if keyFn != nil {
				key = keyFn(gotEntry)
			}
			require.Less(t, prevKey, key, "Not sorted")
			prevKey = key
			wantEntry, ok := entriesMap[remote]
			assert.True(t, ok, "Entry not found %q", remote)
			// Check the received entry is the same kind (object or
			// directory) as the one we put in. Note the got* checks
			// must inspect gotEntry - previously they re-checked
			// wantEntry, so a wrongly-typed received entry could
			// never be detected.
			_, wantDir := wantEntry.(fs.Directory)
			_, gotDir := gotEntry.(fs.Directory)
			_, wantObj := wantEntry.(fs.Object)
			_, gotObj := gotEntry.(fs.Object)
			require.True(t, (wantDir && gotDir) || (wantObj && gotObj), "Wrong types %#v, %#v", wantEntry, gotEntry)
			delete(entriesMap, remote)
		}
		return nil
	}

	ls, err := NewSorter(ctx, f, callback, keyFn)
	require.NoError(t, err)

	// Send the entries in random (map) order
	for _, entry := range entriesMap {
		err = ls.Add(fs.DirEntries{entry})
		require.NoError(t, err)
	}

	// Check we are extsorting if required
	assert.Equal(t, wantExtSort, ls.extSort)

	// Test Send
	err = ls.Send()
	require.NoError(t, err)

	// All the entries should have been seen
	assert.Equal(t, 0, len(entriesMap))

	// Test Cleanup
	ls.CleanUp()
	assert.Equal(t, fs.DirEntries(nil), ls.entries)
}
// Test the external sorting
//
// Exercises both the in-memory (wantExtSort=false) and on-disk
// (wantExtSort=true) paths either side of the cutoff, with and
// without a custom key function.
func TestSorterExt(t *testing.T) {
for _, test := range []struct {
cutoff int
N int
wantExtSort bool
keyFn KeyFn
}{
{cutoff: 1000, N: 100, wantExtSort: false},
{cutoff: 100, N: 1000, wantExtSort: true},
{cutoff: 1000, N: 100, wantExtSort: false, keyFn: keyCaseInsensitive},
{cutoff: 100, N: 1000, wantExtSort: true, keyFn: keyCaseInsensitive},
{cutoff: 100001, N: 100000, wantExtSort: false},
{cutoff: 100000, N: 100001, wantExtSort: true},
// {cutoff: 100_000, N: 1_000_000, wantExtSort: true},
// {cutoff: 100_000, N: 10_000_000, wantExtSort: true},
} {
t.Run(fmt.Sprintf("cutoff=%d,N=%d,wantExtSort=%v,keyFn=%v", test.cutoff, test.N, test.wantExtSort, test.keyFn != nil), func(t *testing.T) {
testSorterExt(t, test.cutoff, test.N, test.wantExtSort, test.keyFn)
})
}
}
// benchFs implements enough of the fs.Fs interface for Sorter. It
// has no state so NewObject just fabricates a fresh mock object.
type benchFs struct{}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (benchFs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Recreate the mock objects
return mockobject.New(remote), nil
}
// String outputs info about the Fs
func (benchFs) String() string {
return "benchFs"
}
// BenchmarkSorterExt benchmarks the on disk sorting path with 10
// million entries and a small cutoff to force external sorting.
func BenchmarkSorterExt(t *testing.B) {
	const cutoff = 1000
	const N = 10_000_000
	ctx := context.Background()
	ctx, ci := fs.AddConfig(ctx)
	ci.ListCutoff = cutoff
	keyFn := keyCaseInsensitive

	// In the callback check entries are in order
	prevKey := ""
	entriesReceived := 0
	callback := func(entries fs.DirEntries) error {
		for _, gotEntry := range entries {
			remote := gotEntry.Remote()
			key := remote
			if keyFn != nil {
				key = keyFn(gotEntry)
			}
			require.Less(t, prevKey, key, "Not sorted")
			prevKey = key
			entriesReceived++
		}
		return nil
	}

	f := benchFs{}
	ls, err := NewSorter(ctx, f, callback, keyFn)
	require.NoError(t, err)

	// Send the entries in reverse order in batches of 1000 like the backends do
	var entries = make(fs.DirEntries, 0, 1000)
	for i := N - 1; i >= 0; i-- {
		remote := fmt.Sprintf("%050d", i) // UUID length plus a bit
		prefix := "a"
		if i%3 == 0 {
			prefix = "A"
		}
		remote = prefix + remote
		if i%2 == 0 {
			entries = append(entries, mockobject.New(remote))
		} else {
			entries = append(entries, mockdir.New(remote))
		}
		// Flush at exactly 1000 entries so the preallocated capacity
		// is never exceeded (was >1000, which sent batches of 1001
		// and forced a reallocation on every batch)
		if len(entries) >= 1000 {
			err = ls.Add(entries)
			require.NoError(t, err)
			entries = entries[:0]
		}
	}
	// Send any remaining partial batch
	err = ls.Add(entries)
	require.NoError(t, err)

	// Check we are extsorting
	assert.True(t, ls.extSort)

	// Test Send
	err = ls.Send()
	require.NoError(t, err)

	// All the entries should have been seen
	assert.Equal(t, N, entriesReceived)

	// Cleanup
	ls.CleanUp()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/list/list_test.go | fs/list/list_test.go | package list
import (
"context"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/mockdir"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// NB integration tests for DirSorted are in
// fs/operations/listdirsorted_test.go
// TestFilterAndSortIncludeAll checks filterAndSortDir with and
// without filtering. Objects sort before directories here because
// upper case remotes ("A".."D") compare less than lower case
// ("a".."d").
func TestFilterAndSortIncludeAll(t *testing.T) {
	da := mockdir.New("a")
	oA := mockobject.Object("A")
	db := mockdir.New("b")
	oB := mockobject.Object("B")
	dc := mockdir.New("c")
	oC := mockobject.Object("C")
	dd := mockdir.New("d")
	oD := mockobject.Object("D")
	entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
	includeObject := func(ctx context.Context, o fs.Object) bool {
		return o != oB
	}
	includeDirectory := func(remote string) (bool, error) {
		return remote != "c", nil
	}
	// no filter
	//
	// NB assert.Equal takes (t, expected, actual) - these were
	// reversed, which mislabels the values in failure output.
	newEntries, err := filterAndSortDir(context.Background(), entries, true, "", includeObject, includeDirectory)
	require.NoError(t, err)
	assert.Equal(t,
		fs.DirEntries{oA, oB, oC, oD, da, db, dc, dd},
		newEntries,
	)
	// filter - oB and "c" are excluded by the include functions
	newEntries, err = filterAndSortDir(context.Background(), entries, false, "", includeObject, includeDirectory)
	require.NoError(t, err)
	assert.Equal(t,
		fs.DirEntries{oA, oC, oD, da, db, dd},
		newEntries,
	)
}
// TestFilterAndSortCheckDir checks that entries which don't belong in
// "dir" are dropped by filterAndSortDir.
func TestFilterAndSortCheckDir(t *testing.T) {
// Check the different kinds of error when listing "dir"
da := mockdir.New("dir")
da2 := mockdir.New("dir/") // double slash dir - allowed for bucket based remotes
oA := mockobject.Object("diR/a")
db := mockdir.New("dir/b")
oB := mockobject.Object("dir/B/sub")
dc := mockdir.New("dir/c")
oC := mockobject.Object("dir/C")
dd := mockdir.New("dir/d")
oD := mockobject.Object("dir/D")
entries := fs.DirEntries{da, da2, oA, db, oB, dc, oC, dd, oD}
// da (same name as dir), oA (wrong prefix) and oB (in a subdir)
// should all be dropped
newEntries, err := filterAndSortDir(context.Background(), entries, true, "dir", nil, nil)
require.NoError(t, err)
assert.Equal(t,
fs.DirEntries{da2, oC, oD, db, dc, dd},
newEntries,
)
}
// TestFilterAndSortCheckDirRoot is the same check as
// TestFilterAndSortCheckDir but listing the root "".
func TestFilterAndSortCheckDirRoot(t *testing.T) {
// Check the different kinds of error when listing the root ""
da := mockdir.New("")
da2 := mockdir.New("/") // doubleslash dir allowed on bucket based remotes
oA := mockobject.Object("A")
db := mockdir.New("b")
oB := mockobject.Object("B/sub")
dc := mockdir.New("c")
oC := mockobject.Object("C")
dd := mockdir.New("d")
oD := mockobject.Object("D")
entries := fs.DirEntries{da, da2, oA, db, oB, dc, oC, dd, oD}
// da (same as the root) and oB (in a subdir) should be dropped
newEntries, err := filterAndSortDir(context.Background(), entries, true, "", nil, nil)
require.NoError(t, err)
assert.Equal(t,
fs.DirEntries{da2, oA, oC, oD, db, dc, dd},
newEntries,
)
}
// unknownDirEntry implements fs.DirEntry but is neither an fs.Object
// nor an fs.Directory, to exercise the unknown-type error path.
type unknownDirEntry string
func (o unknownDirEntry) Fs() fs.Info { return fs.Unknown }
func (o unknownDirEntry) String() string { return string(o) }
func (o unknownDirEntry) Remote() string { return string(o) }
func (o unknownDirEntry) ModTime(ctx context.Context) (t time.Time) { return t }
func (o unknownDirEntry) Size() int64 { return 0 }
// TestFilterAndSortUnknown checks filterAndSortDir returns an error
// and no entries when it meets an entry of unknown type.
func TestFilterAndSortUnknown(t *testing.T) {
// Check that an unknown entry produces an error
da := mockdir.New("")
oA := mockobject.Object("A")
ub := unknownDirEntry("b")
oB := mockobject.Object("B/sub")
entries := fs.DirEntries{da, oA, ub, oB}
newEntries, err := filterAndSortDir(context.Background(), entries, true, "", nil, nil)
assert.Error(t, err, "error")
assert.Nil(t, newEntries)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/list/sorter.go | fs/list/sorter.go | package list
import (
"cmp"
"context"
"errors"
"fmt"
"slices"
"strings"
"sync"
"time"
"github.com/lanrat/extsort"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/errcount"
"golang.org/x/sync/errgroup"
)
// NewObjecter is the minimum facilities we need from the fs.Fs passed into NewSorter.
type NewObjecter interface {
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
NewObject(ctx context.Context, remote string) (fs.Object, error)
}
// Sorter implements an efficient mechanism for sorting list entries.
//
// If there are a large number of entries (above `--list-cutoff`),
// this may be done on disk instead of in memory.
//
// Supply entries with the Add method, call Send at the end to deliver
// the sorted entries and finalise with CleanUp regardless of whether
// you called Add or Send.
//
// Sorted entries are delivered to the callback supplied to NewSorter
// when the Send method is called.
type Sorter struct {
ctx context.Context // context for everything
ci *fs.ConfigInfo // config we are using
cancel func() // cancel all background operations
mu sync.Mutex // protect the below
f NewObjecter // fs that we are listing
callback fs.ListRCallback // where to send the sorted entries to
entries fs.DirEntries // accumulated entries
keyFn KeyFn // transform an entry into a sort key
cutoff int // number of entries above which we start extsort
extSort bool // true if we are ext sorting
inputChan chan string // for sending data to the ext sort
outputChan <-chan string // for receiving data from the ext sort
errChan <-chan error // for getting errors from the ext sort
sorter *extsort.StringSorter // external string sort
errs *errcount.ErrCount // accumulate errors
}
// KeyFn turns an entry into a sort key
type KeyFn func(entry fs.DirEntry) string
// identityKeyFn maps an entry to its Remote
//
// This is the default key function installed when NewSorter is
// passed a nil KeyFn.
func identityKeyFn(entry fs.DirEntry) string {
return entry.Remote()
}
// NewSorter creates a new Sorter with callback for sorted entries to
// be delivered to. keyFn is used to process each entry to get a key
// function, if nil then it will just use entry.Remote()
//
// CleanUp must always be called on the returned Sorter to release
// the cancel function and any accumulated state.
func NewSorter(ctx context.Context, f NewObjecter, callback fs.ListRCallback, keyFn KeyFn) (*Sorter, error) {
ci := fs.GetConfig(ctx)
ctx, cancel := context.WithCancel(ctx)
if keyFn == nil {
keyFn = identityKeyFn
}
return &Sorter{
ctx: ctx,
ci: ci,
cancel: cancel,
f: f,
callback: callback,
keyFn: keyFn,
cutoff: ci.ListCutoff,
errs: errcount.New(),
}, nil
}
// Turn a directory entry into a combined key and data for extsort
//
// The key is "<sortKey>\x00<remote>" where remote carries a trailing
// "/" if (and only if) the entry is a directory, so keyToEntry can
// recover both the remote and the entry kind.
func (ls *Sorter) entryToKey(entry fs.DirEntry) string {
// To start with we just use the Remote to recover the object
// To make more efficient we would serialize the object here
remote := entry.Remote()
remote = strings.TrimRight(remote, "/")
if _, isDir := entry.(fs.Directory); isDir {
remote += "/"
}
key := ls.keyFn(entry) + "\x00" + remote
return key
}
// Turn an extsort key (as built by entryToKey) back into a directory
// entry, re-creating objects via ls.f.NewObject.
func (ls *Sorter) keyToEntry(ctx context.Context, key string) (entry fs.DirEntry, err error) {
// The remote follows the first NUL separator
null := strings.IndexRune(key, '\x00')
if null < 0 {
return nil, errors.New("sorter: failed to deserialize: missing null")
}
remote := key[null+1:]
if remote, isDir := strings.CutSuffix(remote, "/"); isDir {
// Is a directory
//
// Note this creates a very minimal directory entry which should be fine for the
// bucket based remotes this code will be run on.
entry = fs.NewDir(remote, time.Time{})
} else {
obj, err := ls.f.NewObject(ctx, remote)
if err != nil {
fs.Errorf(ls.f, "sorter: failed to re-create object %q: %v", remote, err)
return nil, fmt.Errorf("sorter: failed to re-create object: %w", err)
}
entry = obj
}
return entry, nil
}
// sendEntriesToExtSort feeds entries (as serialized keys) into the
// external sorter's input channel.
//
// The select on errChan stops this blocking forever if the sorter
// has failed and is no longer reading from inputChan.
func (ls *Sorter) sendEntriesToExtSort(entries fs.DirEntries) (err error) {
for _, entry := range entries {
select {
case ls.inputChan <- ls.entryToKey(entry):
case err = <-ls.errChan:
if err != nil {
return err
}
}
}
// Non-blocking check for an error that arrived after the last send
select {
case err = <-ls.errChan:
default:
}
return err
}
// startExtSort switches the Sorter into external (on disk) sorting
// mode, starts the extsort goroutine and transfers the entries
// accumulated in memory so far to it.
//
// Called from Add with ls.mu held.
func (ls *Sorter) startExtSort() (err error) {
fs.Logf(ls.f, "Switching to on disk sorting as more than %d entries in one directory detected", ls.cutoff)
ls.inputChan = make(chan string, 100)
// Options to control the extsort
opt := extsort.Config{
NumWorkers: 8, // small effect
ChanBuffSize: 1024, // small effect
SortedChanBuffSize: 1024, // makes a lot of difference
ChunkSize: 32 * 1024, // tuned for 50 char records (UUID sized)
// Defaults
// ChunkSize: int(1e6), // amount of records to store in each chunk which will be written to disk
// NumWorkers: 2, // maximum number of workers to use for parallel sorting
// ChanBuffSize: 1, // buffer size for merging chunks
// SortedChanBuffSize: 10, // buffer size for passing records to output
// TempFilesDir: "", // empty for use OS default ex: /tmp
}
ls.sorter, ls.outputChan, ls.errChan = extsort.Strings(ls.inputChan, &opt)
go ls.sorter.Sort(ls.ctx)
// Show we are extsorting now
ls.extSort = true
// Send the accumulated entries to the sorter
fs.Debugf(ls.f, "Sending accumulated directory entries to disk")
err = ls.sendEntriesToExtSort(ls.entries)
fs.Debugf(ls.f, "Done sending accumulated directory entries to disk")
// Drop the in-memory entries - they now live in the ext sorter
clear(ls.entries)
ls.entries = nil
return err
}
// Add entries to the list sorter.
//
// Does not call the callback.
//
// Safe to call from concurrent go routines
func (ls *Sorter) Add(entries fs.DirEntries) error {
ls.mu.Lock()
defer ls.mu.Unlock()
if ls.extSort {
err := ls.sendEntriesToExtSort(entries)
if err != nil {
return err
}
} else {
ls.entries = append(ls.entries, entries...)
// Switch to on disk sorting once the in-memory total reaches
// the configured cutoff
if len(ls.entries) >= ls.cutoff {
err := ls.startExtSort()
if err != nil {
return err
}
}
}
return nil
}
// Number of entries to batch in list helper
const listHelperBatchSize = 100
// listHelper is used to turn keys into entries concurrently
type listHelper struct {
ls *Sorter // parent
keys []string // keys being built up
entries fs.DirEntries // entries processed concurrently as a batch
errs []error // errors processed concurrently
}
// NewlistHelper should be with the callback passed in
//
// The entries and errs buffers are pre-sized to listHelperBatchSize
// as send fills them by index.
func (ls *Sorter) newListHelper() *listHelper {
return &listHelper{
ls: ls,
entries: make(fs.DirEntries, listHelperBatchSize),
errs: make([]error, listHelperBatchSize),
}
}
// send sends the stored entries to the callback if there are >= max
// entries.
//
// It converts the batched keys back into entries in parallel (bounded
// by --checkers), accumulates any per-key errors into ls.errs and
// delivers the successful entries to the callback in key order.
func (lh *listHelper) send(max int) (err error) {
if len(lh.keys) < max {
return nil
}
// Turn this batch into objects in parallel
g, gCtx := errgroup.WithContext(lh.ls.ctx)
g.SetLimit(lh.ls.ci.Checkers)
for i, key := range lh.keys {
g.Go(func() error {
// Results go into the pre-sized slices so ordering is kept
lh.entries[i], lh.errs[i] = lh.ls.keyToEntry(gCtx, key)
return nil
})
}
err = g.Wait()
if err != nil {
return err
}
// Account errors and collect OK entries
toSend := lh.entries[:0]
for i := range lh.keys {
entry, err := lh.entries[i], lh.errs[i]
if err != nil {
lh.ls.errs.Add(err)
} else if entry != nil {
toSend = append(toSend, entry)
}
}
// fmt.Println(lh.keys)
// fmt.Println(toSend)
err = lh.ls.callback(toSend)
// Reset the buffers ready for the next batch
clear(lh.entries)
clear(lh.errs)
lh.keys = lh.keys[:0]
return err
}
// Add an entry to the stored entries and send them if there are more
// than a certain amount
func (lh *listHelper) Add(key string) error {
	lh.keys = append(lh.keys, key)
	// Use listHelperBatchSize (not a literal) so the batch size stays
	// in step with the entries/errs buffers sized in newListHelper.
	return lh.send(listHelperBatchSize)
}
// Flush the stored entries (if any) sending them to the callback
//
// A threshold of 1 makes this a no-op when no keys are buffered.
func (lh *listHelper) Flush() error {
return lh.send(1)
}
// Send the sorted entries to the callback.
func (ls *Sorter) Send() (err error) {
ls.mu.Lock()
defer ls.mu.Unlock()
if ls.extSort {
// Closing the input channel tells the external sorter to start
// delivering sorted keys on outputChan
close(ls.inputChan)
list := ls.newListHelper()
outer:
for {
select {
case key, ok := <-ls.outputChan:
if !ok {
// outputChan closed - all keys delivered
break outer
}
err := list.Add(key)
if err != nil {
return err
}
case err := <-ls.errChan:
if err != nil {
return err
}
}
}
// Deliver any remaining partial batch
err = list.Flush()
if err != nil {
return err
}
// Report errors accumulated while re-creating objects
return ls.errs.Err("sorter")
}
// Sort the directory entries by Remote
//
// We use a stable sort here just in case there are
// duplicates. Assuming the remote delivers the entries in a
// consistent order, this will give the best user experience
// in syncing as it will use the first entry for the sync
// comparison.
slices.SortStableFunc(ls.entries, func(a, b fs.DirEntry) int {
return cmp.Compare(ls.keyFn(a), ls.keyFn(b))
})
return ls.callback(ls.entries)
}
// CleanUp the Sorter, cleaning up any memory / files.
//
// It is safe and encouraged to call this regardless of whether you
// called Send or not.
//
// This does not call the callback
func (ls *Sorter) CleanUp() {
ls.mu.Lock()
defer ls.mu.Unlock()
// Cancelling the context stops the external sort goroutine (if any)
ls.cancel()
clear(ls.entries)
ls.entries = nil
ls.extSort = false
}
// SortToChan makes a callback for the Sorter which sends the output
// to the channel provided.
func SortToChan(out chan<- fs.DirEntry) fs.ListRCallback {
	forward := func(entries fs.DirEntries) error {
		for i := range entries {
			out <- entries[i]
		}
		return nil
	}
	return forward
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/list/list.go | fs/list/list.go | // Package list contains list functions
package list
import (
"context"
"fmt"
"sort"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/lib/bucket"
)
// DirSorted reads Object and *Dir into entries for the given Fs.
//
// dir is the start directory, "" for root
//
// If includeAll is specified all files will be added, otherwise only
// files and directories passing the filter will be added.
//
// Files will be returned in sorted order
func DirSorted(ctx context.Context, f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) {
// Get unfiltered entries from the fs
entries, err = f.List(ctx, dir)
// Account the listed entries before checking the error
accounting.Stats(ctx).Listed(int64(len(entries)))
if err != nil {
return nil, err
}
// This should happen only if exclude files lives in the
// starting directory, otherwise ListDirSorted should not be
// called.
fi := filter.GetConfig(ctx)
if !includeAll && fi.ListContainsExcludeFile(entries) {
fs.Debugf(dir, "Excluded")
return nil, nil
}
return filterAndSortDir(ctx, entries, includeAll, dir, fi.IncludeObject, fi.IncludeDirectory(ctx, f))
}
// listP lists dir on f using the backend's ListP if it has one,
// otherwise it emulates it with a single List call delivered to the
// callback in one batch.
func listP(ctx context.Context, f fs.Fs, dir string, callback fs.ListRCallback) error {
	doListP := f.Features().ListP
	if doListP != nil {
		return doListP(ctx, dir, callback)
	}
	// Fallback to List
	entries, err := f.List(ctx, dir)
	if err != nil {
		return err
	}
	return callback(entries)
}
// DirSortedFn reads Object and *Dir into entries for the given Fs.
//
// dir is the start directory, "" for root
//
// If includeAll is specified all files will be added, otherwise only
// files and directories passing the filter will be added.
//
// Files will be returned through callback in sorted order
func DirSortedFn(ctx context.Context, f fs.Fs, includeAll bool, dir string, callback fs.ListRCallback, keyFn KeyFn) (err error) {
stats := accounting.Stats(ctx)
fi := filter.GetConfig(ctx)
// Sort the entries, in or out of memory
sorter, err := NewSorter(ctx, f, callback, keyFn)
if err != nil {
return fmt.Errorf("failed to create directory sorter: %w", err)
}
defer sorter.CleanUp()
// Get unfiltered entries from the fs
err = listP(ctx, f, dir, func(entries fs.DirEntries) error {
stats.Listed(int64(len(entries)))
// This should happen only if exclude files lives in the
// starting directory, otherwise ListDirSorted should not be
// called.
if !includeAll && fi.ListContainsExcludeFile(entries) {
fs.Debugf(dir, "Excluded")
return nil
}
// Filter each batch before accumulating it in the sorter
entries, err := filterDir(ctx, entries, includeAll, dir, fi.IncludeObject, fi.IncludeDirectory(ctx, f))
if err != nil {
return err
}
return sorter.Add(entries)
})
if err != nil {
return err
}
// Deliver the sorted entries to the callback
return sorter.Send()
}
// Filter the entries passed in
//
// Removes entries excluded by the include functions (unless
// includeAll is set) and entries which don't belong directly in dir
// (wrong prefix, same name as dir, or in a subdirectory). The filter
// is done in place, reusing the backing array of entries.
func filterDir(ctx context.Context, entries fs.DirEntries, includeAll bool, dir string,
IncludeObject func(ctx context.Context, o fs.Object) bool,
IncludeDirectory func(remote string) (bool, error)) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter
prefix := ""
if dir != "" {
prefix = dir
if !bucket.IsAllSlashes(dir) {
prefix += "/"
}
}
for _, entry := range entries {
ok := true
// check includes and types
switch x := entry.(type) {
case fs.Object:
// Make sure we don't delete excluded files if not required
if !includeAll && !IncludeObject(ctx, x) {
ok = false
fs.Debugf(x, "Excluded")
}
case fs.Directory:
if !includeAll {
include, err := IncludeDirectory(x.Remote())
if err != nil {
return nil, err
}
if !include {
ok = false
fs.Debugf(x, "Excluded")
}
}
default:
// Neither an Object nor a Directory - refuse to continue
return nil, fmt.Errorf("unknown object type %T", entry)
}
// check remote name belongs in this directory
remote := entry.Remote()
switch {
case !ok:
// ignore
case !strings.HasPrefix(remote, prefix):
ok = false
fs.Errorf(entry, "Entry doesn't belong in directory %q (too short) - ignoring", dir)
case remote == dir:
ok = false
fs.Errorf(entry, "Entry doesn't belong in directory %q (same as directory) - ignoring", dir)
case strings.ContainsRune(remote[len(prefix):], '/') && !bucket.IsAllSlashes(remote[len(prefix):]):
ok = false
fs.Errorf(entry, "Entry doesn't belong in directory %q (contains subdir) - ignoring", dir)
default:
// ok
}
if ok {
newEntries = append(newEntries, entry)
}
}
return newEntries, nil
}
// filter and sort the entries
//
// See filterDir for the filtering rules. Note the filter reuses the
// backing array of entries and the sort is in place.
func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll bool, dir string,
IncludeObject func(ctx context.Context, o fs.Object) bool,
IncludeDirectory func(remote string) (bool, error)) (newEntries fs.DirEntries, err error) {
// Filter the directory entries (in place)
entries, err = filterDir(ctx, entries, includeAll, dir, IncludeObject, IncludeDirectory)
if err != nil {
return nil, err
}
// Sort the directory entries by Remote
//
// We use a stable sort here just in case there are
// duplicates. Assuming the remote delivers the entries in a
// consistent order, this will give the best user experience
// in syncing as it will use the first entry for the sync
// comparison.
sort.Stable(entries)
return entries, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/list/helpers.go | fs/list/helpers.go | package list
import (
"context"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
)
// Listing helpers used by backends
// Helper is used in the implementation of ListR to accumulate DirEntries
type Helper struct {
callback fs.ListRCallback // where to deliver batches of entries
entries fs.DirEntries // entries buffered so far
}
// NewHelper should be called from ListR with the callback passed in
func NewHelper(callback fs.ListRCallback) *Helper {
return &Helper{
callback: callback,
}
}
// send sends the stored entries to the callback if there are >= max
// entries.
func (lh *Helper) send(max int) (err error) {
	if len(lh.entries) < max {
		return nil
	}
	err = lh.callback(lh.entries)
	// Keep the backing array for reuse by the next batch
	lh.entries = lh.entries[:0]
	return err
}
// Add an entry to the stored entries and send them if there are more
// than a certain amount
//
// nil entries are silently ignored.
func (lh *Helper) Add(entry fs.DirEntry) error {
if entry == nil {
return nil
}
lh.entries = append(lh.entries, entry)
return lh.send(100)
}
// Flush the stored entries (if any) sending them to the callback
//
// A threshold of 1 makes this a no-op when the buffer is empty.
func (lh *Helper) Flush() error {
return lh.send(1)
}
// WithListP implements the List interface with ListP
//
// It should be used in backends which support ListP to implement
// List.
//
// On error, entries accumulated before the error are still returned.
func WithListP(ctx context.Context, dir string, list fs.ListPer) (entries fs.DirEntries, err error) {
err = list.ListP(ctx, dir, func(newEntries fs.DirEntries) error {
accounting.Stats(ctx).Listed(int64(len(newEntries)))
entries = append(entries, newEntries...)
return nil
})
return entries, err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/walk/walk.go | fs/walk/walk.go | // Package walk walks directories
package walk
import (
"context"
"errors"
"fmt"
"path"
"sort"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/list"
)
// ErrorSkipDir is used as a return value from Walk to indicate that the
// directory named in the call is to be skipped. It is not returned as
// an error by any function.
var ErrorSkipDir = errors.New("skip this directory")
// ErrorCantListR is returned by WalkR if the underlying Fs isn't
// capable of doing a recursive listing.
var ErrorCantListR = errors.New("recursive directory listing not available")
// Func is the type of the function called for directory
// visited by Walk. The path argument contains remote path to the directory.
//
// If there was a problem walking to directory named by path, the
// incoming error will describe the problem and the function can
// decide how to handle that error (and Walk will not descend into
// that directory). If an error is returned, processing stops. The
// sole exception is when the function returns the special value
// ErrorSkipDir. If the function returns ErrorSkipDir, Walk skips the
// directory's contents entirely.
type Func func(path string, entries fs.DirEntries, err error) error
// Walk lists the directory.
//
// If includeAll is not set it will use the filters defined.
//
// If maxLevel is < 0 then it will recurse indefinitely, else it will
// only do maxLevel levels.
//
// It calls fn for each tranche of DirEntries read.
//
// Note that fn will not be called concurrently whereas the directory
// listing will proceed concurrently.
//
// Parent directories are always listed before their children.
//
// This is implemented by WalkR if Config.UseListR is true
// and f supports it and level > 1, or WalkN otherwise.
//
// If --files-from and --no-traverse is set then a DirTree will be
// constructed with just those files in and then walked with WalkR
//
// Note: this will flag filter-aware backends!
//
// NB (f, path) to be replaced by fs.Dir at some point
func Walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
ci := fs.GetConfig(ctx)
fi := filter.GetConfig(ctx)
ctx = filter.SetUseFilter(ctx, f.Features().FilterAware && !includeAll) // make filter-aware backends constrain List
// --no-traverse with --files-from: build a listing from just those
// files rather than traversing the remote
if ci.NoTraverse && fi.HaveFilesFrom() {
return walkR(ctx, f, path, includeAll, maxLevel, fn, fi.MakeListR(ctx, f.NewObject))
}
// FIXME should this just be maxLevel < 0 - why the maxLevel > 1
if (maxLevel < 0 || maxLevel > 1) && ci.UseListR && f.Features().ListR != nil {
return walkListR(ctx, f, path, includeAll, maxLevel, fn)
}
return walkListDirSorted(ctx, f, path, includeAll, maxLevel, fn)
}
// ListType is uses to choose which combination of files or directories is requires
type ListType byte
// Types of listing for ListR
//
// ListType is a bitmask so ListAll is the union of the other two.
const (
ListObjects ListType = 1 << iota // list objects only
ListDirs // list dirs only
ListAll = ListObjects | ListDirs // list files and dirs
)
// Objects returns true if the list type specifies objects
func (l ListType) Objects() bool {
return (l & ListObjects) != 0
}
// Dirs returns true if the list type specifies dirs
func (l ListType) Dirs() bool {
return (l & ListDirs) != 0
}
// Filter in (inplace) to only contain the type of list entry required
//
// Entries of unknown type are dropped with an error log rather than
// returned.
func (l ListType) Filter(in *fs.DirEntries) {
// Nothing to remove if both objects and dirs are wanted
if l == ListAll {
return
}
out := (*in)[:0]
for _, entry := range *in {
switch entry.(type) {
case fs.Object:
if l.Objects() {
out = append(out, entry)
}
case fs.Directory:
if l.Dirs() {
out = append(out, entry)
}
default:
fs.Errorf(nil, "Unknown object type %T", entry)
}
}
*in = out
}
// ListR lists the directory recursively.
//
// If includeAll is not set it will use the filters defined.
//
// If maxLevel is < 0 then it will recurse indefinitely, else it will
// only do maxLevel levels.
//
// If synthesizeDirs is set then for bucket-based remotes it will
// synthesize directories from the file structure. This uses extra
// memory so don't set this if you don't need directories, likewise do
// set this if you are interested in directories.
//
// It calls fn for each tranche of DirEntries read. Note that these
// don't necessarily represent a directory
//
// Note that fn will not be called concurrently whereas the directory
// listing will proceed concurrently.
//
// Directories are not listed in any particular order so you can't
// rely on parents coming before children or alphabetical ordering
//
// This is implemented by using ListR on the backend if possible and
// efficient, otherwise by Walk.
//
// Note: this will flag filter-aware backends
//
// NB (f, path) to be replaced by fs.Dir at some point
func ListR(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, listType ListType, fn fs.ListRCallback) error {
	fi := filter.GetConfig(ctx)
	// FIXME disable this with --no-fast-list ??? `--disable ListR` will do it...
	doListR := f.Features().ListR
	// Can't use ListR if...
	if doListR == nil || // ...no ListR
		fi.HaveFilesFrom() || // ...using --files-from
		maxLevel >= 0 || // ...using bounded recursion
		len(fi.Opt.ExcludeFile) > 0 || // ...using --exclude-file
		fi.UsesDirectoryFilters() { // ...using any directory filters
		return listRwalk(ctx, f, path, includeAll, maxLevel, listType, fn)
	}
	// Tell filter-aware backends to constrain their listing to the filter
	ctx = filter.SetUseFilter(ctx, f.Features().FilterAware && !includeAll) // make filter-aware backends constrain List
	// Synthesize directories only when the caller asked for dirs and the
	// bucket-based backend doesn't report them natively
	return listR(ctx, f, path, includeAll, listType, fn, doListR, listType.Dirs() && f.Features().BucketBased)
}
// listRwalk walks the file tree for ListR using Walk
//
// Listing errors are logged and counted but the walk carries on; the
// most recent listing error takes precedence over the walk's own
// error in the return value.
//
// Note: this will flag filter-aware backends (via Walk)
func listRwalk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, listType ListType, fn fs.ListRCallback) error {
	var listErr error
	walkErr := Walk(ctx, f, path, includeAll, maxLevel, func(path string, entries fs.DirEntries, err error) error {
		// Carry on listing but return the error at the end
		if err != nil {
			listErr = err
			err = fs.CountError(ctx, err)
			fs.Errorf(path, "error listing: %v", err)
			return nil
		}
		listType.Filter(&entries)
		return fn(entries)
	})
	if listErr != nil {
		return listErr
	}
	return walkErr
}
// dirMap keeps track of directories made for bucket-based remotes.
// true => directory has been sent
// false => directory has been seen but not sent
type dirMap struct {
	mu sync.Mutex // protects m
	m map[string]bool // directory path => sent flag
	root string // root of the walk - never recorded in m
}
// newDirMap constructs an empty dirMap rooted at root.
func newDirMap(root string) *dirMap {
	dm := new(dirMap)
	dm.root = root
	dm.m = map[string]bool{}
	return dm
}
// add adds a directory and parents with sent
//
// It walks up from dir towards the root recording each ancestor.  Only
// dir itself carries the caller's sent flag; ancestors are recorded as
// unsent so they can be synthesized later if nothing else sends them.
//
// NOTE: callers are expected to hold dm.mu (see addEntries).
func (dm *dirMap) add(dir string, sent bool) {
	for {
		// The root itself is never recorded
		if dir == dm.root || dir == "" {
			return
		}
		currentSent, found := dm.m[dir]
		if found {
			// If it has been sent already then nothing more to do
			if currentSent {
				return
			}
			// If not sent already don't override
			if !sent {
				return
			}
			// currentSent == false && sent == true so needs overriding
		}
		dm.m[dir] = sent
		// Add parents in as unsent
		dir = parentDir(dir)
		sent = false
	}
}
// parentDir returns the directory containing entryPath, with the empty
// string (rather than ".") standing for the root.
func parentDir(entryPath string) string {
	parent := path.Dir(entryPath)
	if parent == "." {
		return ""
	}
	return parent
}
// add all the directories in entries and their parents to the dirMap
//
// Objects contribute their parent directory (as unsent); directories
// contribute themselves (as sent).  Holds dm.mu for the duration.
func (dm *dirMap) addEntries(entries fs.DirEntries) error {
	dm.mu.Lock()
	defer dm.mu.Unlock()
	for _, entry := range entries {
		switch x := entry.(type) {
		case fs.Object:
			dm.add(parentDir(x.Remote()), false)
		case fs.Directory:
			dm.add(x.Remote(), true)
		default:
			return fmt.Errorf("unknown object type %T", entry)
		}
	}
	return nil
}
// send any missing parents to fn
//
// Synthesizes an fs.Dir (stamped with the current time) for every
// directory that was seen but not sent, in sorted order, batching them
// through list.NewHelper before flushing to fn.
func (dm *dirMap) sendEntries(fn fs.ListRCallback) (err error) {
	// Count the strings first so we allocate the minimum memory
	n := 0
	for _, sent := range dm.m {
		if !sent {
			n++
		}
	}
	if n == 0 {
		return nil
	}
	dirs := make([]string, 0, n)
	// Fill the dirs up then sort it
	for dir, sent := range dm.m {
		if !sent {
			dirs = append(dirs, dir)
		}
	}
	sort.Strings(dirs)
	// Now convert to bulkier Dir in batches and send
	now := time.Now()
	list := list.NewHelper(fn) // NB shadows the list package from here on
	for _, dir := range dirs {
		err = list.Add(fs.NewDir(dir, now))
		if err != nil {
			return err
		}
	}
	return list.Flush()
}
// listR walks the file tree using ListR
//
// Each tranche from doListR is (optionally) recorded in a dirMap for
// directory synthesis, filtered by listType and then by the active
// filter (unless includeAll), and finally passed to fn under a mutex
// so fn is never called concurrently.  Synthesized directories are
// sent after the listing completes.
func listR(ctx context.Context, f fs.Fs, path string, includeAll bool, listType ListType, fn fs.ListRCallback, doListR fs.ListRFn, synthesizeDirs bool) error {
	fi := filter.GetConfig(ctx)
	includeDirectory := fi.IncludeDirectory(ctx, f)
	if !includeAll {
		// No active filter means everything is included anyway
		includeAll = fi.InActive()
	}
	var dm *dirMap
	if synthesizeDirs {
		dm = newDirMap(path)
	}
	var mu sync.Mutex
	err := doListR(ctx, path, func(entries fs.DirEntries) (err error) {
		accounting.Stats(ctx).Listed(int64(len(entries)))
		if synthesizeDirs {
			// Record dirs before filtering so parents of excluded files survive
			err = dm.addEntries(entries)
			if err != nil {
				return err
			}
		}
		listType.Filter(&entries)
		if !includeAll {
			// Filter in place, reusing the backing array
			filteredEntries := entries[:0]
			for _, entry := range entries {
				var include bool
				switch x := entry.(type) {
				case fs.Object:
					include = fi.IncludeObject(ctx, x)
				case fs.Directory:
					include, err = includeDirectory(x.Remote())
					if err != nil {
						return err
					}
				default:
					return fmt.Errorf("unknown object type %T", entry)
				}
				if include {
					filteredEntries = append(filteredEntries, entry)
				}
			}
			entries = filteredEntries
		}
		// Serialize calls to fn - ListR may call this callback concurrently
		mu.Lock()
		defer mu.Unlock()
		return fn(entries)
	})
	if err != nil {
		return err
	}
	if synthesizeDirs {
		err = dm.sendEntries(fn)
		if err != nil {
			return err
		}
	}
	return nil
}
// walkListDirSorted lists the directory.
//
// It implements Walk using non recursive directory listing
// (list.DirSorted as the per-directory lister).
func walkListDirSorted(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
	return walk(ctx, f, path, includeAll, maxLevel, fn, list.DirSorted)
}
// walkListR lists the directory.
//
// It implements Walk using the backend's recursive directory listing
// (ListR) when available, and returns ErrorCantListR when it isn't.
func walkListR(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
	if doListR := f.Features().ListR; doListR != nil {
		return walkR(ctx, f, path, includeAll, maxLevel, fn, doListR)
	}
	return ErrorCantListR
}
// listDirFunc is the signature of a function which lists a single
// directory of fs, honouring includeAll, and returns its entries.
type listDirFunc func(ctx context.Context, fs fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error)
// walk runs a pool of ci.Checkers goroutines which take directory
// listing jobs from the in channel, call fn (serialized by mu) with
// each listing, and queue subdirectories for further traversal until
// the job depth reaches 0 (a negative maxLevel never does).  The first
// error returned by fn stops the walk via the quit channel and is
// returned to the caller.
func walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listDir listDirFunc) error {
	var (
		wg sync.WaitGroup // sync closing of go routines
		traversing sync.WaitGroup // running directory traversals
		doClose sync.Once // close the channel once
		mu sync.Mutex // stop fn being called concurrently
		ci = fs.GetConfig(ctx) // current config
	)
	// listJob describe a directory listing that needs to be done
	type listJob struct {
		remote string
		depth int // levels of recursion remaining; never reaches 0 when maxLevel < 0
	}
	in := make(chan listJob, ci.Checkers)
	errs := make(chan error, 1)
	quit := make(chan struct{})
	closeQuit := func() {
		doClose.Do(func() {
			close(quit)
			go func() {
				// Drain queued jobs so senders unblock and
				// traversing can reach zero
				for range in {
					traversing.Done()
				}
			}()
		})
	}
	for range ci.Checkers {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case job, ok := <-in:
					if !ok {
						return
					}
					entries, err := listDir(ctx, f, includeAll, job.remote)
					var jobs []listJob
					if err == nil && job.depth != 0 {
						entries.ForDir(func(dir fs.Directory) {
							// Recurse for the directory
							jobs = append(jobs, listJob{
								remote: dir.Remote(),
								depth: job.depth - 1,
							})
						})
					}
					mu.Lock()
					err = fn(job.remote, entries, err)
					mu.Unlock()
					// NB once we have passed entries to fn we mustn't touch it again
					if err != nil && err != ErrorSkipDir {
						traversing.Done()
						err = fs.CountError(ctx, err)
						fs.Errorf(job.remote, "error listing: %v", err)
						closeQuit()
						// Send error to error channel if space
						select {
						case errs <- err:
						default:
						}
						continue
					}
					if err == nil && len(jobs) > 0 {
						traversing.Add(len(jobs))
						go func() {
							// Now we have traversed this directory, send these
							// jobs off for traversal in the background
							for _, newJob := range jobs {
								in <- newJob
							}
						}()
					}
					traversing.Done()
				case <-quit:
					return
				}
			}
		}()
	}
	// Start the process
	traversing.Add(1)
	in <- listJob{
		remote: path,
		depth: maxLevel - 1,
	}
	traversing.Wait()
	close(in)
	wg.Wait()
	close(errs)
	// return the first error returned or nil
	return <-errs
}
// walkRDirTree builds a DirTree for startPath using the supplied ListR
// function.
//
// Entries arrive in arbitrary order, so directories to be excluded via
// --exclude-file are collected in toPrune and removed at the end.
// Objects deeper than maxLevel are dropped, but parent directories of
// excluded objects are still created so the tree stays connected.
func walkRDirTree(ctx context.Context, f fs.Fs, startPath string, includeAll bool, maxLevel int, listR fs.ListRFn) (dirtree.DirTree, error) {
	fi := filter.GetConfig(ctx)
	dirs := dirtree.New()
	// Entries can come in arbitrary order. We use toPrune to keep
	// all directories to exclude later.
	toPrune := make(map[string]bool)
	includeDirectory := fi.IncludeDirectory(ctx, f)
	var mu sync.Mutex
	err := listR(ctx, startPath, func(entries fs.DirEntries) error {
		accounting.Stats(ctx).Listed(int64(len(entries)))
		// listR may call this callback concurrently
		mu.Lock()
		defer mu.Unlock()
		for _, entry := range entries {
			// slashes counts how deep the entry is below the root
			slashes := strings.Count(entry.Remote(), "/")
			excluded := true
			switch x := entry.(type) {
			case fs.Object:
				// Make sure we don't delete excluded files if not required
				if includeAll || fi.IncludeObject(ctx, x) {
					if maxLevel < 0 || slashes <= maxLevel-1 {
						dirs.Add(x)
						excluded = false
					}
				}
				// Make sure we include any parent directories of excluded objects
				if excluded {
					dirPath := parentDir(x.Remote())
					slashes--
					if maxLevel >= 0 {
						// Walk up until the directory is within maxLevel
						for ; slashes > maxLevel-1; slashes-- {
							dirPath = parentDir(dirPath)
						}
					}
					inc, err := includeDirectory(dirPath)
					if err != nil {
						return err
					}
					if inc || includeAll {
						// If the directory doesn't exist already, create it
						_, obj := dirs.Find(dirPath)
						if obj == nil {
							dirs.AddDir(fs.NewDir(dirPath, time.Now()))
						}
					}
				}
				// Check if we need to prune a directory later.
				if !includeAll && len(fi.Opt.ExcludeFile) > 0 {
					basename := path.Base(x.Remote())
					for _, excludeFile := range fi.Opt.ExcludeFile {
						if basename == excludeFile {
							excludeDir := parentDir(x.Remote())
							toPrune[excludeDir] = true
						}
					}
				}
			case fs.Directory:
				inc, err := includeDirectory(x.Remote())
				if err != nil {
					return err
				}
				if includeAll || inc {
					if maxLevel < 0 || slashes <= maxLevel-1 {
						if slashes == maxLevel-1 {
							// Just add the object if at maxLevel
							dirs.Add(x)
						} else {
							dirs.AddDir(x)
						}
					}
				}
			default:
				return fmt.Errorf("unknown object type %T", entry)
			}
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// Fill in any missing parents and make sure the root exists
	dirs.CheckParents(startPath)
	if len(dirs) == 0 {
		dirs[startPath] = nil
	}
	err = dirs.Prune(toPrune)
	if err != nil {
		return nil, err
	}
	dirs.Sort()
	return dirs, nil
}
// walkNDirTree creates a DirTree by walking f from path with the
// supplied per-directory lister, recording each directory's entries.
func walkNDirTree(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, listDir listDirFunc) (dirtree.DirTree, error) {
	tree := make(dirtree.DirTree)
	record := func(dirPath string, entries fs.DirEntries, err error) error {
		if err != nil {
			return err
		}
		tree[dirPath] = entries
		return nil
	}
	if err := walk(ctx, f, path, includeAll, maxLevel, record, listDir); err != nil {
		return nil, err
	}
	return tree, nil
}
// NewDirTree returns a DirTree filled with the directory listing
// using the parameters supplied.
//
// If includeAll is not set it will use the filters defined.
//
// If maxLevel is < 0 then it will recurse indefinitely, else it will
// only do maxLevel levels.
//
// This is implemented by WalkR if f supports ListR and level > 1, or
// WalkN otherwise.
//
// If --files-from and --no-traverse is set then a DirTree will be
// constructed with just those files in.
//
// NB (f, path) to be replaced by fs.Dir at some point
func NewDirTree(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int) (dirtree.DirTree, error) {
	ci := fs.GetConfig(ctx)
	fi := filter.GetConfig(ctx)
	// if --no-traverse and --files-from build DirTree just from files
	if ci.NoTraverse && fi.HaveFilesFrom() {
		return walkRDirTree(ctx, f, path, includeAll, maxLevel, fi.MakeListR(ctx, f.NewObject))
	}
	// if have ListR; and recursing; and not using --files-from; then build a DirTree with ListR
	if ListR := f.Features().ListR; (maxLevel < 0 || maxLevel > 1) && ListR != nil && !fi.HaveFilesFrom() {
		return walkRDirTree(ctx, f, path, includeAll, maxLevel, ListR)
	}
	// otherwise just use List
	return walkNDirTree(ctx, f, path, includeAll, maxLevel, list.DirSorted)
}
// walkR implements Walk by first building a complete DirTree with
// walkRDirTree and then calling fn once per directory in sorted order.
// ErrorSkipDir from fn suppresses that directory's subtree.
func walkR(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listR fs.ListRFn) error {
	dirs, err := walkRDirTree(ctx, f, path, includeAll, maxLevel, listR)
	if err != nil {
		return err
	}
	skipping := false
	skipPrefix := ""
	emptyDir := fs.DirEntries{}
	for _, dirPath := range dirs.Dirs() {
		if skipping {
			// Skip over directories as required
			if strings.HasPrefix(dirPath, skipPrefix) {
				continue
			}
			skipping = false
		}
		entries := dirs[dirPath]
		if entries == nil {
			entries = emptyDir
		}
		err = fn(dirPath, entries, nil)
		if err == ErrorSkipDir {
			skipping = true
			skipPrefix = dirPath
			// Append "/" so "dir2" doesn't match the skipped "dir"
			if skipPrefix != "" {
				skipPrefix += "/"
			}
		} else if err != nil {
			return err
		}
	}
	return nil
}
// GetAll runs ListR over (f, path) and collects every result,
// returning the objects and directories found.
func GetAll(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int) (objs []fs.Object, dirs []fs.Directory, err error) {
	collect := func(entries fs.DirEntries) error {
		for _, entry := range entries {
			switch x := entry.(type) {
			case fs.Object:
				objs = append(objs, x)
			case fs.Directory:
				dirs = append(dirs, x)
			}
		}
		return nil
	}
	err = ListR(ctx, f, path, includeAll, maxLevel, ListAll, collect)
	return objs, dirs, err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/walk/walk_test.go | fs/walk/walk_test.go | package walk
import (
"context"
"errors"
"fmt"
"io"
"strings"
"sync"
"testing"
"github.com/rclone/rclone/fs"
_ "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fstest/mockdir"
"github.com/rclone/rclone/fstest/mockfs"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Sentinel errors used throughout the tests
var errDirNotFound, errorBoom error
// init wraps the sentinel errors as fserrors and registers them with
// the error counter so they behave like real backend errors
func init() {
	errDirNotFound = fserrors.FsError(fs.ErrorDirNotFound)
	fserrors.Count(errDirNotFound)
	errorBoom = fserrors.FsError(errors.New("boom"))
	fserrors.Count(errorBoom)
}
type (
listResult struct {
entries fs.DirEntries
err error
}
listResults map[string]listResult
errorMap map[string]error
listDirs struct {
mu sync.Mutex
t *testing.T
fs fs.Fs
includeAll bool
results listResults
walkResults listResults
walkErrors errorMap
finalError error
checkMaps bool
maxLevel int
}
)
// newListDirs creates a test fixture: results holds the expected
// listing (and listing error) per directory, walkErrors holds the
// value WalkFn should return per directory, and finalError is the
// overall result expected from the walk.  Recursion is unlimited and
// expectation-map checking is on by default.
func newListDirs(t *testing.T, f fs.Fs, includeAll bool, results listResults, walkErrors errorMap, finalError error) *listDirs {
	return &listDirs{
		t: t,
		fs: f,
		includeAll: includeAll,
		results: results,
		walkErrors: walkErrors,
		walkResults: listResults{},
		finalError: finalError,
		checkMaps: true,
		maxLevel: -1,
	}
}
// NoCheckMaps marks the maps as to be ignored at the end
func (ls *listDirs) NoCheckMaps() *listDirs {
	ls.checkMaps = false
	return ls
}
// SetLevel sets the maximum recursion depth; SetLevel(1) turns off recursion
func (ls *listDirs) SetLevel(maxLevel int) *listDirs {
	ls.maxLevel = maxLevel
	return ls
}
// ListDir returns the expected listing for the directory
func (ls *listDirs) ListDir(ctx context.Context, f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) {
ls.mu.Lock()
defer ls.mu.Unlock()
assert.Equal(ls.t, ls.fs, f)
assert.Equal(ls.t, ls.includeAll, includeAll)
// Fetch results for this path
result, ok := ls.results[dir]
if !ok {
ls.t.Errorf("Unexpected list of %q", dir)
return nil, errors.New("unexpected list")
}
delete(ls.results, dir)
// Put expected results for call of WalkFn
ls.walkResults[dir] = result
return result.entries, result.err
}
// ListR returns the expected listing for the directory using ListR
//
// All pending results are delivered through callback in one go.
// NOTE(review): ls.results is a map, so delivery order is randomized,
// and once an errored result is encountered no further results are
// delivered - confirm the tests are insensitive to this ordering.
func (ls *listDirs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	ls.mu.Lock()
	defer ls.mu.Unlock()
	var errorReturn error
	for dirPath, result := range ls.results {
		// Put expected results for call of WalkFn
		// Note that we don't call the function at all if we got an error
		if result.err != nil {
			errorReturn = result.err
		}
		if errorReturn == nil {
			err = callback(result.entries)
			require.NoError(ls.t, err)
			ls.walkResults[dirPath] = result
		}
	}
	ls.results = listResults{}
	return errorReturn
}
// IsFinished checks everything expected was used up
func (ls *listDirs) IsFinished() {
if ls.checkMaps {
assert.Equal(ls.t, errorMap{}, ls.walkErrors)
assert.Equal(ls.t, listResults{}, ls.results)
assert.Equal(ls.t, listResults{}, ls.walkResults)
}
}
// WalkFn is called by the walk to test the expectations
func (ls *listDirs) WalkFn(dir string, entries fs.DirEntries, err error) error {
ls.mu.Lock()
defer ls.mu.Unlock()
// ls.t.Logf("WalkFn(%q, %v, %q)", dir, entries, err)
// Fetch expected entries and err
result, ok := ls.walkResults[dir]
if !ok {
ls.t.Errorf("Unexpected walk of %q (result not found)", dir)
return errors.New("result not found")
}
delete(ls.walkResults, dir)
// Check arguments are as expected
assert.Equal(ls.t, result.entries, entries)
assert.Equal(ls.t, result.err, err)
// Fetch return value
returnErr, ok := ls.walkErrors[dir]
if !ok {
ls.t.Errorf("Unexpected walk of %q (error not found)", dir)
return errors.New("error not found")
}
delete(ls.walkErrors, dir)
return returnErr
}
// Walk does the walk and tests the expectations
func (ls *listDirs) Walk() {
	err := walk(context.Background(), nil, "", ls.includeAll, ls.maxLevel, ls.WalkFn, ls.ListDir)
	// NOTE(review): errors.Is(target, err) has its arguments reversed
	// from the conventional errors.Is(err, target); it passes here
	// because the expected and returned errors are the same value -
	// confirm intent.
	assert.True(ls.t, errors.Is(ls.finalError, err))
	ls.IsFinished()
}
// WalkR does the walkR and tests the expectations
func (ls *listDirs) WalkR() {
	err := walkR(context.Background(), nil, "", ls.includeAll, ls.maxLevel, ls.WalkFn, ls.ListR)
	assert.Equal(ls.t, ls.finalError, err)
	// Maps are only checked on success - an error leaves expectations unconsumed
	if ls.finalError == nil {
		ls.IsFinished()
	}
}
func testWalkEmpty(t *testing.T) *listDirs {
return newListDirs(t, nil, false,
listResults{
"": {entries: fs.DirEntries{}, err: nil},
},
errorMap{
"": nil,
},
nil,
)
}
func TestWalkEmpty(t *testing.T) { testWalkEmpty(t).Walk() }
func TestWalkREmpty(t *testing.T) { testWalkEmpty(t).WalkR() }
func testWalkEmptySkip(t *testing.T) *listDirs {
return newListDirs(t, nil, true,
listResults{
"": {entries: fs.DirEntries{}, err: nil},
},
errorMap{
"": ErrorSkipDir,
},
nil,
)
}
func TestWalkEmptySkip(t *testing.T) { testWalkEmptySkip(t).Walk() }
func TestWalkREmptySkip(t *testing.T) { testWalkEmptySkip(t).WalkR() }
func testWalkNotFound(t *testing.T) *listDirs {
return newListDirs(t, nil, true,
listResults{
"": {err: errDirNotFound},
},
errorMap{
"": errDirNotFound,
},
errDirNotFound,
)
}
func TestWalkNotFound(t *testing.T) { testWalkNotFound(t).Walk() }
func TestWalkRNotFound(t *testing.T) { testWalkNotFound(t).WalkR() }
func TestWalkNotFoundMaskError(t *testing.T) {
// this doesn't work for WalkR
newListDirs(t, nil, true,
listResults{
"": {err: errDirNotFound},
},
errorMap{
"": nil,
},
nil,
).Walk()
}
func TestWalkNotFoundSkipError(t *testing.T) {
// this doesn't work for WalkR
newListDirs(t, nil, true,
listResults{
"": {err: errDirNotFound},
},
errorMap{
"": ErrorSkipDir,
},
nil,
).Walk()
}
func testWalkLevels(t *testing.T, maxLevel int) *listDirs {
da := mockdir.New("a")
oA := mockobject.Object("A")
db := mockdir.New("a/b")
oB := mockobject.Object("a/B")
dc := mockdir.New("a/b/c")
oC := mockobject.Object("a/b/C")
dd := mockdir.New("a/b/c/d")
oD := mockobject.Object("a/b/c/D")
return newListDirs(t, nil, false,
listResults{
"": {entries: fs.DirEntries{oA, da}, err: nil},
"a": {entries: fs.DirEntries{oB, db}, err: nil},
"a/b": {entries: fs.DirEntries{oC, dc}, err: nil},
"a/b/c": {entries: fs.DirEntries{oD, dd}, err: nil},
"a/b/c/d": {entries: fs.DirEntries{}, err: nil},
},
errorMap{
"": nil,
"a": nil,
"a/b": nil,
"a/b/c": nil,
"a/b/c/d": nil,
},
nil,
).SetLevel(maxLevel)
}
func TestWalkLevels(t *testing.T) { testWalkLevels(t, -1).Walk() }
func TestWalkRLevels(t *testing.T) { testWalkLevels(t, -1).WalkR() }
func TestWalkLevelsNoRecursive10(t *testing.T) { testWalkLevels(t, 10).Walk() }
func TestWalkRLevelsNoRecursive10(t *testing.T) { testWalkLevels(t, 10).WalkR() }
func TestWalkNDirTree(t *testing.T) {
ls := testWalkLevels(t, -1)
entries, err := walkNDirTree(context.Background(), nil, "", ls.includeAll, ls.maxLevel, ls.ListDir)
require.NoError(t, err)
assert.Equal(t, `/
A
a/
a/
B
b/
a/b/
C
c/
a/b/c/
D
d/
a/b/c/d/
`, entries.String())
}
func testWalkLevelsNoRecursive(t *testing.T) *listDirs {
da := mockdir.New("a")
oA := mockobject.Object("A")
return newListDirs(t, nil, false,
listResults{
"": {entries: fs.DirEntries{oA, da}, err: nil},
},
errorMap{
"": nil,
},
nil,
).SetLevel(1)
}
func TestWalkLevelsNoRecursive(t *testing.T) { testWalkLevelsNoRecursive(t).Walk() }
func TestWalkRLevelsNoRecursive(t *testing.T) { testWalkLevelsNoRecursive(t).WalkR() }
func testWalkLevels2(t *testing.T) *listDirs {
da := mockdir.New("a")
oA := mockobject.Object("A")
db := mockdir.New("a/b")
oB := mockobject.Object("a/B")
return newListDirs(t, nil, false,
listResults{
"": {entries: fs.DirEntries{oA, da}, err: nil},
"a": {entries: fs.DirEntries{oB, db}, err: nil},
},
errorMap{
"": nil,
"a": nil,
},
nil,
).SetLevel(2)
}
func TestWalkLevels2(t *testing.T) { testWalkLevels2(t).Walk() }
func TestWalkRLevels2(t *testing.T) { testWalkLevels2(t).WalkR() }
func testWalkSkip(t *testing.T) *listDirs {
da := mockdir.New("a")
db := mockdir.New("a/b")
dc := mockdir.New("a/b/c")
return newListDirs(t, nil, false,
listResults{
"": {entries: fs.DirEntries{da}, err: nil},
"a": {entries: fs.DirEntries{db}, err: nil},
"a/b": {entries: fs.DirEntries{dc}, err: nil},
},
errorMap{
"": nil,
"a": nil,
"a/b": ErrorSkipDir,
},
nil,
)
}
func TestWalkSkip(t *testing.T) { testWalkSkip(t).Walk() }
func TestWalkRSkip(t *testing.T) { testWalkSkip(t).WalkR() }
// walkErrors builds a fixture whose root lists 10 subdirectories, each
// of which fails to list with fs.ErrorDirNotFound, and which expects
// expectedErr as the final result.  Maps are not checked because the
// walk may stop before visiting every directory.
func walkErrors(t *testing.T, expectedErr error) *listDirs {
	lr := listResults{}
	em := errorMap{}
	de := make(fs.DirEntries, 10)
	for i := range de {
		path := string('0' + rune(i))
		de[i] = mockdir.New(path)
		lr[path] = listResult{entries: nil, err: fs.ErrorDirNotFound}
		em[path] = fs.ErrorDirNotFound
	}
	lr[""] = listResult{entries: de, err: nil}
	em[""] = nil
	return newListDirs(t, nil, true,
		lr,
		em,
		expectedErr,
	).NoCheckMaps()
}
func testWalkErrors(t *testing.T) *listDirs {
return walkErrors(t, errDirNotFound)
}
func testWalkRErrors(t *testing.T) *listDirs {
return walkErrors(t, fs.ErrorDirNotFound)
}
func TestWalkErrors(t *testing.T) { testWalkErrors(t).Walk() }
func TestWalkRErrors(t *testing.T) { testWalkRErrors(t).WalkR() }
// makeTree builds expectation maps for a uniform tree: each directory
// down to the given level contains subdirectories "0".."9".  When
// terminalErrors is set the deepest directories expect WalkFn to
// return errorBoom.
func makeTree(level int, terminalErrors bool) (listResults, errorMap) {
	lr := listResults{}
	em := errorMap{}
	var fill func(path string, level int)
	fill = func(path string, level int) {
		de := fs.DirEntries{}
		if level > 0 {
			for _, a := range "0123456789" {
				subPath := string(a)
				if path != "" {
					subPath = path + "/" + subPath
				}
				de = append(de, mockdir.New(subPath))
				fill(subPath, level-1)
			}
		}
		lr[path] = listResult{entries: de, err: nil}
		em[path] = nil
		if level == 0 && terminalErrors {
			em[path] = errorBoom
		}
	}
	fill("", level)
	return lr, em
}
func testWalkMulti(t *testing.T) *listDirs {
lr, em := makeTree(3, false)
return newListDirs(t, nil, true,
lr,
em,
nil,
)
}
func TestWalkMulti(t *testing.T) { testWalkMulti(t).Walk() }
func TestWalkRMulti(t *testing.T) { testWalkMulti(t).WalkR() }
func testWalkMultiErrors(t *testing.T) *listDirs {
lr, em := makeTree(3, true)
return newListDirs(t, nil, true,
lr,
em,
errorBoom,
).NoCheckMaps()
}
func TestWalkMultiErrors(t *testing.T) { testWalkMultiErrors(t).Walk() }
// TestWalkRMultiErrors exercises the ListR-based walk.  The original
// called .Walk(), duplicating TestWalkMultiErrors and leaving WalkR
// untested for the multi-error case (apparent copy/paste slip).
func TestWalkRMultiErrors(t *testing.T) { testWalkMultiErrors(t).WalkR() }
// makeListRCallback builds a minimal ListR implementation which either
// fails immediately with err or delivers entries once via callback.
func makeListRCallback(entries fs.DirEntries, err error) fs.ListRFn {
	return func(ctx context.Context, dir string, callback fs.ListRCallback) error {
		if err != nil {
			return err
		}
		return callback(entries)
	}
}
func TestWalkRDirTree(t *testing.T) {
for _, test := range []struct {
entries fs.DirEntries
want string
err error
root string
level int
exclude string
}{
{
entries: fs.DirEntries{},
want: "/\n",
level: -1,
},
{
entries: fs.DirEntries{mockobject.Object("a")},
want: `/
a
`,
level: -1,
},
{
entries: fs.DirEntries{mockobject.Object("a/b")},
want: `/
a/
a/
b
`,
level: -1,
},
{
entries: fs.DirEntries{mockobject.Object("a/b/c/d")},
want: `/
a/
a/
b/
a/b/
c/
a/b/c/
d
`,
level: -1,
},
{
entries: fs.DirEntries{mockobject.Object("a")},
err: errorBoom,
level: -1,
},
{
entries: fs.DirEntries{
mockobject.Object("0/1/2/3"),
mockobject.Object("4/5/6/7"),
mockobject.Object("8/9/a/b"),
mockobject.Object("c/d/e/f"),
mockobject.Object("g/h/i/j"),
mockobject.Object("k/l/m/n"),
mockobject.Object("o/p/q/r"),
mockobject.Object("s/t/u/v"),
mockobject.Object("w/x/y/z"),
},
want: `/
0/
4/
8/
c/
g/
k/
o/
s/
w/
0/
1/
0/1/
2/
0/1/2/
3
4/
5/
4/5/
6/
4/5/6/
7
8/
9/
8/9/
a/
8/9/a/
b
c/
d/
c/d/
e/
c/d/e/
f
g/
h/
g/h/
i/
g/h/i/
j
k/
l/
k/l/
m/
k/l/m/
n
o/
p/
o/p/
q/
o/p/q/
r
s/
t/
s/t/
u/
s/t/u/
v
w/
x/
w/x/
y/
w/x/y/
z
`,
level: -1,
},
{
entries: fs.DirEntries{
mockobject.Object("a/b/c/d/e/f1"),
mockobject.Object("a/b/c/d/e/f2"),
mockobject.Object("a/b/c/d/e/f3"),
},
want: `a/b/c/
d/
a/b/c/d/
e/
a/b/c/d/e/
f1
f2
f3
`,
root: "a/b/c",
level: -1,
},
{
entries: fs.DirEntries{
mockobject.Object("A"),
mockobject.Object("a/B"),
mockobject.Object("a/b/C"),
mockobject.Object("a/b/c/D"),
mockobject.Object("a/b/c/d/E"),
},
want: `/
A
a/
a/
B
b/
a/b/
`,
level: 2,
},
{
entries: fs.DirEntries{
mockobject.Object("a/b/c"),
mockobject.Object("a/b/c/d/e"),
},
want: `/
a/
a/
b/
a/b/
`,
level: 2,
},
{
entries: fs.DirEntries{
mockobject.Object("a/.bzEmpty"),
mockobject.Object("a/b1/.bzEmpty"),
mockobject.Object("a/b2/.bzEmpty"),
},
want: `/
a/
a/
.bzEmpty
b1/
b2/
a/b1/
.bzEmpty
a/b2/
.bzEmpty
`,
level: -1,
exclude: ""},
{
entries: fs.DirEntries{
mockobject.Object("a/.bzEmpty"),
mockobject.Object("a/b1/.bzEmpty"),
mockobject.Object("a/b2/.bzEmpty"),
},
want: `/
a/
a/
b1/
b2/
a/b1/
a/b2/
`,
level: -1,
exclude: ".bzEmpty",
},
} {
ctx := context.Background()
if test.exclude != "" {
fi, err := filter.NewFilter(nil)
require.NoError(t, err)
require.NoError(t, fi.Add(false, test.exclude))
// Change the active filter
ctx = filter.ReplaceConfig(ctx, fi)
}
r, err := walkRDirTree(ctx, nil, test.root, test.exclude == "", test.level, makeListRCallback(test.entries, test.err))
what := fmt.Sprintf("%+v", test)
assert.Equal(t, test.err, err, what)
assert.Equal(t, test.want, r.String(), what)
}
}
func TestWalkRDirTreeExclude(t *testing.T) {
ctx := context.Background()
fi := filter.GetConfig(ctx)
for _, test := range []struct {
entries fs.DirEntries
want string
err error
root string
level int
excludeFile string
includeAll bool
}{
{fs.DirEntries{mockobject.Object("a"), mockobject.Object("ignore")}, "", nil, "", -1, "ignore", false},
{fs.DirEntries{mockobject.Object("a")}, `/
a
`, nil, "", -1, "ignore", false},
{fs.DirEntries{
mockobject.Object("a"),
mockobject.Object("b/b"),
mockobject.Object("b/.ignore"),
}, `/
a
`, nil, "", -1, ".ignore", false},
{fs.DirEntries{
mockobject.Object("a"),
mockobject.Object("b/.ignore"),
mockobject.Object("b/b"),
}, `/
a
b/
b/
.ignore
b
`, nil, "", -1, ".ignore", true},
{fs.DirEntries{
mockobject.Object("a"),
mockobject.Object("b/b"),
mockobject.Object("b/c/d/e"),
mockobject.Object("b/c/ign"),
mockobject.Object("b/c/x"),
}, `/
a
b/
b/
b
`, nil, "", -1, "ign", false},
{fs.DirEntries{
mockobject.Object("a"),
mockobject.Object("b/b"),
mockobject.Object("b/c/d/e"),
mockobject.Object("b/c/ign"),
mockobject.Object("b/c/x"),
}, `/
a
b/
b/
b
c/
b/c/
d/
ign
x
b/c/d/
e
`, nil, "", -1, "ign", true},
} {
fi.Opt.ExcludeFile = []string{test.excludeFile}
r, err := walkRDirTree(context.Background(), nil, test.root, test.includeAll, test.level, makeListRCallback(test.entries, test.err))
assert.Equal(t, test.err, err, fmt.Sprintf("%+v", test))
assert.Equal(t, test.want, r.String(), fmt.Sprintf("%+v", test))
}
// Set to default value, to avoid side effects
fi.Opt.ExcludeFile = nil
}
func TestListType(t *testing.T) {
assert.Equal(t, true, ListObjects.Objects())
assert.Equal(t, false, ListObjects.Dirs())
assert.Equal(t, false, ListDirs.Objects())
assert.Equal(t, true, ListDirs.Dirs())
assert.Equal(t, true, ListAll.Objects())
assert.Equal(t, true, ListAll.Dirs())
var (
a = mockobject.Object("a")
b = mockobject.Object("b")
dir = mockdir.New("dir")
adir = mockobject.Object("dir/a")
dir2 = mockdir.New("dir2")
origEntries = fs.DirEntries{
a, b, dir, adir, dir2,
}
dirEntries = fs.DirEntries{
dir, dir2,
}
objEntries = fs.DirEntries{
a, b, adir,
}
)
copyOrigEntries := func() (out fs.DirEntries) {
out = make(fs.DirEntries, len(origEntries))
copy(out, origEntries)
return out
}
got := copyOrigEntries()
ListAll.Filter(&got)
assert.Equal(t, origEntries, got)
got = copyOrigEntries()
ListObjects.Filter(&got)
assert.Equal(t, objEntries, got)
got = copyOrigEntries()
ListDirs.Filter(&got)
assert.Equal(t, dirEntries, got)
}
func TestListR(t *testing.T) {
ctx := context.Background()
objects := fs.DirEntries{
mockobject.Object("a"),
mockobject.Object("b"),
mockdir.New("dir"),
mockobject.Object("dir/a"),
mockobject.Object("dir/b"),
mockobject.Object("dir/c"),
}
f, err := mockfs.NewFs(ctx, "mock", "/", nil)
require.NoError(t, err)
var got []string
clearCallback := func() {
got = nil
}
callback := func(entries fs.DirEntries) error {
for _, entry := range entries {
got = append(got, entry.Remote())
}
return nil
}
doListR := func(ctx context.Context, dir string, callback fs.ListRCallback) error {
var os fs.DirEntries
for _, o := range objects {
if dir == "" || strings.HasPrefix(o.Remote(), dir+"/") {
os = append(os, o)
}
}
return callback(os)
}
fi, err := filter.NewFilter(nil)
require.NoError(t, err)
require.NoError(t, fi.AddRule("+ b"))
require.NoError(t, fi.AddRule("- *"))
// Change the active filter
ctx = filter.ReplaceConfig(ctx, fi)
// Base case
clearCallback()
err = listR(ctx, f, "", true, ListAll, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"a", "b", "dir", "dir/a", "dir/b", "dir/c"}, got)
// Base case - with Objects
clearCallback()
err = listR(ctx, f, "", true, ListObjects, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"a", "b", "dir/a", "dir/b", "dir/c"}, got)
// Base case - with Dirs
clearCallback()
err = listR(ctx, f, "", true, ListDirs, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"dir"}, got)
// With filter
clearCallback()
err = listR(ctx, f, "", false, ListAll, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"b", "dir", "dir/b"}, got)
// With filter - with Objects
clearCallback()
err = listR(ctx, f, "", false, ListObjects, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"b", "dir/b"}, got)
// With filter - with Dir
clearCallback()
err = listR(ctx, f, "", false, ListDirs, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"dir"}, got)
// With filter and subdir
clearCallback()
err = listR(ctx, f, "dir", false, ListAll, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"dir/b"}, got)
// Now bucket-based
objects = fs.DirEntries{
mockobject.Object("a"),
mockobject.Object("b"),
mockobject.Object("dir/a"),
mockobject.Object("dir/b"),
mockobject.Object("dir/subdir/c"),
mockdir.New("dir/subdir"),
}
// Base case
clearCallback()
err = listR(ctx, f, "", true, ListAll, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"a", "b", "dir/a", "dir/b", "dir/subdir/c", "dir/subdir", "dir"}, got)
// With filter
clearCallback()
err = listR(ctx, f, "", false, ListAll, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"b", "dir/b", "dir/subdir", "dir"}, got)
// With filter and subdir
clearCallback()
err = listR(ctx, f, "dir", false, ListAll, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"dir/b", "dir/subdir"}, got)
// With filter and subdir - with Objects
clearCallback()
err = listR(ctx, f, "dir", false, ListObjects, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"dir/b"}, got)
// With filter and subdir - with Dirs
clearCallback()
err = listR(ctx, f, "dir", false, ListDirs, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"dir/subdir"}, got)
}
// TestDirMapAdd checks that dirMap.add records a directory together with
// every one of its parent directories (relative to the root), where the
// sent flag is set only for directories explicitly added with sent=true.
func TestDirMapAdd(t *testing.T) {
	type addOp struct {
		dir  string
		sent bool
	}
	testCases := []struct {
		root string
		in   []addOp
		want map[string]bool
	}{
		{
			root: "",
			in: []addOp{
				{"", true},
			},
			want: map[string]bool{},
		},
		{
			root: "",
			in: []addOp{
				{"a/b/c", true},
			},
			want: map[string]bool{
				"a/b/c": true,
				"a/b":   false,
				"a":     false,
			},
		},
		{
			root: "",
			in: []addOp{
				{"a/b/c", true},
				{"a/b", true},
			},
			want: map[string]bool{
				"a/b/c": true,
				"a/b":   true,
				"a":     false,
			},
		},
		{
			root: "",
			in: []addOp{
				{"a/b", true},
				{"a/b/c", false},
			},
			want: map[string]bool{
				"a/b/c": false,
				"a/b":   true,
				"a":     false,
			},
		},
		{
			root: "root",
			in: []addOp{
				{"root/a/b", true},
				{"root/a/b/c", false},
			},
			want: map[string]bool{
				"root/a/b/c": false,
				"root/a/b":   true,
				"root/a":     false,
			},
		},
	}
	for n, tc := range testCases {
		t.Run(fmt.Sprintf("%d", n), func(t *testing.T) {
			dirs := newDirMap(tc.root)
			for _, op := range tc.in {
				dirs.add(op.dir, op.sent)
			}
			assert.Equal(t, tc.want, dirs.m)
		})
	}
}
// TestDirMapAddEntries checks that addEntries records the parent directory
// of every entry, marking a directory as sent only when the entries
// contain that directory itself (here "dir" but not "dir2").
func TestDirMapAddEntries(t *testing.T) {
	dirs := newDirMap("")
	input := fs.DirEntries{
		mockobject.Object("dir/a"),
		mockobject.Object("dir/b"),
		mockdir.New("dir"),
		mockobject.Object("dir2/a"),
		mockobject.Object("dir2/b"),
	}
	require.NoError(t, dirs.addEntries(input))
	assert.Equal(t, map[string]bool{"dir": true, "dir2": false}, dirs.m)
}
// TestDirMapSendEntries checks that sendEntries delivers the directories
// which have not yet been sent ("dir" was added via mockdir so is already
// sent) to the callback in sorted order, propagates a callback error, and
// calls the callback with nothing for an empty dirMap.
func TestDirMapSendEntries(t *testing.T) {
	var seen []string
	reset := func() {
		seen = nil
	}
	record := func(entries fs.DirEntries) error {
		for _, entry := range entries {
			seen = append(seen, entry.Remote())
		}
		return nil
	}
	// general test
	dirs := newDirMap("")
	input := fs.DirEntries{
		mockobject.Object("dir/a"),
		mockobject.Object("dir/b"),
		mockdir.New("dir"),
		mockobject.Object("dir2/a"),
		mockobject.Object("dir2/b"),
		mockobject.Object("dir1/a"),
		mockobject.Object("dir3/b"),
	}
	require.NoError(t, dirs.addEntries(input))
	reset()
	err := dirs.sendEntries(record)
	require.NoError(t, err)
	assert.Equal(t, []string{
		"dir1",
		"dir2",
		"dir3",
	}, seen)
	// an error from the callback should be passed straight back
	failing := func(entries fs.DirEntries) error {
		return io.EOF
	}
	err = dirs.sendEntries(failing)
	require.Equal(t, io.EOF, err)
	// an empty dirMap sends no entries at all
	dirs = newDirMap("")
	reset()
	err = dirs.sendEntries(record)
	require.NoError(t, err)
	assert.Equal(t, []string(nil), seen)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/chunkedreader/chunkedreader_test.go | fs/chunkedreader/chunkedreader_test.go | package chunkedreader
import (
"context"
"fmt"
"io"
"math/rand"
"testing"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestMain drives the tests by handing control of the test binary to
// fstest.TestMain.
func TestMain(m *testing.M) {
	fstest.TestMain(m)
}
// TestChunkedReader checks that New returns the expected implementation —
// per the table, parallel only for 2 streams with a known size, and
// sequential otherwise — and that the returned reader closes cleanly.
func TestChunkedReader(t *testing.T) {
	ctx := context.Background()
	const MB = 1024 * 1024
	obj := mockobject.New("test.bin").WithContent([]byte("hello"), mockobject.SeekModeRegular)
	testCases := []struct {
		initialChunkSize int64
		maxChunkSize     int64
		streams          int
		crType           any
		unknownSize      bool
	}{
		{-1, MB, 0, new(sequential), false},
		{MB, 10 * MB, 0, new(sequential), false},
		{MB, 10 * MB, 1, new(sequential), false},
		{MB, 10 * MB, 1, new(sequential), true},
		{MB, 10 * MB, 2, new(parallel), false},
		{MB, 10 * MB, 2, new(sequential), true},
	}
	for _, tc := range testCases {
		desc := fmt.Sprintf("%+v", tc)
		obj.SetUnknownSize(tc.unknownSize)
		rd := New(ctx, obj, tc.initialChunkSize, tc.maxChunkSize, tc.streams)
		assert.IsType(t, tc.crType, rd, desc)
		require.NoError(t, rd.Close(), desc)
	}
}
// testRead returns a test function that reads content back through a
// chunked reader for every combination of (initial, max) chunk size,
// seek offset and read limit, checking the returned data, the byte
// count and the EOF behaviour against the source slice.
func testRead(content []byte, mode mockobject.SeekMode, streams int) func(*testing.T) {
	return func(t *testing.T) {
		ctx := context.Background()
		// chunk sizes used for both the initial and the maximum chunk size
		chunkSizes := []int64{-1, 0, 1, 15, 16, 17, 1023, 1024, 1025, 2000}
		// offsets straddle power-of-two boundaries to exercise chunk edges
		offsets := []int64{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33,
			63, 64, 65, 511, 512, 513, 1023, 1024, 1025}
		limits := []int64{-1, 0, 1, 31, 32, 33, 1023, 1024, 1025}
		cl := int64(len(content)) // content length
		bl := 32                  // read buffer length
		buf := make([]byte, bl)
		o := mockobject.New("test.bin").WithContent(content, mode)
		for ics, cs := range chunkSizes {
			for icsMax, csMax := range chunkSizes {
				// skip tests where chunkSize is much bigger than maxChunkSize
				if ics > icsMax+1 {
					continue
				}
				t.Run(fmt.Sprintf("Chunksize_%d_%d", cs, csMax), func(t *testing.T) {
					cr := New(ctx, o, cs, csMax, streams)
					for _, offset := range offsets {
						for _, limit := range limits {
							what := fmt.Sprintf("offset %d, limit %d", offset, limit)
							p, err := cr.RangeSeek(ctx, offset, io.SeekStart, limit)
							if offset >= cl {
								// seeking at or beyond the end must fail; offsets
								// are ascending so all later ones would fail too
								require.Error(t, err, what)
								return
							}
							require.NoError(t, err, what)
							require.Equal(t, offset, p, what)
							n, err := cr.Read(buf)
							// expected read length: up to bl bytes, truncated at EOF
							end := min(offset+int64(bl), cl)
							l := int(end - offset)
							if l < bl {
								// a short read must be reported together with io.EOF
								require.Equal(t, io.EOF, err, what)
							} else {
								require.NoError(t, err, what)
							}
							require.Equal(t, l, n, what)
							require.Equal(t, content[offset:end], buf[:n], what)
						}
					}
				})
			}
		}
	}
}
// testErrorAfterClose checks that once a chunked reader has been closed,
// every subsequent operation — Close, Read, Seek and RangeSeek — returns
// an error.
func testErrorAfterClose(t *testing.T, streams int) {
	ctx := context.Background()
	data := makeContent(t, 1024)
	obj := mockobject.New("test.bin").WithContent(data, mockobject.SeekModeNone)
	// Closing a second time must fail
	rd := New(ctx, obj, 0, 0, streams)
	require.NoError(t, rd.Close())
	require.Error(t, rd.Close())
	// Read after close must fail
	rd = New(ctx, obj, 0, 0, streams)
	require.NoError(t, rd.Close())
	var one [1]byte
	_, err := rd.Read(one[:])
	require.Error(t, err)
	// Seek after close must fail
	rd = New(ctx, obj, 0, 0, streams)
	require.NoError(t, rd.Close())
	_, err = rd.Seek(1, io.SeekCurrent)
	require.Error(t, err)
	// RangeSeek after close must fail
	rd = New(ctx, obj, 0, 0, streams)
	require.NoError(t, rd.Close())
	_, err = rd.RangeSeek(ctx, 1, io.SeekCurrent, 0)
	require.Error(t, err)
}
func makeContent(t *testing.T, size int) []byte {
content := make([]byte, size)
r := rand.New(rand.NewSource(42))
_, err := io.ReadFull(r, content)
assert.NoError(t, err)
return content
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.